# model_config.yaml
experiment:
  experiment_name: "LOGO_CV"
  exp_dir: "CV/allunmappedglaciers_notsurging/test1"

wandb:
  project: spitsbergen_nosurges_allunmapped
  mode: online
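
# The wandb block above holds the usual Weights & Biases settings. A minimal
# sketch of how these keys are typically consumed (an assumption; the actual
# logger setup is not shown in this file):
#
#   import wandb
#   wandb.init(project="spitsbergen_nosurges_allunmapped", mode="online")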

model:
  model: PL_PINN
  output_dim: 3
  input_dim: 10
  hidden_dim: 256 # 256 was good for only 1482
  num_layers: 8
  gaussian_mapping_dim: 32 # number of Fourier features; 256 may be too many (wave artifacts appear in the prediction); for only 1482, 64 was good
  gaussian_scale: 10.
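
# gaussian_mapping_dim and gaussian_scale configure a random (Gaussian) Fourier
# feature encoding of the network inputs. A minimal sketch of the standard
# construction (the exact convention used by PL_PINN is an assumption here):
#
#   import math
#   import torch
#   B = torch.randn(input_dim, gaussian_mapping_dim) * gaussian_scale   # random frequencies, fixed at init
#   def fourier_features(x):                                            # x: (batch, input_dim)
#       proj = 2 * math.pi * (x @ B)                                    # (batch, gaussian_mapping_dim)
#       return torch.cat([torch.sin(proj), torch.cos(proj)], dim=-1)    # (batch, 2 * gaussian_mapping_dim)
#
# Larger gaussian_scale and gaussian_mapping_dim let the MLP fit higher-frequency
# detail, which is also why overly large values can show up as wave artifacts in
# the prediction (see the comment above).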

optimizer:
  lr: 0.0001

loss_fn:
  burn_in_epochs: 10 # number of initial epochs trained without the PINN loss
  vel_lowerbound: 0.7
  w_VelMag: 0.1
  w_depthAvg: 0.1
  w_negative_thickness: 100
  w_pinnloss: 10
  w_smoothness: 1000
  w_thicknessloss: 100
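
# The w_* keys weight the individual loss terms, and burn_in_epochs delays the
# PINN residual term. A hedged sketch of how such a weighted objective is usually
# assembled (term names mirror the keys above; the exact formulation inside
# PL_PINN is not shown in this file):
#
#   def total_loss(terms, epoch, cfg):
#       loss = (cfg["w_thicknessloss"] * terms["thickness"]
#               + cfg["w_VelMag"] * terms["vel_mag"]
#               + cfg["w_depthAvg"] * terms["depth_avg"]
#               + cfg["w_negative_thickness"] * terms["negative_thickness"]
#               + cfg["w_smoothness"] * terms["smoothness"])
#       if epoch >= cfg["burn_in_epochs"]:      # add the PINN loss only after burn-in
#           loss = loss + cfg["w_pinnloss"] * terms["pinn"]
#       return loss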

ds:
  dataset_type: grid
  data_dir_unlabeled: # path(s) to the unlabelled datapoints
    - "data/spitsbergen_allunmapped_griddeddata_nosurges_dhdt2014smoothed_complete.csv"
  data_dir_labeled: # path(s) to the labelled datapoints
    - "data/spitsbergen_measurements_aggregated_nosurges_dhdt2014smoothed_complete_moremeasurements.csv"
  epsg_crs: 25833
  unlabeled_sample_size: 1.0 # fraction of the unlabelled dataframe to sample
  labeled_sample_size: 1. # fraction of the labelled dataframe; values > 1 upsample with replacement
  input_features:
    - POINT_LON
    - POINT_LAT
    - slope
    - millan_vx_smoothed
    - millan_vy_smoothed
    - beta_v
    - beta_vx
    - beta_vy
    - topo
    - area
    - dis_from_border
  target:
    - THICKNESS
    - apparent_mb2014
  min_years: 2000 # measurements must be newer than this year
  train_size: 0.6
  test_size: 0.4
  num_glaciers: 0 # train and validate only on a subset of glaciers; set to 0 to use all glaciers
  glacier_ids: [] # empty list: all glaciers in the database are included in the dataset

dataloader: # arguments passed to the dataloader
  batch_size: 8192
  num_workers: 4
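
# A hedged sketch of how the dataset and dataloader keys above are commonly
# wired together (the column handling inside the actual data module is an
# assumption, not shown in this file):
#
#   import pandas as pd
#   from torch.utils.data import DataLoader
#
#   labeled = pd.concat(pd.read_csv(p) for p in cfg["ds"]["data_dir_labeled"])
#   frac = cfg["ds"]["labeled_sample_size"]
#   labeled = labeled.sample(frac=frac, replace=frac > 1)   # frac > 1 upsamples with replacement
#   X = labeled[cfg["ds"]["input_features"]].to_numpy()
#   y = labeled[cfg["ds"]["target"]].to_numpy()
#   loader = DataLoader(list(zip(X, y)),
#                       batch_size=cfg["dataloader"]["batch_size"],
#                       num_workers=cfg["dataloader"]["num_workers"],
#                       shuffle=True)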

pl: # all flags passed to the PyTorch Lightning Trainer
  devices: [7] # which GPU(s) to use
  accelerator: gpu
  max_epochs: 100
  max_steps: -1
  check_val_every_n_epoch: 10
  deterministic: True
  detect_anomaly: False
  fast_dev_run: False
  log_every_n_steps: 10
  overfit_batches: 0 # validates on the validation dataset!
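
# A minimal sketch (an assumption, not part of the repository) of how this file
# could be loaded and how the pl block maps onto the Lightning Trainer:
#
#   import yaml
#   import pytorch_lightning as pl
#
#   with open("model_config.yaml") as f:
#       cfg = yaml.safe_load(f)
#   trainer = pl.Trainer(**cfg["pl"])   # the pl block holds Trainer keyword arguments
#   # trainer.fit(model, datamodule)    # model/datamodule built from the other blocks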