# MalGraph/configs/default.yaml
---
# Default configuration for MalGraph training / evaluation.

Data:
  preprocess_root: "/home/king/python/data/DatasetJSON_remake"
  train_vocab_file: "/home/king/python/data/fun_name_sort.jsonl"
  max_vocab_size: 10000  # modify according to the result of 1BuildExternalVocab.py

Training:
  cuda: false  # enable GPU training if cuda is available
  dist_backend: "nccl"  # if using torch.distribution, the backend to be used
  dist_port: "1234"
  # NOTE(review): key is misspelled ("epochs") but kept as-is — consumers read this exact name.
  max_epoches: 10
  train_batch_size: 16
  test_batch_size: 32
  seed: 19920208
  # Sentinel string; presumably the literal 'None' disables test-only mode — confirm against the loader.
  only_test_path: 'None'

Model:
  ablation_models: "Full"  # "Full"
  gnn_type: "GraphSAGE"  # "GraphSAGE" / "GCN"
  pool_type: "global_max_pool"  # "global_max_pool" / "global_mean_pool"
  acfg_node_init_dims: 11
  cfg_filters: "200-200"
  fcg_filters: "200-200"
  number_classes: 1
  # NOTE(review): misspelling of "dropout_rate" kept — consumers read this exact key.
  drapout_rate: 0.2

Optimizer:
  name: "AdamW"  # Adam / AdamW
  # Written as 1.0e-3 (not 1e-3) so PyYAML's 1.1 resolver parses a float rather than a string.
  learning_rate: 1.0e-3  # initial learning rate
  weight_decay: 1.0e-5  # initial weight decay
  learning_anneal: 1.1  # Annealing applied to learning rate after each epoch