-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain_mmpd.py
More file actions
129 lines (107 loc) · 6.86 KB
/
main_mmpd.py
File metadata and controls
129 lines (107 loc) · 6.86 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
import argparse
import os
import torch
import random
import numpy as np
from exp.exp_forecast import Exp_Forecast
from utils.tools import string_split, str2bool
import time
# ---------------------------------------------------------------------------
# Command-line interface for the MMPD forecasting experiment.
# The parser is consumed immediately after this block by `parser.parse_args()`.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='MMPD Loss for Diverse Time Series Forecasting')

# --- dataset selection / file locations ---
# NOTE: --data is required, so a `default=` on it would be dead code (removed).
parser.add_argument('--data', type=str, required=True, help='data')
parser.add_argument('--root_path', type=str, default='./datasets/', help='root path of the data file')
parser.add_argument('--data_path', type=str, default='ETTh1.csv', help='data file')
parser.add_argument('--data_split', type=str, default='0.7,0.1,0.2', help='train/val/test split, can be ratio or number')
parser.add_argument('--output_root', type=str, default='./out/', help='location to store output files')

# --- parameters for backbone ---
parser.add_argument('--backbone', type=str, default='Decoder', help='backbone model')
parser.add_argument('--in_len', type=int, default=336, help='input MTS length (T)')
# BUGFIX: the help text used '(\tau)' in a non-raw string, which rendered as a
# literal TAB followed by "au"; a raw string keeps the intended LaTeX-style tau.
parser.add_argument('--out_len', type=int, default=96, help=r'output MTS length (\tau)')
parser.add_argument('--patch_size', type=int, default=12, help='segment length (L_seg)')
parser.add_argument('--data_dim', type=int, default=7, help='Number of dimensions of the MTS data (D)')
parser.add_argument('--d_model', type=int, default=256, help='dimension of hidden states (d_model)')
parser.add_argument('--d_ff', type=int, default=512, help='dimension of MLP in transformer')
parser.add_argument('--n_heads', type=int, default=4, help='num of heads')
parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers (N)')
parser.add_argument('--d_layers', type=int, default=2, help='num of decoder layers (M)')
parser.add_argument('--dropout', type=float, default=0.2, help='dropout')

# --- parameters for loss function ---
parser.add_argument('--loss_func', type=str, default='MMPD', help='loss function')
parser.add_argument('--point_weight', type=float, default=0.01, help='weight for point loss')
parser.add_argument('--weighted', type=str2bool, default=True, help='weighted loss')
parser.add_argument('--d_diffusion', type=int, default=256, help='dimension for MLP in diffusion projector')
parser.add_argument('--diffusion_layers', type=int, default=1, help='num of diffusion layers')
parser.add_argument('--max_diffusion_steps', type=int, default=1000, help='max denoising steps')
parser.add_argument('--beta_schedule', type=str, default='linear', help='beta schedule for diffusion process')
parser.add_argument('--radius', type=int, default=3, help='radius for adjacent patches')

# --- parameters for training ---
parser.add_argument('--training', type=str2bool, default=True, help='training process')
parser.add_argument('--num_workers', type=int, default=4, help='data loader num workers')
parser.add_argument('--batch_size', type=int, default=32, help='batch size of train input data')
parser.add_argument('--train_epochs', type=int, default=20, help='train epochs')
parser.add_argument('--patience', type=int, default=5, help='early stopping patience')
parser.add_argument('--learning_rate', type=float, default=1e-4, help='optimizer initial learning rate')
parser.add_argument('--lradj', type=str, default='cosine', help='adjust learning rate')

# --- parameters for testing ---
parser.add_argument('--test_batch_num', type=int, default=-1, help='test batch number')

# --- diffusion sampling at inference time ---
parser.add_argument('--testing', type=str2bool, default=True, help='testing process')
parser.add_argument('--prob_pred', type=str2bool, default=True, help='sample from diffusion')
parser.add_argument('--sample_num', type=int, default=100, help='sample number to compute expectation')
parser.add_argument('--num_sampling_steps', type=str, default='20', help='number of sampling steps for diffusion process')
parser.add_argument('--temperature', type=float, default=1.0, help='temperature for sampling')

# --- diffusion Gaussian mixture ---
parser.add_argument('--gmm_components', type=int, default=10, help='maximum number of components in GMM')
parser.add_argument('--prior_pi_decay', type=float, default=0.5, help='prior for weight decay, in the range of [0, 1], smaller value activates less components')
parser.add_argument('--prior_precision_shape', type=float, default=1e2, help='prior for variance')
parser.add_argument('--gmm_iterations', type=int, default=10, help='number of EM iterations for GMM at each diffusion step')

# --- hardware ---
parser.add_argument('--use_gpu', type=str2bool, default=True, help='use gpu')
parser.add_argument('--gpu', type=int, default=0, help='gpu')
parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)
parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multiple gpus')
# Materialize the CLI options, then resolve which device(s) to run on.
args = parser.parse_args()

# GPU execution needs both the user's consent and an actual CUDA runtime.
args.use_gpu = bool(torch.cuda.is_available() and args.use_gpu)

if args.use_gpu and args.use_multi_gpu:
    # Normalize "0, 1,2" -> [0, 1, 2]; the first id is the primary device.
    args.devices = args.devices.replace(' ', '')
    args.device_ids = list(map(int, args.devices.split(',')))
    args.gpu = args.device_ids[0]
    print(args.gpu)
# Per-dataset overrides: source CSV, number of variables (D), and the
# train/val/test split. Known dataset names take precedence over the CLI
# values; anything else falls back to the user-supplied --data_path,
# --data_dim and --data_split (the split string is parsed into numbers).
data_parser = {
    'ETTh1':   {'data': 'ETTh1.csv',        'data_dim': 7,   'split': [12*30*24, 4*30*24, 4*30*24]},
    'ETTm1':   {'data': 'ETTm1.csv',        'data_dim': 7,   'split': [4*12*30*24, 4*4*30*24, 4*4*30*24]},
    'ETTh2':   {'data': 'ETTh2.csv',        'data_dim': 7,   'split': [12*30*24, 4*30*24, 4*30*24]},
    'ETTm2':   {'data': 'ETTm2.csv',        'data_dim': 7,   'split': [4*12*30*24, 4*4*30*24, 4*4*30*24]},
    'weather': {'data': 'weather.csv',      'data_dim': 21,  'split': [0.7, 0.1, 0.2]},
    'ECL':     {'data': 'electricity.csv',  'data_dim': 321, 'split': [0.7, 0.1, 0.2]},
    'Traffic': {'data': 'traffic.csv',      'data_dim': 862, 'split': [0.7, 0.1, 0.2]},
    'dynamic': {'data': 'dynamic_500K.csv', 'data_dim': 17,  'split': [0.7, 0.1, 0.2]},
}

if args.data in data_parser:
    info = data_parser[args.data]
    args.data_path = info['data']
    args.data_dim = info['data_dim']
    args.data_split = info['split']
else:
    args.data_split = string_split(args.data_split)

print('Args in experiment:')
print(args)
# Pin every RNG (Python, NumPy, Torch CPU and all CUDA devices) so that runs
# are reproducible, and force deterministic cuDNN kernel selection.
seed = 2024
for seed_fn in (torch.manual_seed, random.seed, np.random.seed,
                torch.cuda.manual_seed, torch.cuda.manual_seed_all):
    seed_fn(seed)
torch.backends.cudnn.deterministic = True
# Run the forecasting experiment: optional training phase, then optional test.
Exp = Exp_Forecast

# Human-readable tag encoding the hyper-parameters of this run; it is passed
# to train/test and (per convention) names the checkpoints and output files.
setting = (
    'data{}_il{}_ol{}_backbone{}_loss{}_weighted{}_patch{}_pointW{}_diffH{}'
    '_diffLayer{}_radius{}_diffStep{}_beta{}'
).format(
    args.data, args.in_len, args.out_len, args.backbone, args.loss_func,
    args.weighted, args.patch_size, args.point_weight, args.d_diffusion,
    args.diffusion_layers, args.radius, args.max_diffusion_steps,
    args.beta_schedule,
)

exp = Exp(args)  # set experiments

if args.training:
    print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))
    exp.train(setting)

if args.testing:
    print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))
    exp.test(setting, test_batch_num=args.test_batch_num)