Showing 11 changed files with 435 additions and 0 deletions.
data/load_data.py (new file, +9 lines)
from data.prepare_data import prepare_data

train_file_path = './train_data.xlsx'
validation_file_path = './validation_data.xlsx'
test_file_path = './test_data.xlsx'


def load_data(time_step, batch_size):
    return prepare_data(train_file_path, test_file_path, validation_file_path, time_step, batch_size)
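For orientation, a hedged usage sketch (not part of the commit): load_data hands back the raw test targets plus three Keras TimeseriesGenerator objects, so a first batch can be inspected as below. The shapes assume the training script's defaults (time_step=50, batch_size=256) and Excel files laid out the way prepare_data expects.

# Sketch only; assumes the three .xlsx files exist in the working
# directory with (V, I, T, ..., SOC) columns as prepare_data expects.
from data.load_data import load_data

test_SOC, train_gen, test_gen, val_gen = load_data(time_step=50, batch_size=256)
x_batch, y_batch = train_gen[0]   # first training batch
print(x_batch.shape)              # expected (256, 50, 3): batch, window, features
print(y_batch.shape)              # expected (256,): one SOC target per window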
data/prepare_data.py (new file, +67 lines)
import pandas as pd
from keras.preprocessing.sequence import TimeseriesGenerator
import numpy as np


def prepare_data(train_file, test_file, val_file, time_step, batch_size):
    # Read the three splits from Excel.
    train_Data = pd.read_excel(train_file)
    test_Data = pd.read_excel(test_file)
    val_Data = pd.read_excel(val_file)

    # Drop columns that are not used as model inputs.
    train_Data = train_Data.drop(['WC', 'init_dis'], axis=1)
    test_Data = test_Data.drop(['WC', 'init_dis'], axis=1)
    val_Data = val_Data.drop(['WC', 'init_dis'], axis=1)

    print('Read in the train dataset:', train_Data.shape)
    print('Read in the test dataset:', test_Data.shape)
    print('Read in the val dataset:', val_Data.shape)

    train_Data_values = train_Data.values
    test_Data_values = test_Data.values
    val_Data_values = val_Data.values

    # First three columns are the inputs (voltage, current, temperature);
    # the last column is the SOC target.
    train_Data_VIT, train_Data_SOC = train_Data_values[:, :3], train_Data_values[:, -1]
    test_Data_VIT, test_Data_SOC = test_Data_values[:, :3], test_Data_values[:, -1]
    val_Data_VIT, val_Data_SOC = val_Data_values[:, :3], val_Data_values[:, -1]

    print('test_VIT_shape:', test_Data_VIT.shape)
    print('test_SOC_shape:', test_Data_SOC.shape)

    # Optional on-disk copies of the train/test arrays.
    np.save('train_Data_VIT.npy', train_Data_VIT)
    pd.DataFrame(train_Data_VIT).to_csv('train_Data_VIT.csv')

    np.save('train_Data_SOC.npy', train_Data_SOC)
    pd.DataFrame(train_Data_SOC).to_csv('train_Data_SOC.csv')

    np.save('test_Data_VIT.npy', test_Data_VIT)
    pd.DataFrame(test_Data_VIT).to_csv('test_Data_VIT.csv')

    np.save('test_Data_SOC.npy', test_Data_SOC)
    pd.DataFrame(test_Data_SOC).to_csv('test_Data_SOC.csv')

    # Sliding-window generators: each sample is `time_step` consecutive rows
    # of (V, I, T) paired with the SOC at the following step.
    train_generator = TimeseriesGenerator(train_Data_VIT, train_Data_SOC,
                                          length=time_step,
                                          batch_size=batch_size,
                                          sampling_rate=1,
                                          stride=1)
    # batch_size=1 at test time keeps predictions in one-to-one step order.
    test_generator = TimeseriesGenerator(test_Data_VIT, test_Data_SOC,
                                         length=time_step,
                                         batch_size=1,
                                         sampling_rate=1,
                                         stride=1)
    val_generator = TimeseriesGenerator(val_Data_VIT, val_Data_SOC,
                                        length=time_step,
                                        batch_size=batch_size,
                                        sampling_rate=1,
                                        stride=1)

    return test_Data_SOC, train_generator, test_generator, val_generator
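One alignment detail is worth flagging here rather than fixing in the file: TimeseriesGenerator only emits targets from index time_step onward, so model.predict(test_generator) yields len(test_Data_SOC) - time_step values while the returned test_Data_SOC is the full column. A hedged sketch of aligning the two before comparing:

# Sketch, not part of the commit; `predict_SOC` stands for the output of
# model.predict(test_generator) in the training script.
import numpy as np

time_step = 50
aligned_truth = test_Data_SOC[time_step:]   # the first window has no target
assert len(aligned_truth) == len(predict_SOC)
print(np.mean(np.abs(aligned_truth - predict_SOC)))  # quick MAE sanity check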
Training entry script (file name not shown in this view, +101 lines)
import argparse
import os
import random

import keras
import keras.backend as K
import numpy as np
import pandas as pd
import tensorflow as tf
import yaml
from easydict import EasyDict
from keras.layers import Input
from keras.models import Model

from data.load_data import load_data
from model.ttsnet import ttsnet


def set_random_seed(seed):
    """Set random seeds for reproducibility."""
    np.random.seed(seed)
    tf.random.set_seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    os.environ['TF_DETERMINISTIC_OPS'] = '1'


def soc_model(net, time_step, input_size):
    inputs = Input(shape=(time_step, input_size))
    output = net(inputs)
    return Model(inputs=[inputs], outputs=output)


def run(cfg):
    # data
    test_Data_SOC, train_generator, test_generator, val_generator \
        = load_data(time_step=cfg.time_step, batch_size=cfg.batch_size)

    # model
    our_net = ttsnet(time_step=cfg.time_step, hidden_layer=cfg.hidden_layer)
    model = soc_model(net=our_net, time_step=cfg.time_step, input_size=cfg.input_size)
    model.summary()

    # optimizer (adam_v2 module path matches the pinned Keras version)
    optimizer = keras.optimizers.adam_v2.Adam(learning_rate=cfg.lr)
    model.compile(loss='mse', optimizer=optimizer)
    learning_rate_reduction = keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                                patience=cfg.patience, factor=cfg.factor)

    # train and validate
    history = model.fit(train_generator,
                        epochs=cfg.epochs,  # `epochs` must come from the YAML config (no CLI flag)
                        verbose=1,
                        validation_data=val_generator,
                        callbacks=[learning_rate_reduction])

    # make sure the output directory exists before writing results
    os.makedirs('./result', exist_ok=True)

    # save loss curves into loss.csv
    loss = pd.DataFrame(history.history)
    loss.to_csv('./result/loss.csv')

    # test and save predictions into predictSOC.csv
    predict_SOC = model.predict(test_generator)
    predict_SOC = np.reshape(predict_SOC, (predict_SOC.size,))
    pd.DataFrame(predict_SOC).to_csv('./result/predictSOC.csv')

    # save ground-truth SOC into trueSOC.csv
    pd.DataFrame(test_Data_SOC).to_csv('./result/trueSOC.csv')


if __name__ == '__main__':
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    set_random_seed(seed=666)
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.7
    K.set_session(tf.compat.v1.Session(config=config))

    def load_yaml(file_name):
        with open(file_name, 'r') as f:
            try:
                config = yaml.load(f, Loader=yaml.FullLoader)
            except AttributeError:  # older PyYAML without FullLoader
                config = yaml.load(f)
        return config

    def parse_config():
        parser = argparse.ArgumentParser()
        # NOTE: the original diff reads args.cfg below but never defines the
        # flag; the default path here is a guess.
        parser.add_argument('--cfg', type=str, default='./config.yaml', help='path to YAML config file')
        parser.add_argument('--time_step', type=int, default=50, help='input time step')
        parser.add_argument('--input_size', type=int, default=3, help='model input size')
        parser.add_argument('--batch_size', type=int, default=256, help='input batch size')
        parser.add_argument('--lr', type=float, default=1e-4, help='learning rate')
        parser.add_argument('--patience', type=int, default=10, help='ReduceLROnPlateau patience')
        parser.add_argument('--factor', type=float, default=0.1, help='ReduceLROnPlateau factor')
        parser.add_argument('--hidden_layer', type=int, default=18, help='channels of model hidden layers')

        args = parser.parse_args()
        config = load_yaml(args.cfg)
        config.update(vars(args))  # CLI values override the YAML configuration
        return EasyDict(config)

    cfg = parse_config()
    run(cfg)
    K.clear_session()
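The script reads epochs (and anything else) from a YAML file but defines no --epochs flag, so a config file is mandatory. A hypothetical launcher, hedged: the script name main.py and the value epochs: 100 are illustrative, not taken from the diff.

# Writes a minimal config and launches training; adjust names/values to taste.
import subprocess

with open('config.yaml', 'w') as f:
    f.write('epochs: 100\n')  # the only key run() strictly needs from YAML

subprocess.run(['python', 'main.py', '--cfg', './config.yaml',
                '--time_step', '50', '--batch_size', '256'], check=True)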
Capacity MAE evaluation script (file name not shown, +14 lines)
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
from sklearn import metrics

# Ground-truth capacity and our estimated capacity.
CALCE_TRUE = pd.read_excel('../data/REALCap.xlsx')
Our_result = pd.read_excel('../result/OurCap.xlsx')

# MAE (sklearn convention: y_true first, y_pred second)
print('MAE')
print(metrics.mean_absolute_error(CALCE_TRUE, Our_result))
Capacity R2 evaluation script (file name not shown, +13 lines)
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
from sklearn.metrics import r2_score

CALCE_TRUE = pd.read_excel('../data/REALCap.xlsx')
Our_result = pd.read_excel('../result/OurCap.xlsx')

# R2: coefficient of determination, R2 = 1 - MSE / Var(y_true)
print('R2')
print(r2_score(CALCE_TRUE, Our_result))
Capacity RMSE evaluation script (file name not shown, +13 lines)
#!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
from sklearn import metrics

CALCE_TRUE = pd.read_excel('../data/REALCap.xlsx')
Our_result = pd.read_excel('../result/OurCap.xlsx')

# RMSE (y_true first, y_pred second; the value is symmetric in the arguments)
print('RMSE')
print(np.sqrt(metrics.mean_squared_error(CALCE_TRUE, Our_result)))
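Since all three evaluation scripts load the same pair of spreadsheets, here is a consolidated sketch (same assumed paths) that prints the three metrics in one pass:

# Combined version of the three scripts above; paths as in the originals.
import numpy as np
import pandas as pd
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score

y_true = pd.read_excel('../data/REALCap.xlsx')   # ground-truth capacity
y_pred = pd.read_excel('../result/OurCap.xlsx')  # model estimate

print('MAE :', mean_absolute_error(y_true, y_pred))
print('RMSE:', np.sqrt(mean_squared_error(y_true, y_pred)))
print('R2  :', r2_score(y_true, y_pred))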
model/ttsnet.py (new file, +166 lines)
import keras
import numpy as np
import tensorflow as tf
from keras.models import Model


def get_angles(pos, k, d: int):
    # Sinusoidal position-encoding angles: pos / 10000^(2i/d), where i = k // 2
    # pairs each sine dimension with its cosine partner.
    i = k // 2
    angles = pos / (10000 ** (2 * i / d))
    return angles


def positional_encoding(positions: int, d: int):
    angle_rads = get_angles(np.arange(positions)[:, np.newaxis],
                            np.arange(d)[np.newaxis, :],
                            d)
    # Even dimensions get sine, odd dimensions get cosine.
    angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
    angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])

    pos_encoding = angle_rads[np.newaxis, :, :].reshape(1, positions, d)

    return tf.cast(pos_encoding, dtype=tf.float32)
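# (Illustrative note, not part of the original file.) The encoding has one
# d-dimensional vector per position, with a leading broadcast axis for the
# batch, e.g.:
#   pe = positional_encoding(positions=50, d=18)
#   pe.shape  # TensorShape([1, 50, 18])
# Note that ttsnet below instantiates `pos_encoding` but never adds it to
# its feature streams in call().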

class attention(Model):
    """SK-style selective-kernel attention over three dilated-conv branches."""

    def __init__(self, filters=96, kernel_size=[1, 3, 5, 7], reduction=24):
        super(attention, self).__init__()

        # Three parallel branches with growing kernel size and dilation rate
        # (only the first three entries of kernel_size are used).
        self.dconv_1 = keras.layers.Conv1D(filters=filters, kernel_size=kernel_size[0], strides=1, padding='same', dilation_rate=1)
        self.dconv_2 = keras.layers.Conv1D(filters=filters, kernel_size=kernel_size[1], strides=1, padding='same', dilation_rate=3)
        self.dconv_3 = keras.layers.Conv1D(filters=filters, kernel_size=kernel_size[2], strides=1, padding='same', dilation_rate=9)

        self.fc_1 = keras.layers.Dense(filters)
        self.fc_2 = keras.layers.Dense(filters)
        self.fc_3 = keras.layers.Dense(filters)
        self.fc_4 = keras.layers.Dense(filters)  # defined but unused in call()
        self.avgpool = keras.layers.AveragePooling1D(1)
        self.fc = keras.layers.Dense(reduction)
        self.softmax = keras.layers.Softmax()

    def call(self, inputs):
        # Split: run the input through each branch.
        x_1 = self.dconv_1(inputs)
        x_2 = self.dconv_2(inputs)
        x_3 = self.dconv_3(inputs)

        # Fuse: sum the branches, pool, and squeeze to `reduction` channels.
        U = x_1 + x_2 + x_3
        S = self.avgpool(U)
        Z = self.fc(S)

        # Select: per-branch soft attention weights from the fused descriptor.
        weight_1 = self.softmax(self.fc_1(Z))
        weight_2 = self.softmax(self.fc_2(Z))
        weight_3 = self.softmax(self.fc_3(Z))

        # Weighted recombination of the three branches.
        V = weight_1 * x_1 + weight_2 * x_2 + weight_3 * x_3

        return V


class transformer(Model):
    """Transformer-style block: multi-head self-attention plus residual MLP."""

    def __init__(self):
        super(transformer, self).__init__()

        self.mha = keras.layers.MultiHeadAttention(num_heads=2,
                                                   key_dim=32,
                                                   dropout=0.2)
        self.Dense_1 = keras.layers.Dense(1)
        self.act = keras.layers.ReLU()  # defined but unused in call()
        self.Norm_1 = keras.layers.BatchNormalization(momentum=0.95)
        self.Norm_2 = keras.layers.BatchNormalization(momentum=0.95)

    def call(self, inputs):
        # Self-attention with a residual connection, then normalization.
        x = self.mha(query=inputs, value=inputs) + inputs
        x = self.Norm_1(x)
        # Dense(1) yields a single channel that broadcasts across the
        # residual addition, so the block is shape-preserving.
        x = self.Dense_1(x) + x
        x = self.Norm_2(x)

        return x


class ttsnet(Model):
    def __init__(self, time_step, hidden_layer):
        super(ttsnet, self).__init__()

        self.MLP_1 = keras.layers.Dense(time_step)
        self.softmax = keras.layers.Softmax()
        self.MLP_2 = keras.layers.Dense(80)  # defined but unused in call()
        self.dropout = keras.layers.Dropout(rate=0.1)
        self.MLP_3 = keras.layers.Dense(1)
        self.act = keras.layers.ReLU()

        # SK attention over the fused stream; the sized variants below are
        # defined but not wired into call().
        self.SKAttention = attention()
        self.Attention_1 = attention(filters=64, reduction=16)
        self.Attention_2 = attention(filters=96, reduction=24)
        self.Attention_3 = attention(filters=128, reduction=32)

        # Per-channel transformer blocks; only the *_2 instances are used.
        self.transformer_V_1 = transformer()
        self.transformer_V_2 = transformer()

        self.transformer_I_1 = transformer()
        self.transformer_I_2 = transformer()

        self.transformer_T_1 = transformer()
        self.transformer_T_2 = transformer()

        self.transformer_VIT = transformer()

        self.CNN_V = keras.layers.Conv1D(filters=hidden_layer, kernel_size=1, strides=1, padding='same')
        self.CNN_I = keras.layers.Conv1D(filters=hidden_layer, kernel_size=1, strides=1, padding='same')
        self.CNN_T = keras.layers.Conv1D(filters=hidden_layer, kernel_size=1, strides=1, padding='same')

        self.LSTM_V = keras.layers.LSTM(hidden_layer, return_sequences=True)
        self.LSTM_I = keras.layers.LSTM(hidden_layer, return_sequences=True)
        self.LSTM_T = keras.layers.LSTM(hidden_layer, return_sequences=True)

        self.pos_encoding = positional_encoding(time_step, hidden_layer)  # unused in call()

    def call(self, inputs, **kwargs):
        # Voltage channel: Conv1D -> ReLU -> LSTM -> transpose -> transformer.
        x_V = inputs[:, :, 0][:, :, None]
        x_V = self.CNN_V(x_V)
        x_V = self.act(x_V)
        x_V = self.LSTM_V(x_V)
        x_V = keras.layers.Permute((2, 1))(x_V)
        x_V = self.transformer_V_2(x_V)

        # Current channel.
        x_I = inputs[:, :, 1][:, :, None]
        x_I = self.CNN_I(x_I)
        x_I = self.act(x_I)
        x_I = self.LSTM_I(x_I)
        x_I = keras.layers.Permute((2, 1))(x_I)
        x_I = self.transformer_I_2(x_I)

        # Temperature channel.
        x_T = inputs[:, :, 2][:, :, None]
        x_T = self.CNN_T(x_T)
        x_T = self.act(x_T)
        x_T = self.LSTM_T(x_T)
        x_T = keras.layers.Permute((2, 1))(x_T)
        x_T = self.transformer_T_2(x_T)

        # Fuse the three channel streams and regress SOC.
        x = x_V + x_I + x_T

        x = self.SKAttention(x)
        x = self.MLP_1(x)
        x = self.softmax(x)
        x = keras.layers.Flatten()(x)
        x = self.act(x)
        x = self.dropout(x)
        out = self.MLP_3(x)

        return out
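A hedged smoke test for the network, using the training defaults (time_step=50, hidden_layer=18, three input channels); not part of the commit:

# Build the network and push one dummy batch through it.
import numpy as np
from model.ttsnet import ttsnet

net = ttsnet(time_step=50, hidden_layer=18)
dummy = np.random.rand(4, 50, 3).astype('float32')  # (batch, time_step, V/I/T)
out = net(dummy)
print(out.shape)  # expected (4, 1): one SOC estimate per sequence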
requirements.txt (new file, +4 lines)
tensorflow-gpu==2.5.0
easydict==1.9
pandas==1.3.5
matplotlib==3.5.1
# also imported by the scripts above; versions unpinned in the diff:
scikit-learn
PyYAML
openpyxl