LeNet与DNN用于ERT图像重构

引言:LeNet与DNN用于ERT的架构:

1.DNN一维模型

2.LeNet模型


代码:

1.导入keras库以及检查GPU环境

import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio

import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Softmax, Flatten
from keras.layers import Conv1D, Conv2D, MaxPooling1D, MaxPooling2D, BatchNormalization

# ---- GPU environment check ----
# tf.config.list_physical_devices is the supported TF 2.x API;
# tf.test.is_gpu_available() is deprecated (and removed in newer releases),
# so GPU availability is derived from the device list instead.
gpus = tf.config.list_physical_devices('GPU')
gpu_available = len(gpus) > 0
print("Num GPUs Available: ", len(gpus))
print('tensorflow version:', tf.__version__, '\tGPU available:', gpu_available)
# Uncomment to stop TF from reserving all GPU memory up front:
# tf.config.experimental.set_memory_growth(gpus[0], True)


2.导入数据集,这里我用的是.mat文件

#------------- datasets ---------------
# Load training/test pairs from MATLAB .mat files.
# scipy.io.loadmat appends the ".mat" suffix automatically when it is
# absent, so suffix-less names are used consistently throughout.

def _load_mat_array(filename, key):
    """Load variable `key` from `filename` (.mat) as a float32 ndarray.

    Direct indexing replaces the original dict.get(key, "None"): the old
    string default only deferred a missing-variable failure to a confusing
    AttributeError at .astype; now a missing key raises KeyError immediately.
    """
    return sio.loadmat(filename)[key].astype("float32")

x_train = _load_mat_array("x_train", "x_train")
y_train = _load_mat_array("y_train", "y_train")
# Append a trailing channel axis for Conv2D: (N, 13, 16) -> (N, 13, 16, 1).
# NOTE(review): assumes the stored array is 3-D (samples, H, W) — confirm.
x_train = x_train[:, :, :, np.newaxis]

x_train = tf.convert_to_tensor(x_train)
y_train = tf.convert_to_tensor(y_train)

x_test = _load_mat_array("x_test", "x_test")
y_test = _load_mat_array("y_test", "y_test")
x_test = x_test[:, :, :, np.newaxis]

x_test = tf.convert_to_tensor(x_test)
y_test = tf.convert_to_tensor(y_test)

3.搭建LeNet模型

##------------- LeNet network -----------------
# LeNet-style CNN: input is a 13x16x1 ERT measurement "image", output is an
# 812-element reconstruction vector trained with binary cross-entropy.

model = Sequential()
model.add(Conv2D(filters=32, input_shape=(13, 16, 1), kernel_size=(3, 3),
                 activation="relu", padding="same"))
model.add(MaxPooling2D(2))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation="relu", padding="same"))
model.add(MaxPooling2D(2))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(units=512, activation="relu"))
model.add(BatchNormalization())
# Output layer. The original stacked Dense(softmax) -> BatchNormalization
# -> Softmax: softmax was applied twice with a batch-norm in between, and a
# softmax over all 812 outputs conflicts with binary_crossentropy, which
# expects each output independently in [0, 1]. A single sigmoid output
# matches the loss.
model.add(Dense(units=812, activation="sigmoid"))
model.compile(loss="binary_crossentropy", optimizer="adam",
              metrics=[keras.metrics.BinaryAccuracy()])

4.训练LeNet模型以及检查训练结果

# ---- train the model and inspect the results ----
history = model.fit(
    x_train, y_train,
    batch_size=16,
    epochs=500,
    validation_split=0.1,  # last 10% of the training data is held out
    verbose=1,
)

# Loss curves. savefig must run BEFORE show(): with blocking backends the
# figure is torn down when the window is closed, so saving afterwards
# writes a blank image.
plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Train', 'Validation'], loc='upper left')
plt.savefig("model_loss.jpg")
plt.show()

model.save("LeNet_1")
# evaluate() returns [loss, metric] in compile order.
test_result = model.evaluate(x_test, y_test)
print('TEST Loss:', test_result[0])
print('TEST Accuracy:', test_result[1])

model.summary(line_length=150, positions=[0.30, 0.60, 0.7, 1.])  # check LeNet network structure

DNN模型搭建:数据集导入与训练、评估流程与上述 LeNet 部分完全相同,这里只给出模型搭建的代码

##------------- DNN network -----------------
# 1-D convolutional analogue of the LeNet model above, applied to the raw
# measurement vector. Conv1D/MaxPooling1D must be imported at the top of
# the file (the original never imported them, which raised NameError).
# The input shape is inferred when the model is first built/fit.

model = Sequential()
model.add(Conv1D(filters=8, kernel_size=3, activation="relu", padding="same"))
# Pool along the steps axis. The original passed data_format="channels_first"
# here while Conv1D used its channels_last default, so pooling ran over the
# feature axis instead of the steps axis; the default keeps both consistent.
model.add(MaxPooling1D(pool_size=2))
model.add(Conv1D(filters=16, kernel_size=3, activation="relu", padding="same"))
model.add(MaxPooling1D(pool_size=2))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(units=512, activation="relu"))
model.add(BatchNormalization())
# Single sigmoid output to match binary_crossentropy (the original applied
# softmax twice with a BatchNormalization in between — same defect as the
# LeNet block).
model.add(Dense(units=812, activation="sigmoid"))
model.compile(loss="binary_crossentropy", optimizer="adam",
              metrics=[keras.metrics.BinaryAccuracy()])