《Over the Air Deep Learning Based Radio Signal Classification》最新代码及训练结果

import h5py
import numpy as np
import os, random
import tensorflow as tf

from tensorflow.keras.layers import Input, Reshape, ZeroPadding2D, Conv2D, Dropout, Flatten, Dense, Activation, MaxPooling2D, \
    AlphaDropout
from tensorflow.keras import layers
import tensorflow.keras.models as Model
# from tensorflow.keras.regularizers import *
# from tensorflow.keras.optimizers import adam
# import seaborn as sns
# import keras

import matplotlib.pyplot as plt
#plt.rcParams.update({'figure.max_open_warning': 0})


# import gc
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession

%matplotlib inline

os.environ["KERAS_BACKEND"] = "tensorflow"


# Prevent GPU out-of-memory crashes
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = "0,1"  # select which GPUs are visible
config = ConfigProto()
config.allow_soft_placement = True  # if a requested device does not exist, let TF pick one automatically
config.gpu_options.per_process_gpu_memory_fraction = 0.7  # cap the process at 70% of GPU memory to avoid OOM; tune as needed
config.gpu_options.allow_growth = True  # allocate GPU memory on demand (important)
session = InteractiveSession(config=config)

"""数据集处理"""

# Workaround import for the Keras-on-TF2 backend patch applied below.
import keras.backend.tensorflow_backend as tfback

print("tf.__version__ is", tf.__version__)
print("tf.keras.__version__ is:", tf.keras.__version__)


def _get_available_gpus():
    """Return the names of the visible GPU logical devices.

    Lazily fills ``tfback._LOCAL_DEVICES`` from TF's logical device list,
    then filters it down to GPU entries.  Patched onto the keras backend
    below so keras works against this TF version.

    # Returns
        A list of available GPU device name strings.
    """
    if tfback._LOCAL_DEVICES is None:
        tfback._LOCAL_DEVICES = [
            dev.name for dev in tf.config.list_logical_devices()
        ]
    return [name for name in tfback._LOCAL_DEVICES if 'device:gpu' in name.lower()]


tfback._get_available_gpus = _get_available_gpus

############################################
# Hardware limits make the full dataset unusable here, so a subset was
# extracted from the full dataset and split into 24 parts.
# Each part corresponds to one modulation and holds 1200*26 = 31200 examples.
# The working dataset size is therefore 748800*1024*2.
############################################
for i in range(0, 24):  # 24 dataset files, one per modulation
    ####### open the file #######
    filename = 'ExtractDataset/part' + str(i) + '.h5'
    print(filename)
    f = h5py.File(filename, 'r')
    ####### read the data #######
    X_data = f['X'][:]  # IQ samples
    Y_data = f['Y'][:]  # one-hot labels
    Z_data = f['Z'][:]  # SNR values
    f.close()
    ######### split into train / test sets #########
    # Split each file right after reading it, keeping peak memory low.
    n_examples = X_data.shape[0]
    n_train = int(n_examples * 0.7)  # 70% training samples
    train_idx = np.random.choice(range(0, n_examples), size=n_train, replace=False)  # random training indices
    test_idx = list(set(range(0, n_examples)) - set(train_idx))  # remaining indices form the test set
    if i == 0:
        X_train = X_data[train_idx]
        Y_train = Y_data[train_idx]
        Z_train = Z_data[train_idx]
        X_test = X_data[test_idx]
        Y_test = Y_data[test_idx]
        Z_test = Z_data[test_idx]
    else:
        # Append this file's split to the accumulated arrays.
        X_train = np.vstack((X_train, X_data[train_idx]))
        Y_train = np.vstack((Y_train, Y_data[train_idx]))
        Z_train = np.vstack((Z_train, Z_data[train_idx]))
        X_test = np.vstack((X_test, X_data[test_idx]))
        Y_test = np.vstack((Y_test, Y_data[test_idx]))
        Z_test = np.vstack((Z_test, Z_data[test_idx]))
print('训练集X维度:', X_train.shape)
print('训练集Y维度:', Y_train.shape)
print('训练集Z维度:', Z_train.shape)
print('测试集X维度:', X_test.shape)
print('测试集Y维度:', Y_test.shape)
print('测试集Z维度:', Z_test.shape)

## Sanity-check one sample: print its SNR and label, plot its I and Q traces
sample_idx = 8736  # arbitrary index
print('snr:', Z_train[sample_idx])
print('Y', Y_train[sample_idx])
plt_data = X_train[sample_idx].T
data_fig = plt.figure(figsize=(15, 5))
plt.plot(plt_data[0])
plt.plot(plt_data[1], color='red')
plt.show()


"""建立模型"""
# Modulation class names; index i corresponds to one-hot label position i
# (used below when decoding Y_test / predictions into class indices).
classes = ['32PSK',
           '16APSK',
           '32QAM',
           'FM',
           'GMSK',
           '32APSK',
           'OQPSK',
           '8ASK',
           'BPSK',
           '8PSK',
           'AM-SSB-SC',
           '4ASK',
           '16PSK',
           '64APSK',
           '128QAM',
           '128APSK',
           'AM-DSB-SC',
           'AM-SSB-WC',
           '64QAM',
           'QPSK',
           '256QAM',
           'AM-DSB-WC',
           'OOK',
           '16QAM']
data_format = 'channels_first'  # conv/pool layers take (batch, channels, H, W)


def residual_stack(Xm, kennel_size, Seq, pool_size):
    """One residual stack: 1x1 linear conv, two residual units, max-pooling.

    Args:
        Xm: input tensor, channels-first (batch, C, H, W).
        kennel_size: conv kernel size for the residual units, e.g. (3, 1).
            (Misspelled name kept as-is: callers pass it by keyword.)
        Seq: string prefix used to name this stack's layers.
        pool_size: pool size (and stride) of the final MaxPooling2D.

    Returns:
        Output tensor with 32 channels, spatial dims reduced by pool_size.
    """
    # 1*1 Conv Linear: project input to 32 channels so the adds line up
    Xm = Conv2D(32, (1, 1), padding='same', name=Seq + "_conv1", kernel_initializer='glorot_normal',
                data_format=data_format)(Xm)
    # Residual Unit 1: conv(relu) -> conv(linear) -> add shortcut -> relu
    Xm_shortcut = Xm
    Xm = Conv2D(32, kennel_size, padding='same', activation="relu", name=Seq + "_conv2",
                kernel_initializer='glorot_normal', data_format=data_format)(Xm)
    Xm = Conv2D(32, kennel_size, padding='same', name=Seq + "_conv3", kernel_initializer='glorot_normal',
                data_format=data_format)(Xm)
    Xm = layers.add([Xm, Xm_shortcut])
    Xm = Activation("relu")(Xm)
    # Residual Unit 2
    Xm_shortcut = Xm
    Xm = Conv2D(32, kennel_size, padding='same', activation="relu", name=Seq + "_conv4",
                kernel_initializer='glorot_normal', data_format=data_format)(Xm)
    # BUG FIX: this conv's output was assigned to `X` instead of `Xm`, so the
    # "_conv5" layer was built but never connected and the residual add used
    # conv4's output (the pasted model summary shows no *_conv5 layers).
    Xm = Conv2D(32, kennel_size, padding='same', name=Seq + "_conv5", kernel_initializer='glorot_normal',
                data_format=data_format)(Xm)
    Xm = layers.add([Xm, Xm_shortcut])
    Xm = Activation("relu")(Xm)
    # MaxPooling shrinks the spatial dims by pool_size
    Xm = MaxPooling2D(pool_size=pool_size, strides=pool_size, padding='valid', data_format=data_format)(Xm)
    return Xm


in_shp = X_train.shape[1:]  # per-sample shape: [1024, 2] (I/Q pairs)
# Input layer
Xm_input = Input(in_shp)
# Add a leading channel axis so channels_first Conv2D sees (1, 1024, 2)
Xm = Reshape([1, 1024, 2], input_shape=in_shp)(Xm_input)
# Residual Stack: six stacks, each halving the 1024-long time axis
Xm = residual_stack(Xm, kennel_size=(3, 2), Seq="ReStk0", pool_size=(2, 2))  # shape:(512,1,32)
Xm = residual_stack(Xm, kennel_size=(3, 1), Seq="ReStk1", pool_size=(2, 1))  # shape:(256,1,32)
Xm = residual_stack(Xm, kennel_size=(3, 1), Seq="ReStk2", pool_size=(2, 1))  # shape:(128,1,32)
Xm = residual_stack(Xm, kennel_size=(3, 1), Seq="ReStk3", pool_size=(2, 1))  # shape:(64,1,32)
Xm = residual_stack(Xm, kennel_size=(3, 1), Seq="ReStk4", pool_size=(2, 1))  # shape:(32,1,32)
Xm = residual_stack(Xm, kennel_size=(3, 1), Seq="ReStk5", pool_size=(2, 1))  # shape:(16,1,32)

#############################################################################
#      Empirically, removing one fully-connected layer makes the loss fall
#      faster, and AlphaDropout(0.3) seems to work better than 0.5.
#############################################################################
# Fully connected 1
Xm = Flatten(data_format=data_format)(Xm)
Xm = Dense(128, activation='selu', kernel_initializer='glorot_normal', name="dense1")(Xm)
Xm = AlphaDropout(0.3)(Xm)
# Fully connected 2: one logit per modulation class
Xm = Dense(len(classes), kernel_initializer='glorot_normal', name="dense2")(Xm)
# SoftMax
Xm = Activation('softmax')(Xm)
# Create Model
model = Model.Model(inputs=Xm_input, outputs=Xm)
# FIX: `lr`, `epsilon=None` and `decay` are deprecated/removed in modern
# tf.keras optimizers; `learning_rate` with the default epsilon is the
# supported equivalent (beta values shown are the documented defaults).
adam = tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=False)
model.compile(loss='categorical_crossentropy', optimizer=adam)
model.summary()

"""训练模型"""
#############################################################################
#      Stop when val_loss has not improved for 10 consecutive epochs, or
#      after at most 100 epochs in total.
#      Save the model with the lowest validation loss.
#############################################################################
print(tf.test.gpu_device_name())
filepath = 'Models/ResNet_Model_72w.h5'
history = model.fit(X_train,
                    Y_train,
                    batch_size=1000,
                    epochs=100,
                    verbose=2,
                    validation_data=(X_test, Y_test),
                    # validation_split = 0.3,
                    callbacks=[
                        tf.keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=0,
                                                           save_best_only=True),
                        tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='auto')
                    ])

# we re-load the best weights once training is finished
model.load_weights(filepath)

# Plot validation loss and training loss over the epochs actually run.
val_loss_list = history.history['val_loss']
loss_list = history.history['loss']
plt.plot(range(len(loss_list)), val_loss_list)
plt.plot(range(len(loss_list)), loss_list)
plt.show()


# ##########从loss走势来看,预计loss还能继续下降,故再训练一次#######
# history = model.fit(X_train,
#                     Y_train,
#                     batch_size=1000,
#                     epochs=100,
#                     verbose=2,
#                     validation_data=(X_test, Y_test),
#                     # validation_split = 0.3,
#                     callbacks=[
#                         keras.callbacks.ModelCheckpoint(filepath, monitor='val_loss', verbose=0, save_best_only=True, mode='auto'),
#                         keras.callbacks.EarlyStopping(monitor='val_loss', patience=10, verbose=0, mode='auto')
#                     ])
#
# # we re-load the best weights once training is finished
# model.load_weights(filepath)

"""**测试**"""


def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues, labels=None):
    """Render a confusion matrix as a labeled heat map and show it.

    Args:
        cm: 2-D array-like of shape (n_classes, n_classes); typically the
            row-normalized confusion matrix.
        title: figure title.
        cmap: matplotlib colormap passed to imshow.
        labels: sequence of class names for the axis ticks; defaults to no
            ticks.  (FIX: was a mutable default argument ``labels=[]``.)
    """
    if labels is None:
        labels = []
    plt.figure(figsize=(10, 10))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(labels))
    plt.xticks(tick_marks, labels, rotation=45)
    plt.yticks(tick_marks, labels)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()


# Plot the overall confusion matrix across the whole test set
batch_size = 1024
test_Y_hat = model.predict(X_test, batch_size=1024)
conf = np.zeros([len(classes), len(classes)])
confnorm = np.zeros([len(classes), len(classes)])
for i in range(0, X_test.shape[0]):
    j = list(Y_test[i, :]).index(1)       # true class index (position of 1 in one-hot row)
    k = int(np.argmax(test_Y_hat[i, :]))  # predicted class index
    conf[j, k] = conf[j, k] + 1
for i in range(0, len(classes)):
    confnorm[i, :] = conf[i, :] / np.sum(conf[i, :])  # row-normalize to per-class rates
plot_confusion_matrix(confnorm, labels=classes)

# Per-class accuracy = diagonal of the row-normalized confusion matrix
for i in range(len(confnorm)):
    print(classes[i], confnorm[i, i])

# Accuracy per SNR: rebuild a confusion matrix for each SNR bucket
acc = {}
Z_test = Z_test.reshape((len(Z_test)))
SNRs = np.unique(Z_test)
for snr in SNRs:
    X_test_snr = X_test[Z_test == snr]
    Y_test_snr = Y_test[Z_test == snr]

    pre_Y_test = model.predict(X_test_snr)
    conf = np.zeros([len(classes), len(classes)])
    confnorm = np.zeros([len(classes), len(classes)])
    for i in range(0, X_test_snr.shape[0]):  # number of test examples at this SNR
        # FIX: dropped the no-op lines `j = classes.index(classes[j])` /
        # `k = classes.index(classes[k])` -- they mapped an index to itself.
        j = list(Y_test_snr[i, :]).index(1)   # true class index
        k = int(np.argmax(pre_Y_test[i, :]))  # predicted class index
        conf[j, k] = conf[j, k] + 1
    for i in range(0, len(classes)):
        confnorm[i, :] = conf[i, :] / np.sum(conf[i, :])

    # FIX: removed the stray plt.figure() here -- plot_confusion_matrix
    # creates its own figure, so the extra call only left a blank figure open.
    plot_confusion_matrix(confnorm, labels=classes, title="ConvNet Confusion Matrix (SNR=%d)" % (snr))

    cor = np.sum(np.diag(conf))
    ncor = np.sum(conf) - cor
    print("Overall Accuracy %s: " % snr, cor / (cor + ncor))
    acc[snr] = 1.0 * cor / (cor + ncor)
    # FIX: close finished figures so the loop no longer triggers
    # "RuntimeWarning: More than 20 figures have been opened."
    plt.close('all')

# SNR-vs-accuracy curve; wrap the dict views in list() for matplotlib
plt.plot(list(acc.keys()), list(acc.values()))
plt.ylabel('ACC')
plt.xlabel('SNR')
plt.show()


snr: [26]
Y [1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]

image.png

Model: "model"


Layer (type) Output Shape Param # Connected to

input_1 (InputLayer) [(None, 1024, 2)] 0


reshape (Reshape) (None, 1, 1024, 2) 0 input_1[0][0]


ReStk0_conv1 (Conv2D) (None, 32, 1024, 2) 64 reshape[0][0]


ReStk0_conv2 (Conv2D) (None, 32, 1024, 2) 6176 ReStk0_conv1[0][0]


ReStk0_conv3 (Conv2D) (None, 32, 1024, 2) 6176 ReStk0_conv2[0][0]


add (Add) (None, 32, 1024, 2) 0 ReStk0_conv3[0][0]
ReStk0_conv1[0][0]


activation (Activation) (None, 32, 1024, 2) 0 add[0][0]


ReStk0_conv4 (Conv2D) (None, 32, 1024, 2) 6176 activation[0][0]


add_1 (Add) (None, 32, 1024, 2) 0 ReStk0_conv4[0][0]
activation[0][0]


activation_1 (Activation) (None, 32, 1024, 2) 0 add_1[0][0]


max_pooling2d (MaxPooling2D) (None, 32, 512, 1) 0 activation_1[0][0]


ReStk1_conv1 (Conv2D) (None, 32, 512, 1) 1056 max_pooling2d[0][0]


ReStk1_conv2 (Conv2D) (None, 32, 512, 1) 3104 ReStk1_conv1[0][0]


ReStk1_conv3 (Conv2D) (None, 32, 512, 1) 3104 ReStk1_conv2[0][0]


add_2 (Add) (None, 32, 512, 1) 0 ReStk1_conv3[0][0]
ReStk1_conv1[0][0]


activation_2 (Activation) (None, 32, 512, 1) 0 add_2[0][0]


ReStk1_conv4 (Conv2D) (None, 32, 512, 1) 3104 activation_2[0][0]


add_3 (Add) (None, 32, 512, 1) 0 ReStk1_conv4[0][0]
activation_2[0][0]


activation_3 (Activation) (None, 32, 512, 1) 0 add_3[0][0]


max_pooling2d_1 (MaxPooling2D) (None, 32, 256, 1) 0 activation_3[0][0]


ReStk2_conv1 (Conv2D) (None, 32, 256, 1) 1056 max_pooling2d_1[0][0]


ReStk2_conv2 (Conv2D) (None, 32, 256, 1) 3104 ReStk2_conv1[0][0]


ReStk2_conv3 (Conv2D) (None, 32, 256, 1) 3104 ReStk2_conv2[0][0]


add_4 (Add) (None, 32, 256, 1) 0 ReStk2_conv3[0][0]
ReStk2_conv1[0][0]


activation_4 (Activation) (None, 32, 256, 1) 0 add_4[0][0]


ReStk2_conv4 (Conv2D) (None, 32, 256, 1) 3104 activation_4[0][0]


add_5 (Add) (None, 32, 256, 1) 0 ReStk2_conv4[0][0]
activation_4[0][0]


activation_5 (Activation) (None, 32, 256, 1) 0 add_5[0][0]


max_pooling2d_2 (MaxPooling2D) (None, 32, 128, 1) 0 activation_5[0][0]


ReStk3_conv1 (Conv2D) (None, 32, 128, 1) 1056 max_pooling2d_2[0][0]


ReStk3_conv2 (Conv2D) (None, 32, 128, 1) 3104 ReStk3_conv1[0][0]


ReStk3_conv3 (Conv2D) (None, 32, 128, 1) 3104 ReStk3_conv2[0][0]


add_6 (Add) (None, 32, 128, 1) 0 ReStk3_conv3[0][0]
ReStk3_conv1[0][0]


activation_6 (Activation) (None, 32, 128, 1) 0 add_6[0][0]


ReStk3_conv4 (Conv2D) (None, 32, 128, 1) 3104 activation_6[0][0]


add_7 (Add) (None, 32, 128, 1) 0 ReStk3_conv4[0][0]
activation_6[0][0]


activation_7 (Activation) (None, 32, 128, 1) 0 add_7[0][0]


max_pooling2d_3 (MaxPooling2D) (None, 32, 64, 1) 0 activation_7[0][0]


ReStk4_conv1 (Conv2D) (None, 32, 64, 1) 1056 max_pooling2d_3[0][0]


ReStk4_conv2 (Conv2D) (None, 32, 64, 1) 3104 ReStk4_conv1[0][0]


ReStk4_conv3 (Conv2D) (None, 32, 64, 1) 3104 ReStk4_conv2[0][0]


add_8 (Add) (None, 32, 64, 1) 0 ReStk4_conv3[0][0]
ReStk4_conv1[0][0]


activation_8 (Activation) (None, 32, 64, 1) 0 add_8[0][0]


ReStk4_conv4 (Conv2D) (None, 32, 64, 1) 3104 activation_8[0][0]


add_9 (Add) (None, 32, 64, 1) 0 ReStk4_conv4[0][0]
activation_8[0][0]


activation_9 (Activation) (None, 32, 64, 1) 0 add_9[0][0]


max_pooling2d_4 (MaxPooling2D) (None, 32, 32, 1) 0 activation_9[0][0]


ReStk5_conv1 (Conv2D) (None, 32, 32, 1) 1056 max_pooling2d_4[0][0]


ReStk5_conv2 (Conv2D) (None, 32, 32, 1) 3104 ReStk5_conv1[0][0]


ReStk5_conv3 (Conv2D) (None, 32, 32, 1) 3104 ReStk5_conv2[0][0]


add_10 (Add) (None, 32, 32, 1) 0 ReStk5_conv3[0][0]
ReStk5_conv1[0][0]


activation_10 (Activation) (None, 32, 32, 1) 0 add_10[0][0]


ReStk5_conv4 (Conv2D) (None, 32, 32, 1) 3104 activation_10[0][0]


add_11 (Add) (None, 32, 32, 1) 0 ReStk5_conv4[0][0]
activation_10[0][0]


activation_11 (Activation) (None, 32, 32, 1) 0 add_11[0][0]


max_pooling2d_5 (MaxPooling2D) (None, 32, 16, 1) 0 activation_11[0][0]


flatten (Flatten) (None, 512) 0 max_pooling2d_5[0][0]


dense1 (Dense) (None, 128) 65664 flatten[0][0]


alpha_dropout (AlphaDropout) (None, 128) 0 dense1[0][0]


dense2 (Dense) (None, 24) 3096 alpha_dropout[0][0]


activation_12 (Activation) (None, 24) 0 dense2[0][0]

Total params: 139,192
Trainable params: 139,192
Non-trainable params: 0


/device:GPU:0
Train on 262080 samples, validate on 112320 samples
Epoch 1/100

262080/262080 - 121s - loss: 2.4649 - val_loss: 2.2968
Epoch 2/100
262080/262080 - 115s - loss: 2.0031 - val_loss: 1.8899
Epoch 3/100
262080/262080 - 114s - loss: 1.8912 - val_loss: 1.8477
Epoch 4/100
262080/262080 - 115s - loss: 1.8314 - val_loss: 2.0882
Epoch 5/100
262080/262080 - 114s - loss: 1.7901 - val_loss: 1.9614
Epoch 6/100
262080/262080 - 115s - loss: 1.7454 - val_loss: 2.4555
Epoch 7/100
262080/262080 - 114s - loss: 1.7270 - val_loss: 1.6827
Epoch 8/100
262080/262080 - 114s - loss: 1.6809 - val_loss: 1.7736
Epoch 9/100
262080/262080 - 114s - loss: 1.6645 - val_loss: 1.7251
Epoch 10/100
262080/262080 - 115s - loss: 1.6398 - val_loss: 1.7696
Epoch 11/100
262080/262080 - 114s - loss: 1.6191 - val_loss: 1.6233
Epoch 12/100
262080/262080 - 114s - loss: 1.6071 - val_loss: 1.6533
Epoch 13/100
262080/262080 - 114s - loss: 1.5874 - val_loss: 1.7658
Epoch 14/100
262080/262080 - 114s - loss: 1.5763 - val_loss: 1.6382
Epoch 15/100
262080/262080 - 114s - loss: 1.5481 - val_loss: 1.6054
Epoch 16/100
262080/262080 - 115s - loss: 1.5422 - val_loss: 1.7639
Epoch 17/100
262080/262080 - 115s - loss: 1.5170 - val_loss: 1.5619
Epoch 18/100
262080/262080 - 114s - loss: 1.5031 - val_loss: 1.5510
Epoch 19/100
262080/262080 - 117s - loss: 1.4890 - val_loss: 1.5124
Epoch 20/100
262080/262080 - 114s - loss: 1.4919 - val_loss: 1.5309
Epoch 21/100
262080/262080 - 114s - loss: 1.4662 - val_loss: 1.5423
Epoch 22/100
262080/262080 - 114s - loss: 1.4601 - val_loss: 1.4762
Epoch 23/100
262080/262080 - 114s - loss: 1.4541 - val_loss: 1.5190
Epoch 24/100
262080/262080 - 114s - loss: 1.4459 - val_loss: 1.5118
Epoch 25/100
262080/262080 - 114s - loss: 1.4465 - val_loss: 1.5295
Epoch 26/100
262080/262080 - 114s - loss: 1.4353 - val_loss: 1.4769
Epoch 27/100
262080/262080 - 114s - loss: 1.4302 - val_loss: 1.5943
Epoch 28/100
262080/262080 - 114s - loss: 1.4219 - val_loss: 1.5141
Epoch 29/100
262080/262080 - 114s - loss: 1.4198 - val_loss: 1.4589
Epoch 30/100
262080/262080 - 114s - loss: 1.4136 - val_loss: 1.5035
Epoch 31/100
262080/262080 - 114s - loss: 1.4133 - val_loss: 1.4398
Epoch 32/100
262080/262080 - 114s - loss: 1.4115 - val_loss: 1.5046
Epoch 33/100
262080/262080 - 114s - loss: 1.5056 - val_loss: 1.4821
Epoch 34/100
262080/262080 - 114s - loss: 1.4238 - val_loss: 1.5173
Epoch 35/100
262080/262080 - 114s - loss: 1.4083 - val_loss: 1.5039
Epoch 36/100
262080/262080 - 114s - loss: 1.3920 - val_loss: 1.4710
Epoch 37/100
262080/262080 - 114s - loss: 1.3912 - val_loss: 1.5334
Epoch 38/100
262080/262080 - 115s - loss: 1.3856 - val_loss: 1.4302
Epoch 39/100
262080/262080 - 114s - loss: 1.3772 - val_loss: 1.5279
Epoch 40/100
262080/262080 - 114s - loss: 1.3770 - val_loss: 1.4703
Epoch 41/100
262080/262080 - 114s - loss: 1.3725 - val_loss: 1.4585
Epoch 42/100
262080/262080 - 115s - loss: 1.3612 - val_loss: 1.4944
Epoch 43/100
262080/262080 - 115s - loss: 1.3561 - val_loss: 1.5215
Epoch 44/100
262080/262080 - 115s - loss: 1.3536 - val_loss: 1.4955
Epoch 45/100
262080/262080 - 116s - loss: 1.3903 - val_loss: 1.4142
Epoch 46/100
262080/262080 - 116s - loss: 1.3444 - val_loss: 1.4782
Epoch 47/100
262080/262080 - 115s - loss: 1.3412 - val_loss: 1.4662
Epoch 48/100
262080/262080 - 115s - loss: 1.3293 - val_loss: 1.4285
Epoch 49/100
262080/262080 - 115s - loss: 1.3182 - val_loss: 1.7751
Epoch 50/100
262080/262080 - 115s - loss: 1.3378 - val_loss: 1.5361
Epoch 51/100
262080/262080 - 115s - loss: 1.3149 - val_loss: 1.5202
Epoch 52/100
262080/262080 - 115s - loss: 1.3127 - val_loss: 1.4762
Epoch 53/100
262080/262080 - 115s - loss: 1.3057 - val_loss: 1.4900
Epoch 54/100
262080/262080 - 115s - loss: 1.3036 - val_loss: 1.5167
Epoch 55/100
262080/262080 - 115s - loss: 1.3059 - val_loss: 1.5198

image.png
image.png

32PSK 0.7243589743589743
16APSK 0.6083333333333333
32QAM 0.6581196581196581
FM 0.6679487179487179
GMSK 0.5927350427350427
32APSK 0.37286324786324787
OQPSK 0.015384615384615385
8ASK 0.5585470085470086
BPSK 0.5912393162393162
8PSK 0.5544871794871795
AM-SSB-SC 0.46025641025641023
4ASK 0.42991452991452994
16PSK 0.5912393162393162
64APSK 0.42457264957264956
128QAM 0.12799145299145298
128APSK 0.41196581196581195
AM-DSB-SC 0.4025641025641026
AM-SSB-WC 0.27735042735042736
64QAM 0.6102564102564103
QPSK 0.7651709401709401
256QAM 0.4895299145299145
AM-DSB-WC 0.7542735042735043
OOK 0.8143162393162393
16QAM 0.6284188034188034

image.png

Overall Accuracy -20: 0.03875432525951557

image.png

Overall Accuracy -18: 0.04708362614195362

image.png

Overall Accuracy -16: 0.04984929283561326

image.png

Overall Accuracy -14: 0.0497790183763666

image.png

Overall Accuracy -12: 0.0672365988909427

image.png

Overall Accuracy -10: 0.08615456455755312

image.png

Overall Accuracy -8: 0.125553226182157

image.png

Overall Accuracy -6: 0.17679558011049723

image.png

Overall Accuracy -4: 0.2675736961451247

image.png

Overall Accuracy -2: 0.37135526008864006

image.png

Overall Accuracy 0: 0.4640093786635405

image.png

Overall Accuracy 2: 0.5496642741375318

image.png

Overall Accuracy 4: 0.6252042007001167

image.png

Overall Accuracy 6: 0.7041172365666434

image.png

Overall Accuracy 8: 0.7706422018348624

image.png

Overall Accuracy 10: 0.8340817242927705

image.png

Overall Accuracy 12: 0.821840433043069

image.png

Overall Accuracy 14: 0.8342541436464088

image.png

Overall Accuracy 16: 0.841743119266055

image.png

Overall Accuracy 18: 0.8339930151338766

image.png

Overall Accuracy 20: 0.8390909090909091

image.png

Overall Accuracy 22: 0.825836216839677

image.png

Overall Accuracy 24: 0.8296792060927763

image.png

Overall Accuracy 26: 0.8382006594441828

image.png

Overall Accuracy 28: 0.8410351201478743

image.png

Overall Accuracy 30: 0.8266853277350128

image.png

你可能感兴趣的:(《Over the Air Deep Learning Based Radio Signal Classification》最新代码及训练结果)