A Summary of NVIDIA's End-to-End Autonomous Driving Model

I am a beginner myself; this post summarizes the material I have collected so far and is meant to help other newcomers. Only the Autopilot V2 model is walked through below; readers who want more detail can consult the original sources (discussion group: 663046328).

Links to the original work:

Autopilot model: https://github.com/akshaybahadur21/Autopilot

Udacity's self-driving car simulator: https://github.com/udacity/self-driving-car-sim

Dataset

The dataset comes from the first link above (access may require a proxy; leave a comment if you need help getting it). The Udacity simulator listed above can also generate simulated images and labels, which can be used to train the V1 model; a conversion sketch is given below.
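
In training mode the Udacity simulator records an IMG folder and a driving_log.csv whose rows are usually center,left,right,steering,throttle,brake,speed, with the steering value normalized to roughly [-1, 1]. The following helper is only a rough sketch for turning that log into the data.txt format used in Step 1; the column order, the ±25° steering range, and the filename convert_sim_log.py are my own assumptions, not part of the original repository.

convert_sim_log.py (hypothetical helper)

import csv
import os

SIM_LOG = 'driving_log.csv'                       # recorded by the simulator in training mode
OUT_FILE = os.path.join('data', 'data.txt')       # label file expected by LoadData_V2.py

with open(SIM_LOG) as src, open(OUT_FILE, 'w') as dst:
    for row in csv.reader(src):
        if not row or row[0].strip() == 'center':  # skip an optional header row
            continue
        center_img = os.path.basename(row[0])      # center-camera image filename
        steering = float(row[3])                   # normalized steering, assumed in [-1, 1]
        angle_deg = steering * 25.0                # assumed maximum steering of about 25 degrees
        dst.write('{} {}\n'.format(center_img, angle_deg))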

Autopilot V2 dataset:

https://drive.google.com/file/d/1PZWa6H0i1PCH9zuYcIh5Ouk_p-9Gh58B/view

If you cannot access Google Drive, a mirror is available here:

https://pan.baidu.com/s/1BaJgH0_X7QtcbS335ltVaQ

Extraction code: 35b9

Step 1: Generate the training files "features" and "labels" from the dataset

Put the downloaded images in the data folder, and place the data.txt label file in the same directory as LoadData_V2.py. The expected label format is shown below.
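
Judging from the parsing code below, each line of data.txt begins with the image filename and the steering angle in degrees, separated by a space (anything after a comma, such as a timestamp, is ignored). The values here are made up and only illustrate the layout:

0.jpg -4.85
1.jpg -4.85
2.jpg -4.30
3.jpg -3.95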

LoadData_V2.py

from __future__ import division
import cv2
import os
import numpy as np
import pickle
import matplotlib.pyplot as plt
from itertools import islice
import csv

LIMIT = None

DATA_FOLDER = 'data'
TRAIN_FILE = os.path.join(DATA_FOLDER, 'data.txt')

def preprocess(img):
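    # keep only the saturation (S) channel of the HSV image and resize it to 100x100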
    resized = cv2.resize((cv2.cvtColor(img, cv2.COLOR_RGB2HSV))[:, :, 1], (100, 100))
    return resized

def return_data():

    X = []
    y = []
    features = []

    with open(TRAIN_FILE) as fp:
        reader = csv.reader(fp) 
        for line in reader:
            # the first CSV field holds "filename angle"; split it on whitespace
            path = line[0].split()[0]                                # the original code had a small issue here
            angle = line[0].split()[1]
            full_path = os.path.join(DATA_FOLDER, path)
            X.append(full_path)
            # using angles from -pi to pi to avoid rescaling the atan in the network
            y.append(float(angle) * np.pi / 180)                     # degrees -> radians (np.pi; scipy.pi was removed in newer SciPy)

    for i in range(len(X)):
        img = plt.imread(X[i])
        features.append(preprocess(img))

    features = np.array(features).astype('float32')
    labels = np.array(y).astype('float32')

    with open("features", "wb") as f:
        pickle.dump(features, f, protocol=4)
    with open("labels", "wb") as f:
        pickle.dump(labels, f, protocol=4)

return_data()
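
Running LoadData_V2.py writes two pickle files, features and labels, into the working directory. A quick sanity check (not part of the original script) before moving on to training:

import pickle
import numpy as np

with open("features", "rb") as f:
    features = np.array(pickle.load(f))
with open("labels", "rb") as f:
    labels = np.array(pickle.load(f))

print(features.shape)   # expected: (number_of_images, 100, 100)
print(labels.shape)     # expected: (number_of_images,)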
 

Step 2: Train the network and generate the Autopilot.h5 model

Train_pilot_V2.py

import tensorflow as tf
import numpy as np
import pickle

def keras_model():
    model = tf.keras.models.Sequential()
    model.add(tf.keras.layers.Lambda(lambda x: x / 127.5 - 1., input_shape=(100, 100, 1)))

    model.add(tf.keras.layers.Conv2D(32, (3, 3), padding='same'))
    model.add(tf.keras.layers.Activation('relu'))
    model.add(tf.keras.layers.MaxPooling2D((2, 2), padding='valid'))
    model.add(tf.keras.layers.Conv2D(32, (3, 3), padding='same'))
    model.add(tf.keras.layers.Activation('relu'))
    model.add(tf.keras.layers.MaxPooling2D((2, 2), padding='valid'))

    model.add(tf.keras.layers.Conv2D(64, (3, 3), padding='same'))
    model.add(tf.keras.layers.Activation('relu'))
    model.add(tf.keras.layers.MaxPooling2D((2, 2), padding='valid'))
    model.add(tf.keras.layers.Conv2D(64, (3, 3), padding='same'))
    model.add(tf.keras.layers.Activation('relu'))
    model.add(tf.keras.layers.MaxPooling2D((2, 2), padding='valid'))

    model.add(tf.keras.layers.Conv2D(128, (3, 3), padding='same'))
    model.add(tf.keras.layers.Activation('relu'))
    model.add(tf.keras.layers.MaxPooling2D((2, 2), padding='valid'))
    model.add(tf.keras.layers.Conv2D(128, (3, 3), padding='same'))
    model.add(tf.keras.layers.Activation('relu'))
    model.add(tf.keras.layers.MaxPooling2D((2, 2), padding='valid'))
    
    model.add(tf.keras.layers.Flatten())
    model.add(tf.keras.layers.Dropout(0.5))

    model.add(tf.keras.layers.Dense(1024))
    model.add(tf.keras.layers.Dense(256))
    model.add(tf.keras.layers.Dense(64))
    model.add(tf.keras.layers.Dense(1))

    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001), loss="mse")
    filepath = "Autopilot.h5"
    checkpoint1 = tf.keras.callbacks.ModelCheckpoint(filepath, verbose=1, save_best_only=True)
    callbacks_list = [checkpoint1]

    return model, callbacks_list


def loadFromPickle():
    with open("features", "rb") as f:
        features = np.array(pickle.load(f))
    with open("labels", "rb") as f:
        labels = np.array(pickle.load(f))

    return features, labels


def augmentData(features, labels):
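    # double the dataset: horizontally flip each image and negate its steering angle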
    features = np.append(features, features[:, :, ::-1], axis=0)
    labels = np.append(labels, -labels, axis=0)
    return features, labels


def main():
    features, labels = loadFromPickle()
    features, labels = augmentData(features, labels)

    train_x = features[:100000,:,:]
    test_x = features[100000:,:,:]
    train_y = labels[:100000]
    test_y = labels[100000:]
    train_x = train_x.reshape(train_x.shape[0], 100, 100, 1)
    test_x = test_x.reshape(test_x.shape[0], 100, 100, 1)
    model, callbacks_list = keras_model()
    model.fit(train_x, train_y, validation_data=(test_x, test_y), epochs=10, batch_size=64,
              callbacks=callbacks_list)
    model.summary()
    model.save('Autopilot.h5')


main()
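
Note that the split at index 100000 is hard-coded and assumes the augmented dataset has well over 100,000 samples. If your dataset is smaller, a proportional split works just as well; the following is a sketch assuming scikit-learn is installed (my substitution, not the original code):

from sklearn.model_selection import train_test_split

train_x, test_x, train_y, test_y = train_test_split(features, labels,
                                                     test_size=0.1, random_state=0)
train_x = train_x.reshape(-1, 100, 100, 1)
test_x = test_x.reshape(-1, 100, 100, 1)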

Step 3: Test the model

AutopilotApp_V2.py

import numpy as np
import cv2
import tensorflow as tf

model = tf.keras.models.load_model('Autopilot.h5')

def keras_predict(model, image):
    processed = keras_process_image(image)
    steering_angle = float(model.predict(processed, batch_size=1)[0][0])   # predict() returns a (1, 1) array
    steering_angle = steering_angle * 60                                    # scale up the prediction for display
    return steering_angle


def keras_process_image(img):
    image_x = 100
    image_y = 100
    img = cv2.resize(img, (image_x, image_y))
    img = np.array(img, dtype=np.float32)
    img = np.reshape(img, (-1, image_x, image_y, 1))
    return img


steer = cv2.imread('steering_wheel_image.jpg', 0)
rows, cols = steer.shape
smoothed_angle = 0

cap = cv2.VideoCapture('a.avi')
while (cap.isOpened()):
    ret, frame = cap.read()
    if not ret:                                # stop when the video ends
        break
    gray = cv2.resize((cv2.cvtColor(frame, cv2.COLOR_RGB2HSV))[:, :, 1], (100, 100))
    steering_angle = keras_predict(model, gray)
    print(steering_angle)
    cv2.imshow('frame', cv2.resize(frame, (600, 400), interpolation=cv2.INTER_AREA))
    if abs(steering_angle - smoothed_angle) > 1e-6:    # guard against division by zero
        smoothed_angle += 0.2 * pow(abs((steering_angle - smoothed_angle)), 2.0 / 3.0) * (
            steering_angle - smoothed_angle) / abs(
            steering_angle - smoothed_angle)
    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), -smoothed_angle, 1)
    dst = cv2.warpAffine(steer, M, (cols, rows))
    cv2.imshow("steering wheel", dst)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
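
If you do not have a test video handy, you can also run the model on a single image first. A minimal sketch (not part of the original scripts; 'data/0.jpg' is only an example path):

import cv2
import numpy as np
import tensorflow as tf

model = tf.keras.models.load_model('Autopilot.h5')

img = cv2.imread('data/0.jpg')                                            # example image path
s_channel = cv2.resize(cv2.cvtColor(img, cv2.COLOR_RGB2HSV)[:, :, 1], (100, 100))
x = s_channel.astype(np.float32).reshape(-1, 100, 100, 1)                 # same preprocessing as training
print(float(model.predict(x, batch_size=1)[0][0]) * 60)                   # scaled angle, as in the demo above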

A test video is provided below; it was stitched together from part of the dataset above:

Link: https://pan.baidu.com/s/13OZa9Myq7yEEHjYo819quQ
Extraction code: vty3

Note: the code above has been modified in a few places to suit my own environment. The steering-wheel image used in Step 3 is available in the original GitHub repository and can be downloaded directly. If anything is unclear, feel free to leave a comment so we can discuss it. This is my first post, and I will keep improving it.

 
