天池:零样本目标识别新手笔记

2018之江杯全球人工智能大赛-零样本图像目标识别

简单数据分析

天池:零样本目标识别新手笔记_第1张图片

天池:零样本目标识别新手笔记_第2张图片

jupyter:github地址

数据预处理

将label_list和class_wordembeddings合并,处理后结果如标签\t特征
sample_processing.py

import pandas as pd

# Root of the competition's DatasetA training data.
# NOTE(review): `dir` shadows the `dir` builtin — rename (e.g. DATA_DIR) once all users are updated.
dir = "d:/ZSL_ImageGame/DatasetA_train_20180813/"


def words_embed():
    """
    Merge label_list with class_wordembeddings into per-label text features,
    written as "label\tfeature..." rows with no header.
    :return: None; writes output/word_embeddings.csv
    """
    # label_list.txt: col 0 = label id (ZJLxx), col 1 = English class name.
    train_labels = pd.read_csv(dir + "label_list.txt", sep='\t', header=None)
    print(train_labels.head())

    # class_wordembeddings.txt: col 0 = class name, cols 1.. = embedding vector.
    train_words = pd.read_csv(dir +"class_wordembeddings.txt", sep=' ', header=None)
    print(train_words.head())

    # Join on class name: labels col 1 <-> embeddings col 0. Overlapping
    # integer column labels get pandas' '_x'/'_y' string suffixes.
    res = pd.merge(train_labels, train_words, left_on=1, right_on=0)

    # Drop the redundant join-key columns, keeping label id + embedding values.
    # NOTE(review): the exact labels [1, '1_x', '0_y'] depend on pandas'
    # suffixing behavior for overlapping integer columns — verify against the
    # pandas version in use.
    res = res.drop([1, '1_x', '0_y'], axis=1)
    print(res.head())
    res.to_csv('output/word_embeddings.csv', index=None, header=None, sep='\t')

def traindata():
    """
    Load the three training tables (image/label pairs, per-class attributes,
    and the merged word embeddings) and preview each one.
    :return: None
    """
    samples = pd.read_csv(dir + 'train.txt', header=None, sep='\t')
    attributes = pd.read_csv(dir + "attributes_per_class.txt", sep='\t', header=None)
    embeddings = pd.read_csv('output/word_embeddings.csv', sep='\t', header=None)

    for frame in (samples, attributes, embeddings):
        print(frame.head())
    # res.to_csv('output/traindata.csv',index=None,header=None,sep='\t')

if __name__ == '__main__':
    # words_embed() must be run once first: traindata() reads the
    # output/word_embeddings.csv file that words_embed() produces.
    # words_embed()
    traindata()

选取训练集和验证集

有资料表明,submit.txt存放着验证数据。如下代码所示,其中标签ZJL160被错打成了ZJL178,而原本的ZJL178仍正确标为ZJL178。submit.txt含有标签160~200,所有样本都可在train.txt中找到。

False Label True Label
178 160
178 178
# Flat analysis script: compare the labels recorded in submit.txt (used here
# as a validation split) against the ground-truth labels in train.txt.
import numpy as np
import pandas as pd

src="d:/ZSL_ImageGame/DatasetA_train_20180813/"
# Both files: col 0 = image filename, col 1 = label id.
valid=pd.read_csv(src+"submit.txt",header=None,sep="\t")
train=pd.read_csv(src+'train.txt',header=None,sep='\t')

print(valid.head())
print(valid.shape)
"""
                                       0       1
0  9f1d3113f1fcb573596ca99ecb712364.jpeg  ZJL178
1  9f73904f7a72fa7285b80f2ae8286066.jpeg  ZJL178
2  619bf8d90e1fa19a7f2966bd38b27ccd.jpeg  ZJL178
3  6773ca5a1a615fc0d67f836e0772ff46.jpeg  ZJL178
4  e1badc8feb1e4d4a6e44eb382d13bc24.jpeg  ZJL178
(8291, 2)
"""

print(train.head())
print(train.shape)
"""
                                       0     1
0  a6394b0f513290f4651cc46792e5ac86.jpeg  ZJL1
1  2fb89ef2ace869d3eb3bdd3afe184e1c.jpeg  ZJL1
2  eda9f3bef2bd8da038f6acbc8355fc25.jpeg  ZJL1
3  7d93ef45972154aae150b4f9980a79c0.jpeg  ZJL1
4  fb901b4f9a8e396c1d0155bccc5e5671.jpeg  ZJL1
(38221, 2)
"""
# Distinct numeric label ids present in train.txt (note the gaps, e.g. 17, 20).
print(sorted([int(str(i)[3:]) for i in set(train.iloc[:,1].values)]))
"""
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, 19, 21, 22, 23, 24, 25, 26, 28, 29, 30, 31, 32, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 135, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 149, 150, 151, 152, 153, 154, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200]

"""

# Inner-join on image filename: col '1_x' = submit.txt's label,
# col '1_y' = train.txt's (ground-truth) label.
mer=pd.merge(valid,train,left_on=0,right_on=0)
print(mer.head())
print(mer.shape)
"""
                                       0     1_x     1_y
0  9f1d3113f1fcb573596ca99ecb712364.jpeg  ZJL178  ZJL160
1  9f73904f7a72fa7285b80f2ae8286066.jpeg  ZJL178  ZJL160
2  619bf8d90e1fa19a7f2966bd38b27ccd.jpeg  ZJL178  ZJL160
3  6773ca5a1a615fc0d67f836e0772ff46.jpeg  ZJL178  ZJL160
4  e1badc8feb1e4d4a6e44eb382d13bc24.jpeg  ZJL178  ZJL160
(8291, 3)
"""

# ## Label ZJL160 was mislabeled as ZJL178 in submit.txt
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import cohen_kappa_score
# Treat submit.txt's labels as predictions against train.txt's ground truth
# and report agreement metrics plus a confusion-matrix heatmap.
true_label=mer.iloc[:,1].values
res=mer.iloc[:,2].values

print()
print(classification_report(true_label,res))
print("kappa: ",cohen_kappa_score(true_label,res))
mat = confusion_matrix(true_label,res)
sns.heatmap(mat,annot=True,square=True,fmt="d")
plt.show()

天池:零样本目标识别新手笔记_第3张图片
此处可以删除错打成ZJL178的样本,或者将错打的标签改回ZJL160,如下为删除代码:

mer=mer[~((mer.iloc[:,1]=="ZJL178") &(mer.iloc[:,2]=="ZJL160"))]

这样,submit.txt就从8291条变成了8094条,然后再从训练集中删除验证集的部分。从submit.txt和train.txt的merge结果看,submit的样本全部来自train.txt(其中标签160~200)。干脆简单些,直接从train.txt中选取ZJL196~ZJL200作为零样本验证集,其他作为训练集。

import numpy as np
import pandas as pd

src="d:/ZSL_ImageGame/DatasetA_train_20180813/"

# Full training manifest: col 0 = image filename, col 1 = label id.
all_samples = pd.read_csv(src + 'train.txt', header=None, sep='\t')
print("train.txt数量", all_samples.shape)

# Labels ZJL196..ZJL200 are held out as the zero-shot (unseen) classes.
holdout_labels = ["ZJL" + str(i) for i in range(196, 201)]

is_holdout = all_samples.iloc[:, 1].isin(holdout_labels)

# All rows of the held-out labels, deterministically shuffled.
zsl_validate = all_samples[is_holdout].sample(frac=1, random_state=2018)
print("零样本测试集", zsl_validate.shape)

# Remaining (seen-class) rows, shuffled with the same seed; the last 1000
# become the image-classifier validation set, the rest the training set.
seen_samples = all_samples[~is_holdout].sample(frac=1, random_state=2018)

train_img = seen_samples.iloc[:-1000, :]
print("图片训练集", train_img.shape)
validate_img = seen_samples.iloc[-1000:, :]
print("图片验证集", validate_img.shape)

zsl_validate.to_csv("../data/zsl_validate.csv", sep="\t", header=None, index=None)
train_img.to_csv("../data/train_img.csv", sep="\t", header=None, index=None)
validate_img.to_csv("../data/validate_img.csv", sep="\t", header=None, index=None)

train.txt数量 (38221, 2)
零样本测试集 (1016, 2)
图片训练集 (36205, 2)
图片验证集 (1000, 2)

实验部分

整体思路如下,KNN使用1近邻:
天池:零样本目标识别新手笔记_第4张图片
实验中我使用train.txt中的2000条数据作为验证集,其他作为训练集来训练CNN模型,分别使用VGG16,Xception,自定义模型微调和重训练;发现验证集的精度只有0.2多,而训练集能达到0.9以上,据说训练样本打的标记不够好。也就是说,图片特征提取这一步就出现了问题。
天池:零样本目标识别新手笔记_第5张图片
映射使用的是神经网络(据说使用岭回归较好),线上精度较差。
这里写图片描述
此处仅贴出批量数据训练的flow(个人认为有轻微参考价值)。

import keras
import pandas as pd
import numpy as np
import cv2
from keras_preprocessing.image import ImageDataGenerator

# Per-class attribute vectors: col 0 = label id, cols 1.. = attribute values.
train_attr = pd.read_csv("d:/ZSL_ImageGame/DatasetA_train_20180813/attributes_per_class.txt", sep='\t', header=None)
# Per-class word embeddings produced by sample_processing.py: col 0 = label id.
train_words = pd.read_csv(r"data/word_embeddings.csv", sep='\t', header=None)
# Sorted, de-duplicated label ids; a label's index here is its one-hot position.
labelkeys = sorted(set(train_attr.iloc[:, 0].values.tolist()))

# Augmentation pipeline; note it also rescales pixel values to [0, 1].
datagen = ImageDataGenerator(rescale=1./255,rotation_range=20,width_shift_range=0.2,height_shift_range=0.2,
                                 shear_range=0.2,zoom_range=0.5,horizontal_flip=True,fill_mode='nearest')

class DataPiple:
    """Batch generator pairing images with per-class attributes, word vectors
    and one-hot labels, for Keras ``fit_generator``-style training.

    (Class name kept as-is for compatibility; "Pipeline" was presumably intended.)
    """

    def __init__(self, target, imgsize=64, impro=False):
        """
        :param target: path to a tab-separated file of (image name, label id) rows
        :param imgsize: square side length the images are resized to
        :param impro: whether to apply data augmentation via the module-level ``datagen``
        """
        # Shuffle once with a fixed seed so every epoch iterates the same order.
        self.target = pd.read_csv(target, header=None, sep='\t').sample(frac=1, random_state=2018)
        self.fea_size = len(self.target)
        self.impro = impro
        self.imgsize = imgsize

    def readOne(self, pos):
        """Read sample ``pos``: return (image, attribute vector, word vector, one-hot label)."""
        t = self.target.iloc[pos, :]
        im = cv2.imread("d:/ZSL_ImageGame/DatasetA_train_20180813/train/" + t[0])
        # cv2.imread signals failure by returning None, not by raising; fail
        # fast with a clear message instead of a cryptic error from cv2.resize.
        if im is None:
            raise FileNotFoundError("image not found or unreadable: " + str(t[0]))
        im = cv2.resize(im, dsize=(self.imgsize, self.imgsize))
        # Look up per-class features by label id (column 0 of each table).
        attr = train_attr[train_attr[0] == t[1]].values[0, 1:]
        word = train_words[train_words[0] == t[1]].values[0, 1:]
        label = np.zeros(shape=(len(labelkeys)), dtype=np.uint8)
        label[labelkeys.index(t[1])] = 1
        return im, attr, word, label

    def readFeather(self, pos, size):
        """Read up to ``size`` samples starting at ``pos`` and stack them into arrays.

        :return: (images, attributes, word vectors, one-hot labels) as ndarrays;
                 the last batch of an epoch may be smaller than ``size``.
        """
        ims = []
        attrs = []
        words = []
        labels = []
        for i in range(pos, min(pos + size, self.fea_size)):
            im, attr, word, label = self.readOne(i)
            ims.append(im)
            attrs.append(attr)
            words.append(word)
            labels.append(label)
        ims = np.array(ims)
        if self.impro:
            # Augment; datagen's rescale=1/255 normalizes pixels to [0, 1].
            ims = next(datagen.flow(ims, batch_size=len(ims), shuffle=False))
        else:
            # No augmentation: normalize manually to match datagen's rescale.
            ims = ims / 255.0
        ims = np.array(ims)
        attrs = np.array(attrs)
        words = np.array(words)
        labels = np.array(labels)
        return ims, attrs, words, labels

    def create_inputs(self, size=64):
        """Infinite generator of (images, one-hot labels) batches for Keras.

        Attribute and word vectors are computed per batch but not yielded here.
        """
        while True:
            for i in range(0, self.fea_size, size):
                ims, attrs, words, labels = self.readFeather(i, size)
                yield ims, labels

if __name__ == '__main__':
    # print(labelkeys)

    # Smoke test: pull one augmented batch and visualize one of its images.
    dp=DataPiple(target=r"D:\ZSL_ImageGame\DatasetA_train_20180813\train.txt",impro=True)
    s=dp.create_inputs(64)
    r,p=s.__next__()

    print(len(r),r.shape,p.shape)

    import matplotlib.pyplot as plt

    plt.imshow(r[2])
    plt.show()

    # print(labelkeys)

其他代码放在Github上。第一次参加此类比赛,无论设备(训练真的慢,……,训练得差不多我就终止程序了)还是水平都有欠缺,发现自己真是菜,还是学好基础再来。此记,用以纪念。

你可能感兴趣的:(python与人工睿智,机器学习入门与放弃)