2019-08-08 Work Progress

-- ODPS setting carried over from these runs: mapper input split size in MB; 1 forces maximum map-side parallelism
set odps.sql.mapper.split.size=1;
Yesterday's work:

  1. Reprocessed the dataset, giving extra weight to important keywords such as those carrying season or gender information, then retrained and tested the DSSM model on the new data. This helps, but the associated items still include bad cases like '反季冬装女:春秋装女2018新款chic碎花雪纺上衣长袖洋气小衫超仙甜美时尚花衫' (an off-season winter-clothing query paired with a spring/autumn top title); the share of such bad cases is noticeably lower, though.
  2. Inference results: the validation set contains 200k videos; after leaf-category filtering, roughly 140k of them can be matched.
  3. Problem encountered: alinlp's segmentation output is inaccurate in many places. For example, '网红包' (intended as 网红 + 包, an "influencer bag") gets split into '网' ("net") and '红包' ("red envelope"); cases like this interfere with the model quite badly. A quick way to reproduce such cases is sketched below.
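A minimal reproduction sketch, running the same alinlp_segment UDF used later in the pipeline over a small hand-collected phrase table; hs_tmp_badcase and its phrase column are hypothetical:

-- hypothetical scratch table hs_tmp_badcase(phrase string) holding known tricky phrases such as '网红包'
select phrase, search_kg:alinlp_segment(phrase, 'CONTENT_SEARCH', ' ') as seg_result from hs_tmp_badcase;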

Today's plan:
Improve the DSSM model's inference results.

  1. Test the e-commerce named-entity recognition (NER) UDF

hs_dssm_dic_title_inf_1
select title, search_kg:alinlp_ner_ecom(title,' ','|','full') from hs_dssm_dic_title_inf_1 limit 500;
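To judge whether the NER output is usable, one option is to persist it next to the plain segmentation for side-by-side comparison; a sketch, where hs_tmp_ner_check is a hypothetical scratch table:

-- hypothetical: store NER output and plain segmentation together for manual comparison
create table hs_tmp_ner_check as
select title,
       search_kg:alinlp_ner_ecom(title, ' ', '|', 'full') as ner_result,
       search_kg:alinlp_segment(hs_return_clean(title), 'CONTENT_SEARCH', ' ') as seg_result
from hs_dssm_dic_title_inf_1
limit 500;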

  2. Remove stop words

select title, hs_return_clean(title) as clean_title from graph_embedding.hs_tmp_149 limit 100;
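One sanity check worth running afterwards (a sketch, assuming hs_return_clean returns an empty string when every token is a stop word):

-- count titles that become empty after stop-word removal
select count(*) as empty_after_clean
from graph_embedding.hs_tmp_149
where length(trim(hs_return_clean(title))) = 0;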


train_query : hs_dssm_dic_query_1 - | id | words_mainse_ids | se_keyword |
train_title : hs_dssm_dic_title_3 - | id | words_mainse_ids | title |


inference_query : hs_dssm_dic_query_inf_1 - | id | words_mainse_ids | query |
inference_title : hs_dssm_dic_title_inf_1 - | id | words_mainse_ids | title |



  3. Build the finetuning training set

Positive sample generation

drop table hs_tmp_188;
yes
create table hs_tmp_188 as
select query, title, video_id, item_id,
       search_kg:alinlp_dssm_text_similarity(
           search_kg:alinlp_segment(hs_return_clean(query), 'CONTENT_SEARCH', ' '),
           search_kg:alinlp_segment(hs_return_clean(title), 'CONTENT_SEARCH', ' '),
           ' ') as score
from hs_query_ugc_co_video_final_result_info_3;
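Before committing to the 0.5 cutoff used below, it can help to eyeball the score distribution; a minimal sketch, bucketing by one decimal place:

-- bucket similarity scores to sanity-check the 0.5 threshold
select round(score, 1) as score_bucket, count(*) as cnt
from hs_tmp_188
group by round(score, 1)
order by score_bucket
limit 20;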

drop table hs_tmp_189;
yes
create table hs_tmp_189 as select query, title, 1 as label from hs_tmp_188 where score > 0.5;

Negative sample sampling & shuffling

-- DISTRIBUTE BY random() spreads rows across reducers at random, i.e. a global shuffle of the table
insert overwrite table graph_embedding.hs_tmp_169 select * from graph_embedding.hs_tmp_169 DISTRIBUTE by random();

drop table graph_embedding.hs_tmp_190;
yes
create table graph_embedding.hs_tmp_190 as
select distinct c.query, c.title, c.label, c.query_id, d.id as video_id
from (
    select a.*, b.id as query_id
    from (select * from graph_embedding.hs_tmp_189) a
    left join (select * from graph_embedding.hs_dssm_dic_query_inf_1) b
    on a.query == b.query
) c
left join (select * from graph_embedding.hs_dssm_dic_title_inf_1) d
on c.title == d.title;
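Since both joins above are left joins, positives whose query or title failed to match an id come through with NULLs; a quick count (sketch):

-- count positives that did not resolve to a query_id or video_id
select count(*) as unmatched
from graph_embedding.hs_tmp_190
where query_id is null or video_id is null;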

insert into table graph_embedding.hs_tmp_190 select query, title, 0 as label, query_id, video_id from graph_embedding.hs_tmp_169 where score < 0.5 limit 2100000;
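After both inserts, the positive/negative balance can be checked directly (sketch):

-- verify the label balance of the combined sample table
select label, count(*) as cnt
from graph_embedding.hs_tmp_190
group by label;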

-- note: hs_tmp_191 is only created in the next step; this shuffle is run once that table exists
insert overwrite table graph_embedding.hs_tmp_191 select distinct * from graph_embedding.hs_tmp_191 DISTRIBUTE by random();

hs_dssm_dic_query_inf_1 : | id | words_mainse_ids | query |
hs_tmp_184 : | id | emb |
hs_dssm_dic_title_inf_1 : | id | words_mainse_ids | title |
hs_tmp_185 : | id | emb |
hs_train_data_dssm_v2_4 : | se_keyword_mainse_ws | title_mainse_ws | label |
hs_tmp_190 : | query | title | label | query_id | video_id |

Training set generation

create table graph_embedding.hs_tmp_191 as
select distinct c.se_keyword_mainse_ws, d.emb as title_mainse_ws, c.label
from (
    select a.*, b.emb as se_keyword_mainse_ws
    from (select * from graph_embedding.hs_tmp_190) a
    left join (select * from graph_embedding.hs_tmp_184) b
    on a.query_id == b.id
) c
left join (select * from graph_embedding.hs_tmp_185) d
on c.video_id == d.id;
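The two left joins here can likewise leave NULL embeddings; one way to drop them before training (a sketch, reusing the self-overwrite pattern used above):

-- remove rows whose embedding lookup failed
insert overwrite table graph_embedding.hs_tmp_191
select * from graph_embedding.hs_tmp_191
where se_keyword_mainse_ws is not null and title_mainse_ws is not null;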

-- hs_tmp_191 was shuffled above, so this LIMIT takes a near-random 100k slice as the test split
insert overwrite table hs_tmp_193 select * from hs_tmp_191 limit 100000;

Validation set

create table hs_tmp_192 as select * from hs_tmp_187 limit 1000000;

finetune

pai -name tensorflow140 -Dscript="file:///home/hengsong/origin_deep_cluster_odps_8.tar.gz" -DentryFile="finetune_inference_v7.py" -Dcluster='{"worker":{"count":10, "cpu":200, "memory":4000}, "ps":{"count":10, "cpu":200, "memory":5000}}' -Dtables="odps://graph_embedding/tables/hs_tmp_191,odps://graph_embedding/tables/hs_tmp_193,odps://graph_embedding/tables/hs_tmp_192" -Doutputs="odps://graph_embedding/tables/hs_dssm_result_4" -DcheckpointDir="oss://bucket-automl/hengsong/?role_arn=acs:ram::1293303983251548:role/graph2018&host=cn-hangzhou.oss-internal.aliyun-inc.com" -DuserDefinedParameters="--learning_rate=3e-4 --batch_size=1024 --is_save_model=True --attention_type=1 --num_epochs=1 --ckpt=hs_ugc_video_4e_1.ckpt" -DuseSparseClusterSchema=True;
