import paddle
import jieba
import pandas as pd
import numpy as np
import os
from glob import glob
from multiprocessing import Process, Manager, freeze_support
from tqdm import tqdm


class SearchTensorDataSet:
    def __init__(self, data_set_name, data_voc_name):
        """
        :param data_set_name: directory of pandas-pickle vector data
        :param data_voc_name: pandas-pickle vocabulary table
        """
        if os.path.exists(data_set_name):
            self.read_data_set(data_set_name)
            self.current_data_set_name = data_set_name
        else:
            # fail fast if the vector-data directory is missing
            raise FileNotFoundError(data_set_name)
        if os.path.exists(data_voc_name):
            self.read_data_voc(data_voc_name)
            self.current_data_voc_name = data_voc_name
        else:
            # fail fast if the vocabulary table is missing
            raise FileNotFoundError(data_voc_name)
        self.voc_to_tensor = VocToTensor()

    def read_data_set(self, read_data_set_name):
        """
        :param read_data_set_name: directory of pandas-pickle vector data
        :return:
        """
        data_set_paths = glob(read_data_set_name + "/*")
        data_set_df_next_voc_id = []
        data_set_df_next_voc = []
        data_set_df_up_voc_id = []
        data_set_df_up_voc = []
        # files whose name contains "up" hold the "previous token" data set,
        # everything else is treated as the "next token" data set
        for one_paths in tqdm(data_set_paths):
            if "up" in one_paths:
                data_set = pd.read_pickle(one_paths)
                data_set_df_up_voc += data_set["voc"]
                data_set_df_up_voc_id.append(np.vstack(data_set["voc_id"]))
            else:
                data_set = pd.read_pickle(one_paths)
                data_set_df_next_voc += data_set["voc"]
                data_set_df_next_voc_id.append(np.vstack(data_set["voc_id"]))
        self.data_set = [
            {"voc": data_set_df_up_voc, "voc_id": data_set_df_up_voc_id},
            {"voc": data_set_df_next_voc, "voc_id": data_set_df_next_voc_id},
        ]

    def read_data_voc(self, read_data_voc_name):
        """
        :param read_data_voc_name: path to the voc vocabulary table
        :return:
        """
        self.voc_table = pd.read_pickle(read_data_voc_name)
        self.voc_table = pd.DataFrame({
            "voc": self.voc_table["voc_id"],
            "voc_id": list(range(len(self.voc_table["voc_id"]))),
        })

    def search_data(self, text, top):
        """
        Compute dot-product scores against the "next" database and keep `top` entries;
        then, for each kept entry, count how many "up"/"next" rows score below the
        highest kept score and print that count as a re-ranking signal.
        Requires the "up" and "next" databases to be loaded and kept separate.
        :param text: query text
        :param top: number of entries to keep
        :return:
        """
        voc_id = self.voc_to_tensor.text_to_voc(text, self.voc_table)
        voc_tensor = self.voc_to_tensor.voc_to_tensor(voc_id["voc_id"], em_dim=8)
        self.dot_top(self.data_set, voc_tensor, top)

    def dot_top(self, dat_set, vot, stop):
        """
        :param dat_set: [up_database, next_database]
        :param vot: query tensor
        :param stop: number of entries to keep
        :return:
        """
        vector_database_up, vector_database_next = dat_set
        input_vector = vot
        # score every "next" row against the embedding of the last query token
        score = np.dot(np.vstack(vector_database_next["voc_id"]), input_vector[-1])
        # note: argsort is ascending, so this keeps the `stop` lowest-scoring rows
        arg_index = np.argsort(score)[:stop]
        one_score = [score[i] for i in arg_index]
        one_text = [vector_database_next["voc"][i] for i in arg_index]
        top_nc = []
        for two in one_text:
            voc_id = self.voc_to_tensor.text_to_voc(two, self.voc_table)
            voc_tensor = self.voc_to_tensor.voc_to_tensor(voc_id["voc_id"], em_dim=8)
            up_score = np.dot(np.vstack(vector_database_up["voc_id"]), voc_tensor[-1])
            next_score = np.dot(np.vstack(vector_database_next["voc_id"]), voc_tensor[-1])
            nc = sum(up_score < max(one_score)) * sum(next_score < max(one_score))
            top_nc.append(nc)
            print(nc, two)
        print(top_nc)


class VocToTensor:
    def __init__(self):
        pass

    def voc_to_tensor(self, voc_id, em='sample_for_sin', em_dim=512):
        """
        :param voc_id: voc_id column (pandas Series)
        :param em: embedding function name
        :param em_dim: embedding (hidden) dimension
        :return:
        """
        if "sample_for_sin" in em:
            return self.local_for_sin_encoder_one(voc_id, em_dim)

    @staticmethod
    def local_for_sin_encoder_one(voc_id, em_dim, device="gpu"):
        """
        Sin-based encoder: each voc_id is expanded to `em_dim` evenly spaced values
        which are then passed through sin.
        :param voc_id: voc_id column (pandas Series)
        :param em_dim: embedding (hidden) dimension
        :return:
        """
        if device == "cpu":
            voc_id_sample = np.linspace(0, np.expand_dims(voc_id.values, 0), em_dim).transpose([2, 1, 0])
            add = 0
            for i in range(voc_id_sample.shape[1]):
                one = voc_id_sample[:, i:i + 1]
                add = np.sin(one + add)
            return add.squeeze(1).astype("float16")
        else:
            voc_id_sample = np.linspace(0, np.expand_dims(voc_id.values, 0), em_dim).transpose([2, 1, 0])
            voc_id_sample = paddle.to_tensor(voc_id_sample, dtype="float32")
            add = 0
            for i in range(voc_id_sample.shape[1]):
                one = voc_id_sample[:, i:i + 1]
                add = paddle.sin(one + add)
            return add.squeeze(1).numpy().astype("float16")

    @staticmethod
    def text_to_voc(search_text, voc_table):
        """
        :param search_text: text to look up
        :param voc_table: vocabulary table (voc / voc_id DataFrame)
        :return: voc_id DataFrame
        """
        one_data = "".join(search_text.split())
        one_data = list(jieba.cut(one_data))
        one_i = pd.DataFrame({"voc": one_data})
        voc_idf_one = voc_table[voc_table["voc"].isin(one_i["voc"])]
        # left merge keeps token order; out-of-vocabulary tokens get a NaN voc_id
        one_id = pd.merge(one_i, voc_idf_one, on="voc", how="left")
        return one_id


class GenVocTensorForDataSet:
    def __init__(self):
        self.voc_to_tensor = VocToTensor()

    @staticmethod
    def gen_data_voc(data_v, data_voc_list):
        """
        Collect the set of jieba tokens from a batch of txt files (worker process).
        :param data_v: list of txt file paths
        :param data_voc_list: shared list that receives the token set
        :return:
        """
        set_list = set()
        for one_path in tqdm(data_v):
            with open(one_path, "r", encoding="utf-8") as f:
                dataa = f.read()
            dataa = "".join(dataa.split())
            set_list |= set(jieba.cut(dataa))
        data_voc_list.append(set_list)

    def gen_voc_run(self, voc_name, voc_root_dir, works_num=16):
        """
        :param voc_name: name (or path prefix, e.g. /ds/ss/voc_name) of the voc pandas-pickle table
        :param voc_root_dir: directory of txt data files
        :param works_num: number of worker processes
        :return: writes the voc pandas-pickle table to disk
        """
        paths_list_g = glob(pathname=voc_root_dir + "*")
        res_data = Manager().list()
        np.random.shuffle(paths_list_g)
        # assumes there are at least `works_num` files, otherwise the step is 0
        works_num_steps = len(paths_list_g) // works_num
        p_list = []
        for i in range(0, len(paths_list_g), works_num_steps):
            j = i + works_num_steps
            one_works = paths_list_g[i:j]
            p = Process(target=self.gen_data_voc, args=(one_works, res_data))
            p.start()
            p_list.append(p)
        for p in p_list:
            p.join()
        voc_set = set()
        while len(res_data) > 0:
            voc_set |= res_data.pop()
        voc_id = ["<<<<<<<<pad>>>>>>>>>>>"] + sorted(voc_set)
        pd.DataFrame({"voc_id": voc_id}).to_pickle("{}.pandas_pickle_voc_id".format(voc_name))

    @staticmethod
    def save_func_run(one_id, func="one"):
        """
        Shift voc and voc_id against each other by one position:
        "one" pairs each id with the following token, "two" with the preceding one.
        :param one_id: voc / voc_id DataFrame
        :param func: "one" or "two"
        :return:
        """
        if func == "one":
            return pd.DataFrame({"voc_id": one_id[:-1]["voc_id"].values, "voc": one_id[1:]["voc"].values})
        elif func == "two":
            return pd.DataFrame({"voc_id": one_id[1:]["voc_id"].values, "voc": one_id[:-1]["voc"].values})

    def gen_data_tensor(self, data_v, data_path, voc_i_list, process_count, total_c, data_prefix, em='sample_for_sin',
                        em_dim=512, data_set_class="one"):
        """
        :param data_v: list of txt file paths
        :param data_path: output directory
        :param voc_i_list: shared list holding the vocabulary DataFrame
        :param process_count: worker index (used in the output file name)
        :param total_c: shared counter dict
        :param data_prefix: prefix for the output file names
        :param em: embedding function name
        :param em_dim: embedding (hidden) dimension
        :param data_set_class: dataset type ("one" or "two")
        :return:
        """
        voc_idf = voc_i_list[0]
        voc_id_list = []
        voc_list = []
        for one_v in tqdm(data_v):
            with open(one_v, "r", encoding="utf-8") as f:
                one_data = f.read()
            total_c["count"] += 1
            one_id = self.voc_to_tensor.text_to_voc(one_data, voc_idf)
            one_id = self.save_func_run(one_id, data_set_class)
            voc_id = self.voc_to_tensor.voc_to_tensor(one_id["voc_id"], em=em, em_dim=em_dim)
            voc_list += one_id["voc"].values.tolist()
            voc_id_list.append(voc_id)
            # flush to disk every 5000 files
            if len(voc_id_list) % 5000 == 0:
                pd.to_pickle({"voc": voc_list, "voc_id": voc_id_list},
                             data_path + "/{}{}{}.pandas_pickle_data_set".format(process_count, data_prefix,
                                                                                 total_c["count"]))
                voc_id_list = []
                voc_list = []
        pd.to_pickle({"voc": voc_list, "voc_id": voc_id_list},
                     data_path + "/{}{}{}.pandas_pickle_data_set".format(process_count, data_prefix,
                                                                         total_c["count"]))

    def gen_voc_data_to_tensor_set(self, paths_list_dir, out_dir, voc_id_name, data_prefix, works_num=8,
                                   data_set_class="one", em_hidden_dim=128):
        """
        :param paths_list_dir: directory of txt files
        :param out_dir: output directory for pandas-pickle files
        :param voc_id_name: path to the voc_id table
        :param data_prefix: prefix for the output file names
        :param works_num: number of worker processes
        :param data_set_class: dataset type ("one" or "two")
        :param em_hidden_dim: embedding (hidden) dimension
        :return:
        """
        paths_list_pr = glob(pathname=paths_list_dir + "*")
        voc_id = pd.read_pickle(voc_id_name)["voc_id"]
        voc_df = Manager().list()
        voc_idf = pd.DataFrame({"voc": voc_id})
        voc_idf["voc_id"] = voc_idf.index.values.copy()
        voc_df.append(voc_idf)
        total_count = Manager().dict()
        total_count["count"] = 0
        p_list = []
        # split the file list evenly across `works_num` worker processes
        for i in range(0, len(paths_list_pr), len(paths_list_pr) // works_num):
            j = len(paths_list_pr) // works_num + i
            p = Process(target=self.gen_data_tensor,
                        args=(paths_list_pr[i:j], out_dir, voc_df, i, total_count, data_prefix, 'sample_for_sin',
                              em_hidden_dim, data_set_class))
            p.start()
            p_list.append(p)
        for p in p_list:
            p.join()


if __name__ == '__main__':
    freeze_support()
    txt_p = "E:/just_and_sum/data_sets/"
    gvt_fds = GenVocTensorForDataSet()
    stds = SearchTensorDataSet("E:/just_and_sum/data_set_p", "voc_id_cut")
    stds.search_data("或者说直接使用进制拆分策略", top=10)
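
# A minimal end-to-end sketch of the assumed pipeline (commented out; the paths,
# the "next" prefix and em_hidden_dim=8 are illustrative assumptions, not part of
# the original script; search_data() embeds the query with em_dim=8, so the data
# set would have to be generated with the same dimension):
#
#   gvt_fds.gen_voc_run("voc_id_cut", txt_p, works_num=16)
#   gvt_fds.gen_voc_data_to_tensor_set(txt_p, "E:/just_and_sum/data_set_p", "voc_id_cut.pandas_pickle_voc_id",
#                                      data_prefix="next", works_num=8, data_set_class="one", em_hidden_dim=8)
#   stds = SearchTensorDataSet("E:/just_and_sum/data_set_p", "voc_id_cut.pandas_pickle_voc_id")
#   stds.search_data("或者说直接使用进制拆分策略", top=10)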