
Improve the code; revise the keyword aggregation analysis model

ChenGanBin 3 years ago
parent
commit
2b92e21d2a
9 changed files with 1114 additions and 193 deletions
  1. REMEAD.md (+21 −0)
  2. agg_word.py (+256 −122)
  3. cal.py (+87 −45)
  4. config.py (+20 −1)
  5. key_reverse.py (+354 −18)
  6. key_reverse_index.py (+1 −6)
  7. key_reverse_statistics.py (+218 −0)
  8. logging.conf (+1 −1)
  9. statistics.py (+156 −0)

+ 21 - 0
REMEAD.md

@@ -18,3 +18,24 @@
 7. Generate the inverted file from the keyword file (key_reverse.py)
 
 8. Generate the final aggregation-analysis file from the keyword, index, and inverted files (agg_word.py)
+
+
+# Process notes
+1. A plain run uses only one process
+2. A plain run with ProcessPoolExecutor uses only 4 processes
+3. Using a Manager adds one extra process
+
+# File-position notes
+1. with open and mmap report the same position for each line
+
+# File read-speed comparison
+1. From a single-process view: for plain sequential reads mmap is much faster; even when each line is decoded it is still slightly faster than with open
+2. From a multi-process view: for plain sequential reads mmap is much faster; even when each line is decoded it is still slightly faster than with open
+3. When sequentially reading the same file, for both with open and mmap, one process reading it once beats several processes each reading it once, and the gap is wider for with open
+4. For segmented reads across multiple processes, mmap is much faster than with open; with open is very slow
+* Summary (a minimal benchmark sketch follows this diff):
+    * mmap (single) >> mmap (single, decoding each line) > with open (single)
+    * mmap (multi) >> mmap (multi, decoding each line) > with open (multi)
+    * (Of little practical use, since multiple processes normally do different work) Sequentially reading the same file, both with open and mmap are faster with one process reading once than with several processes each reading once, and the gap is wider for with open
+    * For segmented multi-process reads, mmap is much faster than with open; with open is very slow
+
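
A minimal single-process benchmark sketch for the read-speed notes above. This is an illustration, not the script behind the numbers; PATH is a placeholder for any large text file:

```python
# Hedged benchmark sketch: sequential read via open vs. mmap, optionally
# decoding each line to str. PATH is a placeholder, not a repository file.
import mmap
import time

PATH = "./data/tmp/key.csv"

def read_with_open(path):
    # Buffered text-mode iteration
    with open(path, "r", encoding="UTF-8") as f:
        for _ in f:
            pass

def read_with_mmap(path, decode=False):
    # readline() over a read-only memory map
    with open(path, "rb") as f, \
        mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as m:
        while True:
            line = m.readline()
            if not line:
                break
            if decode:
                line.decode("UTF-8")

for name, fn in [("with open", read_with_open),
                 ("mmap", read_with_mmap),
                 ("mmap+decode", lambda p: read_with_mmap(p, decode=True))]:
    start = time.time()
    fn(PATH)
    print("%s: %.3fs" % (name, time.time() - start))
```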

+ 256 - 122
agg_word.py

@@ -1,155 +1,289 @@
 # -*- coding:utf-8 -*-
 
+from concurrent.futures import ProcessPoolExecutor, as_completed
+from functools import reduce
+from itertools import combinations
+import math
 import mmap
+import os
+from time import sleep, time
+from cal import cal_cos_sim
+
 import config
 import tools
 import stop_word
 import re
-import ast
-import cal
 import logging
-import ast
-from bitmap import BitMap
 
-TITLE = "聚合文件"
+# Open issues
+# Handle the IO-heavy parts with threads
+# Main-thread utilization is very low
+# Optimize the code for speed (current speed: about 100 keywords per minute)
+
+# Resolved
+# Output format was incorrect
+# Analysis results were not written to the output
+# Skip keywords whose stem count equals 1; they are not analyzed
+# Reduce repeated loading -> solved: load only when running in a worker process
+
+tools.init_log()    
 
-def re_extract_key(pattern, line):
+def intersect(x, y):
     """
-    Extract keyword info with a regex
+    Compute the intersection of two sets
     """
-    m = pattern.match(line)
-    # keyword row number
-    index = m.group(1)
-    # the keyword
-    key = m.group(2)
-    # the keyword's stems
-    word_root = m.group(3)
-    # convert index to int for convenience
-    return int(index), key, word_root
-
-def main():
-    # Initialize the logging configuration
-    tools.init_log()
-    tools.log_start_msg(TITLE)
+    return x & y
 
+if __name__ != "__main__":
     # Stop words
-    logging.info("Loading stop words")
-    stop_word_cache = stop_word.load_stop_word()
-    # Keyword index
-    logging.info("Loading keyword index")
-    key_index_cache = tools.load_obj(config.KEY_INDEX_CACHE)
-    # Inverted index
-    logging.info("Loading inverted index")
-    key_reverse_index_cache = tools.load_obj(config.KEY_REVERSE_INDEX_CACHE)
-    # Regex for extracting fields
-    s = r"(\d+),([^,]*),(.*)"
-    pattern = re.compile(s, re.I)
-
-    with open(config.KEY_FILE, "r", encoding=config.ENCODING_CHARSET) as fkey, \
-        open(config.KEY_REVERSE_FILE, "r", encoding=config.ENCODING_CHARSET) as freverse, \
-        mmap.mmap(fkey.fileno(), 0, access=mmap.ACCESS_READ) as f_key_mmap, \
-        mmap.mmap(freverse.fileno(), 0, access=mmap.ACCESS_READ) as f_reverse_mmap:
-
-        # Compute the total keyword count
-
-        # TODO this should come from the statistics info instead
-        total_count = 14500029
+    stop_word_index = stop_word.load_stop_word()
+
+    # KEY-table index
+    key_index = tools.load_obj(config.KEY_INDEX_CACHE)
+
+    # Inverted-table index
+    reverse_index = tools.load_obj(config.KEY_REVERSE_INDEX_CACHE)
+
+    # Aggregation threshold
+    agg_threshold = 0.8
+
+    # Regex extractors
+    # inverted table: keyword row indexes
+    index_re = r"'(\d+)'"
+    index_pattern = re.compile(index_re, re.I)
+    # keyword
+    key_re = r"[^,]*,(.*),\["
+    key_pattern = re.compile(key_re, re.I)
+    # KEY table: stems
+    stem_re = r"'([^,]*)'"
+    stem_pattern = re.compile(stem_re, re.I)
+
+def sub_process(start_pos, end_pos):
+    """
+    Worker process
+    """
+    pid = os.getpid()
+
+    logging.debug("Worker-%d starting task, start pos: %d, end pos: %d" % (pid, start_pos, end_pos))
+
+    # Aggregation results
+    agg_result = []
+
+    # Start time
+    start_time = time()
+
+    with open(config.KEY_FILE, "r", encoding=config.ENCODING_CHARSET) as f_key, \
+        mmap.mmap(f_key.fileno(), 0, access=mmap.ACCESS_READ) as f_key_mmap, \
+        open(config.KEY_REVERSE_FILE, "r", encoding=config.ENCODING_CHARSET) as f_reverse, \
+        mmap.mmap(f_reverse.fileno(), 0, access=mmap.ACCESS_READ) as f_reverse_mmap :
         
-        # Build the bitmap
-        bm = BitMap(total_count)
+        # Convert the keyword indexes into file positions
+        # (the final chunk carries end_pos=None, meaning "to end of file")
+        lower_pos = key_index[start_pos]
+        upper_pos = key_index[end_pos] if end_pos is not None else f_key_mmap.size()
 
-        # Total size of the file to process
-        total_num = f_key_mmap.size()
+        # Seek to the start position
+        f_key_mmap.seek(lower_pos)
+
+        # Read the main keywords
+        a_keys = {}
         while True:
-            # Current position
+            # Check whether the current position is out of range
             cur_pos = f_key_mmap.tell()
+            if cur_pos >= upper_pos:
+                break
+            
+            line = f_key_mmap.readline().decode("UTF-8")
+            # Extract the keyword and stems
+            key_m = key_pattern.match(line)
+            a_key = key_m.group(1)
+            a_stem = []
+            # Filter out stop words
+            tmp_stem = stem_pattern.findall(line)
+            for stem in tmp_stem:
+                if stem in stop_word_index:
+                    continue
+                a_stem.append(stem)
+            # Keep it; a keyword whose stem count equals 1 is not worth comparing
+            if len(a_stem) > 1:
+                a_keys[a_key] = a_stem
+
+        # Merge the stems
+        all_stem = set()
+        for a_stem in a_keys.values():
+            for stem in a_stem:
+                all_stem.add(stem)
 
-            # Progress tip
-            tools.tip_in_size(total_num, cur_pos)
+        # Fetch the inverted-index entries
+        reverse_dict = {}
+        for stem in all_stem:
+            # Read the inverted table
+            f_reverse_mmap.seek(reverse_index[stem])
+            reverse_line = f_reverse_mmap.readline().decode("UTF-8")
+            # Extract the keyword row indexes
+            b_indexs = index_pattern.findall(reverse_line)
+            reverse_dict[stem] = set(b_indexs)
+
+        # Compute similarity
+        for a_key, a_stem in a_keys.items():
+            # Build stem combinations
+            logging.debug("Worker-%d main keyword: %s, building stem combinations" % (pid, a_key))
+            tmp_stem = []
+            for stem in a_stem:
+                tmp_stem.append(stem)
+            num = math.ceil(len(tmp_stem) * 0.7)
+            stem_combs = list(combinations(tmp_stem, num))
+            logging.debug("Worker-%d main keyword: %s, stem combinations done" % (pid, a_key))
+
+            logging.debug("Worker-%d main keyword: %s, fetching keywords covered by its stems" % (pid, a_key))
+            # Intersect the keyword sets covered by each stem combination
+            b_indexs = set()
+            for stem_comb in stem_combs:
+                indexs = [reverse_dict[a_stem] for a_stem in stem_comb]
+                for b_index in reduce(intersect, indexs):
+                    b_indexs.add(b_index)
+            logging.debug("Worker-%d main keyword: %s, candidate row count: %d" % (pid, a_key, len(b_indexs)))
+            # Fetch the candidate keyword rows
+            b_keys = []
+            for b_index in b_indexs:
+                # Read the keyword row
+                f_key_mmap.seek(key_index[int(b_index)])
+                line = f_key_mmap.readline().decode("UTF-8")
+                # Extract the keyword and stems
+                key_m = key_pattern.match(line)
+                b_key = key_m.group(1)
+                b_stem = stem_pattern.findall(line)
+                b_keys.append((b_key, b_stem))
+            logging.debug("Worker-%d main keyword: %s, fetched candidates, count: %d" % (pid, a_key, len(b_keys)))
+
+            logging.debug("子进程-%d 主关键词:%s 开始计算相关性" % (pid, a_key))
+            # 结果容器
+            correlation_key = []
+            correlation_key.append(a_key)
+            # 计算相关性
+            if b_keys:
+                for b_key, b_stem in b_keys:
+                    try:
+                        val = cal_cos_sim(a_key, a_stem, b_key, b_stem)
+                        if val >= agg_threshold:
+                            correlation_key.append(b_key)
+                    except Exception as e:
+                        logging.error("主关键词:%s 发生异常,涉及的副关键词信息-关键词:%s,分词:%s" % (a_key, b_key, b_stem), e)
+
+                # 有内容则进行保存
+                if len(correlation_key) > 1:
+                    agg_result.append(correlation_key)
+            logging.debug("子进程-%d 主关键词:%s 计算相关性结束,相关的关键词数据量:%d" % (pid, a_key, (len(correlation_key)-1)))
 
-            # Fetch the keyword to process
-            line = f_key_mmap.readline().decode(config.ENCODING_CHARSET)

-            # Stop if there is no more content
-            if not line:
-                logging.info("Hit an empty line")
-                break
+    logging.debug("Worker-%d task finished, elapsed: %f" % (pid, (time() - start_time)))
 
-            # Extract fields
-            index, key, word_root = re_extract_key(pattern, line)
+    return {
+        "agg_result": agg_result,
+        "start_pos": start_pos
+    }
             
-            # Bitmap check: skip if already processed
-            if bm.test(index):
-                logging.debug("Main keyword: %s already processed, skipping" % key)
-                continue
+def main_process():
+    """
+    Main process
+    """
 
-            # Passed the bitmap check; set the corresponding bit
-            bm.set(index)
+    # Number of worker processes
+    process_num = 4
 
-            # Container for the aggregation results
-            agg_cache = []
+    # Total length of the KEY table
+    total_task = 14500028
 
-            # Record the main keyword
-            agg_cache.append(key)
+    # Keywords per task
+    per_task_num = 100
 
-            # Convert to a real list object
-            logging.debug("Current main keyword: %s, stem count: %d" % (key, len(word_root)))
-            for item in ast.literal_eval(word_root):
-                # Skip stop words
-                if item in stop_word_cache:
-                    continue
-
-                # Use the inverted index to get the related keyword row numbers
-                other_key_pos = key_reverse_index_cache.get(item)
-                f_reverse_mmap.seek(other_key_pos)
-                other_key_line = f_reverse_mmap.readline().decode(config.ENCODING_CHARSET)
-                # Slice out the row-number portion
-                other_index = other_key_line.index(",")
-                other_key_indexs = other_key_line[other_index+1:]
-                # Convert to a real list object
-                other_key_indexs = ast.literal_eval(other_key_indexs)
-                if not other_key_indexs:
-                    continue
+    # Interval between progress saves (seconds)
+    save_process_interval = 300
+
+    # Split into subtasks: progress record and task list
+    process_record, tasks = avg_split_task(total_task, per_task_num)
+
+    with ProcessPoolExecutor(max_workers=process_num) as process_pool, \
+        open(config.AGG_FILE, "a", encoding=config.ENCODING_CHARSET) as f:
 
-                logging.debug("词根:%s, 涉及的其它关键词数量:%d" % (item, len(other_key_indexs)))
-                for other_key_index in other_key_indexs:
-                    # bitmap校验,如果已经处理过则跳过
-                    if bm.test(int(other_key_index)):
-                        logging.debug("待比较关键词:%s 已处理,跳过" % other_key_index)
-                        continue
-
-                    # Get the keyword position from the keyword index
-                    pos = key_index_cache[other_key_index]
-                    # Fetch the keyword to compare
-                    f_key_mmap.seek(pos)
-                    other_key_line = f_key_mmap.readline().decode(config.ENCODING_CHARSET)
-                    other_key_index, other_key, other_word_root = re_extract_key(pattern, other_key_line)
-
-                    # Compute similarity
-                    val = cal.cal_cos(key, other_key, word_root, other_word_root)
-                    if val >= 0.8:
-                        # Mark the keyword as processed in the bitmap
-                        bm.set(other_key_index)
-
-                        # Record the similar keyword
-                        agg_cache.append(other_key)
+        save_start_time = time()
+
+        logging.info("主进程:提交任务到子进程")
+        process_futures = [process_pool.submit(sub_process, task[0], task[1]) for task in tasks]
         
-            # Save locally
-            with open(config.AGG_ANALYSE_FILE % key, "w", encoding=config.ENCODING_CHARSET) as f:
-                for item in agg_cache:
-                    f.write(item)
-                    f.write("\n")
-
-            # Stop once every keyword has been processed
-            if bm.all():
-                logging.info("Bitmap is all 1s")
-                break
-            else:
-                count = bm.count()
-                logging.info("Processed: %d / %d, remaining: %d / %d" % (count, total_count, (total_count - count), total_count))
+        for p_future in as_completed(process_futures):
+            logging.debug("Main process: a worker returned partial data")
+            result = p_future.result()
+
+            # Record progress (process_record is indexed by task number)
+            cur_pos = result["start_pos"]
+            process_record[cur_pos // per_task_num] = 1
+
+            # Save analysis results
+            if result["agg_result"]:
+                logging.debug("Main process: got valid data, writing it out")
+                for correlation_key in result["agg_result"]:
+                    f.write("\n######BEGIN######\n")
+                    for key in correlation_key:
+                        f.write("%s\n" % key)
+
+            # Persist progress periodically
+            if (time() - save_start_time) > save_process_interval:
+                logging.debug("Saving progress")
+                # Reset the timer
+                save_start_time = time()
+                tools.save_obj(config.ANALYSE_PROCESS_CACHE, process_record)
+
+            tools.tip(total_task, cur_pos)
+
+def avg_split_task(total:int, split_interval:int):
+    """
+    Split the work into even chunks
+    """
+    # Task list
+    tasks = None
+    # Progress record
+    process_record = None
 
-    tools.log_end_msg(TITLE)
+    # Number of chunks
+    split_num = math.ceil(total / split_interval)
+
+    # Even split
+    tmp_lists = []
+    for i in range(split_num):
+        # Compute the chunk boundaries
+        start_pos = i * split_interval
+        end_pos = i * split_interval + split_interval
+        # The tail chunk gets end_pos=None, meaning "to end of file"
+        if end_pos >= total:
+            end_pos = None
+        tmp_lists.append([start_pos, end_pos])
+
+    # Load the progress cache
+    if os.path.exists(config.ANALYSE_PROCESS_CACHE):
+        logging.debug("Found an analysis progress cache")
+        process_record = tools.load_obj(config.ANALYSE_PROCESS_CACHE)
+
+    # Rebuild the task list, skipping chunks already done
+    if process_record:
+        tasks = []
+        for task in tmp_lists:
+            pos = task[0] // split_interval
+            if not process_record[pos]:
+                tasks.append(task)
+    else:
+        tasks = tmp_lists
+        process_record = [0 for i in range(len(tmp_lists))]
+
+    return process_record, tasks
 
 if __name__ == "__main__":
-    main()
+
+    TITLE = "Aggregation (multi-process version, fast_14.py)"
+    tools.log_start_msg(TITLE)
+
+    main_process()
+
+    tools.log_end_msg(TITLE)
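
The resume logic above reduces to fixed-size chunks plus a progress list indexed by chunk number. A standalone sketch of that pattern, mirroring avg_split_task and the `process_record[cur_pos // per_task_num]` bookkeeping (toy numbers, not the repository's code):

```python
import math

def avg_split_task(total, split_interval, process_record=None):
    # Split `total` rows into [start, end) chunks of split_interval rows,
    # then drop chunks already marked done in process_record.
    split_num = math.ceil(total / split_interval)
    chunks = [(i * split_interval, min((i + 1) * split_interval, total))
              for i in range(split_num)]
    if process_record is None:
        process_record = [0] * split_num
    tasks = [c for c in chunks if not process_record[c[0] // split_interval]]
    return process_record, tasks

record, tasks = avg_split_task(10, 3)
print(tasks)   # [(0, 3), (3, 6), (6, 9), (9, 10)]
record[0] = 1  # pretend the first chunk finished and was checkpointed
_, rest = avg_split_task(10, 3, record)
print(rest)    # [(3, 6), (6, 9), (9, 10)]
```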

+ 87 - 45
cal.py

@@ -2,73 +2,115 @@
 
 import config
 import re
-import math
+import numpy as np
 
-def merge_word_root(word_root_a, word_root_b):
+def merge_stem(a_stem:list, b_stem:list):
     """
     Merge stems
     """
-    return list(set(word_root_a).union(set(word_root_b)))
+    return list(set(a_stem).union(set(b_stem)))
 
-def gen_word_vector(word_a, word_b, word_root_union):
+def gen_word_vec(a_word:str, b_word:str, stem:list):
     """
     Generate word vectors
     """
-    a_word_vector, b_word_vector = [], []
-    for word in word_root_union:
-        if word in config.RE_SPECIAL_SIMBOL :
+    a_vec, b_vec = [], []
+    for word in stem:
+        # if re.findall(word, config.RE_SPECIAL_SIMBOL):
+        if word in config.RE_SPECIAL_SIMBOL:
             word = "\\" + word
-        a_word_vector.append(len(re.findall(word, word_a)))
-        b_word_vector.append(len(re.findall(word, word_b)))
-    return a_word_vector, b_word_vector
+        if word == "c++":
+            word = "c\\+\\+"
+        a_vec.append(len(re.findall(word, a_word)))
+        b_vec.append(len(re.findall(word, b_word)))
+    return a_vec, b_vec
 
-def vector_multi(a_vector, b_vector):
+def cos_sim(vec1, vec2):
     """
-    Element-wise multiply two vectors and sum
+    Compute cosine similarity
     """
-    return sum(map(lambda a_b: a_b[0]*a_b[1], zip(a_vector, b_vector)))
+    return vec1.dot(vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))
 
-def vector_square_sum(word_vector):
-    """
-    Sum of squares of a vector
-    """
-    sum = 0
-    for i in word_vector:
-        sum = sum + i * i
-    return sum
 
-def vector_cos(v_multi, a_v_ss, b_v_ss):
+def cal_cos_sim(a_word:str, a_stem:list, b_word:str, b_stem:list):
     """
-    Compute the cosine
+    Compute the cosine similarity of two keywords
     """
-    return v_multi / (math.sqrt(a_v_ss) * math.sqrt(b_v_ss))
+    union_stem = merge_stem(a_stem, b_stem)
+    a_vec, b_vec = gen_word_vec(a_word, b_word, union_stem)
+    val = cos_sim(np.array(a_vec), np.array(b_vec))
+    return val
 
 
-def cal_cos(a_word, b_word, a_word_root, b_word_root):
-    """
-    Compute the cosine value of two long-tail keywords
-    """
+if __name__ == "__main__":
 
-    # Merge stems to build the word vectors
-    union_word_root = merge_word_root(a_word_root, b_word_root)
+    # a_word= "QQ邮箱格式怎么写"
+    # b_word= "QQ邮箱格式如何写"
+    # a_word_root = ['QQ', '邮箱', '格式', '怎么', '写']
+    # b_word_root = ['QQ', '邮箱', '格式', '如何', '写']
+    # print(cal_cos_sim(a_word, a_word_root, b_word, b_word_root))
 
-    # Generate word vectors
-    a_vector, b_vector = gen_word_vector(a_word, b_word, union_word_root)
+    # Merge stems to build the word vectors
+    # union_word_root = merge_stem(a_word_root, b_word_root)
+    # print(union_word_root)
 
-    # Multiply the word vectors and sum
-    ab_vector_multi = vector_multi(a_vector, b_vector)
+    # # Generate word vectors
+    # a_vec, b_vec = gen_word_vec(a_word, b_word, union_word_root)
+    # print(a_vec)
+    # print(b_vec)
+    # # a_vec = [1,1,1,1,0,1]
+    # # b_vec = [1,1,1,0,1,1]
+    # print(cos_sim(np.array(a_vec), np.array(b_vec)))
 
-    # Sum of squares
-    a_vector_squar_sum = vector_square_sum(a_vector)
-    b_vector_squar_sum = vector_square_sum(b_vector)
+    # s = "0,腋下长了一个小疙瘩是什么东西,['腋下', '长', '了', '一个', '小', '疙瘩', '是', '什么', '东西']"
+    # s_r = r"'([^,]*)'"
+    # pattern = re.compile(s_r, re.I)
+    # for i in pattern.findall(s):
+    #     print(i)
+    
+    # s_r = r"([\d]*),(.*),\["
+    # pattern = re.compile(s_r, re.I)
+    # m = pattern.match(s)
+    # for i in m.groups():
+    #     print(i)
 
-    cos_val = vector_cos(ab_vector_multi, a_vector_squar_sum, b_vector_squar_sum)
+    # import mmap
+    # with open(config.KEY_FILE, "r", encoding=config.ENCODING_CHARSET) as f_key, \
+    #     mmap.mmap(f_key.fileno(), 0, access=mmap.ACCESS_READ) as f_key_mmap:
 
-    return cos_val
+    #     key_info_re = r"([\d]*),(.*),\["
+    #     key_info_pattern = re.compile(key_info_re, re.I)
 
-if __name__ == "__main__":
-    a_word = "腋下长了一个小疙瘩是什么东西"
-    b_word = "白凉粉是什么东西"
-    a_word_root = ['腋下', '长', '了', '一个', '小', '疙瘩', '是', '什么', '东西']
-    b_word_root = ['白', '凉粉', '是', '什么', '东西']
-    print(cal_cos(a_word, b_word, a_word_root, b_word_root))
+    #     s_r = r"'([^,]*)'"
+    #     s_pattern = re.compile(s_r, re.I)
+
+    #     a_line = f_key_mmap.readline().decode("UTF-8")
+    #     b_line = f_key_mmap.readline().decode("UTF-8")
+
+    #     a_m = key_info_pattern.match(a_line)
+    #     a_key = a_m.group(2)
+    #     a_stem = s_pattern.findall(a_line)
+    #     print(a_stem)
+
+    #     b_m = key_info_pattern.match(b_line)
+    #     b_key = b_m.group(2)
+    #     b_stem = s_pattern.findall(b_line)
+    #     print(b_stem)
+
+    #     print(cal_cos_sim(a_key, a_stem, b_key, b_stem))
+
+    # a_key = "吃什么东西减肥最快"
+    # a_stem = ['吃', '什么', '东西', '减肥', '最快']
+
+    # b_key="vc++读写什么文件最快"
+    # b_stem =['v', 'c++', '读写', '什么', '文件', '最快']
+    # print(cal_cos_sim(a_key, a_stem, b_key, b_stem))
+
+    # print(re.findall("c\\+\\+", "vc++读写什么文件最快"))
+    
+    
+    print("".join([".", "?", "^", "$", "*", "+", "\\", "[", "]", "|", "{", "}", "(", ")"]))
+    # s = r"([.?^$*+\[]|{}()])"
+    s=r"([\\])"
+    re.findall(s, "vc++读写什么文件最快")
+    print(re.findall(s, "vc++读写什么文件\最快"))
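
For reference, a standalone worked example of the merge_stem -> gen_word_vec -> cos_sim path. The vectors are built here by counting stems directly, which sidesteps the regex-escaping issues the code above has to handle:

```python
import numpy as np

a_stem = ["QQ", "邮箱", "格式", "怎么", "写"]
b_stem = ["QQ", "邮箱", "格式", "如何", "写"]

union = sorted(set(a_stem) | set(b_stem))           # merge_stem
a_vec = np.array([a_stem.count(w) for w in union])  # count-based word vectors
b_vec = np.array([b_stem.count(w) for w in union])

# Cosine similarity, as in cos_sim()
sim = a_vec.dot(b_vec) / (np.linalg.norm(a_vec) * np.linalg.norm(b_vec))
print(round(float(sim), 4))  # 0.8 -- exactly at the aggregation threshold
```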

+ 20 - 1
config.py

@@ -27,17 +27,36 @@ KEY_REVERSE_FILE = "./data/tmp/key_reverse.csv"
 # Keyword inverted-index model cache (two elements: stem, position)
 KEY_REVERSE_INDEX_CACHE = "./data/cache/key_reverse_index.pkl"
 
+# Keyword inverted-file statistics (two elements: stem, number of keywords involved)
+KEY_REVERSE_STATISTICS_FILE = "./data/tmp/key_reverse_statistics.csv"
+
+# Keyword inverted-index hot-spot cache (two elements: stem, position)
+KEY_REVERSE_INDEX_HOT_CACHE = "./data/cache/key_reverse_index_hot.pkl"
+
 # File for the final aggregation-analysis results
 AGG_ANALYSE_FILE = "./data/analyse/%s.csv"
 
+# Aggregation results
+AGG_FILE = "./data/agg_analyse.csv"
+
 # Stop-word directory
 STOP_WORD_DIR = "./data/stopwords"
 
 # Stop-word model cache
 STOP_WORD_CACHE = "./data/cache/stop_word.pkl"
 
+# Analysis bitmap model cache
+ANALYSE_BITMAP_CACHE = "./data/cache/analyse_bitmap.pkl"
+
+# Analysis progress model cache
+ANALYSE_PROCESS_CACHE = "./data/cache/analyse_process.pkl"
+
 # Special symbols that need extra escaping in regular expressions
+# RE_SPECIAL_SIMBOL = "'.', '?', '^', '$', '*', '+', '\\', '[', ']', '|', '{', '}', '(', ')"
 RE_SPECIAL_SIMBOL = [".", "?", "^", "$", "*", "+", "\\", "[", "]", "|", "{", "}", "(", ")"]
 
 # Progress tip percentage
-PRECENT_TIPS = 0.01
+PRECENT_TIPS = 0.01
+
+# Regex for extracting fields from the keyword table
+KEY_RE_PATTERN = r"(\d+),([^,]*),(.*)"
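
A quick sanity check of KEY_RE_PATTERN against a sample keyword-file row (the row is taken from the commented tests in cal.py):

```python
import re

KEY_RE_PATTERN = r"(\d+),([^,]*),(.*)"
line = "0,腋下长了一个小疙瘩是什么东西,['腋下', '长', '了', '一个', '小', '疙瘩', '是', '什么', '东西']"

m = re.compile(KEY_RE_PATTERN, re.I).match(line)
print(m.group(1))  # row number: "0"
print(m.group(2))  # the keyword itself
print(m.group(3))  # the stem list as a string; ast.literal_eval() turns it into a list
```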

+ 354 - 18
key_reverse.py

@@ -1,29 +1,364 @@
 # -*- coding:utf-8 -*-
 
+from concurrent.futures import FIRST_COMPLETED, ProcessPoolExecutor, ThreadPoolExecutor, as_completed, wait
+from multiprocessing import Manager
+import multiprocessing
+from queue import Queue
 import sys
 from time import time
 import os
 import config
 import tools
 import ast
 import re
+import math
 import stop_word
+import logging
+import mmap
+import threading
 
-TITLE = "关键词倒排文件"
+TITLE = "生成关键词倒排和统计信息"
 
+def thread_handle(path, start_pos, end_pos):
+    pattern = re.compile(config.KEY_RE_PATTERN, re.I)
+    t = threading.current_thread()
+    print("Thread-%d starting task, start pos: %d, end pos: %d" % (t.ident, start_pos, end_pos))
+    # Temporary container
+    key_reverse = []
+    with open(path, "r", encoding=config.ENCODING_CHARSET) as fkey, \
+        mmap.mmap(fkey.fileno(), 0, access=mmap.ACCESS_READ) as fmmap:
+        # Seek to the start position
+        fmmap.seek(start_pos)

+        while True:
+            # Current position
+            cur_pos = fmmap.tell()
+
+            # Bounds check
+            if cur_pos > end_pos:
+                break
+
+            line = fmmap.readline()
+            key_reverse.append(line)
+            # Kept for reference
+            # # Read the keyword row
+            # line = fmmap.readline().decode("UTF-8")
+
+            # # Stop at EOF
+            # if not line:
+            #     break
+
+            # # Extract fields
+            # m = pattern.match(line)
+            # # Get the row number and stems
+            # index = m.group(1)
+            # key_root = m.group(3)
+
+            # # Convert to a real list object
+            # key_root = ast.literal_eval(key_root)
+
+            # key_reverse.append((index, key_root))
+    # print("Thread-%d task finished" % t.ident)
+    # return key_reverse
+    return {
+        "tid": t.ident,
+        "key_reverse": key_reverse
+    }
+
+
+
+def process_handle(stop_word_cache, lines):
+    pid = os.getpid()
+    print("Process-%d received data, starting computation" % pid)
+    pattern = re.compile(config.KEY_RE_PATTERN, re.I)
+
+    key_reverse_dict = {}
+
+    for line in lines:
+        line = line.decode("UTF-8")
+        # Extract fields
+        m = pattern.match(line)
+        # Get the keyword row number and stems
+        index = m.group(1)
+        key_root = m.group(3)
+
+        # Convert to a real list object
+        for item in ast.literal_eval(key_root):
+            # Skip stop words
+            if item in stop_word_cache:
+                continue
+            # Build the inverted table and the counts
+            val = key_reverse_dict.get(item)
+            if val:
+                key_reverse_dict[item]["count"] = key_reverse_dict[item]["count"] + 1
+                key_reverse_dict[item]["indexs"].append(index)
+            else:
+                key_reverse_dict[item] = {
+                    "count": 1,
+                    "indexs": [index]
+                }
+    return {
+        "pid": pid,
+        "key_reverse_dict": key_reverse_dict
+    }
+
+def multi_thread():
+    """
+    Build the inverted table (multi-threaded)
+    """
+
+    key_reverse_dict = {}
+
+    def process_handle_result(future):
+        result = future.result()
+        print("Process-%d finished, returned partial data; merging" % result["pid"])
+        for key, value in result["key_reverse_dict"].items():
+            val = key_reverse_dict.get(key)
+            if val:
+                key_reverse_dict[key]["count"] = key_reverse_dict[key]["count"] + value["count"]
+                key_reverse_dict[key]["indexs"].extend(value["indexs"])
+            else:
+                key_reverse_dict[key] = value
+        print("Process-%d data merged" % result["pid"])
+
+
+    def thread_handle_result(future):
+        result = future.result()
+        logging.info("Thread-%d finished, returned partial data; submitting to a compute process" % result["tid"])
+        process_future = process_pool.submit(process_handle, stop_word_cache, result["key_reverse"])
+        process_future.add_done_callback(process_handle_result)
+
+    tools.init_log()
+    tools.log_start_msg(TITLE)
+
+    logging.info("Running the multi-threaded version")
+
+    logging.info("Initializing")
+    # Extraction regex
+    pattern = re.compile(config.KEY_RE_PATTERN, re.I)
+    # Stop-word table
+    stop_word_cache = stop_word.load_stop_word()
+    # Worker counts
+    thread_num = 1
+    process_num = 2
+    worker_num = 100
+    # Keyword-table index
+    key_index_cache = tools.load_obj(config.KEY_INDEX_CACHE)
+
+    logging.info("Splitting the data into segments")
+
+    # Evenly split the entries of the index file
+    # Convert to a list; compute the total length and the split interval
+    key_index_list = [key for key in key_index_cache.keys()]
+    total_len = len(key_index_list)
+    interval = math.ceil(total_len / worker_num)
+
+    # Use the cached index file to produce segment boundary positions
+    # Position container
+    pos_list = []
+    for i in range(worker_num + 1):
+        # Compute the split point's position in the list
+        l_pos = i * interval
+        # Clamp when at or beyond the list size
+        if l_pos >= total_len:
+            l_pos = total_len - 1
+        # Get the key at that position
+        key_index = key_index_list[l_pos:l_pos+1]
+        # Look up the file position for that key
+        pos = key_index_cache[key_index[0]]
+        # Record the position
+        pos_list.append(pos)
+   
+    logging.info("把分段结果提交至多线程执行")
+    # 生成任务
+    with ThreadPoolExecutor(thread_num) as thread_pool, \
+        ProcessPoolExecutor(process_num) as process_pool:
+
+        # thread_futures = []
+
+        for i in range(0, len(pos_list)-1):
+            pos = pos_list[i: i+2]
+            # thread_futures.append()
+            thread_future = thread_pool.submit(thread_handle, config.KEY_FILE, pos[0], pos[1])
+            thread_future.add_done_callback(thread_handle_result)
+
+        # 等待数据返回
+        logging.info("等待多线程执行结束")
+        thread_pool.shutdown(wait=True)
+        process_pool.shutdown(wait=True)
+        
+
+
+            
+                
+    
+    logging.info("已获取全部子进程返回部分结果,总数据量:%d" % len(key_reverse_dict))
+    return 
+    # 根据关键词数量进行排序,这里通过items()方法转成元组列表,才能进行排序
+    logging.info("根据关键词数量进行倒序排列")
+    sorted_reverse_list = sorted(key_reverse_dict.items(), key=lambda x: x[1]["count"], reverse=True)
+
+    # 保存到本地文件
+    logging.info("保存到本地")
+    with open(config.KEY_REVERSE_FILE, "w", encoding=config.ENCODING_CHARSET) as f:
+        for key, value in sorted_reverse_list:
+            f.write("%s,%d,%s\n" % (key, value["count"], value["indexs"]))
+
+    tools.log_end_msg(TITLE)
+
+def handle(path, start_pos, end_pos, share_pattern, stop_word_cache):
+    pid = os.getpid()
+    print("Process-%d starting task, start pos: %d, end pos: %d" % (pid, start_pos, end_pos))
+    # Temporary container
+    key_reverse = {}
+    # Regex used for extraction
+    pattern = share_pattern.value
+    with open(path, "r", encoding=config.ENCODING_CHARSET) as fkey, \
+        mmap.mmap(fkey.fileno(), 0, access=mmap.ACCESS_READ) as fmmap:
+        # Seek to the start position
+        fmmap.seek(start_pos)

-def main():
+        while True:
+            # Current position
+            cur_pos = fmmap.tell()
+
+            # Bounds check
+            if cur_pos > end_pos:
+                break
+
+            # Read the keyword row
+            line = fmmap.readline().decode("UTF-8")
+
+            # Stop at EOF
+            if not line:
+                break
+
+            # Extract fields
+            m = pattern.match(line)
+            # Get the keyword row number and stems
+            index = m.group(1)
+            key_root = m.group(3)
+
+            # Convert to a real list object
+            for item in ast.literal_eval(key_root):
+                # Skip stop words
+                if item in stop_word_cache:
+                    continue
+                # Build the inverted table and the counts
+                val = key_reverse.get(item)
+                if val:
+                    count = key_reverse[item]["count"]
+                    key_reverse[item]["count"] = count + 1
+                    key_reverse[item]["indexs"].append(index)
+                else:
+                    key_reverse[item] = {
+                        "count": 1,
+                        "indexs": [index]
+                    }
+
+    print("Process-%d task finished" % pid)
+
+    return key_reverse
+
+def multi_process():
+    """
+    Build the inverted table (multi-process)
+    """
+
+    tools.init_log()
+    tools.log_start_msg(TITLE)
+
+    logging.info("Running the multi-process version")
+
+    logging.info("Initializing")
+    manager = Manager()
+    # Extraction regex
+    share_pattern = manager.Value("pattern", re.compile(config.KEY_RE_PATTERN, re.I))
+    # Stop-word table
+    stop_word_cache = manager.dict(stop_word.load_stop_word())
+    # Number of worker processes
+    process_num = os.cpu_count() - 1
+    worker_num = process_num
+    pool = ProcessPoolExecutor(max_workers=process_num)
+    # Keyword-table index
+    key_index_cache = tools.load_obj(config.KEY_INDEX_CACHE)
+
+    logging.info("Splitting the data into segments")
+
+    # Evenly split the entries of the index file
+    # Convert to a list; compute the total length and the split interval
+    key_index_list = [key for key in key_index_cache.keys()]
+    total_len = len(key_index_list)
+    interval = math.ceil(total_len / worker_num)
+
+    # Use the cached index file to produce segment boundary positions
+    # Position container
+    pos_list = []
+    for i in range(worker_num + 1):
+        # Compute the split point's position in the list
+        l_pos = i * interval
+        # Clamp when at or beyond the list size
+        if l_pos >= total_len:
+            l_pos = total_len - 1
+        # Get the key at that position
+        key_index = key_index_list[l_pos:l_pos+1]
+        # Look up the file position for that key
+        pos = key_index_cache[key_index[0]]
+        # Record the position
+        pos_list.append(pos)
+
+    logging.info("Submitting the segments to the process pool")
+    # Create tasks
+    process_futures = []
+    for i in range(0, len(pos_list)-1):
+        pos = pos_list[i: i+2]
+        process_futures.append(pool.submit(handle, config.KEY_FILE, pos[0], pos[1], share_pattern, stop_word_cache))
+
+    # Block until the first worker finishes; as_completed below drains the rest
+    logging.info("Waiting for the processes to finish")
+    wait(process_futures, return_when=FIRST_COMPLETED)
+    key_reverse_dict = {}
+    for future in as_completed(process_futures):
+        logging.info("A worker finished and returned partial results")
+        result = future.result()
+        for key, value in result.items():
+            # Merge the data
+            val_dict = key_reverse_dict.get(key)
+            if val_dict:
+                count = val_dict["count"]
+                key_reverse_dict[key]["count"] = count + value["count"]
+                key_reverse_dict[key]["indexs"].extend(value["indexs"])
+            else:
+                key_reverse_dict[key] = value
+
+    logging.info("All worker results received")
+
+    # Sort by keyword count; items() converts the dict into tuples so it can be sorted
+    logging.info("Sorting by keyword count, descending")
+    sorted_reverse_list = sorted(key_reverse_dict.items(), key=lambda x: x[1]["count"], reverse=True)
+
+    # Save to a local file
+    logging.info("Saving locally")
+    with open(config.KEY_REVERSE_FILE, "w", encoding=config.ENCODING_CHARSET) as f:
+        for key, value in sorted_reverse_list:
+            f.write("%s,%d,%s\n" % (key, value["count"], value["indexs"]))
+
+    tools.log_end_msg(TITLE)
+
+def one_process():
     """
-    Build the inverted table
+    Build the inverted table (single-process)
     """

     tools.init_log()
     tools.log_start_msg(TITLE)

+    logging.info("Running the single-process version")
+
     # Extraction regex
-    s = r"(\d+),([^,]*),(.*)"
-    pattern = re.compile(s, re.I)
+    pattern = re.compile(config.KEY_RE_PATTERN, re.I)

     # Inverted-table container
     key_reverse = {}
@@ -68,24 +403,25 @@ def main():
                 # Build the inverted table
                 val = key_reverse.get(item)
                 if val:
-                    key_reverse[item].append(index)
+                    count = key_reverse[item]["count"]
+                    key_reverse[item]["count"] = count + 1
+                    key_reverse[item]["indexs"].append(index)
                 else:
-                    key_reverse[item]=[]
-                    key_reverse[item].append(index)
+                    key_reverse[item] = {
+                        "count": 1,
+                        "indexs": [index]
+                    }
+
+    logging.info("Sorting by keyword count, descending")
+    sorted_reverse_list = sorted(key_reverse.items(), key=lambda x: x[1]["count"], reverse=True)

     # Save to a local file
+    logging.info("Saving locally")
     with open(config.KEY_REVERSE_FILE, "w", encoding=config.ENCODING_CHARSET) as f:
-        for key, value in key_reverse.items():
-            f.write("%s,%s\n" % (key, value))
+        for key, value in sorted_reverse_list:
+            f.write("%s,%d,%s\n" % (key, value["count"], value["indexs"]))
 
     tools.log_end_msg(TITLE)
 
 if __name__ == "__main__":
-    main()
-
-    # Test load time
-    # start = time()
-    # key_reverse_cache = tools.load_obj(config.KEY_REVERSE_CACHE)
-    # end = time()
-    # print("占用大小:", sys.getsizeof(key_reverse_cache))
-    # print("加载耗时:", end-start)
+    multi_thread()
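
All three variants build the same structure. A toy standalone sketch of the stem -> {count, indexs} inverted table and the descending sort (made-up rows, not repository data):

```python
rows = [
    (0, ["QQ", "邮箱", "格式"]),
    (1, ["邮箱", "注册"]),
]

key_reverse = {}
for index, stems in rows:
    for stem in stems:
        # One entry per stem: how many keywords it appears in, and which ones
        entry = key_reverse.setdefault(stem, {"count": 0, "indexs": []})
        entry["count"] += 1
        entry["indexs"].append(index)

# Most frequent stems first, matching the on-disk "stem,count,indexs" format
for stem, val in sorted(key_reverse.items(), key=lambda x: x[1]["count"], reverse=True):
    print("%s,%d,%s" % (stem, val["count"], val["indexs"]))
```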

+ 1 - 6
key_reverse_index.py

@@ -49,9 +49,4 @@ def main():
     tools.log_end_msg(TITLE)
 
 if __name__ == "__main__":
-    # main()
-    key_reverse_index_cache = tools.load_obj(config.KEY_REVERSE_INDEX_CACHE)
-    for i, item in enumerate(key_reverse_index_cache):
-        if i > 10:
-            break
-        print(item)
+    main()

+ 218 - 0
key_reverse_statistics.py

@@ -0,0 +1,218 @@
+# -*- coding:utf-8 -*-
+
+from concurrent.futures import ProcessPoolExecutor, as_completed
+import mmap
+import random
+import sys
+from time import sleep, time
+import os
+import config
+import tools
+import ast
+import re
+import stop_word
+import logging
+import math
+from multiprocessing import Process, Pool
+
+TITLE = "关键词倒排文件 统计"
+
+# def reverse_statistics(start_pos, end_pos):
+
+def handle(start_pos, end_pos):
+
+    print("进程:%d, 统计开始,开始位置:%d,结束位置:%d" % (os.getpid(), start_pos, end_pos))
+
+    # Statistics container
+    reverse_statistics = {}
+    
+    with open(config.KEY_REVERSE_FILE, "r", encoding=config.ENCODING_CHARSET) as fr, \
+        mmap.mmap(fr.fileno(), 0 , access=mmap.ACCESS_READ) as fmmap:
+        # Seek to the start position
+        fmmap.seek(start_pos)
+
+        while True:
+            cur_pos = fmmap.tell()
+            # Bounds check
+            if cur_pos >= end_pos:
+                break
+            
+            line = fmmap.readline().decode(config.ENCODING_CHARSET)
+            index=line.index(",")
+            key = line[:index]
+            word_root = line[index+1:]
+            word_root = ast.literal_eval(word_root)
+            l = len(word_root)
+            
+            reverse_statistics[key]=l
+            
+    logging.info("进程:%d, 统计结束" % os.getpid())
+
+    return {
+        "pid":os.getpid(),
+        "statistics":reverse_statistics
+    }
+    
+
+def main2():
+    # Logging configuration
+    tools.init_log()
+    tools.log_start_msg(TITLE)
+
+    # Number of processes
+    process_num = os.cpu_count()
+
+    # Load the cached index file
+    key_reverse_index = tools.load_obj(config.KEY_REVERSE_INDEX_CACHE)
+
+    # Evenly split the entries of the index file
+
+    # Convert to a list; compute the total length and the split interval
+    key_list = [key for key in key_reverse_index.keys()]
+    key_list_len = len(key_list)
+    interval = math.ceil(key_list_len / process_num)
+
+    # Use the cached index file to produce segment boundary positions
+    # Position container
+    pos_list = []
+    for i in range(process_num + 1):
+        # Compute the split point's position in the list
+        l_pos = i * interval
+        # Clamp when at or beyond the list size
+        if l_pos >= key_list_len:
+            l_pos = key_list_len - 1
+        # Get the stem at that position
+        key = key_list[l_pos:l_pos+1]
+        # Look up the file position for that stem
+        pos = key_reverse_index[key[0]]
+        # Record the position
+        pos_list.append(pos)
+
+
+    # Use a process pool
+    pool = ProcessPoolExecutor(process_num)
+    # Create tasks
+    process_futures = []
+    for i in range(0, len(pos_list)-1):
+        pos = pos_list[i: i+2]
+        process_futures.append(pool.submit(handle, pos[0], pos[1]))
+    
+    # with open(config.KEY_REVERSE_STATISTICS_FILE, "w", encoding=config.ENCODING_CHARSET) as fw:
+    #     for future in as_completed(process_futures):
+    #         logging.info("Partial subtask statistics done, saving locally - start")
+    #         for key, value in future.result().items():
+    #             fw.write("%s,%s\n"%(key,value))
+    #         logging.info("Partial subtask statistics done, saving locally - end")
+
+
+    results = []
+    for future in as_completed(process_futures):
+        result = future.result()
+        logging.info("Process: %d, statistics finished" % result["pid"])
+        results.append(result)
+
+    logging.info("Statistics done, saving locally - start")
+    with open(config.KEY_REVERSE_STATISTICS_FILE, "w", encoding=config.ENCODING_CHARSET) as fw:
+        for r in results:
+            for key, value in r["statistics"].items():
+                fw.write("%s,%s\n"%(key,value))
+    logging.info("Statistics done, saving locally - end")
+
+    pool.shutdown(wait=True)
+
+    tools.log_end_msg(TITLE)
+
+    # Test code 3
+    # pool = ProcessPoolExecutor(3)
+    # for i in range(1,5):
+    #     pool.submit(handle, "test-process-%d"%i, i, i*10)

+    # pool.shutdown(wait=True)

+    # Test code 2
+    # pool = Pool(3)
+    # for i in range(1,5):
+    #     pool.apply_async(handle, ("test-process-%d"%i, i, i*10))
+    # pool.close()
+    # pool.join()
+    # print("done")

+    # Test code 1
+    # p = Process(target=handle, args=('test-process', 1, 10))
+    # p.start()
+    # p.join()
+    
+    # tools.init_log()
+    # tools.log_start_msg(TITLE)
+
+    # key_reverse_index = tools.load_obj(config.KEY_REVERSE_INDEX_CACHE)
+
+    # tmp = [key for key in key_reverse_index.keys()]
+    
+    # l = len(tmp)
+    # print("总长:", l)
+    # internal = math.ceil(l / 4)
+    # print("间隔:", internal)
+    # pos = []
+    # for i in range(5):
+    #     t = i*internal
+    #     if t > l:
+    #         t = l-1
+    #     pos.append(t)
+    # print(pos)
+
+    # for item in pos:
+    #     key = tmp[item:item+1]
+    #     print(key)
+    #     pos = key_reverse_index[key[0]]
+    #     print(key, pos)
+
+    
+    # reverse_statistics = {}
+    # logging.info("统计开始")
+    # with open(config.KEY_REVERSE_FILE, "r", encoding=config.ENCODING_CHARSET) as fr, \
+    #     mmap.mmap(fr.fileno(), 0 , access=mmap.ACCESS_READ) as fmmap:
+    #     for line in fr:
+    #         index=line.index(",")
+    #         key = line[:index]
+    #         word_root = line[index+1:]
+    #         word_root = ast.literal_eval(word_root)
+    #         l = len(word_root)
+            
+    #         reverse_statistics[key]=l
+
+    # logging.info("统计结束,保存至本地")
+    # with open(config.KEY_REVERSE_STATISTICS_FILE, "w", encoding=config.ENCODING_CHARSET) as fw:
+    #     for key, value in reverse_statistics:
+    #         fw.write("%s,%s\n"%(key,value))
+
+    # tools.log_end_msg(TITLE)
+
+
+def main():
+    tools.init_log()
+    tools.log_start_msg(TITLE)
+    
+    reverse_statistics = {}
+    logging.info("统计开始")
+    with open(config.KEY_REVERSE_FILE, "r", encoding=config.ENCODING_CHARSET) as fr, \
+        mmap.mmap(fr.fileno(), 0 , access=mmap.ACCESS_READ) as fmmap:
+        for line in fr:
+            index=line.index(",")
+            key = line[:index]
+            word_root = line[index+1:]
+            word_root = ast.literal_eval(word_root)
+            l = len(word_root)
+            
+            reverse_statistics[key]=l
+
+    logging.info("统计结束,保存至本地")
+    with open(config.KEY_REVERSE_STATISTICS_FILE, "w", encoding=config.ENCODING_CHARSET) as fw:
+        for key, value in reverse_statistics:
+            fw.write("%s,%s\n"%(key,value))
+
+    tools.log_end_msg(TITLE)
+
+if __name__ == "__main__":
+    main2()
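
main2 splits the work by picking every Nth key of the index dict and mapping it back to a byte offset; consecutive boundary pairs become the [start_pos, end_pos) ranges handed to handle(). A toy sketch of that scheme (offsets invented):

```python
import math

index = {"a": 0, "b": 40, "c": 95, "d": 120, "e": 200}  # stem -> byte offset
process_num = 2

keys = list(index.keys())
interval = math.ceil(len(keys) / process_num)

pos_list = []
for i in range(process_num + 1):
    l_pos = min(i * interval, len(keys) - 1)  # clamp the final boundary
    pos_list.append(index[keys[l_pos]])

print(pos_list)                           # [0, 120, 200]
print(list(zip(pos_list, pos_list[1:]))) # [(0, 120), (120, 200)]
```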

+ 1 - 1
logging.conf

@@ -8,7 +8,7 @@ keys=fileHandler,consoleHandler
 keys=simpleFormatter
 
 [logger_root]
-level=INFO
+level=DEBUG
 handlers=fileHandler,consoleHandler
 
 [handler_consoleHandler]

+ 156 - 0
statistics.py

@@ -0,0 +1,156 @@
+# -*- coding:utf-8 -*-
+
+from collections import namedtuple
+from dataclasses import make_dataclass
+import os
+import tools
+import config
+import logging
+import random
+import time
+import ast
+import mmap
+
+TASK_TITLE = "Data statistics analysis"
+
+def transfer_str(num):
+    # Format large numbers in units of 万 (10,000), e.g. 459789 -> "45万9789"
+    msg = None
+    if num >= 10000:
+        msg = "%d万%d" % (num//10000, num%10000)
+    else:
+        msg = str(num)
+    return msg
+
+def cal(values):
+    list_len = len(values)
+    list_count = sum(values)
+    sum_msg = transfer_str(list_len)
+    count_msg = transfer_str(list_count)
+    avg_msg = transfer_str(int(list_count/list_len))
+    return sum_msg, count_msg, avg_msg
+
+def tip(condition, values):
+    logging.info("Condition: %s - stems: %s, keyword hits: %s, average ~%s hits/stem" % ((condition,) + cal(values)))
+
+def test_tip(sample, ele_num):
+    start = time.time()
+    tmp = ast.literal_eval(str(random.sample(sample, ele_num)))
+    end = time.time()
+    logging.info("Converting a string list of %s elements into an object took %s" % (transfer_str(ele_num), end-start))
+
+def cost_statistics():
+    with open(config.KEY_REVERSE_STATISTICS_FILE, "r", encoding=config.ENCODING_CHARSET) as f:
+        count_list= []
+        total_count=0
+        for line in f:
+            first_index = line.index(",")
+            count = int(line[first_index+1:])
+            count_list.append(count)
+            total_count = total_count + count
+        
+        logging.info("总祠根数:%d, 涉及的总分词查找数:%d" % (len(count_list), total_count))
+        
+        tip("等于1", [val for val in count_list if val == 1])
+
+        tip("大于1小于100", [val for val in count_list if val > 1 and val < 100])
+
+        tip("大于等于100小于200", [val for val in count_list if val >= 100 and val < 200])
+
+        tip("大于等于200小于300", [val for val in count_list if val >= 200 and val < 300])
+
+        tip("大于等于300小于400", [val for val in count_list if val >= 300 and val < 400])
+
+        tip("大于等于400小于500", [val for val in count_list if val >= 400 and val < 500])
+        
+        tip("大于等于500小于1000", [val for val in count_list if val >= 500 and val < 1000])
+
+        tip("大于等于1000小于5000", [val for val in count_list if val >= 1000 and val < 5000])
+
+        tip("大于等于5000小于1万", [val for val in count_list if val >= 5000 and val < 10000])
+
+        tip("大于等于1万小于5万", [val for val in count_list if val >= 10000 and val < 50000])
+
+        tip("大于等于5万小于10万", [val for val in count_list if val >= 50000 and val < 100000])
+
+        tip("大于等于10万", [val for val in count_list if val >= 100000])
+
+        sample_list = [i for i in range(14500029)]
+        test_tip(sample_list, 1)
+        test_tip(sample_list, 10)
+        test_tip(sample_list, 50)
+        test_tip(sample_list, 100)
+        test_tip(sample_list, 200)
+        test_tip(sample_list, 300)
+        test_tip(sample_list, 400)
+        test_tip(sample_list, 500)
+        test_tip(sample_list, 1000)
+        test_tip(sample_list, 5000)
+        test_tip(sample_list, 10000)
+        test_tip(sample_list, 50000)
+        test_tip(sample_list, 100000)
+        test_tip(sample_list, 595528)
+        test_tip(sample_list, 689520)
+        test_tip(sample_list, 776035)
+        test_tip(sample_list, 822266)
+        test_tip(sample_list, 951491)
+
+def memory_statistics():
+    key_reverse_index_cache = tools.load_obj(config.KEY_REVERSE_INDEX_CACHE)
+    end_pos = key_reverse_index_cache["导不出"]
+    logging.info("查找结束位置")
+    with open(config.KEY_REVERSE_FILE, "r", encoding=config.ENCODING_CHARSET) as freverse, \
+        mmap.mmap(freverse.fileno(), 0, access=mmap.ACCESS_READ) as fmmap:
+        
+        logging.info("开始构建缓存")
+        cache = {}
+        start = time.time()
+        while True:
+            cur_pos = fmmap.tell()
+            
+            if cur_pos > end_pos:
+                break
+
+            line = fmmap.readline().decode("UTF-8")
+            first_index = line.index(",")
+            key = line[:first_index]
+
+            # Convert the list string into a real list
+            word_root = line[first_index+1:]
+            cache[key]=ast.literal_eval(word_root)
+
+        end = time.time()
+        logging.info('Hot cache built, elapsed: %s, entries: %d' % ((end-start), len(cache)))
+
+        logging.info('Saving the cache locally')
+        tools.save_obj(config.KEY_REVERSE_INDEX_HOT_CACHE, cache)
+        logging.info('Save finished')
+
+        logging.info('Sleeping 20s for memory observation')
+        time.sleep(20)
+        
+
+def main():
+
+    # num = 459789
+    # print(num%10000)
+    # print(num//10000)
+    # return
+
+    tools.init_log()
+    tools.log_start_msg(TASK_TITLE)
+
+    memory_statistics()
+        
+
+    tools.log_end_msg(TASK_TITLE)
+
+
+if __name__ == "__main__":
+    # print("加载开始")
+    # cache = tools.load_obj(config.KEY_REVERSE_INDEX_HOT_CACHE+".bak")
+    # print("加载结束")
+    # time.sleep(20)
+
+    Shape = namedtuple('Shape', ['x', 'y', 'z'])
+    exm = Shape(1, 2, 3)
+    print(exm.index(2))