follow_up.py

import os
import datetime
import time

import pandas as pd


def follow_up_handle():
    """Follow-up processing of the latest prediction file."""
    object_dir = "./predictions"
    output_dir = "./keep"
    # Create the output directory
    os.makedirs(output_dir, exist_ok=True)
    # Check that the input directory exists
    if not os.path.exists(object_dir):
        print(f"Directory does not exist: {object_dir}")
        return
    # Collect all CSV files whose names start with future_predictions_
    csv_files = []
    for file in os.listdir(object_dir):
        if file.startswith("future_predictions_") and file.endswith(".csv"):
            csv_files.append(file)
    if not csv_files:
        print(f"No CSV files starting with future_predictions_ found in {object_dir}")
        return
    csv_files.sort()
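    # Note: file names are assumed to follow future_predictions_<YYYYMMDDHHMM>.csv
    # (see the %Y%m%d%H%M parsing below), so a plain lexicographic sort puts the
    # most recent prediction last.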
    # Debug branch: pin a specific prediction time instead of the latest file
    # target_time = "202603271700"
    # matching_files = [f for f in csv_files if target_time in f]
    # if matching_files:
    #     last_csv_file = matching_files[0]
    #     print(f"File for the specified time: {last_csv_file}")
    # else:
    #     print(f"No prediction file found for time {target_time}")
    #     return
    # Production branch
    last_csv_file = csv_files[-1]  # only look at the most recent prediction file
    print(f"Latest prediction file: {last_csv_file}")
    if last_csv_file.startswith("future_predictions_") and last_csv_file.endswith(".csv"):
        target_time = last_csv_file.replace("future_predictions_", "").replace(".csv", "")
    else:
        target_time = datetime.datetime.now().strftime("%Y%m%d%H%M")
    # Read the latest prediction file
    last_csv_path = os.path.join(object_dir, last_csv_file)
    df_last_predict = pd.read_csv(last_csv_path)
    df_last_predict_will_drop = df_last_predict[df_last_predict["will_price_drop"] == 1].reset_index(drop=True)
    df_last_predict_not_drop = df_last_predict[df_last_predict["will_price_drop"] == 0].reset_index(drop=True)
    print(
        f"In the latest prediction file, {len(df_last_predict_will_drop)} flights are predicted "
        f"to drop in price and {len(df_last_predict_not_drop)} are not"
    )
    # Maintain a table keep_info.csv plus a per-run snapshot keep_info_{target_time}.csv
    keep_info_path = os.path.join(output_dir, "keep_info.csv")
    keep_info_snapshot_path = os.path.join(output_dir, f"keep_info_{target_time}.csv")
    key_cols = ["citypair", "flight_numbers", "baggage_weight", "from_date"]
    # Deduplicate on the key columns
    df_last_predict_will_drop = df_last_predict_will_drop.drop_duplicates(
        subset=key_cols, keep="last"
    ).reset_index(drop=True)
    # df_last_predict_not_drop = df_last_predict_not_drop.drop_duplicates(
    #     subset=key_cols, keep="last"
    # ).reset_index(drop=True)
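    # keep_flag scheme, as implemented below:
    #    1 -> row was (re-)added from the latest "will drop" predictions
    #    0 -> row was already tracked and is still predicted to drop
    #   -1 -> row is no longer predicted to drop; it stays in the snapshot
    #         but is removed from keep_info.csv before saving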
    # Read the maintenance table
    if os.path.exists(keep_info_path):
        try:
            df_keep_info = pd.read_csv(keep_info_path)
        except Exception as e:
            print(f"Failed to read maintenance table: {keep_info_path}, error: {str(e)}")
            df_keep_info = pd.DataFrame()
    else:
        df_keep_info = pd.DataFrame()

    def _parse_dt(yyyymmddhhmm):
        try:
            return datetime.datetime.strptime(str(yyyymmddhhmm), "%Y%m%d%H%M")
        except Exception:
            return None

    current_dt = _parse_dt(target_time)
    prev_dt = None
    hud_decrement = 1
    # if not df_keep_info.empty and "last_predict_time" in df_keep_info.columns:
    #     prev_candidates = (
    #         df_keep_info["last_predict_time"].dropna().astype(str).tolist()
    #     )
    #     if prev_candidates:
    #         prev_dt = _parse_dt(max(prev_candidates))
    if prev_dt is None:
        snapshot_times = []
        for f in os.listdir(output_dir):
            if (
                f.startswith("keep_info_")
                and f.endswith(".csv")
                and f != f"keep_info_{target_time}.csv"
            ):
                ts = f.replace("keep_info_", "").replace(".csv", "")
                dt = _parse_dt(ts)
                if dt is not None:
                    snapshot_times.append(dt)
        if snapshot_times:
            prev_dt = max(snapshot_times)
    if current_dt is not None and prev_dt is not None:
        delta_seconds = (current_dt - prev_dt).total_seconds()
        if delta_seconds >= 0:
            hud_decrement = max(0, int(delta_seconds // 3600))
        else:
            hud_decrement = 0
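    # hud_decrement is the number of whole hours between the newest earlier
    # snapshot and the current prediction time; it is used below to age
    # hours_until_departure for rows no longer predicted to drop. It defaults
    # to 1 when no earlier snapshot (or no parsable timestamp) is found.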
    # Initialize the maintenance table
    if df_keep_info.empty:
        df_keep_info = df_last_predict_will_drop.copy()
        df_keep_info["into_update_hour"] = df_keep_info['update_hour']
        # df_keep_info["into_price"] = df_keep_info['price_total']
        df_keep_info["keep_flag"] = 1
        # df_keep_info["last_predict_time"] = target_time
        # utf-8-sig writes a BOM so the CSV opens cleanly in Excel
        df_keep_info.to_csv(keep_info_snapshot_path, index=False, encoding="utf-8-sig")
        print(f"Maintenance table snapshot saved: {keep_info_snapshot_path} (rows={len(df_keep_info)})")
        df_keep_info.to_csv(keep_info_path, index=False, encoding="utf-8-sig")
        print(f"Maintenance table initialized: {keep_info_path} (rows={len(df_keep_info)})")
    # The maintenance table already exists
    else:
        if "keep_flag" not in df_keep_info.columns:
            df_keep_info["keep_flag"] = 0
        df_keep_info["keep_flag"] = (
            pd.to_numeric(df_keep_info["keep_flag"], errors="coerce")
            .fillna(0)
            .astype(int)
        )
        missing_cols = [c for c in key_cols if c not in df_keep_info.columns]
        if missing_cols:
            print(f"Maintenance table is missing columns: {missing_cols}, path={keep_info_path}")
            return
        for c in key_cols:
            df_last_predict_will_drop[c] = df_last_predict_will_drop[c].astype(str)
            # df_last_predict_not_drop[c] = df_last_predict_not_drop[c].astype(str)
            df_keep_info[c] = df_keep_info[c].astype(str)
        df_keep_info = df_keep_info.drop_duplicates(subset=key_cols, keep="last").reset_index(drop=True)
        # Extract the key columns of both tables
        df_last_keys = df_last_predict_will_drop[key_cols].drop_duplicates().reset_index(drop=True)
        df_keep_keys = df_keep_info[key_cols].drop_duplicates().reset_index(drop=True)
        df_last_with_merge = df_last_predict_will_drop.merge(
            df_keep_keys, on=key_cols, how="left", indicator=True
        )
        # Scenario 1: rows that appear in df_last_predict_will_drop but not in df_keep_info
        df_to_add = (
            df_last_with_merge.loc[df_last_with_merge["_merge"] == "left_only"]
            .drop(columns=["_merge"])
            .copy()
        )
        # Set keep_flag to 1 for the new rows
        if not df_to_add.empty:
            df_to_add['into_update_hour'] = df_to_add['update_hour']
            # df_to_add['into_price'] = df_to_add['price_total']
            df_to_add["keep_flag"] = 1
        df_keep_with_merge = df_keep_info.reset_index().merge(
            df_last_keys, on=key_cols, how="left", indicator=True
        )
        # Scenario 2: rows that appear in both df_last_predict_will_drop and df_keep_info
        matched_idx = df_keep_with_merge.loc[df_keep_with_merge["_merge"] == "both", "index"].tolist()
        # Scenario 3: rows that appear in df_keep_info but not in df_last_predict_will_drop
        keep_only_idx = df_keep_with_merge.loc[df_keep_with_merge["_merge"] == "left_only", "index"].tolist()
        # Indices of df_keep_info rows matching scenario 2
        if matched_idx:
            df_matched_keys = df_keep_info.loc[matched_idx, key_cols]
            df_latest_matched = df_matched_keys.merge(
                df_last_predict_will_drop, on=key_cols, how="left"
            )
            # Overwrite the matched rows of df_keep_info with the corresponding
            # content from df_last_predict_will_drop
            update_cols = [c for c in df_last_predict_will_drop.columns if c not in key_cols]
            for c in update_cols:
                if c == "keep_flag":
                    continue
                if c not in df_keep_info.columns:
                    df_keep_info[c] = pd.NA
                df_keep_info.loc[matched_idx, c] = df_latest_matched[c].values
            # Re-flag: 1 -> 0, 0 -> 0, 2 -> 0, -1 -> 1
            old_flags = df_keep_info.loc[matched_idx, "keep_flag"]
            df_keep_info.loc[matched_idx, "keep_flag"] = old_flags.apply(
                lambda x: 0 if x in (0, 1, 2) else 1
            )
        # Indices of df_keep_info rows matching scenario 3
        if keep_only_idx:
            mask_keep_only = df_keep_info.index.isin(keep_only_idx)  # boolean mask
            # Only touch rows whose keep_flag is >= 0
            mask_need_observe = mask_keep_only & (df_keep_info["keep_flag"] >= 0)  # boolean mask
            if mask_need_observe.any():
                if "hours_until_departure" not in df_keep_info.columns:
                    df_keep_info.loc[mask_need_observe, "keep_flag"] = -1
                else:
                    hud = pd.to_numeric(
                        df_keep_info.loc[mask_need_observe, "hours_until_departure"],
                        errors="coerce",
                    )
                    # Age hours_until_departure by the elapsed hours
                    # new_hud = hud - 1
                    new_hud = hud - hud_decrement
                    df_keep_info.loc[mask_need_observe, "hours_until_departure"] = new_hud
                    df_keep_info.loc[mask_need_observe, "keep_flag"] = -1  # delete flag
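        # At this point, scenario-3 rows carry keep_flag == -1: they remain in the
        # snapshot written below but are dropped from keep_info.csv before saving.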
        # Append df_to_add to df_keep_info
        add_rows = len(df_to_add) if "df_to_add" in locals() else 0
        if add_rows:
            df_keep_info = pd.concat([df_keep_info, df_to_add], ignore_index=True)
        df_keep_info_snapshot = df_keep_info.copy()
        df_keep_info_snapshot.to_csv(keep_info_snapshot_path, index=False, encoding="utf-8-sig")
        print(
            f"Maintenance table snapshot saved: {keep_info_snapshot_path} (rows={len(df_keep_info_snapshot)})"
        )
        # Remove rows whose keep_flag is -1
        before_rm = len(df_keep_info)
        df_keep_info = df_keep_info.loc[df_keep_info["keep_flag"] != -1].reset_index(drop=True)
        rm_rows = before_rm - len(df_keep_info)
        # Save the updated df_keep_info back to the maintenance table CSV
        df_keep_info.to_csv(keep_info_path, index=False, encoding="utf-8-sig")
        print(
            f"Maintenance table updated: {keep_info_path} (rows={len(df_keep_info)} add={add_rows} rm={rm_rows})"
        )


if __name__ == "__main__":
    time.sleep(5)
    follow_up_handle()
    time.sleep(5)