# result_validate_0.py
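"""Validate price-drop predictions against subsequently crawled prices.

Reads batch prediction CSVs (future_predictions_<YYYYMMDDHHMM>.csv), looks up
the actual prices recorded after each row's valid_begin_hour, and writes
per-row outcome columns (drop_flag, trigger_*, pnl, ...) to a result CSV.
"""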
import argparse
import datetime
import os

import pandas as pd

from data_loader import mongo_con_parse, validate_one_line, fill_hourly_crawl_date

def _validate_predict_df(df_predict):
    client, db = mongo_con_parse()
    count = 0
    for idx, row in df_predict.iterrows():
        city_pair = row['city_pair']
        flight_day = row['flight_day']
        flight_number_1 = row['flight_number_1']
        flight_number_2 = row['flight_number_2']
        baggage = row['baggage']
        valid_begin_hour = row['valid_begin_hour']
        valid_begin_dt = pd.to_datetime(valid_begin_hour, format='%Y-%m-%d %H:%M:%S')
        # valid_end_hour = row['valid_end_hour']
        # valid_end_dt = pd.to_datetime(valid_end_hour, format='%Y-%m-%d %H:%M:%S')
        update_hour = row['update_hour']
        update_dt = pd.to_datetime(update_hour, format='%Y-%m-%d %H:%M:%S')
        valid_begin_hour_modify = max(
            valid_begin_dt,
            update_dt
        ).strftime('%Y-%m-%d %H:%M:%S')
        df_val = validate_one_line(db, city_pair, flight_day, flight_number_1,
                                   flight_number_2, baggage, valid_begin_hour_modify)
        entry_price = pd.to_numeric(row.get('adult_total_price'), errors='coerce')
        crawl_dt = pd.to_datetime(row.get('crawl_date'), errors='coerce')
        batch_dt = pd.to_datetime(row.get('batch_time'), format="%Y%m%d%H%M", errors='coerce')
        wait_start_dt = pd.NaT
        wait_end_dt = pd.NaT
        dep_hour_dt = pd.to_datetime(row.get('from_time'), errors='coerce')
        if pd.notna(batch_dt):
            wait_start_dt = batch_dt.floor('h')
        if pd.notna(crawl_dt):
            crawl_floor = crawl_dt.floor('h')
            if pd.isna(wait_start_dt):
                wait_start_dt = crawl_floor
            else:
                wait_start_dt = max(wait_start_dt, crawl_floor)  # keep the near end of the wait window close to the prediction batch time
        if pd.notna(wait_start_dt):
            wait_end_dt = wait_start_dt + pd.Timedelta(hours=48)  # 48-hour wait window
        if pd.notna(dep_hour_dt):
            dep_hour_dt = dep_hour_dt.floor('h')
            cutoff_dt = dep_hour_dt - pd.Timedelta(hours=4)
            if pd.notna(wait_end_dt):
                wait_end_dt = min(wait_end_dt, cutoff_dt)  # the far end must not cross 4 hours before departure
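        # Net effect: wait window = [max(batch hour, crawl hour),
        #                            min(window start + 48h, departure hour - 4h)],
        # with both endpoints floored to the hour; if either endpoint is
        # missing, the observation window below is treated as empty.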
        # the database may have no data after valid_begin_hour at validation time
        if not df_val.empty:
            df_val_f = fill_hourly_crawl_date(df_val, rear_fill=2)
            df_val_f = df_val_f[df_val_f['is_filled'] == 0]  # keep only original rows, not the filled-in ones
            # df_val_f = df_val_f[df_val_f['update_hour'] <= valid_end_dt]
            if df_val_f.empty:
                drop_flag = 0
                # first_drop_amount = pd.NA
                first_drop_price = pd.NA
                first_drop_hours_until_departure = pd.NA
                first_drop_update_hour = pd.NA
                last_hours_util = pd.NA
                last_update_hour = pd.NA
                list_change_price = []
                list_change_hours = []
                drop_flag_window = 0
                first_lower_price = pd.NA
                first_lower_update_hour = pd.NA
                boundary_final_price = pd.NA
                boundary_final_update_hour = pd.NA
                trigger_type = pd.NA
                trigger_price = pd.NA
                trigger_update_hour = pd.NA
                pnl = pd.NA
                pnl_pct = pd.NA
            else:
                # last row of the valid data
                last_row = df_val_f.iloc[-1]
                last_hours_util = last_row['hours_until_departure']
                last_update_hour = last_row['update_hour']
                df_val_f['update_hour'] = pd.to_datetime(df_val_f['update_hour'], errors='coerce')
                # use the actual price aligned to batch_time as entry_price
                if pd.notna(batch_dt):
                    df_entry = df_val_f[df_val_f['update_hour'] <= batch_dt].copy()
                    if not df_entry.empty:
                        entry_price = df_entry.iloc[-1]['adult_total_price']
                if pd.notna(wait_start_dt) and pd.notna(wait_end_dt):
                    # build the observation window
                    df_window = df_val_f[
                        (df_val_f['update_hour'] >= wait_start_dt) &
                        (df_val_f['update_hour'] <= wait_end_dt)
                    ].copy()
                else:
                    df_window = df_val_f.iloc[0:0].copy()  # empty slice
                if not df_window.empty:
                    df_window = df_window.sort_values('update_hour')
                    df_window_price_changes = df_window.loc[
                        df_window["adult_total_price"].shift() != df_window["adult_total_price"]
                    ].copy()
                    df_window_price_changes['change_amount'] = (
                        df_window_price_changes['adult_total_price'].diff().fillna(0)
                    )
                    df_first_window_negative = df_window_price_changes[
                        df_window_price_changes['change_amount'] < -5
                    ].head(1)
                    # flag whether a price drop occurred within the observation window
                    drop_flag_window = 1 if not df_first_window_negative.empty else 0
                else:
                    drop_flag_window = 0
                first_lower_price = pd.NA
                first_lower_update_hour = pd.NA
                if not df_window.empty and pd.notna(entry_price) and pd.notna(wait_start_dt):
                    df_lower = df_window[
                        (df_window['update_hour'] > wait_start_dt) &
                        (df_window['adult_total_price'] <= entry_price - 5)
                    ].head(1)
                    if not df_lower.empty:  # first price (and its time) at or below entry_price - 5
                        first_lower_price = round(float(df_lower['adult_total_price'].iloc[0]), 2)
                        first_lower_update_hour = df_lower['update_hour'].iloc[0]
                boundary_final_price = pd.NA
                boundary_final_update_hour = pd.NA
                if not df_window.empty:  # price and time at the far boundary of the observation window
                    boundary_row = df_window.iloc[-1]
                    boundary_final_price = boundary_row['adult_total_price']
                    boundary_final_update_hour = boundary_row['update_hour']
                trigger_type = pd.NA
                trigger_price = pd.NA
                trigger_update_hour = pd.NA
                if pd.notna(first_lower_price):
                    trigger_type = "first_lower"  # a price drop occurred
                    trigger_price = first_lower_price
                    trigger_update_hour = first_lower_update_hour
                elif pd.notna(boundary_final_price):
                    trigger_type = "boundary"  # reached the window boundary
                    trigger_price = boundary_final_price
                    trigger_update_hour = boundary_final_update_hour
                else:
                    trigger_type = "no_data"
                if pd.notna(entry_price) and pd.notna(trigger_price):
                    pnl = round(float(entry_price - trigger_price), 2)  # profit/loss amount, relative to entry_price
                    if entry_price != 0:
                        pnl_pct = round(float(pnl) / float(entry_price) * 100, 2)  # profit/loss percentage, relative to entry_price
                    else:
                        pnl_pct = pd.NA
                else:
                    pnl = pd.NA
                    pnl_pct = pd.NA
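                # pnl convention: entry_price minus the trigger price, so a
                # positive pnl means waiting would have bought the ticket cheaper.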
                # keep only rows where the price changed
                df_price_changes = df_val_f.loc[
                    df_val_f["adult_total_price"].shift() != df_val_f["adult_total_price"]
                ].copy()
                # size of each price change
                df_price_changes['change_amount'] = df_price_changes['adult_total_price'].diff().fillna(0)
                # find the first row with change_amount below -5
                first_negative_change = df_price_changes[df_price_changes['change_amount'] < -5].head(1)
                # extract the needed values
                if not first_negative_change.empty:
                    drop_flag = 1
                    # first_drop_amount = first_negative_change['change_amount'].iloc[0].round(2)
                    first_drop_price = round(float(first_negative_change['adult_total_price'].iloc[0]), 2)
                    first_drop_hours_until_departure = first_negative_change['hours_until_departure'].iloc[0]
                    first_drop_update_hour = first_negative_change['update_hour'].iloc[0]
                else:
                    drop_flag = 0
                    # first_drop_amount = pd.NA
                    first_drop_price = pd.NA
                    first_drop_hours_until_departure = pd.NA
                    first_drop_update_hour = pd.NA
                list_change_price = df_price_changes['adult_total_price'].tolist()
                list_change_hours = df_price_changes['hours_until_departure'].tolist()
        else:
            drop_flag = 0
            # first_drop_amount = pd.NA
            first_drop_price = pd.NA
            first_drop_hours_until_departure = pd.NA
            first_drop_update_hour = pd.NA
            last_hours_util = pd.NA
            last_update_hour = pd.NA
            list_change_price = []
            list_change_hours = []
            drop_flag_window = 0
            first_lower_price = pd.NA
            first_lower_update_hour = pd.NA
            boundary_final_price = pd.NA
            boundary_final_update_hour = pd.NA
            trigger_type = pd.NA
            trigger_price = pd.NA
            trigger_update_hour = pd.NA
            pnl = pd.NA
            pnl_pct = pd.NA
        safe_sep = "; "
        df_predict.at[idx, 'change_prices'] = safe_sep.join(map(str, list_change_price))
        df_predict.at[idx, 'change_hours'] = safe_sep.join(map(str, list_change_hours))
        df_predict.at[idx, 'last_hours_util'] = last_hours_util
        df_predict.at[idx, 'last_update_hour'] = last_update_hour
        # df_predict.at[idx, 'first_drop_amount'] = first_drop_amount * -1  # flip negative to positive
        df_predict.at[idx, 'first_drop_price'] = first_drop_price
        df_predict.at[idx, 'first_drop_hours_until_departure'] = first_drop_hours_until_departure
        df_predict.at[idx, 'first_drop_update_hour'] = first_drop_update_hour
        df_predict.at[idx, 'drop_flag'] = drop_flag
        df_predict.at[idx, 'wait_start_hour'] = wait_start_dt
        df_predict.at[idx, 'wait_end_hour'] = wait_end_dt
        df_predict.at[idx, 'drop_flag_window'] = drop_flag_window
        df_predict.at[idx, 'first_lower_price'] = first_lower_price
        df_predict.at[idx, 'first_lower_update_hour'] = first_lower_update_hour
        df_predict.at[idx, 'boundary_final_price'] = boundary_final_price
        df_predict.at[idx, 'boundary_final_update_hour'] = boundary_final_update_hour
        df_predict.at[idx, 'trigger_type'] = trigger_type
        df_predict.at[idx, 'trigger_price'] = trigger_price
        df_predict.at[idx, 'trigger_update_hour'] = trigger_update_hour
        df_predict.at[idx, 'pnl'] = pnl
        df_predict.at[idx, 'pnl_pct'] = pnl_pct
        count += 1
        if count % 5 == 0:
            print(f"cal count: {count}")
    print("calculation finished")
    client.close()
    return df_predict
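
# Minimal usage sketch for _validate_predict_df (hypothetical data; column
# names are the ones read in the loop above; requires the data_loader Mongo
# helpers to be reachable):
#
#   df = pd.DataFrame([{
#       'city_pair': 'SHA-SZX', 'flight_day': '2026-02-10',
#       'flight_number_1': 'XX1234', 'flight_number_2': '',
#       'baggage': 0, 'valid_begin_hour': '2026-02-06 10:00:00',
#       'update_hour': '2026-02-06 10:00:00', 'adult_total_price': 800.0,
#       'crawl_date': '2026-02-06 09:55:00', 'batch_time': '202602061000',
#       'from_time': '2026-02-10 12:00:00',
#   }])
#   df = _validate_predict_df(df)  # adds drop_flag / trigger_* / pnl columns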

def validate_process(node, interval_hours, pred_time_str):
    '''Manual validation entry point.'''
    date = pred_time_str[4:8]  # MMDD part of YYYYMMDDHHMM
    output_dir = f"./validate/{node}_{date}"
    os.makedirs(output_dir, exist_ok=True)
    object_dir = "./predictions_0"
    csv_file = f'future_predictions_{pred_time_str}.csv'
    csv_path = os.path.join(object_dir, csv_file)
    try:
        df_predict = pd.read_csv(csv_path)
    except Exception as e:
        print(f"read {csv_path} error: {str(e)}")
        df_predict = pd.DataFrame()
    if df_predict.empty:
        print("prediction data is empty")
        return
    df_predict = _validate_predict_df(df_predict)
    timestamp_str = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    save_csv = f"result_validate_{node}_{pred_time_str}_{timestamp_str}.csv"
    output_path = os.path.join(output_dir, save_csv)
    df_predict.to_csv(output_path, index=False, encoding="utf-8-sig")
    print(f"saved: {output_path}")
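
# e.g. validate_process("node0127", 0, "202601301500") validates the batch
# file ./predictions_0/future_predictions_202601301500.csv by hand.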

def validate_process_auto(node, interval_hours):
    '''Automatic validation entry point.'''
    # current time, floored to the hour
    current_time = datetime.datetime.now()
    current_time_str = current_time.strftime("%Y%m%d%H%M")
    hourly_time = current_time.replace(minute=0, second=0, microsecond=0)
    hourly_time_str = hourly_time.strftime("%Y%m%d%H%M")
    print(f"validation time: {current_time_str}, floored: {hourly_time_str}")
    output_dir = f"./validate/{node}"
    os.makedirs(output_dir, exist_ok=True)
    object_dir = "./predictions_0"
    # check that the directory exists
    if not os.path.exists(object_dir):
        print(f"directory does not exist: {object_dir}")
        return
    # collect all CSV files starting with future_predictions_
    csv_files = []
    for file in os.listdir(object_dir):
        if file.startswith("future_predictions_") and file.endswith(".csv"):
            csv_files.append(file)
    if not csv_files:
        print(f"no future_predictions_ CSV files found in {object_dir}")
        return
    # extract the timestamps and convert them to datetime objects
    file_times = []
    for file in csv_files:
        # timestamp part: future_predictions_202601151600.csv -> 202601151600
        timestamp_str = file.replace("future_predictions_", "").replace(".csv", "")
        try:
            file_time = datetime.datetime.strptime(timestamp_str, "%Y%m%d%H%M")
            file_times.append((file, file_time))
        except ValueError as e:
            print(f"bad timestamp format in file {file}: {e}")
            continue
    if not file_times:
        print("no files with valid timestamps found")
        return
    # target file to validate (current hour minus 56 hours: 48 + (12 - 4) = 56)
    target_time = hourly_time - datetime.timedelta(hours=56)
    target_time_str = target_time.strftime("%Y%m%d%H%M")
    print(f"target validation time: {target_time_str}")
    valid_files = [(f, t) for f, t in file_times if t == target_time]
    if not valid_files:
        print(f"no file found for target time {target_time.strftime('%Y%m%d%H%M')}")
        return
    valid_file, valid_time = valid_files[0]
    valid_time_str = valid_time.strftime("%Y%m%d%H%M")
    print(f"found matching file: {valid_file} (time: {valid_time_str})")
    csv_path = os.path.join(object_dir, valid_file)
    # start validation
    try:
        df_predict = pd.read_csv(csv_path)
    except Exception as e:
        print(f"read {csv_path} error: {str(e)}")
        df_predict = pd.DataFrame()
    if df_predict.empty:
        print("prediction data is empty")
        return
    df_predict = _validate_predict_df(df_predict)
    timestamp_str = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    save_csv = f"result_validate_{node}_{valid_time_str}_{timestamp_str}.csv"
    output_path = os.path.join(output_dir, save_csv)
    df_predict.to_csv(output_path, index=False, encoding="utf-8-sig")
    print(f"saved: {output_path}")
    print(f"validation finished: {node} {valid_time_str}")
    print()
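
# Offset example: if the script runs at 2026-01-17 16:xx, the hourly floor is
# 2026-01-17 16:00 and the target batch is 2026-01-15 08:00 (56 hours earlier),
# old enough that its 48-hour wait window plus the 8-hour margin has elapsed.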

def validate_process_zong(node, enable_min_max_batch_flag=False, min_batch_time_str=None, max_batch_time_str=None):
    '''Aggregate validation over all prediction batches.'''
    object_dir = "./predictions_0"
    output_dir = f"./validate/{node}_zong"
    os.makedirs(output_dir, exist_ok=True)
    # check that the directory exists
    if not os.path.exists(object_dir):
        print(f"directory does not exist: {object_dir}")
        return
    # collect all CSV files starting with future_predictions_
    csv_files = []
    for file in os.listdir(object_dir):
        if file.startswith("future_predictions_") and file.endswith(".csv"):
            csv_files.append(file)
    if not csv_files:
        print(f"no future_predictions_ CSV files found in {object_dir}")
        return
    csv_files.sort()
    list_df_will_drop = []
    min_batch_dt = None
    max_batch_dt = None
    if enable_min_max_batch_flag:
        if not min_batch_time_str and not max_batch_time_str:
            print("enable_min_max_batch_flag=True but neither min_batch_time_str nor max_batch_time_str was given, exiting")
            return
        if min_batch_time_str:
            min_batch_dt = datetime.datetime.strptime(min_batch_time_str, "%Y%m%d%H%M")
            min_batch_dt = min_batch_dt.replace(minute=0, second=0, microsecond=0)
        if max_batch_time_str:
            max_batch_dt = datetime.datetime.strptime(max_batch_time_str, "%Y%m%d%H%M")
            max_batch_dt = max_batch_dt.replace(minute=0, second=0, microsecond=0)
        if min_batch_dt is not None and max_batch_dt is not None and min_batch_dt > max_batch_dt:
            print(f"invalid time range: min_batch_time_str({min_batch_time_str}) > max_batch_time_str({max_batch_time_str}), exiting")
            return
    # iterate over all prediction files
    for csv_file in csv_files:
        batch_time_str = (
            csv_file.replace("future_predictions_", "").replace(".csv", "")
        )
        batch_dt = datetime.datetime.strptime(batch_time_str, "%Y%m%d%H%M")
        batch_hour_dt = batch_dt.replace(minute=0, second=0, microsecond=0)
        if min_batch_dt is not None and batch_hour_dt < min_batch_dt:
            continue
        if max_batch_dt is not None and batch_hour_dt > max_batch_dt:
            continue
        csv_path = os.path.join(object_dir, csv_file)
        try:
            df_predict = pd.read_csv(csv_path)
        except Exception as e:
            print(f"read {csv_path} error: {str(e)}")
            df_predict = pd.DataFrame()
        if df_predict.empty:
            print(f"prediction data is empty: {csv_file}")
            continue
        if "will_price_drop" not in df_predict.columns:
            print(f"missing will_price_drop column, skipping: {csv_file}")
            continue
        df_predict_will_drop = df_predict[df_predict["will_price_drop"] == 1].copy()
        if df_predict_will_drop.empty:
            continue
        # df_predict_will_drop["batch_file"] = csv_file
        df_predict_will_drop["batch_time"] = batch_time_str
        list_df_will_drop.append(df_predict_will_drop)  # keep each batch's will_drop rows
        del df_predict
    if not list_df_will_drop:
        print("will_drop is empty in every batch")
        return
    # === 1. concatenate the will_drop rows from all batches ===
    df_predict_will_drop_all = pd.concat(list_df_will_drop, ignore_index=True)
    # free the temporary list (worthwhile when it is large)
    del list_df_will_drop
    before_rows = len(df_predict_will_drop_all)
    # grouping keys that uniquely identify a flight
    group_keys = ["city_pair", "flight_number_1", "flight_number_2", "flight_day"]
    # === 2. convert batch_time to datetime for the gap checks ===
    df_predict_will_drop_all["batch_dt"] = pd.to_datetime(
        df_predict_will_drop_all["batch_time"],
        format="%Y%m%d%H%M",
        errors="coerce",  # invalid timestamps become NaT
    )
    # === 3. infer the "normal" batch_time step (in minutes) ===
    diff_minutes = (
        df_predict_will_drop_all["batch_dt"].dropna().sort_values().drop_duplicates().diff()
        .dt.total_seconds()
        .div(60)
        .dropna()
    )
    # take the most frequent gap as the expected step; default to 60 minutes
    expected_step_minutes = (
        int(diff_minutes.value_counts().idxmax()) if not diff_minutes.empty else 60
    )
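    # Example: batches at 10:00, 11:00, 12:00 and 15:00 give gaps of 60, 60 and
    # 180 minutes, so the modal gap (60) becomes the expected step and the
    # 180-minute gap later starts a new segment.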
    # === 4. sort by flight + batch time to prepare the continuity check ===
    df_predict_will_drop_all.sort_values(
        by=group_keys + ["batch_dt"],
        inplace=True,
        ignore_index=True,
        na_position="last",
    )
    # === 5. gap between adjacent batch_dt values within each group ===
    df_predict_will_drop_all["prev_batch_dt"] = df_predict_will_drop_all.groupby(group_keys)[
        "batch_dt"
    ].shift(1)
    df_predict_will_drop_all["gap_minutes"] = (
        (df_predict_will_drop_all["batch_dt"] - df_predict_will_drop_all["prev_batch_dt"])
        .dt.total_seconds()
        .div(60)
    )
    # === 6. flag rows that start a new contiguous segment ===
    # A row starts a new segment when:
    #   1) prev_batch_dt is missing (first row of the group)
    #   2) batch_dt is missing (uncommon)
    #   3) the gap to the previous row differs from the expected step
    df_predict_will_drop_all["is_new_segment"] = (
        df_predict_will_drop_all["prev_batch_dt"].isna()
        | df_predict_will_drop_all["batch_dt"].isna()
        | (df_predict_will_drop_all["gap_minutes"] != expected_step_minutes)
    )
    # === 7. assign segment ids ===
    # within a flight, the id increments at every new segment
    df_predict_will_drop_all["segment_id"] = df_predict_will_drop_all.groupby(group_keys)[
        "is_new_segment"
    ].cumsum()
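    # Illustration: for is_new_segment = [True, False, True, False, False],
    # cumsum yields segment_id = [1, 1, 2, 2, 2], i.e. each run of consecutive
    # batches shares one id.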
    # === 8. last hours_until_departure of each contiguous segment ===
    df_segment_last = df_predict_will_drop_all.groupby(
        group_keys + ["segment_id"], as_index=False
    ).agg(last_hours_until_departure=("hours_until_departure", "last"))
    # === 9. keep only the first record of each segment and attach the tail info ===
    df_predict_will_drop_filter = df_predict_will_drop_all.drop_duplicates(
        subset=group_keys + ["segment_id"], keep="first"
    ).merge(
        df_segment_last,
        on=group_keys + ["segment_id"],
        how="left",
    )
    # === 10. drop the intermediate helper columns ===
    df_predict_will_drop_filter = (
        df_predict_will_drop_filter.drop(
            columns=[
                "batch_dt",
                "prev_batch_dt",
                "gap_minutes",
                "is_new_segment",
                "segment_id",
            ]
        )
        .reset_index(drop=True)
    )
    # === 11. reorder columns (last_hours_until_departure right before price_change_percent) ===
    if "price_change_percent" in df_predict_will_drop_filter.columns:
        cols = df_predict_will_drop_filter.columns.tolist()
        if "last_hours_until_departure" in cols:
            cols.remove("last_hours_until_departure")
            cols.insert(cols.index("price_change_percent"), "last_hours_until_departure")
            df_predict_will_drop_filter = df_predict_will_drop_filter[cols]
    after_rows = len(df_predict_will_drop_filter)
    print(
        f"will_drop contiguous-segment filtering done (step={expected_step_minutes}min): {before_rows} -> {after_rows}"
    )
    # current time, floored to the hour
    current_time = datetime.datetime.now()
    current_time_str = current_time.strftime("%Y%m%d%H%M")
    hourly_time = current_time.replace(minute=0, second=0, microsecond=0)
    hourly_time_str = hourly_time.strftime("%Y%m%d%H%M")
    before_rows = len(df_predict_will_drop_filter)
    df_predict_will_drop_filter["valid_end_dt"] = pd.to_datetime(
        df_predict_will_drop_filter["valid_end_hour"],
        errors="coerce",
    )
    df_predict_will_drop_filter_1 = df_predict_will_drop_filter[
        (df_predict_will_drop_filter["valid_end_dt"] + pd.Timedelta(hours=8))
        <= hourly_time
    ].copy()
    df_predict_will_drop_filter_1.drop(columns=["valid_end_dt"], inplace=True)
    after_rows = len(df_predict_will_drop_filter_1)
    print(
        f"valid_end_hour(+8h) filtering done: {before_rows} -> {after_rows} (hourly_time={hourly_time_str})"
    )
    # start validation
    df_predict_will_drop_validate = _validate_predict_df(df_predict_will_drop_filter_1)
    timestamp_str = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    save_csv = f"result_validate_{node}_zong_{timestamp_str}.csv"
    output_path = os.path.join(output_dir, save_csv)
    df_predict_will_drop_validate.to_csv(output_path, index=False, encoding="utf-8-sig")
    print(f"saved: {output_path}")
    print(f"validation finished: {node} zong")
    print()
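
# A minimal, self-contained sketch of the run-segmentation used in steps 5-9
# above, on synthetic data. _demo_segment_filter and its toy frame are
# illustrations only and are not called anywhere in this script.
def _demo_segment_filter():
    df = pd.DataFrame({
        "flight": ["A", "A", "A", "A", "B"],
        "batch_dt": pd.to_datetime([
            "2026-02-06 10:00", "2026-02-06 11:00",  # contiguous hourly run
            "2026-02-06 14:00", "2026-02-06 15:00",  # new run after a 3-hour gap
            "2026-02-06 10:00",
        ]),
    })
    step = 60  # expected step in minutes
    df = df.sort_values(["flight", "batch_dt"], ignore_index=True)
    # gap to the previous batch within each flight, in minutes
    gap = df.groupby("flight")["batch_dt"].diff().dt.total_seconds().div(60)
    # a missing or off-step gap starts a new segment; cumsum numbers the runs
    df["segment_id"] = (gap.isna() | (gap != step)).astype(int).groupby(df["flight"]).cumsum()
    # first row of each run survives: A keeps 10:00 and 14:00, B keeps 10:00
    return df.drop_duplicates(subset=["flight", "segment_id"], keep="first")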

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='validation script')
    parser.add_argument('--interval', type=int, choices=[0, 1],
                        default=0, help='interval in hours (0 = manual, 1 = auto)')
    args = parser.parse_args()
    interval_hours = args.interval
    # 0: manual validation
    if interval_hours == 0:
        # node, pred_time_str = "node0127", "202601301500"
        # validate_process(node, interval_hours, pred_time_str)
        # node = "node0127"
        # validate_process_zong(node)  # unconditional aggregation
        # node = "node0127"
        # validate_process_zong(node, True, None, "202602051400")  # conditional aggregation
        # node = "node0203"
        # validate_process_zong(node, True, "202602041100", "202602051400")  # conditional aggregation
        node = "node0205"
        validate_process_zong(node, True, "202602061000", "202602091000")  # conditional aggregation
    # 1: automatic validation
    else:
        node = "node0127"
        validate_process_auto(node, interval_hours)