Browse Source

针对24小时的快速降价, 调整训练与预测的过程

node04 2 weeks ago
parent
commit
7e1ae70f96
1 changed file with 150 additions and 91 deletions
  1. 150 91
      data_preprocess.py

+ 150 - 91
data_preprocess.py

@@ -889,10 +889,13 @@ def preprocess_data_simple(df_input, is_train=False):
         .round(4)
     )
 
-    # 第一步:标记价格变化段
+    # 第一步:标记价格变化段(按“是否发生新的实际变价事件”切段)
+    # 这样即使连续两次变价金额相同(如 -50, -50),也会分到不同段
+    _price_change_event = df_input['_raw_price_diff'].abs().ge(price_change_amount_threshold)
     df_input['price_change_segment'] = (
-        df_input.groupby(['gid', 'baggage'], group_keys=False)['price_change_amount']
-        .apply(lambda s: (s != s.shift()).cumsum())
+        _price_change_event
+        .groupby([df_input['gid'], df_input['baggage']], group_keys=False)
+        .cumsum()
     )
 
     # 第二步:计算每个变化段内的持续时间
@@ -941,7 +944,14 @@ def preprocess_data_simple(df_input, is_train=False):
 
         # 对于先升后降(先降后降)的分析
         seg_start_mask = df_target['price_duration_hours'].eq(1)   # 开始变价节点
-        drop_mask = seg_start_mask & ((prev_pct > 0) | (prev_pct < 0)) & (df_target['price_change_percent'] < 0)
+        # 正例库仅保留24小时内发生的降价:上一价格段持续时长需<=24h
+        prev_pct_num = pd.to_numeric(prev_pct, errors='coerce')
+        drop_mask = (
+            seg_start_mask
+            & prev_pct_num.notna()
+            & (df_target['price_change_percent'] < 0)
+            & prev_dur.le(24)
+        )
         
         df_drop_nodes = df_target.loc[drop_mask, ['gid', 'hours_until_departure', 'days_to_departure', 'update_hour']].copy()
         df_drop_nodes.rename(columns={'hours_until_departure': 'drop_hours_until_departure'}, inplace=True)
@@ -957,7 +967,7 @@ def preprocess_data_simple(df_input, is_train=False):
         df_drop_nodes = df_drop_nodes.reset_index(drop=True)
 
         flight_info_cols = [
-            'city_pair',
+            'gid', 'city_pair',
             'flight_number_1', 'seg1_dep_air_port', 'seg1_dep_time', 'seg1_arr_air_port', 'seg1_arr_time',
             'flight_number_2', 'seg2_dep_air_port', 'seg2_dep_time', 'seg2_arr_air_port', 'seg2_arr_time',
             'currency', 'baggage', 'flight_day',
@@ -965,7 +975,7 @@ def preprocess_data_simple(df_input, is_train=False):
 
         flight_info_cols = [c for c in flight_info_cols if c in df_target.columns]
         
-        df_gid_info = df_target[['gid'] + flight_info_cols].drop_duplicates(subset=['gid']).reset_index(drop=True)
+        df_gid_info = df_target[flight_info_cols].drop_duplicates(subset=['gid']).reset_index(drop=True)
         df_drop_nodes = df_drop_nodes.merge(df_gid_info, on='gid', how='left')
 
         drop_info_cols = ['drop_update_hour', 'drop_days_to_departure',
@@ -973,13 +983,35 @@ def preprocess_data_simple(df_input, is_train=False):
                           'high_price_duration_hours', 'high_price_change_percent', 'high_price_change_amount',
                           'high_price_amount', 'high_price_seats_remaining',
         ]
-        # 按顺序排列 去掉gid
+        # 按顺序排列 保留gid
         df_drop_nodes = df_drop_nodes[flight_info_cols + drop_info_cols]
         # df_drop_nodes = df_drop_nodes[df_drop_nodes['drop_price_change_percent'] <= -0.01]   # 太低的降幅不计
 
-        # 对于先升再升(先降再升)的分析
+        # 反例库:所有有效节点(不限升价)中,未来24小时内未发生降价
         # seg_start_mask = df_target['price_duration_hours'].eq(1)
-        rise_mask = seg_start_mask & ((prev_pct > 0) | (prev_pct < 0)) & (df_target['price_change_percent'] > 0)
+        # valid_mask = seg_start_mask & ((prev_pct > 0) | (prev_pct < 0))
+        prev_pct_num = pd.to_numeric(prev_pct, errors='coerce')
+        valid_mask = seg_start_mask & prev_pct_num.notna()
+        
+        curr_pct = pd.to_numeric(df_target['price_change_percent'], errors='coerce')
+        prev_dur_num = pd.to_numeric(prev_dur, errors='coerce')
+        pos_case_mask = curr_pct.ge(0)
+        neg_case_mask = curr_pct.lt(0) & prev_dur_num.gt(24)
+
+        # next_seg_hours = pd.Series(index=df_target.index, dtype='float64')
+        # next_seg_pct = pd.Series(index=df_target.index, dtype='float64')
+        # next_seg_hours.loc[seg_start_mask] = (
+        #     df_target.loc[seg_start_mask].groupby('gid')['hours_until_departure'].shift(-1).to_numpy()
+        # )
+        # next_seg_pct.loc[seg_start_mask] = (
+        #     df_target.loc[seg_start_mask].groupby('gid')['price_change_percent'].shift(-1).to_numpy()
+        # )
+
+        # hours_to_next_seg = df_target['hours_until_departure'] - next_seg_hours
+        # drop_within_24h = next_seg_pct.lt(0) & hours_to_next_seg.ge(0) & hours_to_next_seg.le(24)
+
+        rise_mask = valid_mask & (pos_case_mask | neg_case_mask)
+        # rise_mask = seg_start_mask & ((prev_pct > 0) | (prev_pct < 0)) & (df_target['price_change_percent'] > 0)
 
         df_rise_nodes = df_target.loc[rise_mask, ['gid', 'hours_until_departure', 'days_to_departure', 'update_hour']].copy()
         df_rise_nodes.rename(columns={'hours_until_departure': 'rise_hours_until_departure'}, inplace=True)
@@ -1059,57 +1091,57 @@ def predict_data_simple(df_input, group_route_str, output_dir, predict_dir=".",
     else:
         df_rise_nodes = pd.DataFrame()
 
-    # # ==================== 跨航班日包络线 + 降价潜力 ====================
-    # print(">>> 构建跨航班日价格包络线")
-    # flight_key = ['city_pair', 'flight_number_1', 'flight_number_2']
-    # day_key = flight_key + ['flight_day']
-
-    # # 1. 历史侧:加载训练阶段的峰值数据
-    # envelope_csv_path = os.path.join(output_dir, f'{group_route_str}_envelope_info.csv')
-    # if os.path.exists(envelope_csv_path):
-    #     df_hist = pd.read_csv(envelope_csv_path)
-    #     df_hist = df_hist[day_key + ['peak_price', 'peak_hours']]
-    #     df_hist['source'] = 'hist'
-    # else:
-    #     df_hist = pd.DataFrame()
-
-    # # 2. 未来侧:当前在售价格
-    # df_future = df_min_hours[day_key + ['adult_total_price', 'hours_until_departure']].copy().rename(
-    #     columns={'adult_total_price': 'peak_price', 'hours_until_departure': 'peak_hours'}
-    # )
-    # df_future['source'] = 'future'
-
-    # # 3. 合并包络线数据点
-    # df_envelope_all = pd.concat(
-    #     [x for x in [df_hist, df_future] if not x.empty], ignore_index=True
-    # ).drop_duplicates(subset=day_key, keep='last')
-
-    # # 4. 包络线统计 + 找高点起飞日
-    # df_envelope_agg = df_envelope_all.groupby(flight_key).agg(
-    #     envelope_max=('peak_price', 'max'),               # 峰值最大 
-    #     envelope_min=('peak_price', 'min'),               # 峰值最小
-    #     envelope_mean=('peak_price', 'mean'),             # 峰值平均
-    #     envelope_count=('peak_price', 'count'),           # 峰值统计总数
-    #     envelope_avg_peak_hours=('peak_hours', 'mean'),   # 峰值发生的距离起飞小时数, 做一下平均
-    # ).reset_index()
-
-    # # 对数值列保留两位小数
-    # df_envelope_agg[['envelope_mean', 'envelope_avg_peak_hours']] = df_envelope_agg[['envelope_mean', 'envelope_avg_peak_hours']].round(2)
-
-    # idx_top = df_envelope_all.groupby(flight_key)['peak_price'].idxmax()
-    # df_top = df_envelope_all.loc[idx_top, flight_key + ['flight_day', 'peak_price', 'peak_hours']].rename(
-    #     columns={'flight_day': 'target_flight_day', 'peak_price': 'target_price', 'peak_hours': 'target_peak_hours'}
-    # )
-    # df_envelope_agg = df_envelope_agg.merge(df_top, on=flight_key, how='left')
-
-    # # 5. 合并到 df_min_hours
-    # df_min_hours = df_min_hours.merge(df_envelope_agg, on=flight_key, how='left')
-    # price_range = (df_min_hours['envelope_max'] - df_min_hours['envelope_min']).replace(0, 1)    # 计算当前价格在包络区间的百分位
-    # df_min_hours['envelope_position'] = (
-    #     (df_min_hours['adult_total_price'] - df_min_hours['envelope_min']) / price_range
-    # ).clip(0, 1).round(4)
+    # ==================== 跨航班日包络线 + 降价潜力 ====================
+    print(">>> 构建跨航班日价格包络线")
+    flight_key = ['city_pair', 'flight_number_1', 'flight_number_2']
+    day_key = flight_key + ['flight_day']
+
+    # 1. 历史侧:加载训练阶段的峰值数据
+    envelope_csv_path = os.path.join(output_dir, f'{group_route_str}_envelope_info.csv')
+    if os.path.exists(envelope_csv_path):
+        df_hist = pd.read_csv(envelope_csv_path)
+        df_hist = df_hist[day_key + ['peak_price', 'peak_hours']]
+        df_hist['source'] = 'hist'
+    else:
+        df_hist = pd.DataFrame()
+
+    # 2. 未来侧:当前在售价格
+    df_future = df_min_hours[day_key + ['adult_total_price', 'hours_until_departure']].copy().rename(
+        columns={'adult_total_price': 'peak_price', 'hours_until_departure': 'peak_hours'}
+    )
+    df_future['source'] = 'future'
+
+    # 3. 合并包络线数据点
+    df_envelope_all = pd.concat(
+        [x for x in [df_hist, df_future] if not x.empty], ignore_index=True
+    ).drop_duplicates(subset=day_key, keep='last')
+
+    # 4. 包络线统计 + 找高点起飞日
+    df_envelope_agg = df_envelope_all.groupby(flight_key).agg(
+        envelope_max=('peak_price', 'max'),               # 峰值最大 
+        envelope_min=('peak_price', 'min'),               # 峰值最小
+        envelope_mean=('peak_price', 'mean'),             # 峰值平均
+        envelope_count=('peak_price', 'count'),           # 峰值统计总数
+        envelope_avg_peak_hours=('peak_hours', 'mean'),   # 峰值发生的距离起飞小时数, 做一下平均
+    ).reset_index()
+
+    # 对数值列保留两位小数
+    df_envelope_agg[['envelope_mean', 'envelope_avg_peak_hours']] = df_envelope_agg[['envelope_mean', 'envelope_avg_peak_hours']].round(2)
+
+    idx_top = df_envelope_all.groupby(flight_key)['peak_price'].idxmax()
+    df_top = df_envelope_all.loc[idx_top, flight_key + ['flight_day', 'peak_price', 'peak_hours']].rename(
+        columns={'flight_day': 'target_flight_day', 'peak_price': 'target_price', 'peak_hours': 'target_peak_hours'}
+    )
+    df_envelope_agg = df_envelope_agg.merge(df_top, on=flight_key, how='left')
+
+    # 5. 合并到 df_min_hours
+    df_min_hours = df_min_hours.merge(df_envelope_agg, on=flight_key, how='left')
+    price_range = (df_min_hours['envelope_max'] - df_min_hours['envelope_min']).replace(0, 1)    # 计算当前价格在包络区间的百分位
+    df_min_hours['envelope_position'] = (
+        (df_min_hours['adult_total_price'] - df_min_hours['envelope_min']) / price_range
+    ).clip(0, 1).round(4)
     # df_min_hours['is_envelope_peak'] = (df_min_hours['envelope_position'] >= 0.75).astype(int)   # 0.95 -> 0.75
-    # df_min_hours['is_target_day'] = (df_min_hours['flight_day'] == df_min_hours['target_flight_day']).astype(int)
+    df_min_hours['is_target_day'] = (df_min_hours['flight_day'] == df_min_hours['target_flight_day']).astype(int)
 
     # # ==================== 目标二:降价潜力评分 ====================
     # # 用“上涨后回落倾向”替代简单计数:drop / (drop + rise)
@@ -1151,26 +1183,27 @@ def predict_data_simple(df_input, group_route_str, output_dir, predict_dir=".",
     #         (df_min_hours['drop_freq_count'] + alpha) / denom.replace(0, np.nan)
     #     ).fillna(0.0).clip(0, 1).round(4)
         
-    # # ==================== 综合评分:包络高位 × 降价潜力 ====================
-    # # target_score = 包络位置(越高越好)× 降价潜力(越高越好)
+    # ==================== 综合评分:包络高位 × 降价潜力 ====================
+    # target_score = 包络位置(越高越好)× 降价潜力(越高越好)
     # thres_ep = 0.6
     # thres_dp = 0.4
     # df_min_hours['target_score'] = (
     #     df_min_hours['envelope_position'] * thres_ep + df_min_hours['drop_potential'] * thres_dp
     # ).round(4)
 
-    # # 综合评分阈值:大于阈值的都认为值得投放
-    # target_score_threshold = 0.75
-    # # df_min_hours['target_score_threshold'] = target_score_threshold
-    # df_min_hours['is_good_target'] = (df_min_hours['target_score'] >= target_score_threshold).astype(int)
+    # 综合评分阈值:大于阈值的都认为值得投放
+    target_score_threshold = 0.5
+    # df_min_hours['target_score_threshold'] = target_score_threshold
+    df_min_hours['is_good_target'] = (df_min_hours['envelope_position'] >= target_score_threshold).astype(int)
 
-    # print(f">>> 包络线+降价潜力评分完成")
-    # del df_hist, df_future, df_envelope_all, df_envelope_agg, df_top, df_drop_freq, df_rise_freq
+    print(f">>> 包络线+降价潜力评分完成")
+    del df_hist, df_future, df_envelope_all, df_envelope_agg, df_top   # df_drop_freq, df_rise_freq
     
-    # df_min_hours = df_min_hours[(df_min_hours['is_good_target'] == 1) & (df_min_hours['seats_remaining'] >= 5)].reset_index(drop=True)   # 保留值得投放的 
-
+    total_cnt_before = len(df_min_hours)   # 记录下过滤前的总数
+    df_min_hours = df_min_hours[(df_min_hours['is_good_target'] == 1) & (df_min_hours['seats_remaining'] >= 3)].reset_index(drop=True)   # 保留值得投放的 
+    total_cnt_after = len(df_min_hours)    # 记录下过滤后的总数
     # =====================================================================
-    df_min_hours = df_min_hours[(df_min_hours['seats_remaining'] >= 5)].reset_index(drop=True)
+    # df_min_hours = df_min_hours[(df_min_hours['seats_remaining'] >= 5)].reset_index(drop=True)
 
     df_min_hours['simple_will_price_drop'] = 0   
     # df_min_hours['simple_drop_in_hours'] = 0
@@ -1187,9 +1220,9 @@ def predict_data_simple(df_input, group_route_str, output_dir, predict_dir=".",
     df_min_hours['rise_price_sample_size'] = 0
 
     # 这个阈值取多少?
-    pct_threshold = 0.01
+    pct_threshold = 0.1
     # pct_threshold = 2
-    pct_threshold_1 = 0.01
+    pct_threshold_1 = 0.1
     # pct_threshold_c = 0.001
 
     for idx, row in df_min_hours.iterrows(): 
@@ -1252,11 +1285,20 @@ def predict_data_simple(df_input, group_route_str, output_dir, predict_dir=".",
                 df_drop_gap['price_abs_gap'] = df_drop_gap['price_gap'].abs()
 
                 df_drop_gap = df_drop_gap.sort_values(['price_abs_gap', 'pct_abs_gap'], ascending=[True, True])
-                df_match = df_drop_gap[(df_drop_gap['pct_abs_gap'] <= pct_threshold) & (df_drop_gap['price_abs_gap'] <= 1.0)].copy()
+                same_sign_mask = (
+                    np.sign(pd.to_numeric(df_drop_gap['high_price_change_percent'], errors='coerce'))
+                    == np.sign(pct_base)
+                )
+                df_match = df_drop_gap[
+                    (df_drop_gap['pct_abs_gap'] <= pct_threshold)
+                    & (df_drop_gap['price_abs_gap'] <= 1.0)
+                    & same_sign_mask
+                ].copy()
+                # df_match = df_drop_gap[(df_drop_gap['pct_abs_gap'] <= pct_threshold) & (df_drop_gap['price_abs_gap'] <= 1.0)].copy()
                 # df_drop_gap = df_drop_gap.sort_values(['price_abs_gap'], ascending=[True])
-                # df_match = df_drop_gap[(df_drop_gap['price_abs_gap'] <= 5.0)].copy()
+                # df_match = df_drop_gap[(df_drop_gap['price_abs_gap'] <= 3.0)].copy()
 
-                # 历史上出现的极近似的增长幅度后的降价场景
+                # 历史上出现的极近似的增长(下降)幅度后的降价场景
                 if not df_match.empty:
                     dur_base = pd.to_numeric(price_duration_hours, errors='coerce')
                     hud_base = pd.to_numeric(hours_until_departure, errors='coerce')
@@ -1268,15 +1310,16 @@ def predict_data_simple(df_input, group_route_str, output_dir, predict_dir=".",
                         # df_match_chk = df_match_chk.loc[dur_vals.notna()].copy()
                         # df_match_chk = df_match_chk.loc[(dur_vals.loc[dur_vals.notna()] - float(dur_base)).abs() <= 36].copy()
 
-                        drop_hud_vals = pd.to_numeric(df_match_chk['drop_hours_until_departure'], errors='coerce')
-                        df_match_chk = df_match_chk.loc[drop_hud_vals.notna()].copy()
-                        df_match_chk = df_match_chk.loc[(float(hud_base) - drop_hud_vals.loc[drop_hud_vals.notna()]) >= -24].copy()
+                        # drop_hud_vals = pd.to_numeric(df_match_chk['drop_hours_until_departure'], errors='coerce')
+                        # df_match_chk = df_match_chk.loc[drop_hud_vals.notna()].copy()
+                        # df_match_chk = df_match_chk.loc[(float(hud_base) - drop_hud_vals.loc[drop_hud_vals.notna()]) >= -24].copy()
 
+                        # 正例收紧
                         dur_num_chk = pd.to_numeric(df_match_chk['high_price_duration_hours'], errors='coerce')
                         dur_delta = dur_num_chk - float(dur_base)
                         df_match_chk = df_match_chk.assign(dur_delta=dur_delta)
                         df_match_chk = df_match_chk.loc[df_match_chk['dur_delta'].notna()].copy()
-                        df_match_chk = df_match_chk.loc[df_match_chk['dur_delta'].abs() <= 48].copy()
+                        df_match_chk = df_match_chk.loc[df_match_chk['dur_delta'].abs() <= 72].copy()
                         
                         # seats_vals = pd.to_numeric(df_match_chk['high_price_seats_remaining_change_amount'], errors='coerce')
                         # df_match_chk = df_match_chk.loc[seats_vals.notna()].copy()
@@ -1360,9 +1403,18 @@ def predict_data_simple(df_input, group_route_str, output_dir, predict_dir=".",
                 df_rise_gap_1['price_abs_gap'] = df_rise_gap_1['price_gap'].abs()
 
                 df_rise_gap_1 = df_rise_gap_1.sort_values(['price_abs_gap', 'pct_abs_gap'], ascending=[True, True])
-                df_match_1 = df_rise_gap_1.loc[(df_rise_gap_1['pct_abs_gap'] <= pct_threshold_1) & (df_rise_gap_1['price_abs_gap'] <= 1.0)].copy()
+                same_sign_mask_1 = (
+                    np.sign(pd.to_numeric(df_rise_gap_1['prev_rise_change_percent'], errors='coerce'))
+                    == np.sign(pct_base_1)
+                )
+                df_match_1 = df_rise_gap_1[
+                    (df_rise_gap_1['pct_abs_gap'] <= pct_threshold_1)
+                    & (df_rise_gap_1['price_abs_gap'] <= 1.0)
+                    & same_sign_mask_1
+                ].copy()
+                # df_match_1 = df_rise_gap_1.loc[(df_rise_gap_1['pct_abs_gap'] <= pct_threshold_1) & (df_rise_gap_1['price_abs_gap'] <= 1.0)].copy()
                 # df_rise_gap_1 = df_rise_gap_1.sort_values(['price_abs_gap'], ascending=[True])
-                # df_match_1 = df_rise_gap_1.loc[(df_rise_gap_1['price_abs_gap'] <= 5.0)].copy()
+                # df_match_1 = df_rise_gap_1.loc[(df_rise_gap_1['price_abs_gap'] <= 3.0)].copy()
 
                 # 历史上出现过近似变化幅度后继续涨价场景
                 if not df_match_1.empty:
@@ -1376,13 +1428,20 @@ def predict_data_simple(df_input, group_route_str, output_dir, predict_dir=".",
 
                     if pd.notna(hud_base_1):   #  and pd.notna(seats_base_1)
                         df_match_chk_1 = df_match_1.copy()
+
+                        # 反例收紧:48小时内发生降价的不算显著反例
+                        _rise_pct_chk = pd.to_numeric(df_match_chk_1['rise_price_change_percent'], errors='coerce')
+                        _prev_dur_chk = pd.to_numeric(df_match_chk_1['prev_rise_duration_hours'], errors='coerce')
+                        _exclude_mask = _rise_pct_chk.lt(0) & _prev_dur_chk.lt(48)
+                        df_match_chk_1 = df_match_chk_1.loc[~_exclude_mask.fillna(False)].copy()
+
                         # dur_vals_1 = pd.to_numeric(df_match_chk_1['modify_rise_price_duration_hours'], errors='coerce')
                         # df_match_chk_1 = df_match_chk_1.loc[dur_vals_1.notna()].copy()
                         # df_match_chk_1 = df_match_chk_1.loc[(dur_vals_1.loc[dur_vals_1.notna()] - float(dur_base_1)).abs() <= 24].copy()
 
-                        rise_hud_vals_1 = pd.to_numeric(df_match_chk_1['rise_hours_until_departure'], errors='coerce')
-                        df_match_chk_1 = df_match_chk_1.loc[rise_hud_vals_1.notna()].copy()
-                        df_match_chk_1 = df_match_chk_1.loc[(float(hud_base_1) - rise_hud_vals_1.loc[rise_hud_vals_1.notna()]) >= -24].copy()
+                        # rise_hud_vals_1 = pd.to_numeric(df_match_chk_1['rise_hours_until_departure'], errors='coerce')
+                        # df_match_chk_1 = df_match_chk_1.loc[rise_hud_vals_1.notna()].copy()
+                        # df_match_chk_1 = df_match_chk_1.loc[(float(hud_base_1) - rise_hud_vals_1.loc[rise_hud_vals_1.notna()]) >= -24].copy()
 
                         # seats_vals_1 = pd.to_numeric(df_match_chk_1['rise_seats_remaining_change_amount'], errors='coerce')
                         # df_match_chk_1 = df_match_chk_1.loc[seats_vals_1.notna()].copy()
@@ -1413,7 +1472,7 @@ def predict_data_simple(df_input, group_route_str, output_dir, predict_dir=".",
                             else:
                                 drop_prob = round(length_drop / (length_rise + length_drop), 2)
                                 # 依旧保持之前的降价判定,概率修改
-                                if drop_prob >= 0.7:
+                                if drop_prob > 0.5:
                                     df_min_hours.loc[idx, 'simple_will_price_drop'] = 1
                                     # df_min_hours.loc[idx, 'simple_drop_in_hours_dist'] = 'd1'
                                     df_min_hours.loc[idx, 'flag_dist'] = 'd1'
@@ -1473,11 +1532,11 @@ def predict_data_simple(df_input, group_route_str, output_dir, predict_dir=".",
                   'flag_dist',
                   'drop_price_change_upper', 'drop_price_change_lower', 'drop_price_sample_size',
                   'rise_price_change_upper', 'rise_price_change_lower', 'rise_price_sample_size',
-                # 'envelope_max', 'envelope_min', 'envelope_mean', 'envelope_count',
-                # 'envelope_avg_peak_hours', 'envelope_position', 'is_envelope_peak',         # 包络线特征
-                # 'target_flight_day', 'target_price', 'target_peak_hours', 'is_target_day',  # 高点起飞日(纯包络线高点)
-                # 'drop_freq_count', 'drop_potential',                                        # 降价潜力 
-                # 'target_score', 'is_good_target',                                           # 综合目标评分(高点 × 降价潜力 = 最终投放目标
+                  'envelope_max', 'envelope_min', 'envelope_mean', 'envelope_count',
+                  'envelope_avg_peak_hours', 'envelope_position',                             # 包络线特征
+                  'target_flight_day', 'target_price', 'target_peak_hours', 'is_target_day',  # 高点起飞日(纯包络线高点)
+                # 'drop_freq_count', 'drop_potential', 'target_score',                        # 降价潜力 
+                  'is_good_target',                                                           # 综合目标评分() 
                  ]
     df_predict = df_min_hours[order_cols]
     df_predict = df_predict.rename(columns={
@@ -1512,7 +1571,7 @@ def predict_data_simple(df_input, group_route_str, output_dir, predict_dir=".",
     else:
         drop_1_cnt = 0
         drop_0_cnt = 0
-    print(f"will_price_drop 分类数量统计: 1(会降)={drop_1_cnt}, 0(不降)={drop_0_cnt}, 总数={total_cnt}")
+    print(f"will_price_drop 分类数量统计: 1(会降)={drop_1_cnt}, 0(不降)={drop_0_cnt}, 总数={total_cnt}, 过滤前总数={total_cnt_before}")
 
     csv_path1 = os.path.join(predict_dir, f'future_predictions_{pred_time_str}.csv')
     df_predict.to_csv(csv_path1, mode='a', index=False, header=not os.path.exists(csv_path1), encoding='utf-8-sig')