From 81b0ad0124847f083990d574dc8d20961ec6e713 Mon Sep 17 00:00:00 2001
From: baoshiwei <baoshiwei@shlanbao.cn>
Date: Wed, 01 Apr 2026 14:12:55 +0800
Subject: [PATCH] feat(参数调节): 添加优化版挤出机参数调节页面
---
app/pages/metered_weight_advanced.py | 318 ++++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 267 insertions(+), 51 deletions(-)
diff --git a/app/pages/metered_weight_advanced.py b/app/pages/metered_weight_advanced.py
index d3a4e40..3676644 100644
--- a/app/pages/metered_weight_advanced.py
+++ b/app/pages/metered_weight_advanced.py
@@ -3,6 +3,8 @@
import plotly.graph_objects as go
import pandas as pd
import numpy as np
+import joblib
+import os
from datetime import datetime, timedelta
from app.services.extruder_service import ExtruderService
from app.services.main_process_service import MainProcessService
@@ -13,8 +15,107 @@
from sklearn.svm import SVR
from sklearn.neural_network import MLPRegressor
-
-
+# 瀵煎叆绋虫�佽瘑鍒姛鑳�
+class SteadyStateDetector:
+ def __init__(self):
+ pass
+
+ def detect_steady_state(self, df, weight_col='绫抽噸', window_size=20, std_threshold=0.5, duration_threshold=60):
+ """
+ 绋虫�佽瘑鍒�昏緫锛氭爣璁扮背閲嶆暟鎹腑鐨勭ǔ鎬佹
+ :param df: 鍖呭惈绫抽噸鏁版嵁鐨勬暟鎹
+ :param weight_col: 绫抽噸鍒楀悕
+ :param window_size: 婊戝姩绐楀彛澶у皬锛堢锛�
+ :param std_threshold: 鏍囧噯宸槇鍊�
+ :param duration_threshold: 绋虫�佹寔缁椂闂撮槇鍊硷紙绉掞級
+ :return: 鍖呭惈绋虫�佹爣璁扮殑鏁版嵁妗嗗拰绋虫�佷俊鎭�
+ """
+ if df is None or df.empty:
+ return df, []
+
+ # 纭繚鏃堕棿鍒楁槸datetime绫诲瀷
+ df['time'] = pd.to_datetime(df['time'])
+
+ # 璁$畻婊氬姩缁熻閲�
+ df['rolling_std'] = df[weight_col].rolling(window=window_size, min_periods=5).std()
+ df['rolling_mean'] = df[weight_col].rolling(window=window_size, min_periods=5).mean()
+
+ # 璁$畻娉㈠姩鑼冨洿
+ df['fluctuation_range'] = (df['rolling_std'] / df['rolling_mean']) * 100
+ df['fluctuation_range'] = df['fluctuation_range'].fillna(0)
+
+ # 鏍囪绋虫�佺偣
+ df['is_steady'] = 0
+ steady_condition = (
+ (df['fluctuation_range'] < std_threshold) &
+ (df[weight_col] >= 0.1)
+ )
+ df.loc[steady_condition, 'is_steady'] = 1
+
+ # 璇嗗埆杩炵画绋虫�佹
+ steady_segments = []
+ current_segment = {}
+
+ for i, row in df.iterrows():
+ if row['is_steady'] == 1:
+ if not current_segment:
+ current_segment = {
+ 'start_time': row['time'],
+ 'start_idx': i,
+ 'weights': [row[weight_col]]
+ }
+ else:
+ current_segment['weights'].append(row[weight_col])
+ else:
+ if current_segment:
+ current_segment['end_time'] = df.loc[i-1, 'time'] if i > 0 else df.loc[i, 'time']
+ current_segment['end_idx'] = i-1
+ duration = (current_segment['end_time'] - current_segment['start_time']).total_seconds()
+
+ if duration >= duration_threshold:
+ weights_array = np.array(current_segment['weights'])
+ current_segment['duration'] = duration
+ current_segment['mean_weight'] = np.mean(weights_array)
+ current_segment['std_weight'] = np.std(weights_array)
+ current_segment['min_weight'] = np.min(weights_array)
+ current_segment['max_weight'] = np.max(weights_array)
+ current_segment['fluctuation_range'] = (current_segment['std_weight'] / current_segment['mean_weight']) * 100
+
+ # 璁$畻缃俊搴�
+ confidence = 100 - (current_segment['fluctuation_range'] / std_threshold) * 50
+ confidence = max(50, min(100, confidence))
+ current_segment['confidence'] = confidence
+
+ steady_segments.append(current_segment)
+
+ current_segment = {}
+
+ # 澶勭悊鏈�鍚庝竴涓ǔ鎬佹
+ if current_segment:
+ current_segment['end_time'] = df['time'].iloc[-1]
+ current_segment['end_idx'] = len(df) - 1
+ duration = (current_segment['end_time'] - current_segment['start_time']).total_seconds()
+
+ if duration >= duration_threshold:
+ weights_array = np.array(current_segment['weights'])
+ current_segment['duration'] = duration
+ current_segment['mean_weight'] = np.mean(weights_array)
+ current_segment['std_weight'] = np.std(weights_array)
+ current_segment['min_weight'] = np.min(weights_array)
+ current_segment['max_weight'] = np.max(weights_array)
+ current_segment['fluctuation_range'] = (current_segment['std_weight'] / current_segment['mean_weight']) * 100
+
+ confidence = 100 - (current_segment['fluctuation_range'] / std_threshold) * 50
+ confidence = max(50, min(100, confidence))
+ current_segment['confidence'] = confidence
+
+ steady_segments.append(current_segment)
+
+ # 鍦ㄦ暟鎹涓爣璁板畬鏁寸殑绋虫�佹
+ for segment in steady_segments:
+ df.loc[segment['start_idx']:segment['end_idx'], 'is_steady'] = 1
+
+ return df, steady_segments
def show_metered_weight_advanced():
# 鍒濆鍖栨湇鍔�
@@ -35,10 +136,16 @@
st.session_state['ma_model_type'] = 'RandomForest'
if 'ma_sequence_length' not in st.session_state:
st.session_state['ma_sequence_length'] = 10
-
+ if 'ma_use_steady_data' not in st.session_state:
+ st.session_state['ma_use_steady_data'] = True
+ if 'ma_steady_window' not in st.session_state:
+ st.session_state['ma_steady_window'] = 20
+ if 'ma_steady_threshold' not in st.session_state:
+ st.session_state['ma_steady_threshold'] = 1.5
+
# 榛樿鐗瑰緛鍒楄〃锛堜笉鍐嶅厑璁哥敤鎴烽�夋嫨锛�
- default_features = ['铻烘潌杞��', '鏈哄ご鍘嬪姏', '娴佺▼涓婚��', '铻烘潌娓╁害',
- '鍚庢満绛掓俯搴�', '鍓嶆満绛掓俯搴�', '鏈哄ご娓╁害']
+ default_features = ['铻烘潌杞��', '鏈哄ご鍘嬪姏', '娴佺▼涓婚��', '鍚庢満绛掓俯搴�']
+
# 瀹氫箟鍥炶皟鍑芥暟
def update_dates(qs):
@@ -126,6 +233,42 @@
options=model_options,
key="ma_model_type",
help="閫夋嫨鐢ㄤ簬棰勬祴鐨勬ā鍨嬬被鍨�"
+ )
+
+ # 绋虫�佽瘑鍒厤缃�
+ st.markdown("---")
+ steady_cols = st.columns(3)
+ with steady_cols[0]:
+ st.write("鈿栵笍 **绋虫�佽瘑鍒厤缃�**")
+ st.checkbox(
+ "浠呬娇鐢ㄧǔ鎬佹暟鎹繘琛岃缁�",
+ value=st.session_state['ma_use_steady_data'],
+ key="ma_use_steady_data",
+ help="鍚敤鍚庯紝鍙娇鐢ㄧ背閲嶇ǔ鎬佹椂娈电殑鏁版嵁杩涜妯″瀷璁粌"
+ )
+
+ with steady_cols[1]:
+ st.write("馃搹 **绋虫�佸弬鏁�**")
+ st.slider(
+ "婊戝姩绐楀彛澶у皬 (绉�)",
+ min_value=5,
+ max_value=60,
+ value=st.session_state['ma_steady_window'],
+ step=5,
+ key="ma_steady_window",
+ help="鐢ㄤ簬绋虫�佽瘑鍒殑婊戝姩绐楀彛澶у皬"
+ )
+
+ with steady_cols[2]:
+ st.write("馃搳 **绋虫�侀槇鍊�**")
+ st.slider(
+ "娉㈠姩闃堝�� (%)",
+ min_value=0.1,
+ max_value=2.0,
+ value=st.session_state['ma_steady_threshold'],
+ step=0.1,
+ key="ma_steady_threshold",
+ help="绋虫�佽瘑鍒殑娉㈠姩鑼冨洿闃堝��"
)
@@ -249,6 +392,82 @@
# 閲嶅懡鍚嶇背閲嶅垪
df_analysis.rename(columns={'metered_weight': '绫抽噸'}, inplace=True)
+
+ # 绋虫�佽瘑鍒�
+ steady_detector = SteadyStateDetector()
+
+ # 鑾峰彇绋虫�佽瘑鍒弬鏁�
+ use_steady_data = st.session_state.get('ma_use_steady_data', True)
+ steady_window = st.session_state.get('ma_steady_window', 20)
+ steady_threshold = st.session_state.get('ma_steady_threshold', 1.5)
+
+ # 鎵ц绋虫�佽瘑鍒�
+ df_analysis_with_steady, steady_segments = steady_detector.detect_steady_state(
+ df_analysis,
+ weight_col='绫抽噸',
+ window_size=steady_window,
+ std_threshold=steady_threshold
+ )
+
+ # 鏇存柊df_analysis涓哄寘鍚ǔ鎬佹爣璁扮殑鏁版嵁
+ df_analysis = df_analysis_with_steady
+
+ # 绋虫�佹暟鎹彲瑙嗗寲
+ st.subheader("馃搱 绋虫�佹暟鎹垎甯�")
+
+ # 鍒涘缓绋虫�佹暟鎹彲瑙嗗寲鍥捐〃
+ fig_steady = go.Figure()
+
+ # 娣诲姞鍘熷绫抽噸鏇茬嚎
+ fig_steady.add_trace(go.Scatter(
+ x=df_analysis['time'],
+ y=df_analysis['绫抽噸'],
+ name='鍘熷绫抽噸',
+ mode='lines',
+ line=dict(color='lightgray', width=1)
+ ))
+
+ # 娣诲姞绋虫�佹暟鎹偣
+ steady_data_points = df_analysis[df_analysis['is_steady'] == 1]
+ fig_steady.add_trace(go.Scatter(
+ x=steady_data_points['time'],
+ y=steady_data_points['绫抽噸'],
+ name='绋虫�佺背閲�',
+ mode='markers',
+ marker=dict(color='green', size=3, opacity=0.6)
+ ))
+
+ # 娣诲姞闈炵ǔ鎬佹暟鎹偣
+ non_steady_data_points = df_analysis[df_analysis['is_steady'] == 0]
+ fig_steady.add_trace(go.Scatter(
+ x=non_steady_data_points['time'],
+ y=non_steady_data_points['绫抽噸'],
+ name='闈炵ǔ鎬佺背閲�',
+ mode='markers',
+ marker=dict(color='red', size=3, opacity=0.6)
+ ))
+
+ # 閰嶇疆鍥捐〃甯冨眬
+ fig_steady.update_layout(
+ title="绫抽噸鏁版嵁绋虫�佸垎甯�",
+ xaxis=dict(title="鏃堕棿"),
+ yaxis=dict(title="绫抽噸 (Kg/m)"),
+ legend=dict(orientation="h", yanchor="bottom", y=1.02, xanchor="right", x=1),
+ height=500
+ )
+
+ # 鏄剧ず鍥捐〃
+ st.plotly_chart(fig_steady, use_container_width=True)
+
+ # 鏄剧ず绋虫�佺粺璁�
+ total_data = len(df_analysis)
+ steady_data = len(df_analysis[df_analysis['is_steady'] == 1])
+ steady_ratio = (steady_data / total_data * 100) if total_data > 0 else 0
+
+ stats_cols = st.columns(3)
+ stats_cols[0].metric("鎬绘暟鎹噺", total_data)
+ stats_cols[1].metric("绋虫�佹暟鎹噺", steady_data)
+ stats_cols[2].metric("绋虫�佹暟鎹瘮渚�", f"{steady_ratio:.1f}%")
# --- 鍘熷鏁版嵁瓒嬪娍鍥� ---
st.subheader("馃搱 鍘熷鏁版嵁瓒嬪娍鍥�")
@@ -381,8 +600,16 @@
else:
try:
# 鍑嗗鏁版嵁
+ # 鏍规嵁閰嶇疆鍐冲畾鏄惁鍙娇鐢ㄧǔ鎬佹暟鎹�
+ use_steady_data = st.session_state.get('ma_use_steady_data', True)
+ if use_steady_data:
+ df_filtered = df_analysis[df_analysis['is_steady'] == 1]
+ st.info(f"宸茶繃婊ら潪绋虫�佹暟鎹紝浣跨敤 {len(df_filtered)} 鏉$ǔ鎬佹暟鎹繘琛岃缁�")
+ else:
+ df_filtered = df_analysis.copy()
+
# 棣栧厛纭繚df_analysis涓病鏈塏aN鍊�
- df_analysis_clean = df_analysis.dropna(subset=default_features + ['绫抽噸'])
+ df_analysis_clean = df_filtered.dropna(subset=default_features + ['绫抽噸'])
# 妫�鏌ユ竻鐞嗗悗鐨勬暟鎹噺
if len(df_analysis_clean) < 30:
@@ -391,8 +618,8 @@
# 鍒涘缓涓�涓柊鐨凞ataFrame鏉ュ瓨鍌ㄦ墍鏈夌壒寰佸拰鐩爣鍙橀噺
all_features = df_analysis_clean[default_features + ['绫抽噸']].copy()
-
-
+
+
# 娓呯悊鎵�鏈塏aN鍊�
all_features_clean = all_features.dropna()
@@ -568,49 +795,38 @@
)
st.plotly_chart(fig_importance, width='stretch')
- # --- 棰勬祴鍔熻兘 ---
- st.subheader("馃敭 绫抽噸棰勬祴")
-
- # 鍒涘缓棰勬祴琛ㄥ崟
- st.write("杈撳叆鐗瑰緛鍊艰繘琛岀背閲嶉娴�:")
- predict_cols = st.columns(2)
- input_features = {}
-
- for i, feature in enumerate(default_features):
- with predict_cols[i % 2]:
- # 鑾峰彇鐗瑰緛鐨勭粺璁′俊鎭�
- min_val = df_analysis_clean[feature].min()
- max_val = df_analysis_clean[feature].max()
- mean_val = df_analysis_clean[feature].mean()
-
- input_features[feature] = st.number_input(
- f"{feature}",
- key=f"ma_pred_{feature}",
- value=float(mean_val),
- min_value=float(min_val),
- max_value=float(max_val),
- step=0.1
- )
-
- if st.button("棰勬祴绫抽噸"):
- # 鍑嗗棰勬祴鏁版嵁
- input_df = pd.DataFrame([input_features])
-
- # 鍚堝苟鐗瑰緛
- input_combined = pd.concat([input_df], axis=1)
-
- # 棰勬祴
- if model_type in ['SVR', 'MLP']:
- input_scaled = scaler_X.transform(input_combined)
- prediction_scaled = model.predict(input_scaled)
- predicted_weight = scaler_y.inverse_transform(prediction_scaled.reshape(-1, 1)).ravel()[0]
-
- else:
- predicted_weight = model.predict(input_combined)[0]
-
- # 鏄剧ず棰勬祴缁撴灉
- st.success(f"棰勬祴绫抽噸: {predicted_weight:.4f} Kg/m")
-
+ # --- 妯″瀷淇濆瓨 ---
+ st.subheader("锟� 妯″瀷淇濆瓨")
+
+ # 鍒涘缓妯″瀷鐩綍锛堝鏋滀笉瀛樺湪锛�
+ model_dir = "saved_models"
+ os.makedirs(model_dir, exist_ok=True)
+
+ # 鍑嗗妯″瀷淇℃伅
+ model_info = {
+ 'model': model,
+ 'features': feature_columns,
+ 'scaler_X': scaler_X if model_type in ['SVR', 'MLP'] else None,
+ 'scaler_y': scaler_y if model_type in ['SVR', 'MLP'] else None,
+ 'model_type': model_type,
+ 'created_at': datetime.now(),
+ 'r2_score': r2,
+ 'mse': mse,
+ 'mae': mae,
+ 'rmse': rmse,
+ 'use_steady_data': use_steady_data
+ }
+
+ # 鐢熸垚妯″瀷鏂囦欢鍚�
+ model_filename = f"advanced_{model_type.lower()}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.joblib"
+ model_path = os.path.join(model_dir, model_filename)
+
+ # 淇濆瓨妯″瀷
+ joblib.dump(model_info, model_path)
+
+ st.success(f"妯″瀷宸叉垚鍔熶繚瀛�: {model_filename}")
+ st.info(f"淇濆瓨璺緞: {model_path}")
+
# --- 鏁版嵁棰勮 ---
st.subheader("馃攳 鏁版嵁棰勮")
st.dataframe(df_analysis.head(20), width='stretch')
--
Gitblit v1.9.3