import tensorflow as tf
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, LSTM, Dense, Attention, Concatenate, GlobalAveragePooling1D
# Data preprocessing
def preprocess_data(file_path, sequence_length=10):
    """Read and preprocess historical Shuangseqiu (double colour ball) lottery results."""
    df = pd.read_csv(file_path, encoding="gbk")
    # Make sure the draws are in chronological order
    df.sort_values('期号', inplace=True)
    # Extract the red-ball and blue-ball columns
    red_balls = df[['红1', '红2', '红3', '红4', '红5', '红6']].values
    blue_ball = df['蓝球'].values.reshape(-1, 1)
    # Scale all values into [0, 1]
    scaler_red = MinMaxScaler()
    scaler_blue = MinMaxScaler()
    red_scaled = scaler_red.fit_transform(red_balls)
    blue_scaled = scaler_blue.fit_transform(blue_ball)
    # Build sliding-window sequences
    X_red, X_blue, y = [], [], []
    for i in range(len(red_scaled) - sequence_length):
        X_red.append(red_scaled[i:i+sequence_length])
        X_blue.append(blue_scaled[i:i+sequence_length])
        y.append(red_scaled[i+sequence_length])  # target: the next draw's red balls
    return np.array(X_red), np.array(X_blue), np.array(y), scaler_red, scaler_blue
# "Genie 2" world-model architecture
def build_genie2_model(red_seq_length, blue_seq_length, red_features, blue_features):
    """Build the Genie 2 world model."""
    # Red-ball input branch
    red_input = Input(shape=(red_seq_length, red_features))
    red_lstm = LSTM(64, return_sequences=True)(red_input)
    red_lstm2 = LSTM(32, return_sequences=True)(red_lstm)    # keep the full sequence (3D) for Attention
    # Blue-ball input branch
    blue_input = Input(shape=(blue_seq_length, blue_features))
    blue_lstm = LSTM(32, return_sequences=True)(blue_input)  # keep the full sequence (3D) for Attention
    # Attention fusion: red sequence as query, blue sequence as value
    attention = Attention()([red_lstm2, blue_lstm])
    # Pool the sequence outputs to fixed-size vectors before concatenating
    red_vec = GlobalAveragePooling1D()(red_lstm2)
    blue_vec = GlobalAveragePooling1D()(blue_lstm)
    att_vec = GlobalAveragePooling1D()(attention)
    combined = Concatenate()([red_vec, blue_vec, att_vec])
    # World-model core
    world_model = Dense(128, activation='relu')(combined)
    world_model = Dense(64, activation='relu')(world_model)
    # Output layer: predict the 6 red balls
    output = Dense(red_features, activation='sigmoid')(world_model)
    model = Model(inputs=[red_input, blue_input], outputs=output)
    model.compile(optimizer='adam', loss='mse')
    return model
# Main workflow
def main():
    # 1. Prepare the data
    FILE_PATH = "D:\\worker\\lottery_results8.csv"
    X_red, X_blue, y, scaler_red, scaler_blue = preprocess_data(FILE_PATH, sequence_length=10)
    # 2. Build the model
    model = build_genie2_model(
        red_seq_length=X_red.shape[1],
        blue_seq_length=X_blue.shape[1],
        red_features=X_red.shape[2],
        blue_features=X_blue.shape[2]
    )
    # 3. Train the model
    model.fit([X_red, X_blue], y, epochs=100, batch_size=32, validation_split=0.2)
    # 4. Predict the next draw from the most recent window
    last_red = X_red[-1].reshape(1, X_red.shape[1], X_red.shape[2])
    last_blue = X_blue[-1].reshape(1, X_blue.shape[1], X_blue.shape[2])
    predicted_red = model.predict([last_red, last_blue])
    # 5. Invert the scaling and print the prediction
    predicted_red_actual = scaler_red.inverse_transform(predicted_red)
    predicted_red_actual = np.round(predicted_red_actual).astype(int)
    # Simplified blue-ball prediction (one possible extension is sketched at the end of this post)
    blue_prediction = np.random.randint(1, 17)  # random pick, for demonstration only
    print("\nPrediction:")
    print(f"Red balls: {predicted_red_actual[0]}")
    print(f"Blue ball: {blue_prediction}")

if __name__ == "__main__":
    main()
Note on the Attention layer: if both branches return only their final LSTM state (LSTM(32) without return_sequences=True), the Attention layer receives 2D tensors of shape (None, 32) and the call fails with:

ValueError: Exception encountered when calling Attention.call().
Dimension must be 2 but is 3 for '{{node functional_1/attention_1/transpose}} = Transpose[T=DT_FLOAT, Tperm=DT_INT32](functional_1/lstm_2_1/strided_slice_3, functional_1/attention_1/transpose/perm)' with input shapes: [?,32], [3].
Arguments received by Attention.call():
• inputs=['tf.Tensor(shape=(None, 32), dtype=float32)', 'tf.Tensor(shape=(None, 32), dtype=float32)']
• mask=['None', 'None']
• training=True
• return_attention_scores=False
• use_causal_mask=False

Keras' Attention expects 3D query/value tensors of shape (batch, timesteps, features). That is why both LSTM branches above keep return_sequences=True and their outputs are pooled back to fixed-size vectors only after the attention fusion.
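
A minimal standalone shape check (illustrative only; the window length 10 and the feature sizes 6 and 1 simply mirror the defaults used above):

from tensorflow.keras.layers import Input, LSTM, Attention, GlobalAveragePooling1D

seq_len, red_feats, blue_feats = 10, 6, 1

red_in = Input(shape=(seq_len, red_feats))
blue_in = Input(shape=(seq_len, blue_feats))
red_seq = LSTM(32, return_sequences=True)(red_in)     # (None, 10, 32)
blue_seq = LSTM(32, return_sequences=True)(blue_in)   # (None, 10, 32)
att = Attention()([red_seq, blue_seq])                # (None, 10, 32) - the query length is preserved
att_vec = GlobalAveragePooling1D()(att)               # (None, 32) - ready to concatenate with other vectors

print(red_seq.shape, blue_seq.shape, att.shape, att_vec.shape)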
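
The blue ball in main() is only a random placeholder. If the model should predict it as well, one option is a second output head on the same fused features. This is a sketch under my own assumptions, not part of the original script: the 16-class softmax head, the name build_two_head_model and the y_blue target are made up for illustration.

from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, LSTM, Dense, Attention, Concatenate, GlobalAveragePooling1D

def build_two_head_model(seq_len, red_features, blue_features):
    """Sketch: same fusion as above plus a softmax head for the blue ball (values 1-16)."""
    red_input = Input(shape=(seq_len, red_features))
    blue_input = Input(shape=(seq_len, blue_features))
    red_seq = LSTM(64, return_sequences=True)(red_input)
    red_seq = LSTM(32, return_sequences=True)(red_seq)
    blue_seq = LSTM(32, return_sequences=True)(blue_input)
    att = Attention()([red_seq, blue_seq])
    combined = Concatenate()([GlobalAveragePooling1D()(red_seq),
                              GlobalAveragePooling1D()(blue_seq),
                              GlobalAveragePooling1D()(att)])
    core = Dense(128, activation='relu')(combined)
    core = Dense(64, activation='relu')(core)
    red_out = Dense(red_features, activation='sigmoid', name='red')(core)  # scaled red balls, as before
    blue_out = Dense(16, activation='softmax', name='blue')(core)          # class 0-15 maps to blue ball 1-16
    model = Model(inputs=[red_input, blue_input], outputs=[red_out, blue_out])
    model.compile(optimizer='adam',
                  loss={'red': 'mse', 'blue': 'sparse_categorical_crossentropy'})
    return model

# Usage sketch: the blue target would be the next draw's blue ball minus 1 (an integer 0-15),
# collected in preprocess_data alongside y, and then passed as
#   model.fit([X_red, X_blue], {'red': y, 'blue': np.array(y_blue)}, epochs=100, batch_size=32)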