活动介绍
file-type

Squeeze_RA_V1_HTF - MetaTrader 5脚本增强时段选择功能

ZIP文件

下载需积分: 6 | 7KB | 更新于2025-08-30 | 132 浏览量 | 0 下载量 举报 收藏
download 立即下载
根据给定的文件信息,我们可以推断出以下知识点: ### 标题知识点 - **Squeeze_RA_V1_HTF**: 标题中的"Squeeze_RA_V1_HTF"很可能是指一个特定的脚本或指标名称。"Squeeze"通常指的是一种名为Squeeze指标的技术分析工具,它由Blahtech Limited开发,用于MetaTrader 4和MetaTrader 5平台。它旨在识别市场波动的收缩期,预示着可能的突破。"RA"可能代表某种特定的配置或修正版本。"HTF"则可能指的是"High TimeFrame"(高时间框架),这表明该脚本可能提供不同时间框架内Squeeze指标的应用,为交易者提供更宽广的视角来分析市场。 - **MetaTrader 5脚本**: MetaTrader 5(MT5)是金融市场中最流行的交易平台之一,由MetaQuotes Software Corp开发。它支持外汇、差价合约(CFDs)、商品和股票等多种交易。MT5平台具有强大的内置脚本功能,允许用户创建和运行自定义的脚本,以自动化交易流程或执行特定的交易策略。脚本通常用MQL5(MetaQuotes Language 5)编写,这是一种用于定制MT5平台功能的编程语言。 ### 描述知识点 - **时段选择选项**: 这表示"Squeeze_RA_V1"指标允许用户在其参数中选择不同的时间框架。时间框架(Timeframes)是指在技术分析中用来表示价格数据的时间单位,例如1分钟、5分钟、1小时、日线等。提供时段选择选项意味着指标可以调整以显示不同时间框架的Squeeze信号,这有助于交易者从长期和短期两个视角分析市场状况和潜在的交易机会。 ### 标签知识点 - **MetaTrader**: 此标签再次强调了脚本是为MetaTrader平台设计的。它提醒用户这个脚本或指标是与MT4或MT5平台兼容的,因此只有使用这些平台的用户才能利用"Squeeze_RA_V1_HTF"指标进行交易分析。 ### 压缩包文件的文件名称列表知识点 - **squeeze_ra_v1.mq5**: 这个文件名称表明了核心脚本文件,它很可能是"Squeeze_RA_V1"指标的主程序文件。文件的".mq5"扩展名确认了它是为了在MetaTrader 5平台上使用而编写的。 - **squeeze_ra_v1_htf.mq5**: 这个文件名称进一步指明了这是一个扩展或定制版本的"Squeeze_RA_V1"脚本,它特别包含了"High TimeFrame"功能。该文件扩展了基本指标的功能,使用户能够在高时间框架上进行分析,这为长期和高级交易策略提供了更多的灵活性。 综上所述,我们可以总结出"Squeeze_RA_V1_HTF - MetaTrader 5脚本.zip"提供的是一个为MetaTrader 5平台定制的Squeeze指标版本,它允许用户在不同的时间框架内分析市场潜在的突破点。该脚本的设计旨在帮助交易者更好地理解市场动态,并可能与其他的交易系统或指标集成,以便提供更为全面的市场分析。

相关推荐

filetype

def _unscaled_metrics(self, y_true, y_pred):
    """Compute regression metrics for arrays already mapped back to original units.

    Parameters
    ----------
    y_true, y_pred : 1-D numpy arrays in original (inverse-transformed) units.

    Returns
    -------
    tuple
        (rmse, rmse_percentage, r2, willmott_d, true_mean).
    """
    rmse = np.sqrt(mean_squared_error(y_true, y_pred))
    true_mean = np.mean(y_true)
    # RMSE expressed as a percentage of the mean observed value.
    rmse_percentage = (rmse / true_mean) * 100
    r2 = r2_score(y_true, y_pred)
    # Willmott's index of agreement d.
    d = 1 - (np.sum((y_pred - y_true) ** 2)
             / np.sum((np.abs(y_pred - true_mean) + np.abs(y_true - true_mean)) ** 2))
    return rmse, rmse_percentage, r2, d, true_mean

def on_epoch_end(self, epoch, logs=None):
    """Keras callback hook: after each epoch, compute train/val metrics in
    original units and store them in the `logs` dict.

    The model is trained on scaled targets, so both true values and
    predictions are inverse-transformed with self.scaler_y before computing
    RMSE, RMSE%, R2 and Willmott's d.
    """
    logs = {} if logs is None else logs  # Keras normally passes a dict; be safe.

    # ---- training set ----
    y_pred_train = self.model.predict(self.X_train, verbose=0)
    # .squeeze() collapses length-1 axes (e.g. shape (n, 1) -> (n,)) so that
    # true and predicted values align element-wise for the metric functions.
    y_train_true = self.y_train.squeeze()
    y_train_pred = y_pred_train.squeeze()
    # Map back to original (physical) units before computing metrics.
    y_train_true_unscaled = self.scaler_y.inverse_transform(y_train_true.reshape(-1, 1)).squeeze()
    y_train_pred_unscaled = self.scaler_y.inverse_transform(y_train_pred.reshape(-1, 1)).squeeze()
    (train_rmse_unscaled, train_rmse_percentage_unscaled, train_r2_unscaled,
     train_d_unscaled, train_true_mean_unscaled) = self._unscaled_metrics(
        y_train_true_unscaled, y_train_pred_unscaled)
    print(
        f"Epoch {epoch + 1}: Train RMSE_unscaled = {train_rmse_unscaled:.4f}, Train True Mean_unscaled = {train_true_mean_unscaled:.4f}")
    # Record the unscaled training metrics so downstream history/CSV loggers see them.
    logs['rmse'] = train_rmse_unscaled
    logs['rmse%'] = train_rmse_percentage_unscaled
    logs['r2'] = train_r2_unscaled
    logs['d'] = train_d_unscaled
    # Ensure Keras' own loss/mae entries are present even if missing.
    logs['loss'] = logs.get('loss', 0)
    logs['mae'] = logs.get('mae', 0)

    # ---- validation set ----
    y_pred_val = self.model.predict(self.X_val, verbose=0)
    y_val_true = self.y_val.squeeze()
    y_val_pred = y_pred_val.squeeze()
    y_val_true_unscaled = self.scaler_y.inverse_transform(y_val_true.reshape(-1, 1)).squeeze()
    y_val_pred_unscaled = self.scaler_y.inverse_transform(y_val_pred.reshape(-1, 1)).squeeze()
    # BUG FIX: the original computed Willmott's d for validation from the
    # *scaled* arrays (y_val_pred / y_val_true) even though the result was
    # named val_d_unscaled; all validation metrics now use the unscaled data,
    # consistent with the training-set metrics above.
    (val_rmse_unscaled, val_rmse_percentage_unscaled, val_r2_unscaled,
     val_d_unscaled, val_measured_mean_unscaled) = self._unscaled_metrics(
        y_val_true_unscaled, y_val_pred_unscaled)
    print(
        f"Epoch {epoch + 1}: Val RMSE_unscaled = {val_rmse_unscaled:.4f}, Val True Mean_unscaled = {val_measured_mean_unscaled:.4f}")
    logs['val_rmse'] = val_rmse_unscaled
    logs['val_rmse%'] = val_rmse_percentage_unscaled
    logs['val_r2'] = val_r2_unscaled
    logs['val_d'] = val_d_unscaled

# (Question embedded in the original post, kept for context:)
# "In this code, why do y_train_true = self.y_train.squeeze() and
#  y_train_pred = y_pred_train.squeeze() use .squeeze()?" — see the comment
#  above: it removes length-1 axes so both arrays are 1-D before metric math.

filetype

--------------------------------------------------------------------------- ValueError Traceback (most recent call last) /tmp/ipykernel_553597/306262114.py in <module> 7 ## Training 8 # Epoch_list,Loss_list = model_train(batchsize,channel_SNR_db1,noise_init,nl_factor,eq_flag,norm_epsilon,earlystop_epoch) ----> 9 Epoch_list,Loss_list, Min_Distance_list = model_train(batchsize,channel_SNR_db1,noise_init,nl_factor,eq_flag,norm_epsilon,earlystop_epoch, min_distance_threshold=0.7,flags_schedule=[(1, 0), (0, 1), (1, 1)],iter_per_stage=50) /tmp/ipykernel_553597/4102420687.py in model_train(batchsize, channel_SNR, noise_init, nl_factor, eq_flag, epsilon, earlystop_epoch, min_distance_threshold, flags_schedule, iter_per_stage) 58 59 (batch_loss, batch_loss_Eq, NGMI, GMI, entropy_S, ---> 60 p_s, norm_constellation, x, min_distance) = train_step( 61 channel_SNR, noise_tf, GS_flag_now, PS_flag_now, eq_flag, epsilon, min_distance_threshold 62 ) ~/miniconda3/lib/python3.8/site-packages/tensorflow/python/util/traceback_utils.py in error_handler(*args, **kwargs) 151 except Exception as e: 152 filtered_tb = _process_traceback_frames(e.__traceback__) --> 153 raise e.with_traceback(filtered_tb) from None 154 finally: 155 del filtered_tb /tmp/__autograph_generated_file_jsnzuik.py in tf__train_step(inp_SNR, noise, GS_flag, PS_flag, eq_flag, epsilon, min_distance_threshold) 39 batch_size = ag__.converted_call(ag__.ld(tf).shape, (ag__.ld(p_s),), None, fscope)[0] 40 batch_indices = ag__.converted_call(ag__.ld(tf).tile, (ag__.converted_call(ag__.ld(tf).range, (ag__.ld(batch_size),), None, fscope)[:, ag__.ld(tf).newaxis, ag__.ld(tf).newaxis], [1, ag__.ld(M_int), ag__.ld(k)]), None, fscope) ---> 41 gather_indices = ag__.converted_call(ag__.ld(tf).stack, ([ag__.ld(batch_indices), ag__.converted_call(ag__.ld(tf).tile, (ag__.ld(topk_indices)[:, :, ag__.ld(tf).newaxis, :], [1, 1, ag__.ld(k), 1]), None, fscope)],), dict(axis=(- 1)), fscope) 42 neighbor_probs = 
ag__.converted_call(ag__.ld(tf).gather_nd, (ag__.ld(p_s), ag__.ld(gather_indices)), None, fscope) 43 neighbor_sum = ag__.converted_call(ag__.ld(tf).reduce_sum, (ag__.ld(neighbor_probs),), dict(axis=(- 1)), fscope) ValueError: in user code: File "/tmp/ipykernel_553597/675414708.py", line 77, in train_step * gather_indices = tf.stack([ ValueError: Shapes must be equal rank, but are 3 and 4 From merging shape 0 with other shapes. for '{{node stack_1}} = Pack[N=2, T=DT_INT32, axis=-1](Tile, Tile_1)' with input shapes: [1,8,3], [1,8,3,3].

filetype

class PPO(object):
    """Proximal Policy Optimization agent (TensorFlow 1.x graph mode).

    __init__ builds, inside one tf.Session:
      * a critic that regresses the discounted return and exposes the
        advantage (tfdc_r - v) plus its Adam training op,
      * an actor distribution `pi` and a frozen copy `oldpi` used for the
        importance ratio,
      * either a KL-penalized or a clipped surrogate actor loss, selected by
        the module-level METHOD dict.

    NOTE(review): S_DIM, A_DIM, C_LR, METHOD and self._build_anet are defined
    elsewhere in the original file and are not visible in this excerpt.
    """

    def __init__(self):
        self.sess = tf.Session()
        # Batch of states fed to both actor and critic.
        self.tfs = tf.placeholder(tf.float32, [None, S_DIM], 'state')

        # critic
        with tf.variable_scope('critic'):
            l1 = tf.layers.dense(self.tfs, 100, tf.nn.relu)
            self.v = tf.layers.dense(l1, 1)  # state-value estimate V(s)
            self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
            # Advantage = discounted return - baseline V(s).
            self.advantage = self.tfdc_r - self.v
            self.closs = tf.reduce_mean(tf.square(self.advantage))  # MSE critic loss
            self.ctrain_op = tf.train.AdamOptimizer(C_LR).minimize(self.closs)

        # actor
        pi, pi_params = self._build_anet('pi', trainable=True)
        oldpi, oldpi_params = self._build_anet('oldpi', trainable=False)
        with tf.variable_scope('sample_action'):
            self.sample_op = tf.squeeze(pi.sample(1), axis=0)  # choosing action
        with tf.variable_scope('update_oldpi'):
            # Assign ops that copy current policy weights into the frozen old policy.
            self.update_oldpi_op = [oldp.assign(p) for p, oldp in zip(pi_params, oldpi_params)]

        self.tfa = tf.placeholder(tf.float32, [None, A_DIM], 'action')
        self.tfadv = tf.placeholder(tf.float32, [None, 1], 'advantage')
        with tf.variable_scope('loss'):
            with tf.variable_scope('surrogate'):
                # ratio = tf.exp(pi.log_prob(self.tfa) - oldpi.log_prob(self.tfa))
                # Importance ratio pi/oldpi; 1e-5 guards against division by zero.
                ratio = pi.prob(self.tfa) / (oldpi.prob(self.tfa) + 1e-5)
                surr = ratio * self.tfadv
            if METHOD['name'] == 'kl_pen':
                # KL-penalty variant: maximize E[surr] - lambda * KL(oldpi || pi)
                # (negated here because optimizers minimize).
                self.tflam = tf.placeholder(tf.float32, None, 'lambda')
                kl = tf.distributions.kl_divergence(oldpi, pi)
                self.kl_mean = tf.reduce_mean(kl)
                self.aloss = -(tf.reduce_mean(surr - self.tflam * kl))
            else:  # clipping method, find this is better
                # PPO-clip surrogate: min(surr, clip(ratio) * advantage).
                self.aloss = -tf.reduce_mean(tf.minimum(
                    surr,
                    tf.clip_by_value(ratio, 1.-METHOD['epsilon'], 1.+METHOD['epsilon'])*self.tfadv))

filetype

for i = 1:ncomp rslt(i).ncomp = i; cal_p = squeeze(cal_preds(i, :)); cal_p = mode(cal_p,1); cal_t = squeeze(cal_trues(i, :)); cal_t = mode(cal_t,1); [cal_confus, rslt_confusionOrder] = confusionmat(cal_t, cal_p); cal_rslt = statsOfMeasure(cal_confus, 0); rslt(i).cal_confus = cal_confus; rslt(i).cal_rslt = cal_rslt; rslt(i).cal_acc = cal_rslt.microAVG(end-1); rslt(i).cal_sen = cal_rslt.microAVG(end-3); rslt(i).cal_spe = cal_rslt.microAVG(end-2); rslt(i).cal_y_true = cal_t; rslt(i).cal_y_pred = cal_p; val_p = squeeze(val_preds(i, :)); val_t = squeeze(val_trues(i, :)); [val_confus, rslt_confusionOrder] = confusionmat(val_t, val_p); val_rslt = statsOfMeasure(val_confus, 0); rslt(i).val_confus = val_confus; rslt(i).val_rslt = val_rslt; rslt(i).val_acc = val_rslt.microAVG(end-1); rslt(i).val_sen = val_rslt.microAVG(end-3); rslt(i).val_spe = val_rslt.microAVG(end-2); rslt(i).val_y_true = val_t; rslt(i).val_y_pred = val_p; mdl = plsda(x_pp, y, i, opts0); trainedModel{i} = mdl; mdl = plsda(x_test_pp,[],i,mdl, opts0); rslt(i).probability = mdl.classification.probability; y_test_pred = mdl.classification.mostprobable; [test_confus, rslt_confusionOrder] = confusionmat(y_test, y_test_pred); test_rslt = statsOfMeasure(test_confus, 0); rslt(i).test_confus = test_confus; rslt(i).test_rslt = test_rslt; rslt(i).test_acc = test_rslt.microAVG(end-1); rslt(i).test_sen = test_rslt.microAVG(end-3); rslt(i).test_spe = test_rslt.microAVG(end-2); rslt(i).test_y_true = y_test; rslt(i).test_y_pred = y_test_pred; 什么意思

weixin_38744270
  • 粉丝: 330
上传资源 快速赚钱