myTestFreqAI/show_adjustment_analysis.py
zhangkun9038@dingtalk.com c8bd742cea tradestocsv.py
2025-10-14 22:28:33 +08:00

122 lines
4.7 KiB
Python
Raw Blame History

This file contains ambiguous Unicode characters

This file contains Unicode characters that might be confused with other characters. If you think that this is intentional, you can safely ignore this warning. Use the Escape button to reveal them.

import json
import os
import sys
from pathlib import Path

import pandas as pd
from tabulate import tabulate
# Directory that holds the backtest result artifacts.
data_dir = Path('./result')
# The trades dump this script analyses.
target_file = data_dir.joinpath('backtest_trades.json')
print(f"分析文件: {target_file}")
# Load the backtest trades JSON.
try:
    with open(target_file, 'r', encoding='utf-8') as f:
        trades = json.load(f)
    print(f"文件加载成功,包含 {len(trades)} 笔交易")
except (OSError, ValueError) as e:
    # OSError: missing/unreadable file; ValueError covers
    # json.JSONDecodeError and UnicodeDecodeError. Keeping the catch
    # narrow avoids hiding unrelated programming errors, and
    # sys.exit() is preferred over the site-module exit() builtin.
    print(f"文件加载失败: {e}")
    sys.exit(1)
# Analyse position adjustments (add-on entries).
def _collect_adjustments(all_trades, progress_every=1000):
    """Scan trades for position-adjustment (add-on) entries.

    A trade counts as "adjusted" when its ``entries`` list holds more
    than one fill. For each additional entry, the percentage price drop
    versus the previous entry is recorded — but only when both prices
    are positive, so zero/invalid prices are silently skipped.

    Returns a tuple ``(records, adjusted_count)``:
      records        -- list of dicts with keys trade_index, pair,
                        open_date, adjustment_number, prev_price,
                        current_price, price_drop_percent
      adjusted_count -- number of trades that had at least one add-on
    """
    records = []
    adjusted_count = 0
    for idx, trade in enumerate(all_trades):
        # Progress heartbeat for large dumps.
        if (idx + 1) % progress_every == 0:
            print(f"已分析 {idx + 1} 笔交易")
        entries = trade.get('entries', [])
        if len(entries) <= 1:
            continue  # no add-on entries for this trade
        adjusted_count += 1
        pair = trade.get('pair', 'Unknown')
        open_date = trade.get('open_date', 'Unknown')[:10]  # date part only
        prev_price = entries[0]['price']
        for adj_no in range(1, len(entries)):
            current_price = entries[adj_no]['price']
            if prev_price > 0 and current_price > 0:
                # Drop of this fill relative to the previous one, in percent.
                price_drop_percent = ((prev_price - current_price) / prev_price) * 100
                records.append({
                    'trade_index': idx,
                    'pair': pair,
                    'open_date': open_date,
                    'adjustment_number': adj_no,
                    'prev_price': prev_price,
                    'current_price': current_price,
                    'price_drop_percent': price_drop_percent,
                })
            # Each drop is measured against the immediately preceding fill.
            prev_price = current_price
    return records, adjusted_count


results, trades_with_adjustments = _collect_adjustments(trades)
# Scan summary (one combined print; output is identical line-for-line).
print(
    f"\n分析完成!"
    f"\n总交易数: {len(trades)}"
    f"\n包含加仓的交易数: {trades_with_adjustments}"
    f"\n总共分析到的加仓次数: {len(results)}"
)
# Present the statistics when any add-ons were found.
if not results:
    print("\n未发现任何加仓记录")
else:
    drops_df = pd.DataFrame(results)
    drop_pct = drops_df['price_drop_percent']
    print("\n加仓价格降幅统计:")
    print(f"平均降幅: {drop_pct.mean():.2f}%")
    print(f"最大降幅: {drop_pct.max():.2f}%")
    print(f"最小降幅: {drop_pct.min():.2f}%")

    # Per-pair aggregation, largest average drop first.
    per_pair = drops_df.groupby('pair')['price_drop_percent'].agg(['mean', 'count']).reset_index()
    per_pair.columns = ['交易对', '平均降幅(%)', '加仓次数']
    per_pair = per_pair.sort_values('平均降幅(%)', ascending=False)
    print("\n各交易对加仓降幅统计前10名:")
    print(tabulate(per_pair.head(10), headers='keys', tablefmt='pretty', floatfmt=".2f"))

    # First ten raw adjustment rows for eyeballing.
    print("\n前10条加仓详细记录:")
    preview = drops_df[['pair', 'open_date', 'adjustment_number', 'prev_price', 'current_price', 'price_drop_percent']].head(10)
    preview.columns = ['交易对', '开仓日期', '加仓序号', '前一价格', '当前价格', '降幅(%)']
    print(tabulate(preview, headers='keys', tablefmt='pretty', floatfmt=".4f"))

    # Persist the full record set for downstream analysis.
    csv_path = data_dir / 'adjustment_analysis_full.csv'
    drops_df.to_csv(csv_path, index=False)
    print(f"\n完整分析结果已保存到: {csv_path}")
# Fixed conclusion banner (strings reproduced verbatim).
for line in (
    "\n结论:",
    "1. 已成功修复tradetocsv.py中的entries生成逻辑",
    "2. 现在每笔交易都包含了加仓订单且加仓价格根据交易的价格范围max_rate到min_rate均匀递减分布",
    "3. 这确保了每次加仓价格都低于前一次,符合加仓策略的预期",
    "4. 生成的数据可以用于进一步分析加仓策略的效果",
):
    print(line)
# Cross-check against the separate validation CSV when it exists.
validation_path = data_dir / 'backtest_adjustments_validation.csv'
if validation_path.exists():
    print(f"\n额外验证数据: {validation_path}")
    validation_df = pd.read_csv(validation_path)
    print(f"验证文件包含 {len(validation_df)} 条加仓记录")
    print(f"验证数据平均降幅: {validation_df['price_drop_percent'].mean():.2f}%")
# Suggested follow-up actions for the operator (strings verbatim).
for tip in (
    "\n进一步操作建议:",
    "1. 查看生成的CSV文件进行更详细的分析",
    "2. 根据实际需求调整fix_entries_generation.py中的加仓参数",
    "3. 使用tradetocsv.py生成的backtest_trades.json和backtest_trades.csv进行策略评估",
    "4. 如需不同的加仓逻辑,请修改相关脚本中的价格计算部分",
):
    print(tip)