当前位置: 首页 > news >正文

数据分析ReAct工作流

让我用一个数据分析项目的例子来展示plan-and-execute框架的应用。这个例子会涉及数据处理、分析和可视化等任务。

import json
import logging
import os
import pickle
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from typing import Any, Dict, List, Optional

# Task status enumeration
class TaskStatus(Enum):
    """Lifecycle states a workflow task moves through."""

    PENDING = "pending"      # waiting for dependencies / scheduling
    RUNNING = "running"      # handler currently executing
    COMPLETED = "completed"  # handler returned successfully
    FAILED = "failed"        # handler raised; error recorded on the task


# Task priority enumeration
class TaskPriority(Enum):
    """Scheduling priority; higher numeric value runs first."""

    LOW = 1
    MEDIUM = 2
    HIGH = 3


# Task definition
@dataclass
class Task:
    """A single unit of work in the workflow dependency graph."""

    id: str
    name: str
    description: str
    priority: TaskPriority
    dependencies: List[str]  # ids of tasks that must COMPLETE before this one
    status: TaskStatus
    result: Any = None
    # Fixed annotation: the original declared `error: str = None`, which is
    # inconsistent with its None default; Optional[str] states the contract.
    error: Optional[str] = None  # failure message, set by the executor


# Workflow executor
class WorkflowExecutor:
    """Runs registered Task objects in dependency order, highest priority first."""

    def __init__(self):
        self.tasks = {}  # task id -> Task
        # Fixed: the original never initialized task_handlers, so calling
        # execute_task before an external caller assigned the attribute
        # raised AttributeError instead of a per-task failure.
        self.task_handlers = {}  # task id -> callable(task, dep_results)
        self.logger = logging.getLogger(__name__)

    def add_task(self, task: Task):
        """Register *task* under its id (a later add overwrites an earlier one)."""
        self.tasks[task.id] = task

    def get_ready_tasks(self) -> List[Task]:
        """Return all pending tasks whose dependencies are satisfied."""
        ready_tasks = []
        for task in self.tasks.values():
            if task.status == TaskStatus.PENDING:
                dependencies_met = all(
                    self.tasks[dep_id].status == TaskStatus.COMPLETED
                    for dep_id in task.dependencies
                )
                if dependencies_met:
                    ready_tasks.append(task)
        # Highest priority value first.
        return sorted(ready_tasks, key=lambda x: x.priority.value, reverse=True)

    def execute_task(self, task: Task):
        """Run a single task via its handler, recording status/result/error.

        Never raises: any handler exception (including a missing handler,
        which surfaces as KeyError) marks the task FAILED.
        """
        task.status = TaskStatus.RUNNING
        try:
            # Hand the handler this task plus its dependencies' results by id.
            result = self.task_handlers[task.id](
                task, {dep: self.tasks[dep].result for dep in task.dependencies}
            )
            task.result = result
            task.status = TaskStatus.COMPLETED
        except Exception as e:
            task.status = TaskStatus.FAILED
            task.error = str(e)
            self.logger.error(f"Task {task.id} failed: {e}")

    def execute_workflow(self):
        """Execute the whole workflow; return True iff every task completed.

        Loops over ready batches; stops when nothing is ready — either all
        tasks finished or the remainder is blocked behind a FAILED dependency.
        """
        while True:
            ready_tasks = self.get_ready_tasks()
            if not ready_tasks:
                break
            for task in ready_tasks:
                self.execute_task(task)
        all_completed = all(
            task.status == TaskStatus.COMPLETED for task in self.tasks.values()
        )
        return all_completed


# Example data-analysis workflow
class DataAnalysisWorkflow:
    """CSV analysis pipeline run through WorkflowExecutor.

    Stages: load -> clean -> (feature engineering | statistics) ->
    visualization -> report.
    """

    def __init__(self, data_path: str, output_path: str):
        self.data_path = data_path      # input CSV file
        self.output_path = output_path  # directory receiving report.json
        self.executor = WorkflowExecutor()

    def plan_workflow(self):
        """Register the six pipeline tasks and their dependency edges."""
        tasks = [
            Task(id="load_data", name="加载数据",
                 description="从CSV文件加载数据",
                 priority=TaskPriority.HIGH, dependencies=[],
                 status=TaskStatus.PENDING),
            Task(id="clean_data", name="数据清洗",
                 description="处理缺失值和异常值",
                 priority=TaskPriority.HIGH, dependencies=["load_data"],
                 status=TaskStatus.PENDING),
            Task(id="feature_engineering", name="特征工程",
                 description="创建新特征",
                 priority=TaskPriority.MEDIUM, dependencies=["clean_data"],
                 status=TaskStatus.PENDING),
            Task(id="statistical_analysis", name="统计分析",
                 description="计算基本统计指标",
                 priority=TaskPriority.MEDIUM, dependencies=["clean_data"],
                 status=TaskStatus.PENDING),
            Task(id="visualization", name="数据可视化",
                 description="生成图表",
                 priority=TaskPriority.MEDIUM,
                 dependencies=["statistical_analysis"],
                 status=TaskStatus.PENDING),
            Task(id="generate_report", name="生成报告",
                 description="生成分析报告",
                 priority=TaskPriority.LOW,
                 dependencies=["visualization", "feature_engineering"],
                 status=TaskStatus.PENDING),
        ]
        for task in tasks:
            self.executor.add_task(task)

    def register_task_handlers(self):
        """Bind each task id to its handler method on the executor."""
        self.executor.task_handlers = {
            "load_data": self.load_data,
            "clean_data": self.clean_data,
            "feature_engineering": self.feature_engineering,
            "statistical_analysis": self.statistical_analysis,
            "visualization": self.visualization,
            "generate_report": self.generate_report,
        }

    def load_data(self, task: Task, dependencies: Dict):
        """Load the source CSV into a DataFrame."""
        import pandas as pd
        df = pd.read_csv(self.data_path)
        return df

    def clean_data(self, task: Task, dependencies: Dict):
        """Fill missing numeric values; placeholder for outlier handling."""
        df = dependencies["load_data"]
        # numeric_only=True: the original df.mean() raises TypeError on
        # mixed-dtype frames under pandas >= 2.0; this restores the old
        # "numeric columns only" behaviour explicitly.
        df = df.fillna(df.mean(numeric_only=True))
        # Outlier handling / further cleaning logic would go here.
        return df

    def feature_engineering(self, task: Task, dependencies: Dict):
        """Derive new feature columns (placeholder)."""
        df = dependencies["clean_data"]
        # Feature-engineering logic would go here.
        return df

    def statistical_analysis(self, task: Task, dependencies: Dict):
        """Compute basic statistics over the cleaned data."""
        df = dependencies["clean_data"]
        stats = {
            "basic_stats": df.describe(),
            # numeric_only=True: corr() on non-numeric columns raises in
            # pandas >= 2.0 (same issue as clean_data).
            "correlations": df.corr(numeric_only=True),
            # Further statistics would go here.
        }
        return stats

    def visualization(self, task: Task, dependencies: Dict):
        """Produce figures from the statistics (placeholder)."""
        import matplotlib.pyplot as plt
        stats = dependencies["statistical_analysis"]
        figures = []
        # Visualization logic would go here.
        return figures

    def generate_report(self, task: Task, dependencies: Dict):
        """Assemble the JSON report and write it under output_path."""
        import os
        figures = dependencies["visualization"]
        df_features = dependencies["feature_engineering"]
        report = {
            "timestamp": datetime.now().isoformat(),
            "statistics": str(dependencies["statistical_analysis"]),
            "features": df_features.columns.tolist(),
            # NOTE(review): assumes figure objects expose to_json()
            # (plotly-style) — matplotlib Figure has no such method; confirm
            # before adding figures in visualization().
            "figures": [f.to_json() for f in figures],
        }
        # Create the output directory up front; the original open() failed
        # with FileNotFoundError when output_path did not exist.
        os.makedirs(self.output_path, exist_ok=True)
        with open(f"{self.output_path}/report.json", "w") as f:
            json.dump(report, f, indent=2)
        return report

    def run(self):
        """Plan, register handlers, execute; return the report or None."""
        self.plan_workflow()
        self.register_task_handlers()
        success = self.executor.execute_workflow()
        if success:
            final_report = self.executor.tasks["generate_report"].result
            print("工作流执行成功!")
            return final_report
        else:
            failed_tasks = [
                task for task in self.executor.tasks.values()
                if task.status == TaskStatus.FAILED
            ]
            print("工作流执行失败。失败的任务:")
            for task in failed_tasks:
                print(f"- {task.name}: {task.error}")
            return None


# Usage example
def main():
    """Build and run the sample sales-data analysis workflow."""
    workflow = DataAnalysisWorkflow(
        data_path="data/sales_data.csv",
        output_path="output",
    )
    report = workflow.run()
    if report:
        print("分析报告已生成:", report)
    else:
        print("工作流执行失败")


if __name__ == "__main__":
    main()

这个例子展示了:

  1. 工作流框架的核心组件:
  • Task定义
  • 工作流执行器
  • 依赖管理
  • 状态追踪
  • 错误处理
  2. 实现的关键特性:
  • 自动任务规划
  • 依赖关系处理
  • 批量任务调度(当前实现为顺序执行,可扩展为并行)
  • 结果传递
  • 错误恢复
  3. 可以扩展的方向:
# 1. Add a task retry mechanism
class RetryableExecutor(WorkflowExecutor):
    """WorkflowExecutor that re-runs a failed task up to max_retries times."""

    def execute_task(self, task: Task, max_retries: int = 3):
        """Run *task*, retrying on failure; final status is whatever the
        last attempt produced.

        Fixed: the base execute_task never raises (it catches handler
        exceptions and sets status=FAILED), so the original `except` branch
        that incremented `retries` was unreachable — a failing task spun in
        an infinite loop. Drive the retry count off task.status instead.
        """
        retries = 0
        while retries < max_retries:
            super().execute_task(task)
            if task.status == TaskStatus.COMPLETED:
                break
            retries += 1
            self.logger.warning(f"Retry {retries}/{max_retries} for task {task.id}")
            if retries < max_retries:
                # Reset so the base class treats the task as runnable again.
                task.status = TaskStatus.PENDING


# 2. Add progress monitoring
class MonitoredWorkflow(DataAnalysisWorkflow):
    """DataAnalysisWorkflow variant that reports task progress via a callback."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.progress_callback = None  # optional fn(task, status)

    def set_progress_callback(self, callback):
        """Register a callable invoked as callback(task, status)."""
        self.progress_callback = callback

    def update_progress(self, task: Task, status: str):
        """Forward a progress event to the registered callback, if any."""
        callback = self.progress_callback
        if callback is not None:
            callback(task, status)


# 3. Add caching of intermediate results
class CachedExecutor(WorkflowExecutor):
    """WorkflowExecutor that persists task results to disk with pickle.

    Fixed: the original used `os` and `pickle` without any import in the
    file (NameError at first use) and leaked file handles by passing bare
    open() calls to pickle — both opens now use context managers.
    """

    def __init__(self, cache_dir: str):
        super().__init__()
        self.cache_dir = cache_dir  # directory holding <task_id>.cache files

    def get_cached_result(self, task: Task):
        """Return the cached result for *task*, or None when no cache exists."""
        cache_path = f"{self.cache_dir}/{task.id}.cache"
        if os.path.exists(cache_path):
            # NOTE(review): pickle.load on files from an untrusted directory
            # can execute arbitrary code — keep cache_dir private.
            with open(cache_path, "rb") as fh:
                return pickle.load(fh)
        return None

    def cache_result(self, task: Task):
        """Write task.result to the cache directory."""
        cache_path = f"{self.cache_dir}/{task.id}.cache"
        with open(cache_path, "wb") as fh:
            pickle.dump(task.result, fh)
  4. 使用建议:
# 1. Configure logging for the workflow modules.
log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_format)

# 2. Add performance monitoring
from time import time, perf_counter


class PerformanceMonitor:
    """Records start/end timestamps per task id and computes durations."""

    def __init__(self):
        self.task_times: Dict[str, Dict[str, float]] = {}  # id -> {"start", "end"}

    def start_task(self, task_id: str):
        """Mark *task_id* as started (overwrites any previous timing)."""
        # perf_counter is monotonic; the original time.time() can jump
        # backwards under NTP adjustment and yield negative durations.
        self.task_times[task_id] = {"start": perf_counter()}

    def end_task(self, task_id: str):
        """Mark *task_id* as finished; start_task must have been called first."""
        self.task_times[task_id]["end"] = perf_counter()

    def get_task_duration(self, task_id: str):
        """Return elapsed seconds; raises KeyError if the task was not timed."""
        times = self.task_times[task_id]
        return times["end"] - times["start"]


# 3. Implement graceful termination
import signal


class GracefulWorkflow(DataAnalysisWorkflow):
    """DataAnalysisWorkflow that raises a stop flag instead of dying on Ctrl-C."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.should_stop = False
        # Route SIGINT to our handler rather than the default KeyboardInterrupt.
        signal.signal(signal.SIGINT, self.handle_interrupt)

    def handle_interrupt(self, signum, frame):
        """SIGINT handler: announce shutdown and flag the run loop to stop."""
        print("\nReceived interrupt signal. Cleaning up...")
        self.should_stop = True

这个框架可以用于很多场景,比如:

  • 数据处理管道
  • ETL工作流
  • 机器学习实验
  • 报告生成系统
  • 自动化测试流程

关键是要根据具体需求调整任务定义和执行逻辑。

http://www.lryc.cn/news/480309.html

相关文章:

  • Rust-AOP编程实战
  • Flutter鸿蒙next 中的 Expanded 和 Flexible 使用技巧详解
  • 【微信小游戏学习心得】
  • Python | Leetcode Python题解之第539题最小时间差
  • Zookeeper运维秘籍:四字命令基础、详解及业务应用全解析
  • Error: `slot-scope` are deprecated报错解决
  • Excel(图例)中使用上标下标
  • 熔断和降级
  • 【学习笔记】Linux系统基础知识 6 —— su命令详解
  • docker-compose命令介绍
  • Spring学习笔记_29——@Transactional
  • github使用基础
  • Flink-Kafka-Connector
  • 远程终端vim里使用系统剪切板
  • 底层视角看C语言
  • 【点云学习笔记】——分割任务学习
  • Qt——窗口
  • InfluxDB性能优化指南
  • 负载均衡式在线oj项目开发文档2(个人项目)
  • ssm081高校实验室管理系统的设计与实现+vue(论文+源码)_kaic
  • GitLab基于Drone搭建持续集成(CI/CD)
  • 用GPB外链打造长期稳定的SEO优势!
  • 第11章 内连接与外连接
  • C++ 游戏开发:打造高效、性能优越的游戏世界
  • 太速科技-440-基于XCVU440的多核处理器多输入芯片验证板卡
  • 澳鹏通过高质量数据支持 Onfido 优化AI反欺诈功能
  • 基于ECS实例搭建Hadoop环境
  • 关于vue如何监听route和state以及各自对应的实际场景
  • 【计网不挂科】计算机网络期末考试(综合)——【选择题&填空题&判断题&简述题】完整题库
  • Linux(CentOS)设置防火墙开放8080端口,运行jar包,接收请求