Agent Development

LangChain 构建在 LangGraph 运行时之上,Agent系统(模型+提示词+工具+中间件+记忆),LangSmith 用于可观测性

- LangChain 1.0 Agents 文档
- LangChain API 参考

## 多模型切换

```python
from dotenv import load_dotenv

load_dotenv()


def get_default_model():
    if os.getenv("OPENAI_API_KEY"):
        return "gpt-4-turbo"
    elif os.getenv("GOOGLE_API_KEY"):
        return "google:gemini-1.5-flash"
    elif os.getenv("ANTHROPIC_API_KEY"):
        return "anthropic:claude-sonnet-4-5"
    elif os.getenv("GROQ_API_KEY"):
        return "groq:llama-3.3-70b-versatile"
    else:
        raise ValueError("No API key found!")
```

## 基本使用

```python
import os

from dotenv import load_dotenv
from langchain.chat_models import init_chat_model
from langchain_core.messages import SystemMessage, HumanMessage, AIMessage

load_dotenv()

model = init_chat_model(
    model_provider="openai",
    base_url="https://api.siliconflow.cn/v1/",
    model="deepseek-ai/DeepSeek-R1-0528-Qwen3-8B",
    api_key=os.getenv("OPENAI_API_KEY"),
    temperature=0.7,
    max_tokens=100,
)

# messages = [
#     {"role": "system", "content": "you are a helpful assistant."},
#     {"role": "user", "content": "Hello! How are you?"},
# ]
messages = [
    SystemMessage(content="you are a helpful assistant."),
    HumanMessage(content="Hello! How are you?"),
]
# print("messages:", messages)
print(messages[-1].type, ": ", messages[-1].content)

# sync
try:
    response = model.invoke(messages, config=None)  # can be string as well
    messages.append(response)
    print(messages[-1].type, ": ", messages[-1].content)
except ValueError as e:
    print("configuration error:", e)
except ConnectionError as e:
    print("network error:", e)
except Exception as e:
    print("unknown error:", type(e).__name__, ":", e)

# streaming
# for chunk in model.stream(messages):
#     print(chunk.content, end="", flush=True)
```

普通调用返回 finish_reason、model_name、token_usage ...

January 5, 2025 · 21 min · biglonglong