Load environment variables
from dotenv import load_dotenv
load_dotenv()
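load_dotenv() assumes a .env file in the working directory; the ChatOpenAI model used later reads its API key from the environment. A minimal sketch of such a file (the key value is a placeholder, not a real credential):
# .env (illustrative example)
OPENAI_API_KEY=sk-...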
Create the state store
from typing import TypedDict, Annotated, List, Union
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.messages import BaseMessage
import operator
# GraphState - records the work performed at each node (state history)
class AgentState(TypedDict):
    input: str
    chat_history: list[BaseMessage]  # list of 'previous messages' in the conversation
    agent_outcome: Union[AgentAction, AgentFinish, None]  # `None` must be a valid type (before the agent runs)
    intermediate_steps: Annotated[list[tuple[AgentAction, str]], operator.add]
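The Annotated[..., operator.add] reducer on intermediate_steps tells LangGraph to merge updates to that key by concatenation instead of overwriting. A minimal plain-Python illustration of that merge behavior (not LangGraph internals):
# Each node returns only the keys it updates; for intermediate_steps the
# updates are concatenated, so earlier tool results are preserved.
first_update = [("action_1", "observation_1")]
second_update = [("action_2", "observation_2")]
print(operator.add(first_update, second_update))
# [('action_1', 'observation_1'), ('action_2', 'observation_2')]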
Create custom tools
## Custom Tools
from langchain.tools import BaseTool, StructuredTool, Tool, tool
import random
@tool("lower_case", return_direct=True)
def to_lower_case(input:str) -> str:
"""Returns the input as all lower case."""
return input.lower()
@tool("random_number", return_direct=True)
def random_number_maker(input:str) -> str:
"""Returns a random number between 0-100."""
return random.randint(0, 100)
tools = [to_lower_case,random_number_maker]
random_value = random_number_maker.run('random')
SAM_value = to_lower_case.run('SAM')
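Calling the tools directly is a quick sanity check before wiring them into the graph; the values below are illustrative (the random number changes on every run):
print("random_value :", random_value)  # e.g. 57 (an integer from 0 to 100)
print("SAM_value    :", SAM_value)     # 'sam'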
Set up the agent
## Agent - with create_openai_functions_agent
from langchain import hub
from langchain.agents import create_openai_functions_agent
from langchain_openai.chat_models import ChatOpenAI
prompt = hub.pull("hwchase17/openai-functions-agent")
llm = ChatOpenAI(model="gpt-3.5-turbo-1106", streaming=True)
agent_runnable = create_openai_functions_agent(llm, tools, prompt)

inputs = {
    "input": "give me a random number and then write in words and make it lower case.",
    "chat_history": [],
    "intermediate_steps": [],
}
agent_outcome = agent_runnable.invoke(inputs)
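On this first call the model is expected to pick a tool rather than finish, so agent_outcome should be an AgentAction whose tool and tool_input show what was selected. A small check, using the AgentAction/AgentFinish types imported earlier:
# Expect an AgentAction (tool call) for the random_number tool on the first step.
if isinstance(agent_outcome, AgentFinish):
    print("final answer:", agent_outcome.return_values["output"])
else:
    print("tool call:", agent_outcome.tool, agent_outcome.tool_input)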
Create the nodes
## Nodes
from langchain_core.agents import AgentFinish
from langgraph.prebuilt.tool_executor import ToolExecutor
tool_executor = ToolExecutor(tools)
def run_agent(data):
    # Run the agent on the current state and store its decision in the state
    agent_outcome = agent_runnable.invoke(data)
    return {"agent_outcome": agent_outcome}

def execute_tools(data):
    # Execute the tool the agent chose and append the (action, observation) pair
    agent_action = data["agent_outcome"]
    output = tool_executor.invoke(agent_action)
    return {"intermediate_steps": [(agent_action, str(output))]}

def should_continue(data):
    # If the agent produced a final answer, stop; otherwise run the chosen tool
    if isinstance(data["agent_outcome"], AgentFinish):
        return "end"
    else:
        return "continue"
Define the graph
## Define the graph
from langgraph.graph import END, StateGraph
workflow = StateGraph(AgentState)
workflow.add_node("agent", run_agent)
workflow.add_node("action", execute_tools)
workflow.set_entry_point("agent")
workflow.add_conditional_edges(
    "agent",
    should_continue,
    {
        "continue": "action",
        "end": END,
    },
)
workflow.add_edge("action", "agent")  # after a tool runs, control returns to the agent
app = workflow.compile()  # compile the graph into a runnable application
Visualize the graph
# --- Check the graph structure ---
from IPython.display import Image, display
try:
    display(Image(app.get_graph(xray=True).draw_mermaid_png()))
except Exception:
    # This requires some extra dependencies and is optional
    pass
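If the Mermaid PNG dependencies are not installed, the same graph can be printed as ASCII instead (this optional fallback requires the grandalf package):
# Text-based fallback for environments without the PNG dependencies.
app.get_graph(xray=True).print_ascii()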
Viewing results by invocation type
Stream
## type 1
inputs = {"input": "give me a random number and then write in words and make it lower case.", "chat_history": []}
for s in app.stream(inputs):
    print(list(s.values())[0])
    print("----")
Invoke
## type 2
inputs = {"input": "give me a random number and then write in words and make it lower case", "chat_history": []}
output = app.invoke(inputs)
agent_outcome = output.get("agent_outcome").return_values['output']
# intermediate_steps = output.get("intermediate_steps")
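The commented-out line above points at the accumulated tool history; one way to inspect it, using the field names from the AgentState defined earlier:
# Each entry is an (AgentAction, observation) pair collected via operator.add.
for action, observation in output.get("intermediate_steps", []):
    print(action.tool, action.tool_input, "->", observation)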
Invoke (no tool use)
## type 3
inputs = {"input": "does it get cold in SF in Jan?", "chat_history": []}
output = app.invoke(inputs)
agent_outcome_no_Tools = output.get("agent_outcome").return_values['output']
print("\\n - agent_outcome_no_Tools :" ,agent_outcome_no_Tools)
# intermediate_steps_no_Tools = output.get("intermediate_steps")
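Since this question needs no tool, intermediate_steps is expected to come back empty (or absent); a quick check:
# No tool calls means an empty tool history.
print(output.get("intermediate_steps", []))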