Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Sign up
Appearance settings

Commit b0b169b

Browse files
research
1 parent a61dc2a commit b0b169b

File tree

6 files changed

+564
-0
lines changed

6 files changed

+564
-0
lines changed

‎app.py

Lines changed: 241 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,241 @@
import os
import time
import uuid
from operator import itemgetter
from typing import Annotated, Dict, List, TypedDict

from IPython.display import Image, display
from langchain_core.prompts import ChatPromptTemplate, PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.runnables import RunnablePassthrough
from langchain_groq import ChatGroq
from langgraph.checkpoint.sqlite import SqliteSaver
from langgraph.graph import END, StateGraph
from langgraph.graph.message import AnyMessage, add_messages

# Set environment variables
os.environ['TOKENIZERS_PARALLELISM'] = 'true'

# Set up the LLM.
# The API key is read from the environment instead of being hard-coded in
# source control (the original embedded a placeholder literal here).
llm = ChatGroq(
    temperature=0,
    groq_api_key=os.getenv("GROQ_API_KEY"),
    model_name="llama3-8b-8192",
)

# Prompt template: system instructions followed by the running message
# history (user question, prior attempts, error feedback).
code_gen_prompt_claude = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """You are a coding assistant. Ensure any code you provide can be executed with all required imports and variables defined. Structure your answer: 1) a prefix describing the code solution, 2) the imports, 3) the functioning code block.
\n Here is the user question:""",
        ),
        ("placeholder", "{messages}"),
    ]
)
# Define the data model for structured LLM output.
# NOTE: the class docstring and the Field descriptions are serialized into
# the tool schema sent to the LLM, so they are behavior, not just docs.
class code(BaseModel):
    """Code output"""

    prefix: str = Field(description="Description of the problem and approach")
    imports: str = Field(description="Code block import statements")
    code: str = Field(description="Code block not including import statements")
    # NOTE(review): pydantic v1 infers a `description: str` field (with this
    # default) from the bare assignment below, so it also appears in the
    # schema — looks like a leftover from the LangChain tutorial; confirm
    # before removing.
    description = "Schema for code solutions to questions about LCEL."

# Set up the structured output: responses are parsed into `code` instances
# (include_raw=False drops the raw message and returns only the parsed model).
code_gen_chain = llm.with_structured_output(code, include_raw=False)
# Define the graph state shared by all nodes.
class GraphState(TypedDict):
    """
    Represents the state of our graph.

    Attributes:
        error : Binary flag ("yes"/"no") for control flow to indicate whether test error was tripped
        messages : With user question, error messages, reasoning
        generation : Code solution
        iterations : Number of tries
    """

    error: str
    # add_messages is the reducer: node returns are appended/merged rather
    # than replacing the list wholesale.
    messages: Annotated[list[AnyMessage], add_messages]
    # NOTE(review): annotated as str but nodes actually store a `code`
    # pydantic instance here — confirm before tightening the type.
    generation: str
    iterations: int
# Define the nodes
def generate(state: "GraphState"):
    """
    Generate a code solution.

    Args:
        state (dict): The current graph state

    Returns:
        state (dict): New keys added to state: generation, messages, iterations
    """

    print("---GENERATING CODE SOLUTION---")

    # State (the unused `error` read was removed).
    messages = state["messages"]
    iterations = state["iterations"]

    # Ask the structured-output chain for a new attempt.
    code_solution = code_gen_chain.invoke(messages)
    messages += [
        (
            "assistant",
            f"Here is my attempt to solve the problem: {code_solution.prefix} \n Imports: {code_solution.imports} \n Code: {code_solution.code}",
        )
    ]

    # Increment the retry counter.
    iterations = iterations + 1

    # Throttle to reduce API request rate (1 second between generations).
    time.sleep(1)

    return {"generation": code_solution, "messages": messages, "iterations": iterations}
def code_check(state: "GraphState"):
    """
    Check code by executing the candidate solution.

    Runs the solution's import statements, then the imports plus the code
    block together, in one isolated namespace. On any exception the error
    is fed back into the message history for the next generation attempt.

    SECURITY NOTE(review): this exec()s LLM-generated code in-process —
    do not run it on untrusted input outside a sandbox.

    Args:
        state (dict): The current graph state

    Returns:
        state (dict): New key added to state, error ("yes"/"no")
    """

    print("---CHECKING CODE---")

    # State
    messages = state["messages"]
    code_solution = state["generation"]
    iterations = state["iterations"]

    # Get solution components (renamed local so it does not shadow the
    # `code` schema class at module level).
    imports = code_solution.imports
    solution_code = code_solution.code

    # One isolated scope shared by both checks, so the imports are visible
    # to the code block but nothing leaks into this module's namespace.
    scope = {}

    # Check imports
    try:
        exec(imports, scope)
    except Exception as e:
        print("---CODE IMPORT CHECK: FAILED---")
        messages += [("user", f"Your solution failed the import test. Here is the error: {e}. Reflect on this error and your prior attempt to solve the problem. (1) State what you think went wrong with the prior solution and (2) try to solve this problem again. Return the FULL SOLUTION. Use the code tool to structure the output with a prefix, imports, and code block:")]
        return {
            "generation": code_solution,
            "messages": messages,
            "iterations": iterations,
            "error": "yes",
        }

    # Check execution of imports + code together.
    try:
        exec(f"{imports}\n{solution_code}", scope)
    except Exception as e:
        print("---CODE BLOCK CHECK: FAILED---")
        # (fixed a stray ")" typo in this feedback message)
        messages += [("user", f"Your solution failed the code execution test: {e}. Reflect on this error and your prior attempt to solve the problem. (1) State what you think went wrong with the prior solution and (2) try to solve this problem again. Return the FULL SOLUTION. Use the code tool to structure the output with a prefix, imports, and code block:")]
        return {
            "generation": code_solution,
            "messages": messages,
            "iterations": iterations,
            "error": "yes",
        }

    # No errors
    print("---NO CODE TEST FAILURES---")
    return {
        "generation": code_solution,
        "messages": messages,
        "iterations": iterations,
        "error": "no",
    }
def decide_to_finish(state: "GraphState"):
    """
    Determines whether to finish.

    Args:
        state (dict): The current graph state

    Returns:
        str: Next node to call ("end" to stop, "generate" to retry)
    """
    error = state["error"]
    iterations = state["iterations"]

    # `max_iterations` is a module-level setting defined by the run script
    # *after* this function; fall back to the script's value (5) so the
    # node also works standalone instead of raising NameError.
    limit = globals().get("max_iterations", 5)

    # `>=` instead of the original `==` so an overshoot can never loop
    # forever.
    if error == "no" or iterations >= limit:
        print("---DECISION: FINISH---")
        return "end"
    else:
        print("---DECISION: RE-TRY SOLUTION---")
        return "generate"
# Define the graph
builder = StateGraph(GraphState)

# Add nodes
builder.add_node("generate", generate)  # generation solution
builder.add_node("check_code", code_check)  # check code

# Build graph: generate -> check_code -> (END | generate)
builder.set_entry_point("generate")
builder.add_edge("generate", "check_code")
builder.add_conditional_edges(
    "check_code",
    decide_to_finish,
    {
        "end": END,
        "generate": "generate",
    },
)

# Compile the graph with an in-memory SQLite checkpointer so per-thread
# conversation state survives across stream() calls.
# NOTE(review): in recent langgraph releases SqliteSaver.from_conn_string
# is a context manager rather than returning a saver directly — confirm
# against the pinned langgraph version.
memory = SqliteSaver.from_conn_string(":memory:")
graph = builder.compile(checkpointer=memory)
# Display the graph (best-effort; only renders in notebook environments
# with the optional mermaid/graphviz support installed).
try:
    display(Image(graph.get_graph(xray=True).draw_mermaid_png()))
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
    # are not swallowed; rendering failures are safely ignored.
    pass

# Run the graph
_printed = set()
thread_id = str(uuid.uuid4())
config = {
    "configurable": {
        # Checkpoints are accessed by thread_id
        "thread_id": thread_id,
    }
}

# Ask user for input
question = input("Enter your question or search query: ")

# Maximum number of retry iterations, consulted by decide_to_finish.
max_iterations = 5

events = graph.stream(
    {"messages": [("user", question)], "iterations": 0}, config, stream_mode="values"
)

def _print_event(event, _printed):
    """Print each state snapshot once, deduplicating by its string form."""
    if str(event) not in _printed:
        print(event)
        _printed.add(str(event))

event = None
for event in events:
    _print_event(event, _printed)

# Output the final result. Guarded: the original `event['generation']`
# raised NameError on an empty stream and KeyError when the last snapshot
# lacked a generation.
print("Final Result:")
if event is not None:
    print(event.get('generation'))

‎main.py

Lines changed: 99 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,99 @@
1+
# from langchain.agents import initialize_agent
2+
# from langchain_experimental.utilities import PythonREPL
3+
# from langchain_community.tools.ddg_search.tool import DuckDuckGoSearchRun
4+
# from langchain_core.tools import Tool
5+
# from langchain_groq import ChatGroq
6+
7+
# # Create the Python REPL tool
8+
# python_repl = PythonREPL()
9+
# python_repl_tool = Tool(
10+
# name="python_repl",
11+
# description="A Python shell. Use this to execute python commands. Input should be a valid python command. If you want to see the output of a value, you should print it out with `print(...)`.",
12+
# func=python_repl.run,
13+
# )
14+
15+
# # Create the DuckDuckGo search tool
16+
# duckduckgo_search = DuckDuckGoSearchRun()
17+
# duckduckgo_search_tool = Tool(
18+
# name="duckduckgo_search",
19+
# description="A wrapper around DuckDuckGo Search. Useful for when you need to answer questions about current events. Input should be a search query.",
20+
# func=duckduckgo_search.run,
21+
# )
22+
23+
# # Create the list of tools
24+
# tools = [python_repl_tool, duckduckgo_search_tool]
25+
26+
# # Initialize the LLM
27+
# llm = ChatGroq(temperature=0, groq_api_key=os.getenv("GROQ_API_KEY"), model_name="llama3-70b-8192")  # SECURITY: a live API key was committed here and has been redacted — revoke it
28+
29+
# # Initialize the agent
30+
# agent = initialize_agent(tools, llm, agent_type="zero-shot-react-description")
31+
32+
# # Run the agent
33+
# while True:
34+
# user_input = input("Enter a command or search query (or 'quit' to stop): ")
35+
# if user_input.lower() == 'quit':
36+
# break
37+
# result = agent.run(user_input)
38+
# print(result)
39+
40+
41+
42+
import os
from tempfile import TemporaryDirectory

from langchain.agents import initialize_agent
from langchain_experimental.utilities import PythonREPL
from langchain_community.tools.ddg_search.tool import DuckDuckGoSearchRun
from langchain_core.tools import Tool
from langchain_groq import ChatGroq
from langchain_community.agent_toolkits import FileManagementToolkit

# Temporary directory that scopes all file-tool operations.
# NOTE(review): never cleaned up explicitly; it lives until interpreter
# exit, which is acceptable for this interactive script.
working_directory = TemporaryDirectory()

# File-management tools rooted at the temp directory.
file_management_toolkit = FileManagementToolkit(root_dir=str(working_directory.name))
file_tools = file_management_toolkit.get_tools()

# Wrap each toolkit tool in the generic Tool interface the agent expects,
# reusing the tool's class name and docstring as its name/description.
file_operations_tools = [
    Tool(
        name=tool.__class__.__name__,
        description=tool.__doc__,
        func=tool.invoke,
    )
    for tool in file_tools
]

# Create the Python REPL tool
python_repl = PythonREPL()
python_repl_tool = Tool(
    name="python_repl",
    description="A Python shell. Use this to execute python commands. Input should be a valid python command. If you want to see the output of a value, you should print it out with `print(...)`.",
    func=python_repl.run,
)

# Create the DuckDuckGo search tool
duckduckgo_search = DuckDuckGoSearchRun()
duckduckgo_search_tool = Tool(
    name="duckduckgo_search",
    description="A wrapper around DuckDuckGo Search. Useful for when you need to answer questions about current events and internet Search related queries. Input should be a search query.",
    func=duckduckgo_search.run,
)

# Create the list of tools
tools = file_operations_tools + [python_repl_tool, duckduckgo_search_tool]

# Initialize the LLM; the API key is read from the environment instead of
# being hard-coded in source control.
llm = ChatGroq(temperature=0, groq_api_key=os.getenv("GROQ_API_KEY"), model_name="llama3-70b-8192")

# Initialize the agent.
# NOTE(review): initialize_agent's keyword is `agent=` (an AgentType), not
# `agent_type=` — the value below may be silently ignored; confirm against
# the installed langchain version.
agent = initialize_agent(tools, llm, agent_type="zero-shot-react-description")

# Interactive loop: run the agent until the user types 'quit'.
while True:
    user_input = input("Enter a command or search query (or 'quit' to stop): ")
    if user_input.lower() == 'quit':
        break
    result = agent.run(user_input)
    print(result)

‎requirements.txt

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
# Core LangChain stack
langchain
langchain_community
langchain-experimental   # PythonREPL (main.py, t.py)
langchain-groq           # ChatGroq client (app.py, main.py, t.py) — was missing
langchain-mistralai
langgraph
# Data / tokenization
numpy
pandas
tiktoken
# Search
duckduckgo-search
DDGS
# Notebook display (IPython.display in app.py)
ipython

‎t.py

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
import os

from langchain.agents import initialize_agent, create_structured_chat_agent
from langchain_experimental.utilities import PythonREPL
from langchain_community.tools.ddg_search.tool import DuckDuckGoSearchRun
from langchain_core.tools import Tool
from langchain_groq import ChatGroq

# Create the Python REPL tool
python_repl = PythonREPL()
python_repl_tool = Tool(
    name="python_repl",
    description="A Python shell. Use this to execute python commands. Input should be a valid python command. If you want to see the output of a value, you should print it out with `print(...)`.",
    func=python_repl.run,
)

# Create the DuckDuckGo search tool
duckduckgo_search = DuckDuckGoSearchRun()
duckduckgo_search_tool = Tool(
    name="duckduckgo_search",
    description="A wrapper around DuckDuckGo Search. Useful for when you need to answer questions about current events. Input should be a search query.",
    func=duckduckgo_search.run,
)

# Create the list of tools
tools = [python_repl_tool, duckduckgo_search_tool]

# Initialize the LLM; the API key is read from the environment instead of
# being hard-coded in source control.
llm = ChatGroq(temperature=0, groq_api_key=os.getenv("GROQ_API_KEY"), model_name="llama3-70b-8192")

# Initialize the agent.
# NOTE(review): initialize_agent's keyword is `agent=` (an AgentType), not
# `agent_type=` — confirm the intended agent type is actually applied.
agent = initialize_agent(tools, llm, agent_type="zero-shot-react-description")

# Interactive loop: run the agent until the user types 'quit'.
while True:
    user_input = input("Enter a command or search query (or 'quit' to stop): ")
    if user_input.lower() == 'quit':
        break
    result = agent.run(user_input)
    print(result)

0 commit comments

Comments
(0)

AltStyle によって変換されたページ (->オリジナル) /