# agent_management.py

import base64
import json
import logging
import os
import re

import requests
import streamlit as st

from configs.config import BUILT_IN_AGENTS, LLM_PROVIDER, MODEL_CHOICES, MODEL_TOKEN_LIMITS
from models.agent_base_model import AgentBaseModel
from models.tool_base_model import ToolBaseModel
from utils.api_utils import get_api_key
from utils.error_handling import log_error
from utils.tool_utils import populate_tool_models, show_tools
from utils.ui_utils import display_goal, get_llm_provider, get_provider_models, update_discussion_and_whiteboard

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
def agent_button_callback(agent_index):
    def callback():
        logger.debug(f"Agent button clicked for index: {agent_index}")
        st.session_state['selected_agent_index'] = agent_index
        agent = st.session_state.agents[agent_index]
        logger.debug(f"Agent: {agent}")
        # Check if the agent is an instance of AgentBaseModel
        if isinstance(agent, AgentBaseModel):
            agent_name = agent.name if hasattr(agent, 'name') else ''
            agent_description = agent.description if hasattr(agent, 'description') else ''
        else:
            # Fallback for dictionary-like structure
            agent_name = agent.get('config', {}).get('name', '')
            agent_description = agent.get('description', '')
        logger.debug(f"Agent name: {agent_name}, description: {agent_description}")
        st.session_state['form_agent_name'] = agent_name
        st.session_state['form_agent_description'] = agent_description
        # Directly call process_agent_interaction here if appropriate
        process_agent_interaction(agent_index)
    return callback


def construct_request(agent, agent_name, description, user_request, user_input, rephrased_request, reference_url, tool_results):
request=f"Act as the {agent_name} who {description}."ifuser_request:
request+=f" Original request was: {user_request}."ifrephrased_request:
request+=f" You are helping a team work on satisfying {rephrased_request}."ifuser_input:
request+=f" Additional input: {user_input}."ifreference_urlandreference_urlinst.session_state.reference_html:
html_content=st.session_state.reference_html[reference_url]
request+=f" Reference URL content: {html_content}."ifst.session_state.discussion:
request+=f" The discussion so far has been {st.session_state.discussion[-50000:]}."iftool_results:
request+=f" tool results: {tool_results}."# Check if agent is an AgentBaseModel instanceifisinstance(agent, AgentBaseModel):
agent_tools=agent.toolselse:
agent_tools=agent.get('tools', [])
ifagent_tools:
request+="\n\nYou have access to the following tools:\n"fortoolinagent_tools:
ifisinstance(tool, ToolBaseModel):
request+=f"{str(tool)}\n"elifisinstance(tool, dict):
request+=f"{tool.get('name', 'Unknown Tool')}: {tool.get('description', 'No description available')}\n"request+="\nTo use a tool, include its name and arguments in your response, e.g., 'I will use calculate_compound_interest(1000, 0.05, 10) to determine the future value.'"returnrequestdefdisplay_agents():
if"agents"inst.session_stateandst.session_state.agentsandlen(st.session_state.agents) ==3:
st.sidebar.warning(f"No agents have yet been created. Please enter a new request.")
st.sidebar.warning(f"ALSO: If no agents are created, do a hard reset (CTL-F5) and try switching models. LLM results can be unpredictable.")
st.sidebar.warning(f"SOURCE: https://github.com/jgravelle/AutoGroq\n\r\n\r https://j.gravelle.us\n\r\n\r DISCORD: https://discord.gg/DXjFPX84gs \n\r\n\r YouTube: https://www.youtube.com/playlist?list=PLPu97iZ5SLTsGX3WWJjQ5GNHy7ZX66ryP")
else:
st.sidebar.title("Your Agents")
st.sidebar.subheader("Click to interact")
dynamic_agents_exist=Falsebuilt_in_agents= []
# First pass: Identify if there are any dynamic agents and collect built-in agentsforindex, agentinenumerate(st.session_state.agents):
ifagent.namenotinBUILT_IN_AGENTS:
dynamic_agents_exist=Trueelse:
built_in_agents.append((agent, index))
# Display dynamically created agentsforindex, agentinenumerate(st.session_state.agents):
ifagent.namenotinBUILT_IN_AGENTS:
display_agent_button(agent, index)
# Display built-in agents only if dynamic agents existifdynamic_agents_existandbuilt_in_agents:
st.sidebar.markdown("---")
st.sidebar.markdown("Built-in Agents:")
foragent, indexinbuilt_in_agents:
display_agent_button(agent, index)
display_goal()
populate_tool_models()
show_tools()
else:
st.empty()
def display_agent_button(agent, index):
    col1, col2 = st.sidebar.columns([1, 4])
    with col1:
        gear_icon = "⚙️"
        if st.button(gear_icon, key=f"gear_{index}", help="Edit Agent"):
            st.session_state['edit_agent_index'] = index
            st.session_state[f'show_edit_{index}'] = not st.session_state.get(f'show_edit_{index}', False)
    with col2:
        if "next_agent" in st.session_state and st.session_state.next_agent == agent.name:
            button_style = """
                <style>
                div[data-testid*="stButton"] > button[kind="secondary"] {
                    background-color: green !important;
                    color: white !important;
                }
                </style>
            """
            st.markdown(button_style, unsafe_allow_html=True)
        st.button(agent.name, key=f"agent_{index}", on_click=agent_button_callback(index))
    if st.session_state.get(f'show_edit_{index}', False):
        display_agent_edit_form(agent, index)
def display_agent_buttons(agents):
    for index, agent in enumerate(agents):
        agent_name = agent.name if agent.name else f"Unnamed Agent {index + 1}"
        agent_id = getattr(agent, 'id', index)  # Use agent's id if available, otherwise use index
        col1, col2 = st.sidebar.columns([1, 4])
        with col1:
            gear_icon = "⚙️"  # Unicode character for gear icon
            if st.button(
                gear_icon,
                key=f"gear_{agent_id}_{agent_name}",  # Use both id and name for uniqueness
                help="Edit Agent"  # Add the tooltip text
            ):
                st.session_state['edit_agent_index'] = index
                st.session_state['show_edit'] = True
        with col2:
            if "next_agent" in st.session_state and st.session_state.next_agent == agent_name:
                button_style = """
                    <style>
                    div[data-testid*="stButton"] > button[kind="secondary"] {
                        background-color: green !important;
                        color: white !important;
                    }
                    </style>
                """
                st.markdown(button_style, unsafe_allow_html=True)
            st.button(agent_name, key=f"agent_{agent_id}_{agent_name}", on_click=agent_button_callback(index))
def display_agent_edit_form(agent, edit_index):
    with st.expander(f"Edit Properties of {agent.name}", expanded=True):
        col1, col2 = st.columns([4, 1])
        with col1:
            unique_key = f"name_{edit_index}_{agent.name}"
            new_name = st.text_input("Name", value=agent.name, key=unique_key)
            if st.session_state.get(f"delete_confirmed_{edit_index}_{agent.name}", False):
                if st.button("Confirm Deletion", key=f"confirm_delete_{edit_index}_{agent.name}"):
                    st.session_state.agents.pop(edit_index)
                    st.session_state[f'show_edit_{edit_index}'] = False
                    del st.session_state[f"delete_confirmed_{edit_index}_{agent.name}"]
                    st.experimental_rerun()
                if st.button("Cancel", key=f"cancel_delete_{edit_index}_{agent.name}"):
                    del st.session_state[f"delete_confirmed_{edit_index}_{agent.name}"]
                    st.experimental_rerun()
        with col2:
            container = st.container()
            if container.button("X", key=f"delete_{edit_index}_{agent.name}"):
                if st.session_state.get(f"delete_confirmed_{edit_index}_{agent.name}", False):
                    st.session_state.agents.pop(edit_index)
                    st.session_state[f'show_edit_{edit_index}'] = False
                    st.experimental_rerun()
                else:
                    st.session_state[f"delete_confirmed_{edit_index}_{agent.name}"] = True
                    st.experimental_rerun()
        description_value = agent.description
        col1, col2 = st.columns([3, 1])
        with col1:
            current_provider = agent.provider or st.session_state.get('provider')
            selected_provider = st.selectbox(
                "Provider",
                options=MODEL_CHOICES.keys(),
                index=list(MODEL_CHOICES.keys()).index(current_provider),
                key=f"provider_select_{edit_index}_{agent.name}"
            )
            provider_models = get_provider_models(selected_provider)
            current_model = agent.model or st.session_state.get('model')
            if current_model not in provider_models:
                st.warning(f"Current model '{current_model}' is not available for the selected provider. Please select a new model.")
                current_model = next(iter(provider_models))  # Set to first available model
            selected_model = st.selectbox(
                "Model",
                options=list(provider_models.keys()),
                index=list(provider_models.keys()).index(current_model),
                key=f"model_select_{edit_index}_{agent.name}"
            )
        with col2:
            if st.button("Set for ALL agents", key=f"set_all_agents_{edit_index}_{agent.name}"):
                for agent in st.session_state.agents:
                    agent.config['provider'] = selected_provider
                    if 'llm_config' not in agent.config:
                        agent.config['llm_config'] = {'config_list': [{}]}
                    if not agent.config['llm_config']['config_list']:
                        agent.config['llm_config']['config_list'] = [{}]
                    agent.config['llm_config']['config_list'][0]['model'] = selected_model
                    agent.config['llm_config']['max_tokens'] = provider_models[selected_model]
                st.experimental_rerun()
        # Display the description in a text area
        new_description = st.text_area("Description", value=description_value, key=f"desc_{edit_index}_{agent.name}")
        col1, col2 = st.columns([3, 1])
        with col1:
            if st.button("Update User Description", key=f"regenerate_{edit_index}_{agent.name}"):
                print(f"Regenerate button clicked for agent {edit_index}")
                new_description = regenerate_agent_description(agent)
                if new_description:
                    agent.description = new_description
                    print(f"Description regenerated for {agent.name}: {new_description}")
                    st.session_state[f"regenerate_description_{edit_index}_{agent.name}"] = True
                    description_value = new_description
                    st.experimental_rerun()
                else:
                    print(f"Failed to regenerate description for {agent.name}")
        with col2:
            if st.button("Save", key=f"save_{edit_index}_{agent.name}"):
                agent.name = new_name
                agent.description = new_description
                agent.provider = selected_provider
                agent.model = selected_model
                # Update the config as well
                agent.config['provider'] = selected_provider
                if 'llm_config' not in agent.config:
                    agent.config['llm_config'] = {'config_list': [{}]}
                if not agent.config['llm_config']['config_list']:
                    agent.config['llm_config']['config_list'] = [{}]
                agent.config['llm_config']['config_list'][0]['model'] = selected_model
                agent.config['llm_config']['max_tokens'] = provider_models[selected_model]
                st.session_state[f'show_edit_{edit_index}'] = False
                if 'edit_agent_index' in st.session_state:
                    del st.session_state['edit_agent_index']
                st.session_state.agents[edit_index] = agent
                st.experimental_rerun()
        # Add a debug print to check the agent's description
        print(f"Agent {agent.name} description: {agent.description}")
def download_agent_file(expert_name):
    # Format the expert_name
    formatted_expert_name = re.sub(r'[^a-zA-Z0-9\s]', '', expert_name)  # Remove non-alphanumeric characters
    formatted_expert_name = formatted_expert_name.lower().replace(' ', '_')  # Convert to lowercase and replace spaces with underscores
    # Get the full path to the agent JSON file
    agents_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "agents"))
    json_file = os.path.join(agents_dir, f"{formatted_expert_name}.json")
    # Check if the file exists
    if os.path.exists(json_file):
        # Read the file content
        with open(json_file, "r") as f:
            file_content = f.read()
        # Encode the file content as base64
        b64_content = base64.b64encode(file_content.encode()).decode()
        # Create a download link
        href = f'<a href="data:application/json;base64,{b64_content}" download="{formatted_expert_name}.json">Download {formatted_expert_name}.json</a>'
        st.markdown(href, unsafe_allow_html=True)
    else:
        st.error(f"File not found: {json_file}")
def extract_content(response):
    if isinstance(response, dict) and 'choices' in response:
        # Handle response from providers like Groq
        return response['choices'][0]['message']['content']
    elif hasattr(response, 'content') and isinstance(response.content, list):
        # Handle Anthropic-style response
        return response.content[0].text
    elif isinstance(response, requests.models.Response):
        # Handle response from providers using requests.Response
        try:
            json_response = response.json()
            if 'choices' in json_response and json_response['choices']:
                return json_response['choices'][0]['message']['content']
        except json.JSONDecodeError:
            logger.error("Failed to decode JSON from response")
    logger.error(f"Unexpected response format: {type(response)}")
    return None


def process_agent_interaction(agent_index):
    agent = st.session_state.agents[agent_index]
    logger.debug(f"Processing interaction for agent: {agent.name}")
    logger.debug(f"Agent tools: {agent.tools}")
    if isinstance(agent, AgentBaseModel):
        agent_name = agent.name
        description = agent.description
        agent_tools = agent.tools
    else:
        # Fallback for dictionary-like structure
        agent_name = agent.get('config', {}).get('name', '')
        description = agent.get('description', '')
        agent_tools = agent.get("tools", [])
    user_request = st.session_state.get('user_request', '')
    user_input = st.session_state.get('user_input', '')
    rephrased_request = st.session_state.get('rephrased_request', '')
    reference_url = st.session_state.get('reference_url', '')
    # Execute associated tools for the agent
    tool_results = {}
    for tool in agent_tools:
        try:
            logger.debug(f"Executing tool: {tool.name}")
            if tool.name in st.session_state.tool_functions:
                tool_function = st.session_state.tool_functions[tool.name]
                if tool.name == 'fetch_web_content' and reference_url:
                    tool_result = tool_function(reference_url)
                elif tool.name == 'generate_code':
                    tool_result = tool_function(user_input or user_request or rephrased_request)
                else:
                    tool_result = tool_function(user_input or user_request or rephrased_request)
                logger.debug(f"Tool result: {tool_result[:500]}...")  # Log first 500 characters of result
            else:
                logger.error(f"Tool function not found for {tool.name}")
                tool_result = f"Error: Tool function not found for {tool.name}"
            tool_results[tool.name] = tool_result
            logger.debug(f"Tool result for {tool.name}: {tool_result[:500]}...")
            # Update the tool_result_string in the session state
            st.session_state.tool_result_string = tool_result[:1000] + "..."  # Limit to first 1000 characters
            # Update the discussion and whiteboard immediately
            update_discussion_and_whiteboard(tool.name, st.session_state.tool_result_string, "")
        except Exception as e:
            error_message = f"Error executing tool {tool.name}: {str(e)}"
            logger.error(error_message, exc_info=True)
            tool_results[tool.name] = error_message
            st.session_state.tool_result_string = error_message
            update_discussion_and_whiteboard(tool.name, error_message, "")
    request = construct_request(agent, agent_name, description, user_request, user_input, rephrased_request, reference_url, tool_results)
    # Use the agent-specific provider and model
    if isinstance(agent, AgentBaseModel):
        provider = agent.provider or st.session_state.get('provider', LLM_PROVIDER)
        model = agent.model or st.session_state.get('model', 'default')
    else:
        # Fallback for dictionary-like structure
        provider = agent.get('provider') or st.session_state.get('provider', LLM_PROVIDER)
        model = agent.get('model') or st.session_state.get('model', 'default')
    logger.debug(f"Using provider: {provider}, model: {model}")
    api_key = get_api_key(provider)
    llm_provider = get_llm_provider(api_key=api_key, provider=provider)
    llm_request_data = {
        "model": model,
        "temperature": st.session_state.temperature,
        "max_tokens": MODEL_TOKEN_LIMITS.get(model, 4096),
        "top_p": 1,
        "stop": "TERMINATE",
        "messages": [
            {
                "role": "user",
                "content": request
            }
        ]
    }
    logger.debug(f"Sending request to {provider} using model {model}")
    response = llm_provider.send_request(llm_request_data)
    content = extract_content(response)
    if content:
        update_discussion_and_whiteboard(agent_name, content, user_input)
        st.session_state['form_agent_name'] = agent_name
        st.session_state['form_agent_description'] = description
        st.session_state['selected_agent_index'] = agent_index
    else:
        error_message = "Error: Failed to extract content from response"
        log_error(error_message)
        logger.error(error_message)
    # Force a rerun to update the UI and trigger the moderator if necessary
    st.experimental_rerun()
def regenerate_agent_description(agent):
    agent_name = agent.name if hasattr(agent, 'name') else "Unknown Agent"
    agent_description = agent.description if hasattr(agent, 'description') else ""
    print(f"agent_name: {agent_name}")
    print(f"agent_description: {agent_description}")
    user_request = st.session_state.get('user_request', '')
    print(f"user_request: {user_request}")
    discussion_history = st.session_state.get('discussion_history', '')
    prompt = f"""
    You are an AI assistant helping to improve an agent's description. The agent's current details are:
    Name: {agent_name}
    Description: {agent_description}

    The current user request is: {user_request}
    The discussion history so far is: {discussion_history}

    Please generate a revised description for this agent that defines it in the best manner possible to address
    the current user request, taking into account the discussion thus far.

    Return only the revised description, written in the third person, without any additional commentary or narrative.
    It is imperative that you return ONLY the text of the new description written in the third person. No preamble,
    no narrative, no superfluous commentary whatsoever. Just the description, written in the third person, unlabeled,
    please. You will have been successful if your reply is thorough, comprehensive, concise, written in the third
    person, and adherent to all of these instructions.
    """
    print(f"regenerate_agent_description called with agent_name: {agent_name}")
    print(f"regenerate_agent_description called with prompt: {prompt}")
    api_key = get_api_key()
    llm_provider = get_llm_provider(api_key=api_key)
    llm_request_data = {
        "model": st.session_state.model,
        "temperature": st.session_state.temperature,
        "max_tokens": st.session_state.max_tokens,
        "top_p": 1,
        "stop": "TERMINATE",
        "messages": [
            {
                "role": "user",
                "content": prompt
            }
        ]
    }
    response = llm_provider.send_request(llm_request_data)
    if response.status_code == 200:
        response_data = llm_provider.process_response(response)
        if "choices" in response_data and response_data["choices"]:
            content = response_data["choices"][0]["message"]["content"]
            return content.strip()
    return None


def retrieve_agent_information(agent_index):
    agent = st.session_state.agents[agent_index]
    agent_name = agent["config"]["name"]
    description = agent["description"]
    return agent_name, description


def send_request(agent_name, request):
    api_key = get_api_key()
    llm_provider = get_llm_provider(api_key=api_key)
    response = llm_provider.send_request(request)
    return response
# prompts.py

def create_project_manager_prompt(rephrased_text):
    return f"""
    As a Project Manager, create a project plan for: {rephrased_text}

    Include:

    Project Outline:
    Comprehensive overview
    Logical structure

    Key Deliverables:
    List in order of completion

    Expert Team:
    Roles based on project needs
    Minimum necessary team size
    For each expert:
    a) Role title
    b) Key responsibilities
    c) Essential expertise

    Format:
    Project Outline:
    [Your detailed outline]

    Key Deliverables:
    [Numbered list]

    Team of Experts:
    [Description of the ideal team of experts]
    """


def get_agent_prompt(rephrased_request):
    return f"""
    Based on the following user request, please create a detailed and comprehensive description of an AI agent
    that can effectively assist with the request:

    User Request: "{rephrased_request}"

    Provide a clear and concise description of the agent's role, capabilities, and expertise. The description
    should be efficiently written in a concise, professional and engaging manner, highlighting the agent's
    ability to understand and respond to the request efficiently.

    Agent Description:
    """


def get_agents_prompt():
    return """
    You are an expert system designed to format the JSON describing each member of the team of AI agents listed
    in the 'Team of Experts' section below. Follow these guidelines:
    1. Agent Roles: Clearly transcribe the titles of each agent listed.
    2. Expertise Description: Provide a brief but thorough description of each agent's expertise based on the
       provided information.
    3. Format: Return the results in JSON format with values labeled as expert_name, description, role, goal,
       and backstory. 'expert_name' should be the agent's title, not their given or proper name.

    Return ONLY the JSON array, with no other text:
    [
        {
            "expert_name": "agent_title",
            "description": "agent_description",
            "role": "agent_role",
            "goal": "agent_goal",
            "backstory": "agent_backstory"
        }
    ]
    """


# Contributed by ScruffyNerf
def get_generate_tool_prompt(rephrased_tool_request):
    return f'''
    Based on the rephrased tool request below, please do the following:

    1. Do step-by-step reasoning and think to better understand the request.
    2. Code the best Autogen Studio Python tool as per the request as a [tool_name].py file.
    3. Return only the tool file, no commentary, intro, or other extra text. If there ARE any non-code lines,
       please pre-pend them with a '#' symbol to comment them out.
    4. A proper tool will have these parts:
       a. Imports (import libraries needed for the tool)
       b. Function definition AND docstrings (this helps the LLM understand what the function does and how to use it)
       c. Function body (the actual code that implements the function)
       d. (optional) Example usage - ALWAYS commented out

    Here is an example of a well formatted tool:

    # Tool filename: save_file_to_disk.py
    # Import necessary module(s)
    import os

    def save_file_to_disk(contents, file_name):
        # docstrings
        """
        Saves the given contents to a file with the given file name.

        Parameters:
        contents (str): The string contents to save to the file.
        file_name (str): The name of the file, including its extension.

        Returns:
        str: A message indicating the success of the operation.
        """
        # Body of tool
        # Ensure the directory exists; create it if it doesn't
        directory = os.path.dirname(file_name)
        if directory and not os.path.exists(directory):
            os.makedirs(directory)
        # Write the contents to the file
        with open(file_name, 'w') as file:
            file.write(contents)
        return f"File {{file_name}} has been saved successfully."

    # Example usage:
    # contents_to_save = "Hello, world!"
    # file_name = "example.txt"
    # print(save_file_to_disk(contents_to_save, file_name))

    Rephrased tool request: "{rephrased_tool_request}"
    '''


def get_moderator_prompt(discussion_history, goal, last_comment, last_speaker, team_members_str, current_deliverable, current_phase):
    return f"""
    This agent is our Moderator Bot. Its goal is to mediate the conversation between a team of AI agents in a manner
    that persuades them to act in the most expeditious and thorough manner to accomplish their goal. This will entail
    considering the user's stated goal, the conversation thus far, the descriptions of all the available agents/experts
    in the current team, the last speaker, and their remark. Based upon a holistic analysis of all the facts at hand,
    use logic and reasoning to decide which team member should speak next. Then draft a prompt directed at that agent
    that persuades them to act in the most expeditious and thorough manner toward helping this team of agents
    accomplish their goal.

    Their overall goal is: {goal}.
    The current deliverable they're working on is: {current_deliverable}
    The current implementation phase is: {current_phase}
    The last speaker was {last_speaker}, who said: {last_comment}

    Here is the current conversational discussion history: {discussion_history}

    And here are the team members and their descriptions: {team_members_str}

    IMPORTANT: Your response must start with "To [Agent Name]:", where [Agent Name] is one of the valid team members
    listed above. Do not address tools or non-existent team members.

    This agent's response should be JUST the requested prompt addressed to the next agent, and should not contain any
    introduction, narrative, or any other superfluous text whatsoever.

    If you believe the current phase of the deliverable has been satisfactorily completed, include the exact phrase
    "PHASE_COMPLETED" at the beginning of your response, followed by your usual prompt to the next agent focusing on
    the next phase or deliverable.

    Remember, we are now in the {current_phase} phase. The agents should focus on actually implementing, coding,
    testing, or deploying the solutions as appropriate for the current phase, not just planning.
    """


def get_rephrased_user_prompt(user_request):
    return f"""
    Act as a professional prompt engineer and refactor the following user request into an optimized prompt.
    This agent's goal is to rephrase the request with a focus on satisfying all of the following criteria
    without explicitly stating them:
    1. Clarity: Ensure the prompt is clear and unambiguous.
    2. Specific Instructions: Provide detailed steps or guidelines.
    3. Context: Include necessary background information.
    4. Structure: Organize the prompt logically.
    5. Language: Use concise and precise language.
    6. Examples: Offer examples to illustrate the desired output.
    7. Constraints: Define any limits or guidelines.
    8. Engagement: Make the prompt engaging and interesting.
    9. Feedback Mechanism: Suggest a way to improve or iterate on the response.

    Apply introspection and reasoning to reconsider your own prompt[s] to:
    Clarify ambiguities
    Break down complex tasks
    Provide essential context
    Structure logically
    Use precise, concise language
    Include relevant examples
    Specify constraints

    Do NOT reply with a direct response to these instructions OR the original user request. Instead, rephrase the
    user's request as a well-structured prompt, and return ONLY that rephrased prompt. Do not preface the rephrased
    prompt with any other text or superfluous narrative. Do not enclose the rephrased prompt in quotes. This agent
    will be successful only if it returns a well-formed rephrased prompt ready for submission as an LLM request.

    User request: "{user_request}"
    Rephrased:
    """
AutoGroq\agents\code_developer.py
# agents/code_developer.py

import datetime
import streamlit as st

from configs.config import LLM_PROVIDER
from models.agent_base_model import AgentBaseModel
from models.tool_base_model import ToolBaseModel
from tools.code_generator import code_generator_tool


class CodeDeveloperAgent(AgentBaseModel):
    def __init__(self, name, description, tools, config, role, goal, backstory, provider, model):
        current_timestamp = datetime.datetime.now().isoformat()
        super().__init__(name=name, description=description, tools=tools, config=config,
                         role=role, goal=goal, backstory=backstory)
        self.provider = provider
        self.model = model
        self.created_at = current_timestamp
        self.updated_at = current_timestamp
        self.user_id = "default"
        self.timestamp = current_timestamp

    @classmethod
    def create_default(cls):
        return cls(
            name="Code Developer",
            description="An agent specialized in generating code based on feature descriptions.",
            tools=[code_generator_tool],
            config={
                "llm_config": {
                    "config_list": [{"model": st.session_state.get('model', 'default'), "api_key": None}],
                    "temperature": st.session_state.get('temperature', 0.7)
                },
                "human_input_mode": "NEVER",
                "max_consecutive_auto_reply": 10
            },
            role="Code Developer",
            goal="To create efficient and effective code solutions based on given requirements.",
            backstory="I am an AI agent with extensive knowledge of various programming languages and software development best practices. My purpose is to assist in creating code that meets the specified requirements.",
            provider=st.session_state.get('provider', LLM_PROVIDER),
            model=st.session_state.get('model', 'default')
        )

    def to_dict(self):
        data = self.__dict__
        for key, value in data.items():
            if isinstance(value, ToolBaseModel):
                data[key] = value.to_dict()
        return data
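# Example usage (an illustrative sketch; assumes a running Streamlit session so that
# st.session_state holds 'provider', 'model', and 'temperature'):
# developer = CodeDeveloperAgent.create_default()
# print(developer.name)        # "Code Developer"
# print(developer.to_dict())   # Serializable form, with tools converted via ToolBaseModel.to_dict()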
AutoGroq\agents\code_tester.py
# agents/code_tester.py

import datetime
import streamlit as st

from configs.config import LLM_PROVIDER
from models.agent_base_model import AgentBaseModel
from models.tool_base_model import ToolBaseModel
from tools.code_test import code_test_tool


class CodeTesterAgent(AgentBaseModel):
    def __init__(self, name, description, tools, config, role, goal, backstory, provider, model):
        current_timestamp = datetime.datetime.now().isoformat()
        super().__init__(name=name, description=description, tools=tools, config=config,
                         role=role, goal=goal, backstory=backstory)
        self.provider = provider
        self.model = model
        self.created_at = current_timestamp
        self.updated_at = current_timestamp
        self.user_id = "default"
        self.timestamp = current_timestamp

    @classmethod
    def create_default(cls):
        return cls(
            name="Code Tester",
            description="An agent specialized in testing code and providing feedback on its functionality.",
            tools=[code_test_tool],
            config={
                "llm_config": {
                    "config_list": [{"model": st.session_state.get('model', 'default'), "api_key": None}],
                    "temperature": st.session_state.get('temperature', 0.7)
                },
                "human_input_mode": "NEVER",
                "max_consecutive_auto_reply": 10
            },
            role="Code Tester",
            goal="To thoroughly test code and provide comprehensive feedback to ensure its reliability and correctness.",
            backstory="I am an AI agent with expertise in software testing and quality assurance. My purpose is to rigorously test code and provide comprehensive feedback to ensure its reliability and correctness.",
            provider=st.session_state.get('provider', LLM_PROVIDER),
            model=st.session_state.get('model', 'default')
        )

    def to_dict(self):
        data = self.__dict__
        for key, value in data.items():
            if isinstance(value, ToolBaseModel):
                data[key] = value.to_dict()
        return data
AutoGroq\agents\web_content_retriever.py
# agents/web_content_retriever.py

import datetime
import streamlit as st

from configs.config import LLM_PROVIDER
from models.agent_base_model import AgentBaseModel
from models.tool_base_model import ToolBaseModel
from tools.fetch_web_content import fetch_web_content_tool


class WebContentRetrieverAgent(AgentBaseModel):
    def __init__(self, name, description, tools, config, role, goal, backstory, provider, model):
        current_timestamp = datetime.datetime.now().isoformat()
        super().__init__(name=name, description=description, tools=tools, config=config,
                         role=role, goal=goal, backstory=backstory)
        self.provider = provider
        self.model = model
        self.created_at = current_timestamp
        self.updated_at = current_timestamp
        self.user_id = "default"
        self.timestamp = current_timestamp
        self.reference_url = None
        self.web_content = None

    @classmethod
    def create_default(cls):
        return cls(
            name="Web Content Retriever",
            description="An agent specialized in retrieving and processing web content.",
            tools=[fetch_web_content_tool],
            config={
                "llm_config": {
                    "config_list": [{"model": st.session_state.get('model', 'default'), "api_key": None}],
                    "temperature": st.session_state.get('temperature', 0.7)
                },
                "human_input_mode": "NEVER",
                "max_consecutive_auto_reply": 10
            },
            role="Web Content Specialist",
            goal="To retrieve and analyze web content efficiently and accurately.",
            backstory="I am an AI agent designed to fetch and analyze web content, providing valuable insights and information from various online sources.",
            provider=st.session_state.get('provider', LLM_PROVIDER),
            model=st.session_state.get('model', 'default')
        )

    def to_dict(self):
        data = self.__dict__
        for key, value in data.items():
            if isinstance(value, ToolBaseModel):
                data[key] = value.to_dict()
        return data

    def retrieve_web_content(self, reference_url):
        """
        Retrieve web content from the given reference URL and store it in the agent's memory.

        Args:
            reference_url (str): The URL to fetch content from.

        Returns:
            dict: A dictionary containing the status, URL, and content (or error message).
        """
        self.reference_url = reference_url
        fetch_tool = next((tool for tool in self.tools if tool.name == "fetch_web_content"), None)
        if fetch_tool is None:
            return {"status": "error", "message": "fetch_web_content tool not found"}
        result = fetch_tool.function(reference_url)
        if result["status"] == "success":
            self.web_content = result["content"]
        return result

    def get_web_content(self):
        """
        Get the stored web content.

        Returns:
            str: The stored web content, or None if not available.
        """
        return self.web_content

    def get_reference_url(self):
        """
        Get the stored reference URL.

        Returns:
            str: The stored reference URL, or None if not available.
        """
        return self.reference_url
AutoGroq\cli\create_agent.py
import argparse
import datetime
import json
import os
import sys

import streamlit as st

# Add the root directory to the Python module search path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from configs.config import MODEL_TOKEN_LIMITS
from prompts import get_agent_prompt
from utils.api_utils import get_llm_provider
from utils.agent_utils import create_agent_data
from utils.auth_utils import get_api_key
from utils.file_utils import sanitize_text


def create_agent(request, provider, model, temperature, max_tokens, output_file):
    # Get the API key and provider
    api_key = get_api_key()
    llm_provider = get_llm_provider(api_key=api_key)

    # Generate the prompt using get_agent_prompt
    prompt = get_agent_prompt(request)

    # Adjust the token limit based on the selected model
    max_tokens = MODEL_TOKEN_LIMITS.get(model, 4096)

    # Make the request to the LLM API
    llm_request_data = {
        "model": model,
        "temperature": temperature,
        "max_tokens": max_tokens,
        "messages": [{"role": "user", "content": prompt}],
    }
    response = llm_provider.send_request(llm_request_data)
    if response.status_code != 200:
        print(f"Error: Received status code {response.status_code}")
        print(response.text)
        return

    response_data = response.json()
    if 'choices' not in response_data or len(response_data['choices']) == 0:
        print("Error: 'choices' not found in the response data or it's empty")
        print(json.dumps(response_data, indent=2))
        return

    agent_description = response_data['choices'][0]['message']['content'].strip()
    agent_data = {
        "type": "assistant",
        "config": {
            "name": request,
            "llm_config": {
                "config_list": [
                    {
                        "user_id": "default",
                        "timestamp": datetime.datetime.now().isoformat(),
                        "model": model,
                        "base_url": None,
                        "api_type": None,
                        "api_version": None,
                        "description": "OpenAI model configuration"
                    }
                ],
                "temperature": temperature,
                "cache_seed": None,
                "timeout": None,
                "max_tokens": max_tokens,
                "extra_body": None
            },
            "human_input_mode": "NEVER",
            "max_consecutive_auto_reply": 8,
            "system_message": f"You are a helpful assistant that can act as {sanitize_text(agent_description)} who {request}.",
            "is_termination_msg": None,
            "code_execution_config": None,
            "default_auto_reply": "",
            "description": agent_description  # Ensure the description key is present
        },
        "timestamp": datetime.datetime.now().isoformat(),
        "user_id": "default",
        "tools": []
    }

    # Debug print to verify agent_data
    print("Agent Data:", json.dumps(agent_data, indent=2))

    # Create the appropriate agent data
    autogen_agent_data, crewai_agent_data = create_agent_data(agent_data)

    # Save the agent data to the output file
    with open(output_file, "w") as f:
        json.dump(autogen_agent_data, f, indent=2)
    print(f"Agent created successfully. Output saved to: {output_file}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Create an agent based on a user request.")
    parser.add_argument("--request", required=True, help="The user request for creating the agent.")
    parser.add_argument("--model", default="mixtral-8x7b-32768", help="The model to use for the agent.")
    parser.add_argument("--temperature", type=float, default=0.5, help="The temperature value for the agent.")
    parser.add_argument("--max_tokens", type=int, default=32768, help="The maximum number of tokens for the agent.")
    parser.add_argument("--agent_type", default="autogen", choices=["autogen", "crewai"], help="The type of agent to create.")
    parser.add_argument("--output", default="agent.json", help="The output file path for the agent JSON.")
    parser.add_argument("--provider", default="groq", help="The LLM provider to use (e.g., 'openai', 'anthropic').")
    args = parser.parse_args()
    create_agent(args.request, args.provider, args.model, args.temperature, args.max_tokens, args.output)
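# Example CLI invocation (illustrative; assumes the matching provider API key, e.g. GROQ_API_KEY,
# is set in the environment):
# python cli/create_agent.py --request "Summarize legal contracts" --provider groq \
#     --model mixtral-8x7b-32768 --temperature 0.5 --output contract_summarizer.json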
AutoGroq\cli\rephrase_prompt.py
import argparse
import os
import sys

# Add the root directory to the Python module search path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from configs.config import MODEL_TOKEN_LIMITS, LLM_PROVIDER
from utils.api_utils import get_llm_provider
from utils.auth_utils import get_api_key
from utils.ui_utils import rephrase_prompt


def rephrase_prompt_cli(prompt, provider, model, temperature, max_tokens):
    # Get the API key
    api_key = get_api_key()

    # Use the provider specified in the CLI arguments
    llm_provider = get_llm_provider(api_key=api_key, provider=provider)

    # Override the model and max_tokens if specified in the command-line arguments
    model_to_use = model if model else provider
    max_tokens_to_use = MODEL_TOKEN_LIMITS.get(model_to_use, max_tokens)

    rephrased_prompt = rephrase_prompt(prompt, model_to_use, max_tokens_to_use, llm_provider=llm_provider, provider=provider)
    if rephrased_prompt:
        print(f"Rephrased Prompt: {rephrased_prompt}")
    else:
        print("Error: Failed to rephrase the prompt.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Rephrase a user prompt.")
    parser.add_argument("--prompt", required=True, help="The user prompt to rephrase.")
    parser.add_argument("--model", default=None, help="The model to use for rephrasing.")
    parser.add_argument("--temperature", type=float, default=0.5, help="The temperature value for rephrasing.")
    parser.add_argument("--max_tokens", type=int, default=32768, help="The maximum number of tokens for rephrasing.")
    parser.add_argument("--provider", default=None, help="The LLM provider to use (e.g., 'openai', 'anthropic').")
    args = parser.parse_args()
    rephrase_prompt_cli(args.prompt, args.provider, args.model, args.temperature, args.max_tokens)
AutoGroq\configs\config.py
# configs/config.py

import os
from typing import Dict

# Get user home directory
home_dir = os.path.expanduser("~")
default_db_path = f'{home_dir}/.autogenstudio/database.sqlite'

# Debug
DEFAULT_DEBUG = False

# Default configurations
DEFAULT_LLM_PROVIDER = "anthropic"  # Supported values: "anthropic", "groq", "openai", "ollama", "lmstudio", "fireworks"
DEFAULT_GROQ_API_URL = "https://api.groq.com/openai/v1/chat/completions"
DEFAULT_LMSTUDIO_API_URL = "http://localhost:1234/v1/chat/completions"
DEFAULT_OLLAMA_API_URL = "http://127.0.0.1:11434/api/generate"
DEFAULT_OPENAI_API_URL = "https://api.openai.com/v1/chat/completions"
DEFAULT_ANTHROPIC_API_URL = "https://api.anthropic.com/v1/messages"

# Try to import user-specific configurations from config_local.py
try:
    from config_local import *
except ImportError:
    pass

# Set the configurations using the user-specific values if available, otherwise use the defaults
DEBUG = locals().get('DEBUG', DEFAULT_DEBUG)
LLM_PROVIDER = locals().get('LLM_PROVIDER', DEFAULT_LLM_PROVIDER)
GROQ_API_URL = locals().get('GROQ_API_URL', DEFAULT_GROQ_API_URL)
LMSTUDIO_API_URL = locals().get('LMSTUDIO_API_URL', DEFAULT_LMSTUDIO_API_URL)
OLLAMA_API_URL = locals().get('OLLAMA_API_URL', DEFAULT_OLLAMA_API_URL)
OPENAI_API_URL = locals().get('OPENAI_API_URL', DEFAULT_OPENAI_API_URL)
ANTHROPIC_API_URL = locals().get('ANTHROPIC_API_URL', DEFAULT_ANTHROPIC_API_URL)

API_KEY_NAMES = {
"groq": "GROQ_API_KEY",
"lmstudio": None,
"ollama": None,
"openai": "OPENAI_API_KEY",
"anthropic": "ANTHROPIC_API_KEY",
}
# Retry settings
MAX_RETRIES = 3
RETRY_DELAY = 2  # in seconds
RETRY_TOKEN_LIMIT = 5000

# Model configurations
if LLM_PROVIDER == "groq":
    API_URL = GROQ_API_URL
    MODEL_TOKEN_LIMITS = {
        'mixtral-8x7b-32768': 32768,
        'llama3-70b-8192': 8192,
        'llama3-8b-8192': 8192,
        'gemma-7b-it': 8192,
    }
elif LLM_PROVIDER == "lmstudio":
    API_URL = LMSTUDIO_API_URL
    MODEL_TOKEN_LIMITS = {
        'instructlab/granite-7b-lab-GGUF': 2048,
        'MaziyarPanahi/Codestral-22B-v0.1-GGUF': 32768,
    }
elif LLM_PROVIDER == "openai":
    API_URL = OPENAI_API_URL
    MODEL_TOKEN_LIMITS = {
        'gpt-4o': 4096,
    }
elif LLM_PROVIDER == "ollama":
    API_URL = OLLAMA_API_URL
    MODEL_TOKEN_LIMITS = {
        'llama3': 8192,
    }
elif LLM_PROVIDER == "anthropic":
    API_URL = ANTHROPIC_API_URL
    MODEL_TOKEN_LIMITS = {
        "claude-3-5-sonnet-20240620": 4096,
        "claude-3-opus-20240229": 4096,
        "claude-3-sonnet-20240229": 4096,
        "claude-3-haiku-20240307": 4096,
        "claude-2.1": 100000,
        "claude-2.0": 100000,
        "claude-instant-1.2": 100000,
    }
else:
    API_URL = None
    MODEL_TOKEN_LIMITS = {}

# Database path
FRAMEWORK_DB_PATH = os.environ.get('FRAMEWORK_DB_PATH', default_db_path)

MODEL_CHOICES = {
"anthropic": {
"claude-3-5-sonnet-20240620": 4096,
"claude-3-opus-20240229": 4096,
"claude-3-sonnet-20240229": 4096,
"claude-3-haiku-20240307": 4096,
"claude-2.1": 100000,
"claude-2.0": 100000,
"claude-instant-1.2": 100000,
},
"groq": {
"mixtral-8x7b-32768": 32768,
"llama3-70b-8192": 8192,
"llama3-8b-8192": 8192,
"gemma-7b-it": 8192,
},
"openai": {
"gpt-4o": 4096,
"gpt-4": 8192,
"gpt-3.5-turbo": 4096,
"dall-e-3": 4096,
},
"fireworks": {
"fireworks": 4096,
},
"ollama": {
"llama3": 8192,
},
"lmstudio": {
"instructlab/granite-7b-lab-GGUF": 2048,
"MaziyarPanahi/Codestral-22B-v0.1-GGUF": 32768,
},
}
SUPPORTED_PROVIDERS = ["anthropic", "fireworks", "groq", "lmstudio", "ollama", "openai"]

BUILT_IN_AGENTS = ["Web Content Retriever", "Code Developer", "Code Tester"]

AVAILABLE_MODELS: Dict[str, Dict[str, int]] = {}


def update_available_models(provider: str, models: Dict[str, int]):
    """
    Update the available models for a given provider.

    :param provider: The name of the provider (e.g., 'groq', 'openai')
    :param models: A dictionary of model names and their token limits
    """
    global AVAILABLE_MODELS
    AVAILABLE_MODELS[provider] = models
AutoGroq\tools\code_generator.py

# tools/code_generator.py

import inspect
import json
import logging
import streamlit as st

from models.tool_base_model import ToolBaseModel
from utils.api_utils import get_api_key, get_llm_provider

logger = logging.getLogger(__name__)


def generate_code(request: str, language: str = "Python") -> str:
    logger.debug(f"Generating code for request: {request}")
    logger.debug(f"Language: {language}")
    if not request.strip():
        return "Error: No specific code generation request provided."
    prompt = f"""
    You are an advanced AI language model with expertise in software development. Your task is to generate the best
    possible software solution for the following request:

    **Request:** {request}
    **Language:** {language}

    Please ensure that the code follows best practices for {language}, is optimized for performance and
    maintainability, and includes comprehensive comments explaining each part of the code. Additionally, provide any
    necessary context or explanations to help understand the implementation. The solution should be robust, scalable,
    and adhere to industry standards. If there are multiple ways to solve the problem, choose the most efficient and
    elegant approach. If any libraries or frameworks are beneficial, include their usage with appropriate explanations.

    Begin your response with a brief overview of the approach you are taking, and then provide the complete code.
    Example overview: "To solve the problem of {request}, we will implement a {{specific algorithm/pattern}} using
    {{specific features/libraries of the language}}. This approach ensures {{benefits of the approach}}."

    Here is the code:
    """
    api_key = get_api_key()
    llm_provider = get_llm_provider(api_key=api_key)
    llm_request_data = {
        "model": st.session_state.get('model', 'default'),
        "temperature": st.session_state.get('temperature', 0.7),
        "max_tokens": st.session_state.get('max_tokens', 2000),
        "top_p": 1,
        "frequency_penalty": 0,
        "presence_penalty": 0,
        "messages": [
            {
                "role": "system",
                "content": "You are an expert code generator."
            },
            {
                "role": "user",
                "content": prompt
            }
        ]
    }
    try:
        response = llm_provider.send_request(llm_request_data)
        logger.debug(f"LLM response status code: {response.status_code}")
        logger.debug(f"LLM response content: {response.text[:500]}...")  # Log first 500 characters of response
        if response.status_code == 200:
            response_data = llm_provider.process_response(response)
            if "choices" in response_data and response_data["choices"]:
                generated_code = response_data["choices"][0]["message"]["content"]
                return generated_code.strip()
            else:
                return "Error: Unexpected response format from the language model."
        else:
            return f"Error: Received status code {response.status_code} from the language model API."
    except Exception as e:
        logger.error(f"Error generating code: {str(e)}", exc_info=True)
        return f"Error generating code: {str(e)}"


code_generator_tool = ToolBaseModel(
    name="generate_code",
    description="Generates code for a specified feature in a given programming language.",
    title="Code Generator",
    file_name="code_generator.py",
    content=inspect.getsource(generate_code),
    function=generate_code,
)


def get_tool():
    return code_generator_tool
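# Example usage (an illustrative sketch; assumes an active Streamlit session and a configured provider API key):
# code = generate_code("a function that reverses a linked list", language="Python")
# print(code)
# The module-level get_tool() accessor returns the ToolBaseModel wrapper, which is how
# AutoGroq's tool registry discovers this tool.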
AutoGroq\tools\code_test.py
# tools/code_test.py

import inspect
import subprocess
import tempfile

from models.tool_base_model import ToolBaseModel


def test_code(language: str, code: str, test_cases: str) -> str:
    """
    Tests the given code with provided test cases.

    Args:
        language (str): The programming language of the code (e.g., "Python", "JavaScript").
        code (str): The code to be tested.
        test_cases (str): A string containing test cases, each on a new line.

    Returns:
        str: The test results as a string.
    """
    if language.lower() != "python":
        return f"Testing for {language} is not supported yet."

    with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as temp_file:
        temp_file.write(code)
        temp_file.write("\n\n# Test cases\n")
        temp_file.write(test_cases)
        temp_file_name = temp_file.name

    try:
        result = subprocess.run(['python', temp_file_name], capture_output=True, text=True, timeout=10)
        if result.returncode == 0:
            return f"Tests passed successfully.\nOutput:\n{result.stdout}"
        else:
            return f"Tests failed.\nError:\n{result.stderr}"
    except subprocess.TimeoutExpired:
        return "Test execution timed out."
    except Exception as e:
        return f"An error occurred during testing: {str(e)}"


code_test_tool = ToolBaseModel(
    name="test_code",
    description="Tests the given code with provided test cases.",
    title="Code Tester",
    file_name="code_test.py",
    content=inspect.getsource(test_code),
    function=test_code,
)


def get_tool():
    return code_test_tool
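# Example usage (an illustrative sketch; the snippet runs in a subprocess, so a local
# `python` executable is assumed to be on PATH):
# sample_code = "def add(a, b):\n    return a + b"
# sample_tests = "assert add(2, 3) == 5\nassert add(-1, 1) == 0\nprint('all assertions passed')"
# print(test_code("Python", sample_code, sample_tests))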
AutoGroq\tools\fetch_web_content.py
# tools/fetch_web_content.py

import inspect
import json
import logging
import requests

from bs4 import BeautifulSoup
from models.tool_base_model import ToolBaseModel
from urllib.parse import urlparse, urlunparse


def fetch_web_content(url: str) -> dict:
    """
    Fetches the text content from a website.

    Args:
        url (str): The URL of the website.

    Returns:
        dict: A dictionary containing the status, URL, and content (or error message).
    """
    try:
        cleaned_url = clean_url(url)
        logging.info(f"Fetching content from cleaned URL: {cleaned_url}")
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        response = requests.get(cleaned_url, headers=headers, timeout=10)
        response.raise_for_status()
        logging.info(f"Response status code: {response.status_code}")
        logging.info(f"Response headers: {response.headers}")
        soup = BeautifulSoup(response.text, "html.parser")
        logging.info(f"Parsed HTML structure: {soup.prettify()[:500]}...")  # Log first 500 characters of prettified HTML

        # Try to get content from article tags first
        article_content = soup.find('article')
        if article_content:
            content = article_content.get_text(strip=True)
        else:
            # If no article tag, fall back to body content
            body_content = soup.body
            if body_content:
                content = body_content.get_text(strip=True)
            else:
                raise ValueError("No content found in the webpage")

        logging.info(f"Extracted text content (first 500 chars): {content[:500]}...")
        result = {
            "status": "success",
            "url": cleaned_url,
            "content": content
        }
        print(f"DEBUG: fetch_web_content result: {str(result)[:500]}...")  # Debug print
        return result
    except requests.RequestException as e:
        error_message = f"Error fetching content from {cleaned_url}: {str(e)}"
        logging.error(error_message)
        return {
            "status": "error",
            "url": cleaned_url,
            "message": error_message
        }
    except Exception as e:
        error_message = f"Unexpected error while fetching content from {cleaned_url}: {str(e)}"
        logging.error(error_message)
        return {
            "status": "error",
            "url": cleaned_url,
            "message": error_message
        }


# Create the ToolBaseModel instance
fetch_web_content_tool = ToolBaseModel(
    name="fetch_web_content",
    description="Fetches the text content from a website.",
    title="Fetch Web Content",
    file_name="fetch_web_content.py",
    content=inspect.getsource(fetch_web_content),
    function=fetch_web_content,
)


# Function to get the tool
def get_tool():
    return fetch_web_content_tool


def clean_url(url: str) -> str:
    """
    Clean and validate the URL.

    Args:
        url (str): The URL to clean.

    Returns:
        str: The cleaned URL.
    """
    url = url.strip().strip("'\"")
    if not url.startswith(('http://', 'https://')):
        url = 'https://' + url
    parsed = urlparse(url)
    return urlunparse(parsed)
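# Example usage (an illustrative sketch; requires network access):
# result = fetch_web_content("example.com")  # clean_url() will prepend https:// automatically
# if result["status"] == "success":
#     print(result["content"][:300])
# else:
#     print(result["message"])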
AutoGroq\utils\agent_utils.py
# utils/agent_utils.py

import datetime
import streamlit as st

from configs.config import LLM_PROVIDER
from utils.text_utils import normalize_config


def create_agent_data(agent):
    expert_name = agent['name']
    description = agent.get('description', '')
    current_timestamp = datetime.datetime.now().isoformat()
    provider = agent.get('config', {}).get('provider', st.session_state.get('provider', LLM_PROVIDER))

    # Use normalize_config to get the standardized config
    normalized_config = normalize_config(agent, expert_name)

    autogen_agent_data = {
        "name": normalized_config['name'],
        "description": description,
        "config": normalized_config,
        "tools": agent.get('tools', []),
        "role": agent.get('role', normalized_config['name']),
        "goal": agent.get('goal', f"Assist with tasks related to {description}"),
        "backstory": agent.get('backstory', f"As an AI assistant, I specialize in {description}"),
        "provider": provider,
        "model": st.session_state.get('model', 'default')
    }

    crewai_agent_data = {
        "name": normalized_config['name'],
        "description": description,
        "verbose": True,
        "allow_delegation": True
    }

    return autogen_agent_data, crewai_agent_data
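# Example usage (an illustrative sketch; the input dict mirrors the structure built in
# cli/create_agent.py, and normalize_config is assumed to fill in any missing config keys):
# agent = {"name": "Researcher", "description": "Finds and summarizes sources.", "config": {}}
# autogen_data, crewai_data = create_agent_data(agent)
# print(autogen_data["role"], "|", crewai_data["allow_delegation"])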
AutoGroq\utils\api_utils.py
# utils/api_utils.py

import importlib
import os
import time

import requests
import streamlit as st

from configs.config import LLM_PROVIDER, RETRY_DELAY, RETRY_TOKEN_LIMIT


def display_api_key_input(provider=None):
    if provider is None:
        provider = st.session_state.get('provider', LLM_PROVIDER)
    api_key_env_var = f"{provider.upper()}_API_KEY"
    api_key = os.environ.get(api_key_env_var)
    if api_key is None:
        st.session_state.warning_placeholder.warning(f"{provider.upper()} API Key not found. Please enter your API key, or select a different provider.")
        api_key = st.text_input(f"Enter your {provider.upper()} API Key:", type="password", key=f"api_key_input_{provider}")
        if api_key:
            st.session_state[api_key_env_var] = api_key
            os.environ[api_key_env_var] = api_key
            # st.success(f"{provider.upper()} API Key entered successfully.")
            st.session_state.warning_placeholder.empty()
    return api_key


def fetch_available_models(provider=None):
    if provider is None:
        provider = st.session_state.get('provider', LLM_PROVIDER)
    api_key = get_api_key(provider)
    llm_provider = get_llm_provider(api_key=api_key, provider=provider)
    try:
        models = llm_provider.get_available_models()
        st.session_state.available_models = models
        return models
    except Exception as e:
        st.error(f"Failed to fetch available models: {str(e)}")
        return {}
def get_api_key(provider=None):
    if provider is None:
        provider = st.session_state.get('provider', LLM_PROVIDER)
    api_key_env_var = f"{provider.upper()}_API_KEY"
    api_key = os.environ.get(api_key_env_var)
    if api_key is None:
        api_key = st.session_state.get(api_key_env_var)
    return api_key


def get_llm_provider(api_key=None, api_url=None, provider=None):
    if provider is None:
        provider = st.session_state.get('provider', LLM_PROVIDER)
    provider_module = importlib.import_module(f"llm_providers.{provider}_provider")
    provider_class = getattr(provider_module, f"{provider.capitalize()}Provider")
    if api_url is None:
        api_url = st.session_state.get(f'{provider.upper()}_API_URL')
    return provider_class(api_url=api_url, api_key=api_key)


def make_api_request(url, data, headers, api_key):
    time.sleep(RETRY_DELAY)  # Throttle the request to ensure at least 2 seconds between calls
    try:
        if not api_key:
            llm = LLM_PROVIDER.upper()
            raise ValueError(f"{llm}_API_KEY not found. Please enter your API key.")
        headers["Authorization"] = f"Bearer {api_key}"
        response = requests.post(url, json=data, headers=headers)
        if response.status_code == 200:
            return response.json()
        elif response.status_code == 429:
            error_message = response.json().get("error", {}).get("message", "")
            st.error("Rate limit reached for the current model. If you click 'Update' again, we'll retry with a reduced token count. Or you can try selecting a different model.")
            st.error(f"Error details: {error_message}")
            return None
        else:
            print(f"Error: API request failed with status {response.status_code}, response: {response.text}")
            return None
    except requests.RequestException as e:
        print(f"Error: Request failed {e}")
        return None


def send_request_with_retry(url, data, headers, api_key):
    response = make_api_request(url, data, headers, api_key)
    if response is None:
        # Add a retry button
        if st.button("Retry with decreased token limit"):
            # Update the token limit in the request data
            data["max_tokens"] = RETRY_TOKEN_LIMIT
            # Retry the request with the decreased token limit
            print("Retrying the request with decreased token limit.")
            print(f"URL: {url}")
            print(f"Retry token limit: {RETRY_TOKEN_LIMIT}")
            response = make_api_request(url, data, headers, api_key)
            if response is not None:
                print(f"Retry successful. Response: {response}")
            else:
                print("Retry failed.")
    return response


def set_llm_provider_title():
    # "What's life without whimsy?" ~ Sheldon Cooper
    if LLM_PROVIDER == "groq":
        st.title("AutoGroq™")
    elif LLM_PROVIDER == "ollama":
        st.title("Auto̶G̶r̶o̶qOllama")
    elif LLM_PROVIDER == "lmstudio":
        st.title("Auto̶G̶r̶o̶qLM_Studio")
    elif LLM_PROVIDER == "openai":
        st.title("Auto̶G̶r̶o̶qChatGPT")
    elif LLM_PROVIDER == "anthropic":
        st.title("Auto̶G̶r̶o̶qClaude")
AutoGroq\utils\auth_utils.py
import os
import streamlit as st

from configs.config import LLM_PROVIDER
from utils.api_utils import display_api_key_input


def check_api_key(provider=None):
    # Ensure we have a warning placeholder
    if 'warning_placeholder' not in st.session_state:
        st.session_state.warning_placeholder = st.empty()
    # Check for the API key of the default provider on initial load
    if 'initial_api_check' not in st.session_state:
        st.session_state.initial_api_check = True
        default_provider = st.session_state.get('provider', LLM_PROVIDER)
        if not check_api_key(default_provider):
            display_api_key_input(default_provider)
    return True


def get_api_url():
    api_url_env_var = f"{LLM_PROVIDER.upper()}_API_URL"
    api_url = os.environ.get(api_url_env_var)
    if api_url is None:
        api_url = globals().get(api_url_env_var)
        if api_url is None:
            if api_url_env_var not in st.session_state:
                api_url = st.text_input(f"Enter the {LLM_PROVIDER.upper()} API URL:", type="password", key=f"{LLM_PROVIDER}_api_url_input")
                if api_url:
                    st.session_state[api_url_env_var] = api_url
                    st.success("API URL entered successfully.")
                else:
                    st.warning(f"Please enter the {LLM_PROVIDER.upper()} API URL to use the app.")
            else:
                api_url = st.session_state.get(api_url_env_var)
    return api_url
AutoGroq\utils\db_utils.py
# db_utils.pyimportdatetimeimportjsonimportsqlite3importstreamlitasstimporttracebackimportuuidfromconfigs.configimportFRAMEWORK_DB_PATHfromutils.text_utilsimportnormalize_configfromutils.workflow_utilsimportget_workflow_from_agentsdefexport_to_autogen():
db_path=FRAMEWORK_DB_PATHprint(f"Database path: {db_path}")
ifdb_path:
export_data(db_path)
else:
st.warning("Please provide a valid database path in config.py.")
def export_data(db_path):
    print(f"Exporting data to: {db_path}")
    if db_path:
        try:
            conn = sqlite3.connect(db_path)
            cursor = conn.cursor()
            print("Connected to the database successfully.")

            agents = st.session_state.agents
            print(f"Number of agents: {len(agents)}")

            for index, agent in enumerate(agents):
                try:
                    print(f"Processing agent {index + 1}: {agent.name}")
                    current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

                    # Normalize the config
                    normalized_config = normalize_config(agent.to_dict(), agent.name)

                    agent_data = (
                        None,                                 # id (AUTOINCREMENT)
                        current_time,                         # created_at
                        current_time,                         # updated_at
                        'guestuser@gmail.com',                # user_id
                        '0.0.1',                              # version
                        'assistant',                          # type
                        json.dumps(normalized_config),        # config (JSON)
                        normalized_config['system_message']   # task_instruction
                    )
                    print(f"Inserting agent data: {agent_data}")
                    cursor.execute("""
                        INSERT INTO agent (id, created_at, updated_at, user_id, version, type, config, task_instruction)
                        VALUES (?, ?, ?, ?, ?, ?, ?, ?)
                    """, agent_data)
                    print(f"Inserted agent: {agent.name}")
                except Exception as e:
                    print(f"Error processing agent {index + 1}: {str(e)}")
                    print(f"Agent data: {agent.__dict__}")
                    traceback.print_exc()

            # Handle skills/tools
            for tool in st.session_state.tool_models:
                try:
                    current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                    skill_data = (
                        None,                    # id (AUTOINCREMENT)
                        current_time,            # created_at
                        current_time,            # updated_at
                        'guestuser@gmail.com',   # user_id
                        '0.0.1',                 # version
                        tool.name,
                        tool.content,
                        tool.description,
                        json.dumps(tool.secrets) if hasattr(tool, 'secrets') else '{}',
                        json.dumps(tool.libraries) if hasattr(tool, 'libraries') else '[]'
                    )
                    cursor.execute("""
                        INSERT INTO skill (id, created_at, updated_at, user_id, version, name, content, description, secrets, libraries)
                        VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                    """, skill_data)
                    print(f"Inserted skill: {tool.name}")
                except Exception as e:
                    print(f"Error inserting skill {tool.name}: {str(e)}")
                    traceback.print_exc()

            # Handle the workflow
            try:
                workflow_data, _ = get_workflow_from_agents(agents)
                current_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                workflow_insert_data = (
                    None,                    # id (AUTOINCREMENT)
                    current_time,            # created_at
                    current_time,            # updated_at
                    'guestuser@gmail.com',   # user_id
                    '0.0.1',                 # version
                    workflow_data.get('name', 'AutoGroq Workflow'),
                    workflow_data.get('description', 'Workflow auto-generated by AutoGroq.'),
                    workflow_data.get('type', 'autonomous'),          # default to 'autonomous' if not specified
                    workflow_data.get('summary_method', 'last')[:4],  # VARCHAR(4)
                    json.dumps(workflow_data.get('sample_tasks', []))
                )
                cursor.execute("""
                    INSERT INTO workflow (id, created_at, updated_at, user_id, version, name, description, type, summary_method, sample_tasks)
                    VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
                """, workflow_insert_data)
                print("Inserted workflow data.")
            except Exception as e:
                print(f"Error inserting workflow: {str(e)}")
                traceback.print_exc()

            conn.commit()
            print("Changes committed to the database.")
            conn.close()
            print("Database connection closed.")
            st.success("Data exported to Autogen successfully!")
        except sqlite3.Error as e:
            st.error(f"Error exporting data to Autogen: {str(e)}")
            print(f"Error exporting data to Autogen: {str(e)}")
            traceback.print_exc()
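The INSERT statements above hard-code a specific column order for the agent, skill, and workflow tables. The constants below sketch the table shapes those statements assume, inferred only from the column lists and the VARCHAR comments in this file; they are not the authoritative Autogen Studio schema:

# Inferred table shapes only; check the real Autogen Studio schema before use.
AGENT_DDL_SKETCH = """
CREATE TABLE IF NOT EXISTS agent (
    id INTEGER PRIMARY KEY,   -- NULL on insert, so SQLite auto-assigns
    created_at TEXT, updated_at TEXT,
    user_id TEXT, version TEXT,
    type TEXT,                -- e.g. 'assistant'
    config TEXT,              -- JSON-encoded normalized config
    task_instruction TEXT
)"""

SKILL_DDL_SKETCH = """
CREATE TABLE IF NOT EXISTS skill (
    id INTEGER PRIMARY KEY,
    created_at TEXT, updated_at TEXT,
    user_id TEXT, version TEXT,
    name TEXT, content TEXT, description TEXT,
    secrets TEXT,             -- JSON object
    libraries TEXT            -- JSON array
)"""

WORKFLOW_DDL_SKETCH = """
CREATE TABLE IF NOT EXISTS workflow (
    id INTEGER PRIMARY KEY,
    created_at TEXT, updated_at TEXT,
    user_id TEXT, version TEXT,
    name TEXT, description TEXT,
    type TEXT,
    summary_method VARCHAR(4),  -- per the truncation comment above
    sample_tasks TEXT           -- JSON array
)"""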
def get_table_info(table_name):
    conn = sqlite3.connect(FRAMEWORK_DB_PATH)
    cursor = conn.cursor()
    cursor.execute(f"PRAGMA table_info({table_name})")
    columns = cursor.fetchall()
    conn.close()
    return columns


def insert_or_get_skill(cursor, tool):
    tool_name = tool.name if hasattr(tool, 'name') else tool.get('name', '')
    cursor.execute("SELECT id FROM skill WHERE name = ?", (tool_name,))
    result = cursor.fetchone()
    if result:
        return result[0]
    else:
        print(f"Inserting new skill: {tool}")
        skill_data = (
            None,  # id is INTEGER PRIMARY KEY; let SQLite auto-increment
            datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),  # created_at
            datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),  # updated_at
            'default',  # user_id
            '0.0.1',    # version
            str(tool_name),
            str(tool.content if hasattr(tool, 'content') else tool.get('content', '')),
            str(tool.description if hasattr(tool, 'description') else tool.get('description', '')),
            json.dumps(tool.secrets if hasattr(tool, 'secrets') else tool.get('secrets', {})),
            json.dumps(tool.libraries if hasattr(tool, 'libraries') else tool.get('libraries', []))
        )
        print(f"Skill data to be inserted: {skill_data}")
        try:
            cursor.execute("""
                INSERT INTO skill (id, created_at, updated_at, user_id, version, name, content, description, secrets, libraries)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            """, skill_data)
            return cursor.lastrowid
        except sqlite3.Error as e:
            print(f"SQLite error: {e}")
            print("Data types:")
            for i, item in enumerate(skill_data):
                print(f"  {i}: {type(item)}")
            raise
cursor.execute("SELECT id FROM model WHERE model = ?", (model_config['model'],))
result=cursor.fetchone()
ifresult:
returnresult[0]
else:
model_data= (
str(uuid.uuid4()), # iddatetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), # created_atdatetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), # updated_at'guestuser@gmail.com', # user_id'0.0.1', # versionmodel_config['model'],
model_config.get('api_key'),
model_config.get('base_url'),
model_config.get('api_type', '')[:6], # VARCHAR(6)model_config.get('api_version'),
model_config.get('description', '')
)
cursor.execute(""" INSERT INTO model (id, created_at, updated_at, user_id, version, model, api_key, base_url, api_type, api_version, description) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) """, model_data)
returncursor.lastrowiddefinsert_workflow(cursor, workflow_data):
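A hypothetical call, with placeholder values for every field:

# All values below are placeholders, not configuration from the app.
conn = sqlite3.connect(FRAMEWORK_DB_PATH)
cursor = conn.cursor()
model_id = insert_or_get_model(cursor, {
    "model": "example-model",
    "api_key": "sk-placeholder",
    "base_url": "https://api.example.com/v1",
    "api_type": "openai",  # truncated to 6 chars by the function
})
conn.commit()
conn.close()

Note the asymmetry in the return value: the lookup branch returns the stored id (a uuid4 string), while the insert branch returns cursor.lastrowid, which is SQLite's integer rowid rather than the uuid. Callers that compare or persist these ids should normalize one form to the other.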
def insert_workflow(cursor, workflow_data):
    workflow_insert_data = (
        None,  # id is INTEGER PRIMARY KEY; let SQLite auto-increment
        datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),  # created_at
        datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),  # updated_at
        'guestuser@gmail.com',  # user_id (matching existing entries)
        '0.0.1',                # version
        workflow_data.get('name', 'AutoGroq Workflow'),
        workflow_data.get('description', 'Workflow auto-generated by AutoGroq.'),
        workflow_data.get('type', 'groupchat')[:10],      # VARCHAR(10)
        workflow_data.get('summary_method', 'last')[:4],  # VARCHAR(4)
        json.dumps(workflow_data.get('sample_tasks', []))
    )
    print(f"Inserting workflow data: {workflow_insert_data}")
    try:
        cursor.execute("""
            INSERT INTO workflow (id, created_at, updated_at, user_id, version, name, description, type, summary_method, sample_tasks)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        """, workflow_insert_data)
        return cursor.lastrowid
    except sqlite3.Error as e:
        print(f"SQLite error: {e}")
        print("Data types:")
        for i, item in enumerate(workflow_insert_data):
            print(f"  {i}: {type(item)}")
        raise
def sql_to_db(sql: str, params: tuple = None):
    conn = None  # ensure the name is bound even if connect() itself raises
    try:
        conn = sqlite3.connect(FRAMEWORK_DB_PATH)
        cursor = conn.cursor()
        print("Connected to the database successfully.")
        if params:
            cursor.execute(sql, params)
        else:
            cursor.execute(sql)
        conn.commit()
        print("SQL executed successfully.")
    except sqlite3.Error as e:
        print(f"Error executing SQL: {str(e)}")
        print(f"SQL: {sql}")
        print(f"Params: {params}")
        raise
    finally:
        if conn:
            conn.close()
            print("Database connection closed.")
# FUTURE functions for exporting to the new Autogen Studio schema:
#
# def create_or_update_agent(agent: dict, db_path: str):
#     with sqlite3.connect(db_path) as conn:
#         cursor = conn.cursor()
#         cursor.execute("""
#             INSERT OR REPLACE INTO Agent (id, skills, created_at, updated_at, user_id, workflows, type, config, models)
#             VALUES (:id, :skills, :created_at, :updated_at, :user_id, :workflows, :type, :config, :models)
#         """, agent)
#         conn.commit()
#
# def create_or_update_skill(skill: dict, db_path: str):
#     with sqlite3.connect(db_path) as conn:
#         cursor = conn.cursor()
#         cursor.execute("""
#             INSERT OR REPLACE INTO Skill (id, created_at, updated_at, user_id, name, content, description, secrets, libraries)
#             VALUES (:id, :created_at, :updated_at, :user_id, :name, :content, :description, :secrets, :libraries)
#         """, skill)
#         conn.commit()
#
# def create_or_update_workflow(workflow: dict, db_path: str):
#     with sqlite3.connect(db_path) as conn:
#         cursor = conn.cursor()
#         cursor.execute("""
#             INSERT OR REPLACE INTO Workflow (id, agents, created_at, updated_at, user_id, name, description, type, summary_method)
#             VALUES (:id, :agents, :created_at, :updated_at, :user_id, :name, :description, :type, :summary_method)
#         """, workflow)
#         conn.commit()
#
# def get_agent_by_id(agent_id: int, db_path: str) -> Optional[dict]:
#     with sqlite3.connect(db_path) as conn:
#         cursor = conn.cursor()
#         cursor.execute("SELECT * FROM Agent WHERE id = ?", (agent_id,))
#         row = cursor.fetchone()
#         if row:
#             columns = [column[0] for column in cursor.description]
#             return dict(zip(columns, row))
#         return None
#
# def get_skill_by_id(skill_id: int, db_path: str) -> Optional[dict]:
#     with sqlite3.connect(db_path) as conn:
#         cursor = conn.cursor()
#         cursor.execute("SELECT * FROM Skill WHERE id = ?", (skill_id,))
#         row = cursor.fetchone()
#         if row:
#             columns = [column[0] for column in cursor.description]
#             return dict(zip(columns, row))
#         return None
#
# def get_workflow_by_id(workflow_id: int, db_path: str) -> Optional[dict]:
#     with sqlite3.connect(db_path) as conn:
#         cursor = conn.cursor()
#         cursor.execute("SELECT * FROM Workflow WHERE id = ?", (workflow_id,))
#         row = cursor.fetchone()
#         if row:
#             columns = [column[0] for column in cursor.description]
#             return dict(zip(columns, row))
#         return None
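Before migrating to the schema sketched in the comments above, get_table_info() can confirm what the live database actually contains; a quick illustrative check:

# PRAGMA table_info rows are (cid, name, type, notnull, dflt_value, pk);
# index 1 is the column name.
for table in ("agent", "skill", "workflow", "model"):
    print(table, [col[1] for col in get_table_info(table)])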
AutoGroq\utils\file_utils.py
# file_utils.py

import datetime
import io
import json
import zipfile

import streamlit as st

from utils.db_utils import normalize_config  # re-exported by db_utils from text_utils
from utils.text_utils import sanitize_text
from utils.workflow_utils import get_workflow_from_agents


def create_workflow_data(workflow):
    # Sanitize the workflow name (note: the sanitized name is computed but
    # currently unused; the workflow is returned unchanged)
    sanitized_workflow_name = sanitize_text(workflow["name"])
    sanitized_workflow_name = sanitized_workflow_name.lower().replace(' ', '_')
    return workflow
def create_zip_file(zip_buffer, file_data):
    with zipfile.ZipFile(zip_buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:
        for file_name, file_content in file_data.items():
            zip_file.writestr(file_name, file_content)
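create_zip_file() is a generic writer: it accepts any mapping of archive paths to string contents, so both export formats can share it. A quick sketch with made-up entries:

import io

buffer = io.BytesIO()
create_zip_file(buffer, {
    "agents/coder.json": '{"name": "coder"}',  # illustrative entries
    "workflow.json": '{"name": "demo"}',
})
buffer.seek(0)  # rewind before handing the buffer to a reader or download widget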
def regenerate_json_files_and_zip():
    # Get the updated workflow data
    workflow_data, _ = get_workflow_from_agents(st.session_state.agents)
    workflow_data["updated_at"] = datetime.datetime.now().isoformat()

    # Regenerate the zip files
    autogen_zip_buffer, crewai_zip_buffer = zip_files_in_memory(workflow_data)

    # Update the zip buffers in the session state
    st.session_state.autogen_zip_buffer = autogen_zip_buffer
    st.session_state.crewai_zip_buffer = crewai_zip_buffer


def regenerate_zip_files():
    if "agents" in st.session_state:
        workflow_data, _ = get_workflow_from_agents(st.session_state.agents)
        workflow_data["updated_at"] = datetime.datetime.now().isoformat()
        autogen_zip_buffer, crewai_zip_buffer = zip_files_in_memory(workflow_data)
        st.session_state.autogen_zip_buffer = autogen_zip_buffer
        st.session_state.crewai_zip_buffer = crewai_zip_buffer
        print("Zip files regenerated.")
    else:
        print("No agents found. Skipping zip file regeneration.")
def zip_files_in_memory(workflow_data):
    autogen_zip_buffer = io.BytesIO()
    crewai_zip_buffer = io.BytesIO()

    with zipfile.ZipFile(autogen_zip_buffer, 'w', zipfile.ZIP_DEFLATED) as autogen_zip:
        for agent in st.session_state.agents:
            agent_data = agent.to_dict()
            agent_name = agent_data['name']
            agent_file_name = f"{agent_name}.json"
            autogen_zip.writestr(f"agents/{agent_file_name}", json.dumps(agent_data, indent=2))

        # Add tools to the zip file
        for tool in st.session_state.tool_models:
            tool_data = tool.to_dict()
            tool_name = tool_data['name']
            tool_file_name = f"{tool_name}.json"
            autogen_zip.writestr(f"tools/{tool_file_name}", json.dumps(tool_data, indent=2))

        # Add workflow data
        autogen_zip.writestr("workflow.json", json.dumps(workflow_data, indent=2))

    with zipfile.ZipFile(crewai_zip_buffer, 'w', zipfile.ZIP_DEFLATED) as crewai_zip:
        for agent in st.session_state.agents:
            agent_data = normalize_config(agent.to_dict(), agent.name)
            agent_name = agent_data['name']
            crewai_agent_data = {
                "name": agent_name,
                "description": agent_data.get('description', ''),
                "verbose": True,
                "allow_delegation": True
            }
            crewai_zip.writestr(f"agents/{agent_name}.json", json.dumps(crewai_agent_data, indent=2))

    autogen_zip_buffer.seek(0)
    crewai_zip_buffer.seek(0)
    return autogen_zip_buffer, crewai_zip_buffer
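The two buffers come back rewound and ready to stream to the browser; presumably they are consumed by download buttons elsewhere in the app. A minimal sketch of that consumption (button labels and file names are made up):

workflow_data, _ = get_workflow_from_agents(st.session_state.agents)
autogen_zip, crewai_zip = zip_files_in_memory(workflow_data)
st.download_button("Download Autogen files", data=autogen_zip,
                   file_name="autogen_workflow.zip", mime="application/zip")
st.download_button("Download CrewAI files", data=crewai_zip,
                   file_name="crewai_workflow.zip", mime="application/zip")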
AutoGroq\utils\sandbox.py
import os
import subprocess


def execute_in_sandbox(tool_name, *args):
    # Create a temporary Python file with the tool execution.
    # Note: tool_name is interpolated unvalidated and args are embedded via
    # the tuple's repr, so this assumes trusted inputs.
    with open('temp_tool_execution.py', 'w') as f:
        f.write(f"from tools.{tool_name} import {tool_name}\n")
        f.write(f"result = {tool_name}(*{args})\n")
        f.write("print(result)\n")

    # Execute the temporary file in a separate process with a 10-second
    # timeout (process isolation plus a timeout, not a permission sandbox)
    try:
        result = subprocess.run(['python', 'temp_tool_execution.py'],
                                capture_output=True, text=True, timeout=10)
        return result.stdout.strip()
    finally:
        os.remove('temp_tool_execution.py')
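A hypothetical call, assuming a tools/add_numbers.py module that defines add_numbers() (the tool name is made up):

# Assumes tools/add_numbers.py exists and exports add_numbers().
output = execute_in_sandbox("add_numbers", 2, 3)
print(output)  # stdout of the child process, e.g. "5"

Because the child process inherits the parent's permissions, any deployment needing real isolation would have to replace this helper with something stronger, such as a container or a restricted interpreter.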