# CrewAI agent that performs CLI tasks via Open Interpreter, exposed through a Gradio UI.
import subprocess

# One-time setup helper, kept for reference:
# packages = ["open-interpreter", "gradio"]
# for package in packages:
#     subprocess.check_call(["pip", "install", package])

# Import the required libraries
import asyncio
import platform

# On Windows, the default Proactor event loop is incompatible with some
# async libraries; fall back to the selector-based event loop.
if platform.system() == 'Windows':
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
# Load environment variables (e.g. OPENAI_API_KEY) from a .env file
from dotenv import load_dotenv

load_dotenv()

# Import CrewAI core classes
from crewai import Agent, Task, Crew, Process

# Import LLM backends: OpenAI (hosted) and Ollama (local)
from langchain_community.llms import OpenAI, Ollama
from langchain_openai import ChatOpenAI
from langchain.tools import tool

# Import Open Interpreter, which generates and executes code locally
from interpreter import interpreter
# Configurations and Tools

# Hosted LLM configuration (OpenAI)
llm = ChatOpenAI(model_name="gpt-4-turbo-preview", temperature=0.8)

# Let Open Interpreter execute generated code without asking for confirmation.
interpreter.auto_run = True
interpreter.llm.model = "openai/gpt-4-turbo-preview"
interpreter.llm.temperature = 0.8

# Local LLM configuration and tools (Ollama) — uncomment to use instead of OpenAI
# llm = Ollama(model="mistral")
# interpreter.auto_run = True
# interpreter.llm.model = "openai/mistral"
# interpreter.llm.api_base = "http://localhost:11434/v1"
# interpreter.llm.api_key = "fake_key"
# interpreter.offline = True
# interpreter.llm.max_tokens = 1000
# interpreter.llm.max_retries = 20
# interpreter.llm.context_window = 3000
@tool('Executor')
def execute_cli_command(command: str):
    """Create and Execute the code using Open Interpreter"""
    # NOTE: the docstring above doubles as the tool description shown to the
    # agent, so it is kept verbatim.
    # interpreter.chat() sends the natural-language command to Open
    # Interpreter, which generates/executes code and returns the chat result.
    result = interpreter.chat(command)
    return result
# Create an agent for the CLI tasks
cli_agent = Agent(
    role='Software Developer',
    goal='Helpful Assistant. Ability to perform CLI operations, write programs and execute code using Executor Tool.',
    backstory='Expert in command line operations and automating tasks.',
    tools=[execute_cli_command],
    llm=llm,
    # Fixed: the original passed a misspelled, unrecognized keyword
    # 'deligate_tools'; CrewAI's Agent expects 'allow_delegation'.
    allow_delegation=False,
    verbose=True
)
cli_task = Task(
    # Default description only — cli_interface() overwrites it with the
    # user's command before each crew run.
    description='Identify the OS and empty my trash bin.',
    # Fixed mixed-language typo: 'und' -> 'and' in the prompt string.
    expected_output='Log all actions and result of the code execution',
    agent=cli_agent
)
# Create a crew for the CLI tasks
cli_crew = Crew(
    name='CLI Crew',
    agents=[cli_agent],
    tasks=[cli_task],
    process=Process.sequential,
    # NOTE(review): manager_llm is only consulted for hierarchical
    # processes; with Process.sequential it is unused but harmless.
    manager_llm=llm
)
# Run the crew behind a Gradio interface
import gradio as gr


def cli_interface(command):
    """Route a natural-language command through the crew and return its output."""
    # Reuse the single predefined task, swapping in the user's command.
    cli_task.description = command
    result = cli_crew.kickoff()
    return result


iface = gr.Interface(
    fn=cli_interface,
    inputs=gr.Textbox(lines=2, placeholder="Was soll ich tun?"),
    outputs="text",
    title="CLI Command Executor",
    description="Execute CLI commands via a natural language interface."
)

# Launches the web UI at import/run time (blocking call).
iface.launch()