import os
import datetime

import gradio as gr
from dotenv import load_dotenv
from langchain.agents import Tool, initialize_agent, AgentType
from langchain.chat_models import init_chat_model
from langchain.memory import ConversationBufferMemory
from langchain.schema import SystemMessage

from tools.qbit import QbitDownloadListTool, QbitSearchTool, QbitDownloadTorrentTool
from tools.search import MediaInfoSearchTool, MoviesAdviceSearchTool

# Load environment variables
load_dotenv()


def create_agent():
    # Initialize the language model
    # llm = init_chat_model("gpt-4.1-mini", model_provider="openai")
    llm = init_chat_model("qwen2.5-coder:14b", model_provider="ollama", temperature=0)

    # Current date for the system messages below
    current_time = datetime.datetime.now()

    # Initialize conversation memory
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        return_messages=True,
        human_prefix="User",
        ai_prefix="Assistant",
    )

    # Seed the chat history with system-level instructions
    memory.chat_memory.add_message(
        SystemMessage(content=f"SYSTEM: today is {current_time.strftime('%Y-%m-%d')}")
    )
    memory.chat_memory.add_message(
        SystemMessage(
            content="SYSTEM: from now on, when the user asks for a movie or TV series suggestion, "
            "reply with a numbered markdown list with a brief description of each title"
        )
    )
    memory.chat_memory.add_message(
        SystemMessage(
            content="SYSTEM: from now on, when listing torrents, show the number of seeds and the "
            "MAGNET LINK (USING A MARKDOWN LINK WITH TEXT 'Magnet link 🧲')"
        )
    )
    memory.chat_memory.add_message(
        SystemMessage(
            content="SYSTEM: from now on, when showing the downloads list, use a clean and nice markdown "
            "format with the name and the most important information; add a progress emoji next to each "
            "download to represent how complete it is, and a coloured bullet emoji after the torrent status, "
            "for example blue for uploading, green for downloading, red for error, yellow for paused, "
            "and grey for completed"
        )
    )
    memory.chat_memory.add_message(
        SystemMessage(
            content="SYSTEM: from now on, when the user asks for a download, NEVER start a qBittorrent "
            "download unless the user has first viewed the list of torrents and chosen one of them"
        )
    )

    # Initialize tools
    tools = [
        QbitDownloadListTool(),
        QbitSearchTool(),
        QbitDownloadTorrentTool(),
        MoviesAdviceSearchTool(),
        MediaInfoSearchTool(),
    ]

    # Initialize the agent with memory
    agent = initialize_agent(
        tools,
        llm,
        agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
        verbose=True,
        memory=memory,
    )

    return agent


def process_query(message, history):
    try:
        # Create the agent once and cache it on the function object
        if not hasattr(process_query, "agent"):
            process_query.agent = create_agent()

        # Run the agent with the user's message
        response = process_query.agent.run(message)
        return response
    except Exception as e:
        return f"Error: {str(e)}"


def main():
    print("Starting qBittorrent AI Agent...")

    # Create the Gradio interface
    with gr.Blocks(title="qbit-agent") as interface:
        gr.Markdown("# qbit-agent")
        gr.Markdown("### Made by Matteo with hate and piracy 💀")
        gr.Markdown("Ask about downloads, search for content (and torrents), or get recommendations.")

        chatbot = gr.ChatInterface(
            process_query,
            examples=[
                "Find me the latest sci-fi movies",
                "What are the top TV shows from 2023?",
                "Download Interstellar in 1080p",
                "Show me my current downloads",
                "What is The Matrix",
                "Get me a list of horror movies",
            ],
        )

    # Launch the interface
    interface.launch(share=True)


def cli_main():
    print("Starting qBittorrent AI Agent in CLI mode...")
    agent = create_agent()

    while True:
        user_input = input("\nEnter your question (or 'quit' to exit): ")
        if user_input.lower() in ["quit", "exit"]:
            break
        try:
            response = agent.run(user_input)
            print(response)
        except Exception as e:
            print(f"Error: {str(e)}")


if __name__ == "__main__":
    # Use main() for the Gradio interface or cli_main() for the command-line interface
    main()
    # Uncomment the line below to use the CLI instead
    # cli_main()