From 1582722044f29531927d090a30a7b5b4bde1be65 Mon Sep 17 00:00:00 2001 From: Shorya Sethia <132898518+shoryasethia@users.noreply.github.com> Date: Thu, 8 Jan 2026 00:28:38 +0530 Subject: [PATCH] Add OpenRouter support --- README.md | 1 + voice_assistant/api_key_manager.py | 3 ++- voice_assistant/config.py | 11 +++++++++-- voice_assistant/response_generation.py | 15 ++++++++++++++- 4 files changed, 26 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 308d490..995220a 100644 --- a/README.md +++ b/README.md @@ -224,6 +224,7 @@ If you are running LLM locally via [Ollama](https://ollama.com/), make sure the - **OpenAI**: Uses OpenAI's GPT-4 model. - **Groq**: Uses Groq's LLaMA model. +- **OpenRouter**: Uses any model available on OpenRouter (default: Gemini 2.0 Flash). - **Ollama**: Uses any model served via Ollama. - **Local**: Placeholder for a local language model. diff --git a/voice_assistant/api_key_manager.py b/voice_assistant/api_key_manager.py index 68668e3..a4488ae 100644 --- a/voice_assistant/api_key_manager.py +++ b/voice_assistant/api_key_manager.py @@ -10,7 +10,8 @@ }, "response":{ "openai":Config.OPENAI_API_KEY, - "groq": Config.GROQ_API_KEY + "groq": Config.GROQ_API_KEY, + "openrouter": Config.OPENROUTER_API_KEY }, "tts": { "openai": Config.OPENAI_API_KEY, diff --git a/voice_assistant/config.py b/voice_assistant/config.py index 73fbc87..ede61bf 100644 --- a/voice_assistant/config.py +++ b/voice_assistant/config.py @@ -22,7 +22,7 @@ class Config: """ # Model selection TRANSCRIPTION_MODEL = 'deepgram' # possible values: openai, groq, deepgram, fastwhisperapi - RESPONSE_MODEL = 'openai' # possible values: openai, groq, ollama + RESPONSE_MODEL = 'openai' # possible values: openai, groq, ollama, openrouter TTS_MODEL = 'openai' # possible values: openai, deepgram, elevenlabs, melotts, cartesia, piper # Piper Server configuration @@ -36,6 +36,7 @@ class Config: OLLAMA_LLM="llama3:8b" GROQ_LLM="llama3-8b-8192" OPENAI_LLM="gpt-4o" + 
OPENROUTER_LLM="google/gemini-2.0-flash-exp:free" # API keys and paths OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") @@ -44,6 +45,7 @@ class Config: ELEVENLABS_API_KEY = os.getenv("ELEVENLABS_API_KEY") LOCAL_MODEL_PATH = os.getenv("LOCAL_MODEL_PATH") CARTESIA_API_KEY = os.getenv("CARTESIA_API_KEY") + OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY") # for serving the MeloTTS model TTS_PORT_LOCAL = 5150 @@ -62,7 +68,7 @@ def validate_config(): Config._validate_model('TRANSCRIPTION_MODEL', [ 'openai', 'groq', 'deepgram', 'fastwhisperapi', 'local']) Config._validate_model('RESPONSE_MODEL', [ - 'openai', 'groq', 'ollama', 'local']) + 'openai', 'groq', 'ollama', 'local', 'openrouter']) Config._validate_model('TTS_MODEL', [ 'openai', 'deepgram', 'elevenlabs', 'melotts', 'cartesia', 'local', 'piper']) @@ -72,6 +78,7 @@ def validate_config(): Config._validate_api_key('RESPONSE_MODEL', 'openai', 'OPENAI_API_KEY') Config._validate_api_key('RESPONSE_MODEL', 'groq', 'GROQ_API_KEY') + Config._validate_api_key('RESPONSE_MODEL', 'openrouter', 'OPENROUTER_API_KEY') Config._validate_api_key('TTS_MODEL', 'openai', 'OPENAI_API_KEY') Config._validate_api_key('TTS_MODEL', 'deepgram', 'DEEPGRAM_API_KEY') diff --git a/voice_assistant/response_generation.py b/voice_assistant/response_generation.py index e6e3891..188071e 100644 --- a/voice_assistant/response_generation.py +++ b/voice_assistant/response_generation.py @@ -27,6 +27,8 @@ def generate_response(model:str, api_key:str, chat_history:list, local_model_pat return _generate_openai_response(api_key, chat_history) elif model == 'groq': return _generate_groq_response(api_key, chat_history) + elif model == 'openrouter': + return _generate_openrouter_response(api_key, chat_history) elif model == 'ollama': return _generate_ollama_response(chat_history) elif model == 'local': @@ -61,4 +63,15 @@ def 
_generate_ollama_response(chat_history): model=Config.OLLAMA_LLM, messages=chat_history, ) - return response['message']['content'] \ No newline at end of file + return response['message']['content'] + +def _generate_openrouter_response(api_key, chat_history): + client = OpenAI( + base_url="https://openrouter.ai/api/v1", + api_key=api_key, + ) + response = client.chat.completions.create( + model=Config.OPENROUTER_LLM, + messages=chat_history, + ) + return response.choices[0].message.content \ No newline at end of file