From 829f973cc8176748eaef3fb644851d5651e19352 Mon Sep 17 00:00:00 2001 From: Lukas Date: Wed, 28 Jan 2026 01:27:26 -0800 Subject: [PATCH 1/3] Fix Gemini model config --- batchata/providers/gemini/models.py | 85 ++++++++++------------------- 1 file changed, 28 insertions(+), 57 deletions(-) diff --git a/batchata/providers/gemini/models.py b/batchata/providers/gemini/models.py index 339abd8..f6c5ac9 100644 --- a/batchata/providers/gemini/models.py +++ b/batchata/providers/gemini/models.py @@ -2,96 +2,67 @@ from ..model_config import ModelConfig +# updated based on current (Jan 28 2026) docs +# models - https://ai.google.dev/gemini-api/docs/models +# batch pricing - https://ai.google.dev/gemini-api/docs/pricing +# image file types - https://ai.google.dev/gemini-api/docs/image-understanding#supported-formats # Google Gemini models with batch processing support # Batch mode provides 50% discount on standard API pricing GEMINI_MODELS = { - "gemini-3.0-pro-latest": ModelConfig( - name="gemini-3.0-pro-latest", - max_input_tokens=2097152, # 2M context - max_output_tokens=8192, - batch_discount=0.5, # 50% discount confirmed in docs - supports_images=True, - supports_files=True, - supports_citations=False, - supports_structured_output=True, - file_types=[".pdf", ".txt", ".jpg", ".png", ".gif", ".webp"] - ), - "gemini-3.0-pro": ModelConfig( - name="gemini-3.0-pro", - max_input_tokens=2097152, # 2M context - max_output_tokens=8192, - batch_discount=0.5, # 50% discount confirmed in docs - supports_images=True, - supports_files=True, - supports_citations=False, - supports_structured_output=True, - file_types=[".pdf", ".txt", ".jpg", ".png", ".gif", ".webp"] - ), - "gemini-3.0-flash-latest": ModelConfig( - name="gemini-3.0-flash-latest", + "gemini-3.0-pro-preview": ModelConfig( + name="gemini-3.0-pro-preview", max_input_tokens=1048576, # 1M context - max_output_tokens=8192, - batch_discount=0.5, - supports_images=True, - supports_files=True, - supports_citations=False, - 
supports_structured_output=True, - file_types=[".pdf", ".txt", ".jpg", ".png", ".gif", ".webp"] - ), - "gemini-3.0-flash": ModelConfig( - name="gemini-3.0-flash", - max_input_tokens=1048576, # 1M context - max_output_tokens=8192, - batch_discount=0.5, + max_output_tokens=65536, + batch_discount=0.5, # 50% discount confirmed in docs supports_images=True, supports_files=True, supports_citations=False, supports_structured_output=True, - file_types=[".pdf", ".txt", ".jpg", ".png", ".gif", ".webp"] + file_types=[".pdf", ".txt", ".jpg", ".png", ".webp"] ), - "gemini-3.0-flash-lite-latest": ModelConfig( - name="gemini-3.0-flash-lite-latest", + "gemini-3.0-flash-preview": ModelConfig( + name="gemini-3.0-flash-preview", max_input_tokens=1048576, # 1M context - max_output_tokens=8192, - batch_discount=0.5, + max_output_tokens=65536, + batch_discount=0.5, # 50% discount confirmed in docs supports_images=True, supports_files=True, supports_citations=False, supports_structured_output=True, - file_types=[".pdf", ".txt", ".jpg", ".png", ".gif", ".webp"] + file_types=[".pdf", ".txt", ".jpg", ".png", ".webp"] ), - "gemini-3.0-flash-lite": ModelConfig( - name="gemini-3.0-flash-lite", + "gemini-2.5-pro": ModelConfig( + name="gemini-2.5-pro", max_input_tokens=1048576, # 1M context - max_output_tokens=8192, + max_output_tokens=65536, batch_discount=0.5, supports_images=True, supports_files=True, supports_citations=False, supports_structured_output=True, - file_types=[".pdf", ".txt", ".jpg", ".png", ".gif", ".webp"] + file_types=[".pdf", ".txt", ".jpg", ".png", ".webp"] ), - "gemini-2.0-flash": ModelConfig( - name="gemini-2.0-flash", + "gemini-2.5-flash": ModelConfig( + name="gemini-2.5-flash", max_input_tokens=1048576, # 1M context - max_output_tokens=8192, + max_output_tokens=65536, batch_discount=0.5, supports_images=True, - supports_files=True, + supports_files=False, supports_citations=False, supports_structured_output=True, - file_types=[".pdf", ".txt", ".jpg", ".png", ".gif", 
".webp"] + file_types=[".txt", ".jpg", ".png", ".webp"] ), - "gemini-2.0-flash-lite": ModelConfig( - name="gemini-2.0-flash-lite", + "gemini-2.5-flash-lite": ModelConfig( + name="gemini-2.5-flash-lite", max_input_tokens=1048576, # 1M context - max_output_tokens=8192, + max_output_tokens=65536, batch_discount=0.5, supports_images=True, supports_files=True, supports_citations=False, supports_structured_output=True, - file_types=[".pdf", ".txt", ".jpg", ".png", ".gif", ".webp"] - ), + file_types=[".pdf", ".txt", ".jpg", ".png", ".webp"] ) } \ No newline at end of file From 051e3748ea581e221e3aedb47f4f004315e4038a Mon Sep 17 00:00:00 2001 From: Lukas Date: Wed, 28 Jan 2026 01:28:44 -0800 Subject: [PATCH 2/3] Explicitly disable VertexAI in Gemini client --- batchata/providers/gemini/gemini_provider.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/batchata/providers/gemini/gemini_provider.py b/batchata/providers/gemini/gemini_provider.py index 223056a..e7fb34a 100644 --- a/batchata/providers/gemini/gemini_provider.py +++ b/batchata/providers/gemini/gemini_provider.py @@ -48,7 +48,7 @@ def __init__(self, auto_register: bool = True): if not api_key: raise ValueError("GOOGLE_API_KEY environment variable is required") - self.client = genai_lib.Client(api_key=api_key) + self.client = genai_lib.Client(vertexai=False, api_key=api_key) super().__init__() self.models = GEMINI_MODELS self._batches: Dict[str, Dict] = {} From a3489f2b938d52a8663870ce1a55a1c7d2f68e39 Mon Sep 17 00:00:00 2001 From: Lukas Date: Sun, 1 Feb 2026 16:06:11 -0800 Subject: [PATCH 3/3] Fix model names for gemini-3 --- batchata/providers/gemini/models.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/batchata/providers/gemini/models.py b/batchata/providers/gemini/models.py index f6c5ac9..dbaf69f 100644 --- a/batchata/providers/gemini/models.py +++ b/batchata/providers/gemini/models.py @@ -10,8 +10,8 @@ # Google Gemini models with batch processing support # Batch 
mode provides 50% discount on standard API pricing GEMINI_MODELS = { - "gemini-3.0-pro-preview": ModelConfig( - name="gemini-3.0-pro-preview", + "gemini-3-pro-preview": ModelConfig( + name="gemini-3-pro-preview", max_input_tokens=1048576, # 1M context max_output_tokens=65536, batch_discount=0.5, # 50% discount confirmed in docs @@ -21,8 +21,8 @@ supports_structured_output=True, file_types=[".pdf", ".txt", ".jpg", ".png", ".webp"] ), - "gemini-3.0-flash-preview": ModelConfig( - name="gemini-3.0-flash-preview", + "gemini-3-flash-preview": ModelConfig( + name="gemini-3-flash-preview", max_input_tokens=1048576, # 1M context max_output_tokens=65536, batch_discount=0.5, # 50% discount confirmed in docs