branch: externals/llm
commit f9e38f6c75506296911f1044b8fe8d41a55499e6
Author: Andrew Hyatt <ahy...@gmail.com>
Commit: GitHub <nore...@github.com>

    Add Gemini 2.5 reasoning support and the Flash Lite model (#202)
    
    Also change the default Vertex model to Gemini 2.5 Pro.
    
    This fixes https://github.com/ahyatt/llm/issues/187.
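    
    A quick usage sketch (illustrative, assuming llm-make-chat-prompt exposes
    the prompt's reasoning slot as a :reasoning keyword taking the budget
    symbols from this patch; the key is a placeholder):
    
        (require 'llm-gemini)
        ;; Ask Gemini 2.5 Flash for a lightly-reasoned answer.
        (let ((provider (make-llm-gemini :key "MY-KEY"
                                         :chat-model "gemini-2.5-flash"))
              (prompt (llm-make-chat-prompt "Why is the sky blue?"
                                            :reasoning 'light)))
          (llm-chat provider prompt))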
---
 NEWS.org      |  4 ++++
 llm-gemini.el |  9 +++++----
 llm-models.el | 12 +++++++++---
 llm-test.el   |  5 +++--
 llm-vertex.el | 38 ++++++++++++++++++++++++++++++++------
 5 files changed, 53 insertions(+), 15 deletions(-)

diff --git a/NEWS.org b/NEWS.org
index 5ad76a1d7f..56a60b214b 100644
--- a/NEWS.org
+++ b/NEWS.org
@@ -1,3 +1,7 @@
+* Version 0.27.1
+- Add thinking control to Gemini / Vertex.
+- Change default Vertex model to Gemini 2.5 Pro.
+- Add Gemini 2.5 Flash Lite model.
 * Version 0.27.0
 - Add =thinking= option to control the amount of thinking that happens for reasoning models.
 - Fix incorrectly low default Claude max tokens
diff --git a/llm-gemini.el b/llm-gemini.el
index 38dfbca007..bb3fbd7c46 100644
--- a/llm-gemini.el
+++ b/llm-gemini.el
@@ -78,10 +78,11 @@ If STREAMING-P is non-nil, use the streaming endpoint."
 (cl-defmethod llm-provider-chat-streaming-url ((provider llm-gemini))
   (llm-gemini--chat-url provider t))
 
-(cl-defmethod llm-provider-chat-request ((_ llm-gemini) _ _)
-  ;; Temporary, can be removed in the next version.  Without this the old
-  ;; definition will cause problems when users upgrade.
-  (cl-call-next-method))
+(cl-defmethod llm-provider-chat-request ((provider llm-gemini) prompt _)
+  (llm-provider--chat-request prompt (let ((model (llm-models-match (llm-gemini-chat-model provider))))
+                                       (if model
+                                           (llm-model-symbol model)
+                                         'unknown))))
 
 (cl-defmethod llm-name ((_ llm-gemini))
   "Return the name of PROVIDER."
diff --git a/llm-models.el b/llm-models.el
index d57823e1d2..8f4cded106 100644
--- a/llm-models.el
+++ b/llm-models.el
@@ -174,15 +174,21 @@ REGEX is a regular expression that can be used to identify the model, uniquely (
     :regex "gemini-2\\.0-flash-thinking")
    (make-llm-model
     :name "Gemini 2.5 Pro" :symbol 'gemini-2.5-pro
-    :capabilities '(generation tool-use image-input audio-input video-input json-response)
+    :capabilities '(generation tool-use image-input audio-input video-input json-response reasoning)
     :context-length 1048576
     :regex "gemini-2\\.5-pro")
+   (make-llm-model
+    :name "Gemini 2.5 Flash Lite" :symbol 'gemini-2.5-flash-lite
+    :capabilities '(generation tool-use image-input audio-input video-input json-response
+                               pdf-input caching reasoning)
+    :context-length 1048576
+    :regex "gemini-2\\.5-flash-lite")
    (make-llm-model
     :name "Gemini 2.5 Flash" :symbol 'gemini-2.5-flash
     :capabilities '(generation tool-use image-input audio-input video-input json-response
-                               pdf-input caching)
+                               pdf-input caching reasoning)
     :context-length 1048576
-    :regex "gemini-2\\.5-flash")
+    :regex "gemini-2\\.5-flash$")
    (make-llm-model
     :name "Gemini 2.0 Pro" :symbol 'gemini-2.0-pro
     :capabilities '(generation tool-use image-input audio-input video-input)
diff --git a/llm-test.el b/llm-test.el
index 1d68a5f726..9ae1fc0dc5 100644
--- a/llm-test.el
+++ b/llm-test.el
@@ -319,9 +319,10 @@
   (should (member 'tool-use (llm-capabilities (make-llm-gemini :chat-model "gemini-1.5-flash")))))
 
 (ert-deftest llm-test-chat-token-limit-vertex ()
-  (should (= 2097152 (llm-chat-token-limit (make-llm-vertex))))
+  ;; The default is Gemini 2.5 Pro, which has a token limit of 1048576.
+  (should (= 1048576 (llm-chat-token-limit (make-llm-vertex))))
   (should (= 1048576 (llm-chat-token-limit
-                      (make-llm-gemini :chat-model "gemini-1.5-flash"))))
+                      (make-llm-gemini :chat-model "gemini-2.5-flash"))))
   (should (= 4096 (llm-chat-token-limit
                    (make-llm-vertex :chat-model "unknown")))))
 
diff --git a/llm-vertex.el b/llm-vertex.el
index 071e32ac67..852592ad87 100644
--- a/llm-vertex.el
+++ b/llm-vertex.el
@@ -59,7 +59,7 @@ and there is no default.  The maximum value possible here is 2049."
   :type 'integer
   :group 'llm-vertex)
 
-(defcustom llm-vertex-default-chat-model "gemini-1.5-pro"
+(defcustom llm-vertex-default-chat-model "gemini-2.5-pro"
   "The default model to ask for.
 This should almost certainly be a chat model, other models are
 for more specialized uses."
@@ -207,7 +207,8 @@ the key must be regenerated every hour."
                              (llm-multipart-parts (llm-chat-prompt-interaction-content interaction)))))
            (t `[(:text ,(llm-chat-prompt-interaction-content interaction))]))))
 
-(cl-defmethod llm-provider-chat-request ((_ llm-google) prompt _)
+(defun llm-provider--chat-request (prompt model)
+  "Create the request for the chat PROMPT and MODEL symbol."
   (llm-provider-utils-combine-to-system-prompt prompt llm-vertex-example-prelude)
   (append
    (when (eq 'system (llm-chat-prompt-interaction-role (car (llm-chat-prompt-interactions prompt))))
@@ -231,12 +232,25 @@ the key must be regenerated every hour."
                               :parameters ,(llm-provider-utils-openai-arguments
                                             (llm-tool-args tool))))
                     (llm-chat-prompt-tools prompt))))]))
-   (llm-vertex--chat-parameters prompt)))
+   (llm-vertex--chat-parameters prompt model)))
 
-(defun llm-vertex--chat-parameters (prompt)
+;; TODO: remove after September 2025, this is only here so people can upgrade to
+;; a new version of their llm library without the old llm-google specializer
+;; sticking around.
+(cl-defmethod llm-provider-chat-request ((_ llm-google) _ _)
+  (cl-call-next-method))
+
+(cl-defmethod llm-provider-chat-request ((provider llm-vertex) prompt _)
+  (llm-provider--chat-request prompt (let ((model (llm-models-match (llm-vertex-chat-model provider))))
+                                       (if model
+                                           (llm-model-symbol model)
+                                         'unknown))))
+
+(defun llm-vertex--chat-parameters (prompt model)
   "From PROMPT, create the parameters section.
-Return value is a cons for adding to an alist, unless there is
-nothing to add, in which case it is nil."
+Return value is a cons for adding to an alist, unless there is nothing
+to add, in which case it is nil.  MODEL is the symbol of the model used,
+which is necessary to properly set some parameters."
   (let ((params-plist (llm-provider-utils-non-standard-params-plist prompt)))
     (when (llm-chat-prompt-temperature prompt)
       (setq params-plist (plist-put params-plist :temperature
@@ -251,6 +265,18 @@ nothing to add, in which case it is nil."
        (setq params-plist (plist-put params-plist :response_schema
                                      (llm-provider-utils-convert-to-serializable
                                       (llm-chat-prompt-response-format prompt))))))
+    (when-let ((budget (llm-chat-prompt-reasoning prompt))
+               (max-budget (if (eq model 'gemini-2.5-pro) 32768 24576)))
+      (when (member 'reasoning (llm-model-capabilities (llm-models-by-symbol model)))
+        (if (and (eq model 'gemini-2.5-pro) (eq budget 'none))
+            (display-warning 'llm "Cannot turn off reasoning in Gemini 2.5 Pro, ignoring reasoning setting" :warning)
+          (setq params-plist (plist-put params-plist :thinkingConfig
+                                        `(:thinkingBudget
+                                          ,(pcase budget
+                                             ('none 0)
+                                             ('light 1024)
+                                             ('medium (/ max-budget 2))
+                                             ('maximum max-budget))))))))
     (when params-plist
       `(:generationConfig ,params-plist))))
 

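A worked note on the budget mapping above (illustrative, not part of the patch):
with reasoning set to 'medium on gemini-2.5-flash, the budget is
(/ 24576 2) = 12288, so the request gains a parameters section along the lines of

    (:generationConfig (:thinkingConfig (:thinkingBudget 12288)))

On gemini-2.5-pro the same setting yields 16384, while 'none is ignored with a
warning, since 2.5 Pro cannot disable thinking.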