[elpa] externals/llm 41d0889bcb 06/71: Make sync use of plz return data instead of object, converted Gemini


From: ELPA Syncer
Subject: [elpa] externals/llm 41d0889bcb 06/71: Make sync use of plz return data instead of object, converted Gemini
Date: Fri, 17 May 2024 00:58:43 -0400 (EDT)

branch: externals/llm
commit 41d0889bcba042826ab58225c72c878aebbc4f19
Author: Andrew Hyatt <ahyatt@gmail.com>
Commit: Andrew Hyatt <ahyatt@gmail.com>

    Make sync use of plz return data instead of object, converted Gemini
    
    Gemini as a whole does not yet work, but the embeddings of Vertex and Gemini now work.
---
 llm-gemini.el      | 84 +++++++++++++++++++++++++++---------------------------
 llm-request-plz.el | 11 +++++--
 llm-vertex.el      | 28 +++++++++---------
 3 files changed, 65 insertions(+), 58 deletions(-)
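
For context, the practical effect for callers is that the synchronous entry points now hand back the decoded response body directly rather than a response object that has to be unwrapped. A minimal sketch of the new calling convention follows; the URL and payload are illustrative placeholders, not part of this patch:

    ;; Sketch only: the URL and payload below are placeholders, not from the patch.
    (require 'llm-request-plz)
    (let ((data (llm-request-plz-sync
                 "https://example.invalid/v1/embed"
                 :data '((content . "hello world")))))
      ;; DATA is already the parsed response body (as decoded by the
      ;; plz media-type machinery), so it can be handed straight to a
      ;; provider's response handler, e.g.
      ;; llm-gemini--embedding-response-handler in the diff below.
      data)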

diff --git a/llm-gemini.el b/llm-gemini.el
index 444528159e..6d0dbb4fd4 100644
--- a/llm-gemini.el
+++ b/llm-gemini.el
@@ -28,7 +28,7 @@
 
 (require 'cl-lib)
 (require 'llm)
-(require 'llm-request)
+(require 'llm-request-plz)
 (require 'llm-vertex)
 (require 'llm-provider-utils)
 (require 'json)
@@ -57,21 +57,21 @@ You can get this at https://makersuite.google.com/app/apikey."
 
 (cl-defmethod llm-embedding ((provider llm-gemini) string)
   (llm-vertex--handle-response
-   (llm-request-sync (llm-gemini--embedding-url provider)
-                     :data (llm-gemini--embedding-request provider string))
+   (llm-request-plz-sync (llm-gemini--embedding-url provider)
+                         :data (llm-gemini--embedding-request provider string))
    #'llm-gemini--embedding-response-handler))
 
 (cl-defmethod llm-embedding-async ((provider llm-gemini) string vector-callback error-callback)
   (let ((buf (current-buffer)))
-    (llm-request-async (llm-gemini--embedding-url provider)
-                       :data (llm-gemini--embedding-request provider string)
-                       :on-success (lambda (data)
-                                     (llm-request-callback-in-buffer
-                                      buf vector-callback (llm-gemini--embedding-response-handler data)))
-                       :on-error (lambda (_ data)
-                                   (llm-request-callback-in-buffer
-                                    buf error-callback
-                                    'error (llm-vertex--error-message data))))))
+    (llm-request-plz-async (llm-gemini--embedding-url provider)
+                           :data (llm-gemini--embedding-request provider string)
+                           :on-success (lambda (data)
+                                         (llm-request-callback-in-buffer
+                                          buf vector-callback (llm-gemini--embedding-response-handler data)))
+                           :on-error (lambda (_ data)
+                                       (llm-request-callback-in-buffer
+                                        buf error-callback
+                                        'error (llm-vertex--error-message data))))))
 
 ;; from https://ai.google.dev/tutorials/rest_quickstart
 (defun llm-gemini--chat-url (provider streaming-p)
@@ -103,39 +103,39 @@ If STREAMING-P is non-nil, use the streaming endpoint."
 (cl-defmethod llm-chat ((provider llm-gemini) prompt)
   (llm-vertex--process-and-return
    provider prompt
-   (llm-request-sync (llm-gemini--chat-url provider nil)
-                     :data (llm-gemini--chat-request prompt))))
+   (llm-request-plz-sync (llm-gemini--chat-url provider nil)
+                         :data (llm-gemini--chat-request prompt))))
 
 (cl-defmethod llm-chat-async ((provider llm-gemini) prompt response-callback error-callback)
   (let ((buf (current-buffer)))
-    (llm-request-async (llm-gemini--chat-url provider nil)
-                       :data (llm-gemini--chat-request prompt)
-                       :on-success (lambda (data)
-                                     (llm-request-callback-in-buffer
-                                      buf response-callback
-                                      (llm-vertex--process-and-return
-                                       provider prompt
-                                       data)))
-                       :on-error (lambda (_ data)
-                                   (llm-request-callback-in-buffer buf error-callback 'error
-                                                                   (llm-vertex--error-message data))))))
+    (llm-request-plz-async (llm-gemini--chat-url provider nil)
+                           :data (llm-gemini--chat-request prompt)
+                           :on-success (lambda (data)
+                                         (llm-request-callback-in-buffer
+                                          buf response-callback
+                                          (llm-vertex--process-and-return
+                                           provider prompt
+                                           data)))
+                           :on-error (lambda (_ data)
+                                       (llm-request-callback-in-buffer buf error-callback 'error
+                                                                       (llm-vertex--error-message data))))))
 
 (cl-defmethod llm-chat-streaming ((provider llm-gemini) prompt partial-callback response-callback error-callback)
   (let ((buf (current-buffer)))
-    (llm-request-async (llm-gemini--chat-url provider t)
-                       :data (llm-gemini--chat-request prompt)
-                       :on-partial (lambda (partial)
-                                     (when-let ((response (llm-vertex--get-partial-chat-response partial)))
-                                       (when (> (length response) 0)
-                                         (llm-request-callback-in-buffer buf partial-callback response))))
-                       :on-success (lambda (data)
-                                     (llm-request-callback-in-buffer
-                                        buf response-callback
-                                        (llm-vertex--process-and-return
-                                         provider prompt data)))
-                       :on-error (lambda (_ data)
-                                 (llm-request-callback-in-buffer buf error-callback 'error
-                                                                 (llm-vertex--error-message data))))))
+    (llm-request-plz-async (llm-gemini--chat-url provider t)
+                           :data (llm-gemini--chat-request prompt)
+                           :on-partial (lambda (partial)
+                                         (when-let ((response (llm-vertex--get-partial-chat-response partial)))
+                                           (when (> (length response) 0)
+                                             (llm-request-callback-in-buffer buf partial-callback response))))
+                           :on-success (lambda (data)
+                                         (llm-request-callback-in-buffer
+                                          buf response-callback
+                                          (llm-vertex--process-and-return
+                                           provider prompt data)))
+                           :on-error (lambda (_ data)
+                                       (llm-request-callback-in-buffer buf error-callback 'error
+                                                                       (llm-vertex--error-message data))))))
 
 (defun llm-gemini--count-token-url (provider)
   "Return the URL for the count token call, using PROVIDER."
@@ -145,9 +145,9 @@ If STREAMING-P is non-nil, use the streaming endpoint."
 
 (cl-defmethod llm-count-tokens ((provider llm-gemini) string)
   (llm-vertex--handle-response
-   (llm-request-sync (llm-gemini--count-token-url provider)
-                     :data (llm-vertex--to-count-token-request
-                            (llm-vertex--chat-request (llm-make-simple-chat-prompt string))))
+   (llm-request-plz-sync (llm-gemini--count-token-url provider)
+                         :data (llm-vertex--to-count-token-request
+                                (llm-vertex--chat-request (llm-make-simple-chat-prompt string))))
    #'llm-vertex--count-tokens-extract-response))
 
 (cl-defmethod llm-name ((_ llm-gemini))
diff --git a/llm-request-plz.el b/llm-request-plz.el
index 14babfaa41..1467cf23cf 100644
--- a/llm-request-plz.el
+++ b/llm-request-plz.el
@@ -39,6 +39,10 @@ not very long so that we can end stuck requests."
   :type 'integer
   :group 'llm)
 
+(defun llm-request-success (status)
+  "Return non-nil if STATUS is a successful HTTP status code."
+  (<= 200 status 299))
+
 (cl-defun llm-request-plz-sync-raw-output (url &key headers data timeout)
   "Make a request to URL.  The raw text response will be returned.
 
@@ -50,13 +54,16 @@ This is required.
 
 TIMEOUT is the number of seconds to wait for a response."
   (condition-case error
-      (plz-media-type-request
+      (let ((resp (plz-media-type-request
         'post url
         :as `(media-types ,plz-media-types)
         :body (when data
                 (encode-coding-string (json-encode data) 'utf-8))
         :headers (append headers '(("Content-Type" . "application/json")))
-        :timeout (or timeout llm-request-plz-timeout))
+        :timeout (or timeout llm-request-plz-timeout))))
+        (if (llm-request-success (plz-response-status resp))
+            (plz-response-body resp)
+          (signal 'plz-http-error resp)))
     (plz-error
      (seq-let [error-sym message data] error
        (cond
diff --git a/llm-vertex.el b/llm-vertex.el
index 73e07d1ea8..0683a0e730 100644
--- a/llm-vertex.el
+++ b/llm-vertex.el
@@ -303,24 +303,24 @@ If STREAMING is non-nil, use the URL for the streaming API."
   (llm-vertex-refresh-key provider)
   (llm-vertex--process-and-return
      provider prompt
-     (llm-request-sync (llm-vertex--chat-url provider)
-                       :headers `(("Authorization" . ,(format "Bearer %s" (llm-vertex-key provider))))
-                       :data (llm-vertex--chat-request prompt))))
+     (llm-request-plz-sync (llm-vertex--chat-url provider)
+                           :headers `(("Authorization" . ,(format "Bearer %s" (llm-vertex-key provider))))
+                           :data (llm-vertex--chat-request prompt))))
 
 (cl-defmethod llm-chat-async ((provider llm-vertex) prompt response-callback error-callback)
   (llm-vertex-refresh-key provider)
   (let ((buf (current-buffer)))
-    (llm-request-async (llm-vertex--chat-url provider)
-                       :headers `(("Authorization" . ,(format "Bearer %s" (llm-vertex-key provider))))
-                       :data (llm-vertex--chat-request prompt)
-                       :on-success (lambda (data)
-                                     (llm-request-callback-in-buffer
-                                      buf response-callback
-                                      (llm-vertex--process-and-return
-                                       provider prompt data)))
-                       :on-error (lambda (_ data)
-                                   (llm-request-callback-in-buffer buf error-callback 'error
-                                                                   (llm-vertex--error-message data))))))
+    (llm-request-plz-async (llm-vertex--chat-url provider)
+                           :headers `(("Authorization" . ,(format "Bearer %s" (llm-vertex-key provider))))
+                           :data (llm-vertex--chat-request prompt)
+                           :on-success (lambda (data)
+                                         (llm-request-callback-in-buffer
+                                          buf response-callback
+                                          (llm-vertex--process-and-return
+                                           provider prompt data)))
+                           :on-error (lambda (_ data)
+                                       (llm-request-callback-in-buffer buf error-callback 'error
+                                                                       (llm-vertex--error-message data))))))
 
 (cl-defmethod llm-chat-streaming ((provider llm-vertex) prompt partial-callback response-callback error-callback)
   (llm-vertex-refresh-key provider)
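
The new llm-request-success helper in the llm-request-plz.el hunk above is a plain range check: any 2xx status counts as success, and anything else makes the sync path signal rather than return a response object (the remainder of that error handler is truncated in this message). A small illustrative evaluation of the predicate as defined above:

    ;; llm-request-success as defined in the hunk above: (<= 200 status 299)
    (llm-request-success 200)  ; => t   (OK)
    (llm-request-success 204)  ; => t   (No Content still counts as success)
    (llm-request-success 301)  ; => nil (redirects are not treated as success)
    (llm-request-success 404)  ; => nil (non-2xx leads to a plz-http-error signal
                               ;         in llm-request-plz-sync-raw-output)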


