[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]
[elpa] externals/llm 41d0889bcb 06/71: Make sync use of plz return data
From: ELPA Syncer
Subject: [elpa] externals/llm 41d0889bcb 06/71: Make sync use of plz return data instead of object, converted Gemini
Date: Fri, 17 May 2024 00:58:43 -0400 (EDT)
branch: externals/llm
commit 41d0889bcba042826ab58225c72c878aebbc4f19
Author: Andrew Hyatt <ahyatt@gmail.com>
Commit: Andrew Hyatt <ahyatt@gmail.com>
Make sync use of plz return data instead of object, converted Gemini
Gemini as a whole does not yet work, but the embeddings of Vertex and
Gemini now work.
---
llm-gemini.el | 84 +++++++++++++++++++++++++++---------------------------
llm-request-plz.el | 11 +++++--
llm-vertex.el | 28 +++++++++---------
3 files changed, 65 insertions(+), 58 deletions(-)
diff --git a/llm-gemini.el b/llm-gemini.el
index 444528159e..6d0dbb4fd4 100644
--- a/llm-gemini.el
+++ b/llm-gemini.el
@@ -28,7 +28,7 @@
(require 'cl-lib)
(require 'llm)
-(require 'llm-request)
+(require 'llm-request-plz)
(require 'llm-vertex)
(require 'llm-provider-utils)
(require 'json)
@@ -57,21 +57,21 @@ You can get this at
https://makersuite.google.com/app/apikey."
(cl-defmethod llm-embedding ((provider llm-gemini) string)
(llm-vertex--handle-response
- (llm-request-sync (llm-gemini--embedding-url provider)
- :data (llm-gemini--embedding-request provider string))
+ (llm-request-plz-sync (llm-gemini--embedding-url provider)
+ :data (llm-gemini--embedding-request provider string))
#'llm-gemini--embedding-response-handler))
(cl-defmethod llm-embedding-async ((provider llm-gemini) string
vector-callback error-callback)
(let ((buf (current-buffer)))
- (llm-request-async (llm-gemini--embedding-url provider)
- :data (llm-gemini--embedding-request provider string)
- :on-success (lambda (data)
- (llm-request-callback-in-buffer
- buf vector-callback
(llm-gemini--embedding-response-handler data)))
- :on-error (lambda (_ data)
- (llm-request-callback-in-buffer
- buf error-callback
- 'error (llm-vertex--error-message
data))))))
+ (llm-request-plz-async (llm-gemini--embedding-url provider)
+ :data (llm-gemini--embedding-request provider
string)
+ :on-success (lambda (data)
+ (llm-request-callback-in-buffer
+ buf vector-callback
(llm-gemini--embedding-response-handler data)))
+ :on-error (lambda (_ data)
+ (llm-request-callback-in-buffer
+ buf error-callback
+ 'error (llm-vertex--error-message
data))))))
;; from https://ai.google.dev/tutorials/rest_quickstart
(defun llm-gemini--chat-url (provider streaming-p)
@@ -103,39 +103,39 @@ If STREAMING-P is non-nil, use the streaming endpoint."
(cl-defmethod llm-chat ((provider llm-gemini) prompt)
(llm-vertex--process-and-return
provider prompt
- (llm-request-sync (llm-gemini--chat-url provider nil)
- :data (llm-gemini--chat-request prompt))))
+ (llm-request-plz-sync (llm-gemini--chat-url provider nil)
+ :data (llm-gemini--chat-request prompt))))
(cl-defmethod llm-chat-async ((provider llm-gemini) prompt response-callback
error-callback)
(let ((buf (current-buffer)))
- (llm-request-async (llm-gemini--chat-url provider nil)
- :data (llm-gemini--chat-request prompt)
- :on-success (lambda (data)
- (llm-request-callback-in-buffer
- buf response-callback
- (llm-vertex--process-and-return
- provider prompt
- data)))
- :on-error (lambda (_ data)
- (llm-request-callback-in-buffer buf
error-callback 'error
-
(llm-vertex--error-message data))))))
+ (llm-request-plz-async (llm-gemini--chat-url provider nil)
+ :data (llm-gemini--chat-request prompt)
+ :on-success (lambda (data)
+ (llm-request-callback-in-buffer
+ buf response-callback
+ (llm-vertex--process-and-return
+ provider prompt
+ data)))
+ :on-error (lambda (_ data)
+ (llm-request-callback-in-buffer buf
error-callback 'error
+
(llm-vertex--error-message data))))))
(cl-defmethod llm-chat-streaming ((provider llm-gemini) prompt
partial-callback response-callback error-callback)
(let ((buf (current-buffer)))
- (llm-request-async (llm-gemini--chat-url provider t)
- :data (llm-gemini--chat-request prompt)
- :on-partial (lambda (partial)
- (when-let ((response
(llm-vertex--get-partial-chat-response partial)))
- (when (> (length response) 0)
- (llm-request-callback-in-buffer buf
partial-callback response))))
- :on-success (lambda (data)
- (llm-request-callback-in-buffer
- buf response-callback
- (llm-vertex--process-and-return
- provider prompt data)))
- :on-error (lambda (_ data)
- (llm-request-callback-in-buffer buf
error-callback 'error
-
(llm-vertex--error-message data))))))
+ (llm-request-plz-async (llm-gemini--chat-url provider t)
+ :data (llm-gemini--chat-request prompt)
+ :on-partial (lambda (partial)
+ (when-let ((response
(llm-vertex--get-partial-chat-response partial)))
+ (when (> (length response) 0)
+ (llm-request-callback-in-buffer
buf partial-callback response))))
+ :on-success (lambda (data)
+ (llm-request-callback-in-buffer
+ buf response-callback
+ (llm-vertex--process-and-return
+ provider prompt data)))
+ :on-error (lambda (_ data)
+ (llm-request-callback-in-buffer buf
error-callback 'error
+
(llm-vertex--error-message data))))))
(defun llm-gemini--count-token-url (provider)
"Return the URL for the count token call, using PROVIDER."
@@ -145,9 +145,9 @@ If STREAMING-P is non-nil, use the streaming endpoint."
(cl-defmethod llm-count-tokens ((provider llm-gemini) string)
(llm-vertex--handle-response
- (llm-request-sync (llm-gemini--count-token-url provider)
- :data (llm-vertex--to-count-token-request
- (llm-vertex--chat-request
(llm-make-simple-chat-prompt string))))
+ (llm-request-plz-sync (llm-gemini--count-token-url provider)
+ :data (llm-vertex--to-count-token-request
+ (llm-vertex--chat-request
(llm-make-simple-chat-prompt string))))
#'llm-vertex--count-tokens-extract-response))
(cl-defmethod llm-name ((_ llm-gemini))
diff --git a/llm-request-plz.el b/llm-request-plz.el
index 14babfaa41..1467cf23cf 100644
--- a/llm-request-plz.el
+++ b/llm-request-plz.el
@@ -39,6 +39,10 @@ not very long so that we can end stuck requests."
:type 'integer
:group 'llm)
+(defun llm-request-success (status)
+ "Return non-nil if STATUS is a successful HTTP status code."
+ (<= 200 status 299))
+
(cl-defun llm-request-plz-sync-raw-output (url &key headers data timeout)
"Make a request to URL. The raw text response will be returned.
@@ -50,13 +54,16 @@ This is required.
TIMEOUT is the number of seconds to wait for a response."
(condition-case error
- (plz-media-type-request
+ (let ((resp (plz-media-type-request
'post url
:as `(media-types ,plz-media-types)
:body (when data
(encode-coding-string (json-encode data) 'utf-8))
:headers (append headers '(("Content-Type" . "application/json")))
- :timeout (or timeout llm-request-plz-timeout))
+ :timeout (or timeout llm-request-plz-timeout))))
+ (if (llm-request-success (plz-response-status resp))
+ (plz-response-body resp)
+ (signal 'plz-http-error resp)))
(plz-error
(seq-let [error-sym message data] error
(cond
diff --git a/llm-vertex.el b/llm-vertex.el
index 73e07d1ea8..0683a0e730 100644
--- a/llm-vertex.el
+++ b/llm-vertex.el
@@ -303,24 +303,24 @@ If STREAMING is non-nil, use the URL for the streaming
API."
(llm-vertex-refresh-key provider)
(llm-vertex--process-and-return
provider prompt
- (llm-request-sync (llm-vertex--chat-url provider)
- :headers `(("Authorization" . ,(format "Bearer %s"
(llm-vertex-key provider))))
- :data (llm-vertex--chat-request prompt))))
+ (llm-request-plz-sync (llm-vertex--chat-url provider)
+ :headers `(("Authorization" . ,(format "Bearer %s"
(llm-vertex-key provider))))
+ :data (llm-vertex--chat-request prompt))))
(cl-defmethod llm-chat-async ((provider llm-vertex) prompt response-callback
error-callback)
(llm-vertex-refresh-key provider)
(let ((buf (current-buffer)))
- (llm-request-async (llm-vertex--chat-url provider)
- :headers `(("Authorization" . ,(format "Bearer %s"
(llm-vertex-key provider))))
- :data (llm-vertex--chat-request prompt)
- :on-success (lambda (data)
- (llm-request-callback-in-buffer
- buf response-callback
- (llm-vertex--process-and-return
- provider prompt data)))
- :on-error (lambda (_ data)
- (llm-request-callback-in-buffer buf
error-callback 'error
-
(llm-vertex--error-message data))))))
+ (llm-request-plz-async (llm-vertex--chat-url provider)
+ :headers `(("Authorization" . ,(format "Bearer %s"
(llm-vertex-key provider))))
+ :data (llm-vertex--chat-request prompt)
+ :on-success (lambda (data)
+ (llm-request-callback-in-buffer
+ buf response-callback
+ (llm-vertex--process-and-return
+ provider prompt data)))
+ :on-error (lambda (_ data)
+ (llm-request-callback-in-buffer buf
error-callback 'error
+
(llm-vertex--error-message data))))))
(cl-defmethod llm-chat-streaming ((provider llm-vertex) prompt
partial-callback response-callback error-callback)
(llm-vertex-refresh-key provider)
- [elpa] externals/llm updated (efe218ac13 -> 478afbcb41), ELPA Syncer, 2024/05/17
- [elpa] externals/llm 1f3b018dcb 03/71: Merge pull request #26 from r0man/plz, ELPA Syncer, 2024/05/17
- [elpa] externals/llm 829bedabe6 04/71: Support for vertex embeddings, adding callback compatibility, ELPA Syncer, 2024/05/17
- [elpa] externals/llm 2ac956a060 05/71: Add support for the application/x-ndjson media type, ELPA Syncer, 2024/05/17
- [elpa] externals/llm 63f2b8ffbc 10/71: Merge branch 'main' into plz, ELPA Syncer, 2024/05/17
- [elpa] externals/llm a9cd296cd8 02/71: Add llm-request-plz.el, ELPA Syncer, 2024/05/17
- [elpa] externals/llm f33475eeae 01/71: Add plz and media type and event source extensions, ELPA Syncer, 2024/05/17
- [elpa] externals/llm 41d0889bcb 06/71: Make sync use of plz return data instead of object, converted Gemini,
ELPA Syncer <=
- [elpa] externals/llm 84678edfae 07/71: Merge pull request #28 from r0man/plz, ELPA Syncer, 2024/05/17
- [elpa] externals/llm 9541d34656 08/71: Enable streaming in Open AI with plz modifications, ELPA Syncer, 2024/05/17
- [elpa] externals/llm c9ab8664ce 09/71: Support function streaming with Open AI & plz, ELPA Syncer, 2024/05/17
- [elpa] externals/llm 3988fecb53 11/71: Make separate function for event streaming, w/ client-side handlers, ELPA Syncer, 2024/05/17
- [elpa] externals/llm 9ce3d9e003 13/71: Port claude to llm-request-plz, ELPA Syncer, 2024/05/17
- [elpa] externals/llm f9213b981c 14/71: Use the plz request module for everything in Claude, ELPA Syncer, 2024/05/17
- [elpa] externals/llm cdbb41528c 18/71: Fix issue advancing the process buffer, ELPA Syncer, 2024/05/17
- [elpa] externals/llm 3441784ae2 19/71: Fix error handling., ELPA Syncer, 2024/05/17
- [elpa] externals/llm 6f9c604e58 20/71: Decode body and chunks using a coding system, ELPA Syncer, 2024/05/17
- [elpa] externals/llm 62495de57f 21/71: Don't decode error response twice, ELPA Syncer, 2024/05/17