Gemini-pro-vision
Given a prompt, the model returns one or more predicted chat completions, and can also return the probabilities of alternative tokens at each position.
Creates a chat completion for the provided prompt and parameters.
POST
/v1/chat/completions
Example request
{
  "model": "gemini-pro-vision",
  "stream": false,
  "messages": [
    {
      "role": "user",
      "content": [
        {
          "type": "text",
          "text": "What's in this picture?"
        },
        {
          "type": "image_url",
          "image_url": {
            "url": "https://github.com/dianping/cat/raw/master/cat-home/src/main/webapp/images/logo/cat_logo03.png"
          }
        }
      ]
    }
  ],
  "max_tokens": 400
}
var myHeaders = new Headers();
myHeaders.append("Accept", "application/json");
myHeaders.append("Authorization", "Bearer {{YOUR_API_KEY}}");
myHeaders.append("Content-Type", "application/json");

var raw = JSON.stringify({
  "model": "gemini-pro-vision",
  "stream": false,
  "messages": [
    {
      "role": "user",
      "content": [
        {
          "type": "text",
          "text": "What's in this picture?"
        },
        {
          "type": "image_url",
          "image_url": {
            "url": "https://github.com/dianping/cat/raw/master/cat-home/src/main/webapp/images/logo/cat_logo03.png"
          }
        }
      ]
    }
  ],
  "max_tokens": 400
});

var requestOptions = {
  method: 'POST',
  headers: myHeaders,
  body: raw,
  redirect: 'follow'
};

fetch("/v1/chat/completions", requestOptions)
  .then(response => response.text())
  .then(result => console.log(result))
  .catch(error => console.log('error', error));
import http.client
import json

conn = http.client.HTTPSConnection("")
payload = json.dumps({
  "model": "gemini-pro-vision",
  "stream": False,
  "messages": [
    {
      "role": "user",
      "content": [
        {
          "type": "text",
          "text": "What's in this picture?"
        },
        {
          "type": "image_url",
          "image_url": {
            "url": "https://github.com/dianping/cat/raw/master/cat-home/src/main/webapp/images/logo/cat_logo03.png"
          }
        }
      ]
    }
  ],
  "max_tokens": 400
})
headers = {
  'Accept': 'application/json',
  'Authorization': 'Bearer {{YOUR_API_KEY}}',
  'Content-Type': 'application/json'
}
conn.request("POST", "/v1/chat/completions", payload, headers)
res = conn.getresponse()
data = res.read()
print(data.decode("utf-8"))
require "uri"
require "json"
require "net/http"
url = URI("/v1/chat/completions")
http = Net::HTTP.new(url.host, url.port);
request = Net::HTTP::Post.new(url)
request["Accept"] = "application/json"
request["Authorization"] = "Bearer {{YOUR_API_KEY}}"
request["Content-Type"] = "application/json"
request.body = JSON.dump({
"model": "gemini-pro-vision",
"stream": false,
"messages": [
{
"role": "user",
"content": [
{
"type": "text",
"text": "What's in this picture?"
},
{
"type": "image_url",
"image_url": {
"url": "https://github.com/dianping/cat/raw/master/cat-home/src/main/webapp/images/logo/cat_logo03.png"
}
}
]
}
],
"max_tokens": 400
})
response = http.request(request)
puts response.read_body
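Because the request and response bodies follow the OpenAI chat-completions format, the same call can likely be made with the official openai Python SDK as well. The sketch below assumes an OpenAI-compatible endpoint; the base URL and API key shown are placeholders, not values taken from this page.

# Sketch: calling /v1/chat/completions via the openai Python SDK (>= 1.0).
# base_url and api_key are placeholders; substitute your service host and key.
from openai import OpenAI

client = OpenAI(
    base_url="https://YOUR_API_HOST/v1",  # assumed base URL, not from this page
    api_key="YOUR_API_KEY",
)

completion = client.chat.completions.create(
    model="gemini-pro-vision",
    stream=False,
    max_tokens=400,
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What's in this picture?"},
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "https://github.com/dianping/cat/raw/master/cat-home/src/main/webapp/images/logo/cat_logo03.png"
                    },
                },
            ],
        }
    ],
)

# The SDK returns a parsed object rather than raw JSON text.
print(completion.choices[0].message.content)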
Example response: 200 OK
{
  "id": "chatcmpl-123",
  "object": "chat.completion",
  "created": 1677652288,
  "choices": [
    {
      "index": 0,
      "message": {
        "role": "assistant",
        "content": "\n\nHello there, how may I assist you today?"
      },
      "finish_reason": "stop"
    }
  ],
  "usage": {
    "prompt_tokens": 9,
    "completion_tokens": 12,
    "total_tokens": 21
  }
}
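The assistant's reply and token usage can be read directly from this structure. A minimal sketch, assuming the response body has already been received as a JSON string:

import json

# Sketch: extracting the reply from a chat.completion response body.
# `raw` is assumed to hold the JSON text returned by /v1/chat/completions
# (here, the example response shown above).
raw = '{"id": "chatcmpl-123", "object": "chat.completion", "created": 1677652288, "choices": [{"index": 0, "message": {"role": "assistant", "content": "\\n\\nHello there, how may I assist you today?"}, "finish_reason": "stop"}], "usage": {"prompt_tokens": 9, "completion_tokens": 12, "total_tokens": 21}}'

response = json.loads(raw)
reply = response["choices"][0]["message"]["content"]       # the model's answer text
finish_reason = response["choices"][0]["finish_reason"]    # "stop" when generation ended normally
total_tokens = response["usage"]["total_tokens"]           # prompt + completion tokens used

print(reply.strip())
print(finish_reason, total_tokens)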