basic.py (generated from github/codespaces-models)
"""This sample demonstrates a basic call to the chat completion API.
It is leveraging your endpoint and key. The call is synchronous."""
import os
from openai import OpenAI
token = os.environ["GITHUB_TOKEN"]
endpoint = "https://models.inference.ai.azure.com"
# Pick one of the Azure OpenAI models from the GitHub Models service
model_name = "gpt-4o-mini"
client = OpenAI(
base_url=endpoint,
api_key=token,
)
response = client.chat.completions.create(
messages=[
{
"role": "system",
"content": "You are a helpful assistant.",
},
{
"role": "user",
"content": "What is the capital of France?",
},
],
model=model_name,
# Optional parameters
temperature=1.,
max_tokens=1000,
top_p=1.
)
print(response.choices[0].message.content)
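
Beyond the generated text, the ChatCompletion object returned above also carries metadata such as the model that served the request and token usage. A minimal sketch of inspecting it, assuming the `response` variable from the sample above and the openai v1.x Python SDK:

# Sketch: inspect metadata on the ChatCompletion returned by basic.py.
# Assumes `response` from the call above (openai v1.x Python SDK).
print(response.model)  # model that actually served the request
if response.usage is not None:
    print(response.usage.prompt_tokens)      # tokens consumed by the prompt
    print(response.usage.completion_tokens)  # tokens generated in the reply
    print(response.usage.total_tokens)       # sum of both

This can be useful when experimenting with `max_tokens`, since it shows how much of the budget the reply actually consumed.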