@@ -50,6 +50,15 @@ async def create_openai_chat_client(
             base_url=os.getenv("OLLAMA_ENDPOINT"),
             api_key="nokeyneeded",
         )
+    elif OPENAI_CHAT_HOST == "github":
+        logger.info("Setting up OpenAI client for chat completions using GitHub Models")
+        github_base_url = os.getenv("GITHUB_BASE_URL", "https://models.inference.ai.azure.com")
+        github_model = os.getenv("GITHUB_MODEL", "gpt-4o")
+        logger.info(f"Using GitHub Models with base URL: {github_base_url}, model: {github_model}")
+        openai_chat_client = openai.AsyncOpenAI(
+            base_url=github_base_url,
+            api_key=os.getenv("GITHUB_TOKEN"),
+        )
     else:
         logger.info("Setting up OpenAI client for chat completions using OpenAI.com API key")
         openai_chat_client = openai.AsyncOpenAI(api_key=os.getenv("OPENAICOM_KEY"))
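
For reference, a minimal, self-contained sketch of the client that the new "github" branch constructs, assuming `GITHUB_BASE_URL`, `GITHUB_MODEL`, and `GITHUB_TOKEN` are set as read in the diff above; the chat completion call itself is illustrative usage and not part of this change:

```python
# Illustrative only: builds the same AsyncOpenAI client as the "github" branch above.
import asyncio
import os

import openai


async def main():
    client = openai.AsyncOpenAI(
        base_url=os.getenv("GITHUB_BASE_URL", "https://models.inference.ai.azure.com"),
        api_key=os.getenv("GITHUB_TOKEN"),  # GitHub personal access token with Models access
    )
    response = await client.chat.completions.create(
        model=os.getenv("GITHUB_MODEL", "gpt-4o"),
        messages=[{"role": "user", "content": "Say hello"}],
    )
    print(response.choices[0].message.content)


asyncio.run(main())
```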
@@ -99,6 +108,15 @@ async def create_openai_embed_client(
             base_url=os.getenv("OLLAMA_ENDPOINT"),
             api_key="nokeyneeded",
         )
+    elif OPENAI_EMBED_HOST == "github":
+        logger.info("Setting up OpenAI client for embeddings using GitHub Models")
+        github_base_url = os.getenv("GITHUB_BASE_URL", "https://models.inference.ai.azure.com")
+        github_embed_model = os.getenv("GITHUB_EMBED_MODEL", "text-embedding-3-small")
+        logger.info(f"Using GitHub Models with base URL: {github_base_url}, embedding model: {github_embed_model}")
+        openai_embed_client = openai.AsyncOpenAI(
+            base_url=github_base_url,
+            api_key=os.getenv("GITHUB_TOKEN"),
+        )
     else:
         logger.info("Setting up OpenAI client for embeddings using OpenAI.com API key")
         openai_embed_client = openai.AsyncOpenAI(api_key=os.getenv("OPENAICOM_KEY"))
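
Similarly, a hedged sketch of exercising the embeddings client configured by the "github" branch, using the `GITHUB_EMBED_MODEL` default from the diff; the embeddings call is illustrative usage, not part of the change:

```python
# Illustrative only: same configuration as the embeddings "github" branch above.
import asyncio
import os

import openai


async def main():
    client = openai.AsyncOpenAI(
        base_url=os.getenv("GITHUB_BASE_URL", "https://models.inference.ai.azure.com"),
        api_key=os.getenv("GITHUB_TOKEN"),
    )
    result = await client.embeddings.create(
        model=os.getenv("GITHUB_EMBED_MODEL", "text-embedding-3-small"),
        input="sample text to embed",
    )
    print(len(result.data[0].embedding))  # dimensionality of the returned vector


asyncio.run(main())
```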