@@ -35,28 +35,80 @@ def test_ollama_llm_missing_dependency(mock_import: Mock) -> None:
 
 
 @patch("builtins.__import__")
-def test_ollama_llm_happy_path(mock_import: Mock) -> None:
+def test_ollama_llm_happy_path_deprecated_options(mock_import: Mock) -> None:
     mock_ollama = get_mock_ollama()
     mock_import.return_value = mock_ollama
     mock_ollama.Client.return_value.chat.return_value = MagicMock(
         message=MagicMock(content="ollama chat response"),
     )
     model = "gpt"
     model_params = {"temperature": 0.3}
+    with pytest.warns(DeprecationWarning) as record:
+        llm = OllamaLLM(
+            model,
+            model_params=model_params,
+        )
+    assert len(record) == 1
+    assert isinstance(record[0].message, Warning)
+    assert (
+        'you must use model_params={"options": {"temperature": 0}}'
+        in record[0].message.args[0]
+    )
+
+    question = "What is graph RAG?"
+    res = llm.invoke(question)
+    assert isinstance(res, LLMResponse)
+    assert res.content == "ollama chat response"
+    messages = [
+        {"role": "user", "content": question},
+    ]
+    llm.client.chat.assert_called_once_with(  # type: ignore[attr-defined]
+        model=model, messages=messages, options={"temperature": 0.3}
+    )
+
+
+@patch("builtins.__import__")
+def test_ollama_llm_unsupported_streaming(mock_import: Mock) -> None:
+    mock_ollama = get_mock_ollama()
+    mock_import.return_value = mock_ollama
+    mock_ollama.Client.return_value.chat.return_value = MagicMock(
+        message=MagicMock(content="ollama chat response"),
+    )
+    model = "gpt"
+    model_params = {"stream": True}
+    with pytest.raises(ValueError):
+        OllamaLLM(
+            model,
+            model_params=model_params,
+        )
+
+
+@patch("builtins.__import__")
+def test_ollama_llm_happy_path(mock_import: Mock) -> None:
+    mock_ollama = get_mock_ollama()
+    mock_import.return_value = mock_ollama
+    mock_ollama.Client.return_value.chat.return_value = MagicMock(
+        message=MagicMock(content="ollama chat response"),
+    )
+    model = "gpt"
+    options = {"temperature": 0.3}
+    model_params = {"options": options, "format": "json"}
     question = "What is graph RAG?"
     llm = OllamaLLM(
-        model,
+        model_name=model,
         model_params=model_params,
     )
-
     res = llm.invoke(question)
     assert isinstance(res, LLMResponse)
     assert res.content == "ollama chat response"
     messages = [
         {"role": "user", "content": question},
     ]
     llm.client.chat.assert_called_once_with(  # type: ignore[attr-defined]
-        model=model, messages=messages, options=model_params
+        model=model,
+        messages=messages,
+        options=options,
+        format="json",
     )
 
 
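The hunk above encodes the commit's new parameter-normalization contract: a flat `model_params` dict is deprecated in favor of nesting generation settings under `"options"`, and `stream=True` is rejected at construction time. A hypothetical shim consistent with these tests (an illustration only, not the commit's actual implementation; the function name is invented) could look like:

```python
import warnings
from typing import Any


def normalize_model_params(model_params: dict[str, Any]) -> dict[str, Any]:
    # Streaming is unsupported: the tests expect ValueError at construction.
    if model_params.get("stream"):
        raise ValueError("streaming is not supported by OllamaLLM")
    # Flat params are folded under "options", emitting the deprecation
    # message the tests assert on as a substring.
    if "options" not in model_params:
        warnings.warn(
            'you must use model_params={"options": {"temperature": 0}} instead',
            DeprecationWarning,
        )
        return {"options": dict(model_params)}
    return model_params
```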
@@ -68,7 +120,8 @@ def test_ollama_invoke_with_system_instruction_happy_path(mock_import: Mock) -> None:
         message=MagicMock(content="ollama chat response"),
     )
     model = "gpt"
-    model_params = {"temperature": 0.3}
+    options = {"temperature": 0.3}
+    model_params = {"options": options, "format": "json"}
     llm = OllamaLLM(
         model,
         model_params=model_params,
@@ -81,7 +134,10 @@ def test_ollama_invoke_with_system_instruction_happy_path(mock_import: Mock) -> None:
     messages = [{"role": "system", "content": system_instruction}]
     messages.append({"role": "user", "content": question})
     llm.client.chat.assert_called_once_with(  # type: ignore[attr-defined]
-        model=model, messages=messages, options=model_params
+        model=model,
+        messages=messages,
+        options=options,
+        format="json",
     )
 
 
@@ -93,7 +149,8 @@ def test_ollama_invoke_with_message_history_happy_path(mock_import: Mock) -> None:
         message=MagicMock(content="ollama chat response"),
     )
     model = "gpt"
-    model_params = {"temperature": 0.3}
+    options = {"temperature": 0.3}
+    model_params = {"options": options}
     llm = OllamaLLM(
         model,
         model_params=model_params,
@@ -109,7 +166,7 @@ def test_ollama_invoke_with_message_history_happy_path(mock_import: Mock) -> None:
     messages = [m for m in message_history]
     messages.append({"role": "user", "content": question})
     llm.client.chat.assert_called_once_with(  # type: ignore[attr-defined]
-        model=model, messages=messages, options=model_params
+        model=model, messages=messages, options=options
     )
 
 
@@ -123,7 +180,8 @@ def test_ollama_invoke_with_message_history_and_system_instruction(
         message=MagicMock(content="ollama chat response"),
     )
     model = "gpt"
-    model_params = {"temperature": 0.3}
+    options = {"temperature": 0.3}
+    model_params = {"options": options}
     system_instruction = "You are a helpful assistant."
     llm = OllamaLLM(
         model,
@@ -145,7 +203,7 @@ def test_ollama_invoke_with_message_history_and_system_instruction(
     messages.extend(message_history)
     messages.append({"role": "user", "content": question})
     llm.client.chat.assert_called_once_with(  # type: ignore[attr-defined]
-        model=model, messages=messages, options=model_params
+        model=model, messages=messages, options=options
     )
     assert llm.client.chat.call_count == 1  # type: ignore
 
@@ -156,7 +214,8 @@ def test_ollama_invoke_with_message_history_validation_error(mock_import: Mock) -> None:
     mock_import.return_value = mock_ollama
     mock_ollama.ResponseError = ollama.ResponseError
     model = "gpt"
-    model_params = {"temperature": 0.3}
+    options = {"temperature": 0.3}
+    model_params = {"options": options}
     system_instruction = "You are a helpful assistant."
     llm = OllamaLLM(
         model,
@@ -187,7 +246,8 @@ async def mock_chat_async(*args: Any, **kwargs: Any) -> MagicMock:
 
     mock_ollama.AsyncClient.return_value.chat = mock_chat_async
     model = "gpt"
-    model_params = {"temperature": 0.3}
+    options = {"temperature": 0.3}
+    model_params = {"options": options}
     question = "What is graph RAG?"
     llm = OllamaLLM(
         model,
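Taken together, the updated tests pin down `OllamaLLM`'s reworked calling convention: generation settings live under `model_params["options"]` and are forwarded as `client.chat(options=...)`, while any other top-level key (here `"format"`) is passed through as an extra `chat()` keyword argument. A minimal usage sketch under those assumptions (the import path is assumed from this repo's package layout, and a locally running Ollama server with the named model is assumed; neither is shown in the diff):

```python
from neo4j_graphrag.llm import OllamaLLM  # assumed import path

llm = OllamaLLM(
    model_name="llama3",  # hypothetical locally pulled model
    model_params={
        "options": {"temperature": 0.3},  # forwarded as chat(options=...)
        "format": "json",                 # forwarded as a top-level chat() kwarg
    },
)
res = llm.invoke("What is graph RAG?")
print(res.content)
```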