23 commits
595b0f0 - langchain compatible llminterfacev2 interface created (serengil, Oct 29, 2025)
1b4c339 - langchain compatible methods added to openai (serengil, Oct 29, 2025)
ab08ca1 - broken test cases sorted (serengil, Oct 29, 2025)
da94032 - brand new tests for llm interface v2 (serengil, Oct 29, 2025)
2fc6db8 - vertex ai started to support llm interface v2 (serengil, Oct 29, 2025)
6a69656 - brand new test cases added (serengil, Oct 29, 2025)
bdae480 - invoke with tools method is not mandatory (serengil, Oct 29, 2025)
0710a27 - llminterfacev2 support added to ollama (serengil, Oct 29, 2025)
db997da - llm interface v2 supported in mistralai (serengil, Oct 29, 2025)
8c324ab - llm interface v2 support added to cohere (serengil, Oct 29, 2025)
053f70f - llm interface v2 support added to anthropic (serengil, Oct 29, 2025)
1a45c4c - more tests for llminterfacev2 (serengil, Oct 29, 2025)
be9bba1 - mypy fixes (serengil, Oct 30, 2025)
c38ba4c - test_openai_llm possibly failed because of import in ci cd (serengil, Oct 30, 2025)
ce86150 - Attempt CI/CD-compatible async mock for OpenAILLM tests (serengil, Oct 30, 2025)
0e94ddd - Attempt CI/CD-safe async mock for OpenAILLM v1 test (serengil, Oct 30, 2025)
1c05c9d - restoring graphrag e2e tests for v1 (serengil, Oct 30, 2025)
e2dc9de - existing e2e test for graphrag sorted (serengil, Oct 30, 2025)
85a478a - create message history only if langchain compatible branch (serengil, Oct 31, 2025)
abf0efa - typo in docstring (serengil, Oct 31, 2025)
b951fe6 - avoid repeated langchain compatible check code (serengil, Oct 31, 2025)
6fbb2e5 - avoid to create vector idx and fulltext with same property (serengil, Oct 31, 2025)
b7cc001 - avoid to create vector idx and fulltext with same property (serengil, Oct 31, 2025)
108 changes: 81 additions & 27 deletions src/neo4j_graphrag/generation/graphrag.py
@@ -12,29 +12,35 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

# built-in dependencies
from __future__ import annotations
import logging
import warnings
from typing import Any, List, Optional, Union

# 3rd party dependencies
from pydantic import ValidationError

# project dependencies
from neo4j_graphrag.exceptions import (
RagInitializationError,
SearchValidationError,
)
from neo4j_graphrag.generation.prompts import RagTemplate
from neo4j_graphrag.generation.types import RagInitModel, RagResultModel, RagSearchModel
from neo4j_graphrag.llm import LLMInterface
from neo4j_graphrag.llm import LLMInterface, LLMInterfaceV2
from neo4j_graphrag.llm.utils import legacy_inputs_to_messages
from neo4j_graphrag.message_history import MessageHistory
from neo4j_graphrag.retrievers.base import Retriever
from neo4j_graphrag.types import LLMMessage, RetrieverResult
from neo4j_graphrag.utils.logging import prettify

# Set up logger
logger = logging.getLogger(__name__)


# pylint: disable=raise-missing-from
class GraphRAG:
"""Performs a GraphRAG search using a specific retriever
and LLM.
@@ -57,8 +63,10 @@ class GraphRAG:

Args:
retriever (Retriever): The retriever used to find relevant context to pass to the LLM.
llm (LLMInterface): The LLM used to generate the answer.
prompt_template (RagTemplate): The prompt template that will be formatted with context and user question and passed to the LLM.
llm (LLMInterface, LLMInterfaceV2 or LangChain Chat Model): The LLM used to generate
the answer.
prompt_template (RagTemplate): The prompt template that will be formatted with context and
user question and passed to the LLM.

Raises:
RagInitializationError: If validation of the input arguments fail.
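
For orientation, a minimal usage sketch of the widened llm argument (not part of this diff; the retriever, credentials, and model name are assumptions):

# Sketch: GraphRAG now accepts LLMInterface, LLMInterfaceV2, or a LangChain chat model.
# Assumes an already-configured retriever (my_retriever) and a valid OpenAI API key.
from neo4j_graphrag.generation import GraphRAG
from neo4j_graphrag.llm import OpenAILLM

llm = OpenAILLM(model_name="gpt-4o")  # built-in wrapper; a LangChain chat model would also work here
rag = GraphRAG(retriever=my_retriever, llm=llm)

result = rag.search(query_text="Who founded Neo4j?", return_context=True)
print(result.answer)

Either style of model goes through the same search() call; the dispatch between the two code paths happens inside GraphRAG, as shown further down in this file.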
@@ -67,7 +75,7 @@ class GraphRAG:
def __init__(
self,
retriever: Retriever,
llm: LLMInterface,
llm: Union[LLMInterface, LLMInterfaceV2],
prompt_template: RagTemplate = RagTemplate(),
):
try:
@@ -93,7 +101,8 @@ def search(
) -> RagResultModel:
"""
.. warning::
The default value of 'return_context' will change from 'False' to 'True' in a future version.
The default value of 'return_context' will change from 'False'
to 'True' in a future version.


This method performs a full RAG search:
@@ -104,24 +113,30 @@ def search(

Args:
query_text (str): The user question.
message_history (Optional[Union[List[LLMMessage], MessageHistory]]): A collection previous messages,
with each message having a specific role assigned.
message_history (Optional[Union[List[LLMMessage], MessageHistory]]): A collection
of previous messages, with each message having a specific role assigned.
examples (str): Examples added to the LLM prompt.
retriever_config (Optional[dict]): Parameters passed to the retriever.
search method; e.g.: top_k
return_context (bool): Whether to append the retriever result to the final result (default: False).
response_fallback (Optional[str]): If not null, will return this message instead of calling the LLM if context comes back empty.
return_context (bool): Whether to append the retriever result to the final result
(default: False).
response_fallback (Optional[str]): If not null, will return this message instead
of calling the LLM if context comes back empty.

Returns:
RagResultModel: The LLM-generated answer.

"""
if return_context is None:
warnings.warn(
"The default value of 'return_context' will change from 'False' to 'True' in a future version.",
DeprecationWarning,
)
return_context = False
if self.is_langchain_compatible():
return_context = True
else: # e.g. LLMInterface
warnings.warn(
"The default value of 'return_context' will change from 'False'"
[Inline review thread on the reformatted warning message]

Contributor: These changes seem to be related to line length only, are they made on purpose? (asking because I know Nathalie had some issues about this in the past)

serengil (Contributor, Author), Oct 31, 2025: True, pylint was warning that this line is too long. The auto-linter in my IDE split it into multiple lines. I can revert them if you think it may cause any issue.

Contributor: It's just that the next person pushing to the repo will have to make the opposite change, so we should agree on a convention. Is this something we should add to a config file in the repo?

" to 'True' in a future version.",
DeprecationWarning,
)
return_context = False
try:
validated_data = RagSearchModel(
query_text=query_text,
@@ -145,13 +160,30 @@ def search(
prompt = self.prompt_template.format(
query_text=query_text, context=context, examples=validated_data.examples
)
logger.debug(f"RAG: retriever_result={prettify(retriever_result)}")
logger.debug(f"RAG: prompt={prompt}")
llm_response = self.llm.invoke(
prompt,
message_history,
system_instruction=self.prompt_template.system_instructions,
)

logger.debug("RAG: retriever_result=%s", prettify(retriever_result))
logger.debug("RAG: prompt=%s", prompt)

if self.is_langchain_compatible():
messages = legacy_inputs_to_messages(
prompt=prompt,
message_history=message_history,
system_instruction=self.prompt_template.system_instructions,
)

# langchain chat model compatible invoke
llm_response = self.llm.invoke(
input=messages,
)
elif isinstance(self.llm, LLMInterface):
# may have custom LLMs inherited from V1, keep it for backward compatibility
llm_response = self.llm.invoke(
input=prompt,
message_history=message_history,
system_instruction=self.prompt_template.system_instructions,
)
else:
raise ValueError(f"Type {type(self.llm)} of LLM is not supported.")
answer = llm_response.content
result: dict[str, Any] = {"answer": answer}
if return_context:
@@ -163,18 +195,40 @@ def _build_query(
query_text: str,
message_history: Optional[List[LLMMessage]] = None,
) -> str:
summary_system_message = "You are a summarization assistant. Summarize the given text in no more than 300 words."
"""Builds the final query text, incorporating message history if provided."""
summary_system_message = (
"You are a summarization assistant. "
"Summarize the given text in no more than 300 words."
)
if message_history:
summarization_prompt = self._chat_summary_prompt(
message_history=message_history
)
summary = self.llm.invoke(
input=summarization_prompt,
system_instruction=summary_system_message,
).content
if self.is_langchain_compatible():
messages = legacy_inputs_to_messages(
summarization_prompt,
system_instruction=summary_system_message,
)
summary = self.llm.invoke(
input=messages,
).content
elif isinstance(self.llm, LLMInterface):
summary = self.llm.invoke(
input=summarization_prompt,
system_instruction=summary_system_message,
).content
else:
raise ValueError(f"Type {type(self.llm)} of LLM is not supported.")

return self.conversation_prompt(summary=summary, current_query=query_text)
return query_text

def is_langchain_compatible(self) -> bool:
"""Checks if the LLM is compatible with LangChain."""
return isinstance(self.llm, LLMInterfaceV2) or self.llm.__module__.startswith(
"langchain"
)

def _chat_summary_prompt(self, message_history: List[LLMMessage]) -> str:
message_list = [
f"{message['role']}: {message['content']}" for message in message_history
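
For context on the new is_langchain_compatible() helper: it treats the model as LangChain-style either when it subclasses LLMInterfaceV2 or when its class is defined in a langchain* module. A minimal sketch of that module-name check, assuming the optional langchain-openai package is installed (class and model names are illustrative):

# Sketch of the duck-typing check; no API call is made, so a placeholder key is enough.
from langchain_openai import ChatOpenAI

chat = ChatOpenAI(model="gpt-4o-mini", api_key="placeholder")

# Instance attribute lookup falls back to the class, so __module__ resolves to the class's
# module path (e.g. "langchain_openai.chat_models.base"), which starts with "langchain".
print(chat.__module__.startswith("langchain"))  # True, so GraphRAG takes the LangChain path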
3 changes: 2 additions & 1 deletion src/neo4j_graphrag/llm/__init__.py
@@ -16,7 +16,7 @@
from typing import Any

from .anthropic_llm import AnthropicLLM
from .base import LLMInterface
from .base import LLMInterface, LLMInterfaceV2
from .cohere_llm import CohereLLM
from .mistralai_llm import MistralAILLM
from .ollama_llm import OllamaLLM
@@ -30,6 +30,7 @@
"CohereLLM",
"LLMResponse",
"LLMInterface",
"LLMInterfaceV2",
"OllamaLLM",
"OpenAILLM",
"VertexAILLM",
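
With the export above, downstream code can import both interface generations from the package root; a short sketch:

# Sketch: v1 and v2 base classes now sit side by side in the public API.
from neo4j_graphrag.llm import LLMInterface, LLMInterfaceV2

# Existing custom wrappers can keep inheriting from LLMInterface (backward-compatible path),
# while new ones can target LLMInterfaceV2 or pass a LangChain chat model to GraphRAG directly.
print(LLMInterface.__name__, LLMInterfaceV2.__name__)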