diff --git a/.semversioner/next-release/patch-20250519184041368529.json b/.semversioner/next-release/patch-20250519184041368529.json
new file mode 100644
index 0000000000..c72646235b
--- /dev/null
+++ b/.semversioner/next-release/patch-20250519184041368529.json
@@ -0,0 +1,4 @@
+{
+    "type": "patch",
+    "description": "[Bugfix]: max_length is not passed in the prompt template reduce_system_prompt in Global search.py"
+}
diff --git a/graphrag/query/structured_search/global_search/search.py b/graphrag/query/structured_search/global_search/search.py
index b7f75a43ee..e8b93754fd 100644
--- a/graphrag/query/structured_search/global_search/search.py
+++ b/graphrag/query/structured_search/global_search/search.py
@@ -184,6 +184,7 @@ async def search(
         reduce_response = await self._reduce_response(
             map_responses=map_responses,
             query=query,
+            max_length=self.reduce_max_length,
             **self.reduce_llm_params,
         )
         llm_calls["reduce"] = reduce_response.llm_calls
@@ -297,6 +298,7 @@ async def _reduce_response(
         self,
         map_responses: list[SearchResult],
         query: str,
+        max_length: int,
         **llm_kwargs,
     ) -> SearchResult:
         """Combine all intermediate responses from single batches into a final answer to the user query."""
@@ -371,7 +373,9 @@ async def _reduce_response(
         text_data = "\n\n".join(data)
 
         search_prompt = self.reduce_system_prompt.format(
-            report_data=text_data, response_type=self.response_type
+            report_data=text_data,
+            response_type=self.response_type,
+            max_length=max_length,
         )
         if self.allow_general_knowledge:
             search_prompt += "\n" + self.general_knowledge_inclusion_prompt
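
For context, the failure mode this patch addresses is generic `str.format` behavior: if the `reduce_system_prompt` template contains a `{max_length}` placeholder, formatting it without that keyword raises `KeyError`. The sketch below is a minimal reproduction under that assumption; `TEMPLATE` is a hypothetical stand-in, not the actual GraphRAG reduce prompt, and `2000` is an illustrative value for `reduce_max_length`.

```python
# Hypothetical stand-in for reduce_system_prompt: the real GraphRAG prompt
# is much longer, but the relevant detail is the {max_length} placeholder.
TEMPLATE = (
    "---Target response length and format---\n"
    "{response_type}\n"
    "Limit the response to {max_length} words.\n"
    "---Analyst reports---\n"
    "{report_data}\n"
)

# Before the fix: max_length is never supplied, so str.format raises KeyError.
try:
    TEMPLATE.format(report_data="...", response_type="multiple paragraphs")
except KeyError as err:
    print(f"KeyError: {err}")  # KeyError: 'max_length'

# After the fix: the value threaded through from self.reduce_max_length is
# passed explicitly, so formatting succeeds.
prompt = TEMPLATE.format(
    report_data="...",
    response_type="multiple paragraphs",
    max_length=2000,  # illustrative; actual value comes from reduce_max_length
)
```

This also explains the shape of the change: `max_length` is threaded as an explicit parameter through `_reduce_response` rather than folded into `**llm_kwargs`, since `reduce_llm_params` is forwarded to the LLM call, not to the prompt template.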