Skip to content

Commit 0144b3f

Browse files
Update FNLLM (#1738)
* Add ModelProvider to Query package.
* Spellcheck + others
* Semver
* Fix tests
* Format
* Fix Pyright
* Fix tests
* Fix for smoke tests
* Update fnllm version
* Semver
* Ruff
1 parent 5dd9fc5 commit 0144b3f

File tree

7 files changed

+142
-155
lines changed

7 files changed

+142
-155
lines changed
Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
{
2+
"type": "patch",
3+
"description": "Update fnllm. Remove unused libs."
4+
}

graphrag/cli/query.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -391,7 +391,6 @@ def on_context(context: Any) -> None:
391391
logger.success(f"DRIFT Search Response:\n{response}")
392392
# NOTE: we return the response and context data here purely as a complete demonstration of the API.
393393
# External users should use the API directly to get the response and context data.
394-
# TODO: Map/Reduce Drift Search answer to a single response
395394
return response, context_data
396395

397396

graphrag/config/defaults.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -244,7 +244,6 @@ class GlobalSearchDefaults:
244244
dynamic_search_concurrent_coroutines: int = 16
245245
dynamic_search_max_level: int = 2
246246
chat_model_id: str = DEFAULT_CHAT_MODEL_ID
247-
embedding_model_id: str = DEFAULT_EMBEDDING_MODEL_ID
248247

249248

250249
@dataclass

graphrag/config/init_content.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -151,7 +151,6 @@
151151
152152
global_search:
153153
chat_model_id: {graphrag_config_defaults.global_search.chat_model_id}
154-
embedding_model_id: {graphrag_config_defaults.global_search.embedding_model_id}
155154
map_prompt: "prompts/global_search_map_system_prompt.txt"
156155
reduce_prompt: "prompts/global_search_reduce_system_prompt.txt"
157156
knowledge_prompt: "prompts/global_search_knowledge_system_prompt.txt"

graphrag/index/operations/summarize_communities/community_reports_extractor.py

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,6 @@
33

44
"""A module containing 'CommunityReportsResult' and 'CommunityReportsExtractor' models."""
55

6-
import json
76
import logging
87
import traceback
98
from dataclasses import dataclass
@@ -86,11 +85,7 @@ async def __call__(self, inputs: dict[str, Any]):
8685
model_parameters={"max_tokens": self._max_report_length},
8786
)
8887

89-
# TODO: Json mode is currently broken on fnllm: https://github.com/microsoft/essex-toolkit/issues/364
90-
# once fixed, just assign to output the response.parsed_json
91-
output = CommunityReportResponse.model_validate(
92-
json.loads(response.output.content)
93-
)
88+
output = response.parsed_response
9489
except Exception as e:
9590
log.exception("error generating community report")
9691
self._on_error(e, traceback.format_exc(), None)

poetry.lock

Lines changed: 136 additions & 143 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

pyproject.toml

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -57,12 +57,10 @@ lancedb = "^0.17.0"
5757
aiofiles = "^24.1.0"
5858

5959
# LLM
60-
fnllm = {extras = ["azure", "openai"], version = "^0.1.2"}
61-
httpx = "^0.28.1"
60+
fnllm = {extras = ["azure", "openai"], version = "^0.2.3"}
6261
json-repair = "^0.30.3"
6362
openai = "^1.57.0"
6463
nltk = "3.9.1"
65-
tenacity = "^9.0.0"
6664
tiktoken = "^0.8.0"
6765

6866
# Data-Science

Comments: 0