Skip to content

Commit aa3e0b5

Browse files
committed
Clarify source_nodes type
1 parent baf3640 commit aa3e0b5

File tree

15 files changed

+143
-95
lines changed

15 files changed

+143
-95
lines changed

graphdatascience/procedure_surface/api/centrality/articlerank_endpoints.py

Lines changed: 30 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@ def mutate(
3131
concurrency: int | None = None,
3232
job_id: str | None = None,
3333
relationship_weight_property: str | None = None,
34-
source_nodes: Any | None = None,
34+
source_nodes: int | list[int] | list[tuple[int, float]] | None = None,
3535
) -> ArticleRankMutateResult:
3636
"""
3737
Runs the Article Rank algorithm and stores the results in the graph catalog as a new node property.
@@ -74,8 +74,11 @@ def mutate(
7474
Identifier for the job.
7575
relationship_weight_property : str | None, default=None
7676
Name of the property to be used as weights.
77-
source_nodes : Any | None, default=None
78-
List of node ids to use as starting points. Use a list of list pairs to associate each node with a bias > 0.
77+
source_nodes : int | list[int] | list[tuple[int, float]] | None, default=None
78+
Node ids to use as starting points. Can be:
79+
- a single node id (e.g., 42)
80+
- a list of node ids (e.g., [42, 43, 44])
81+
- a list of tuples to associate each node with a bias > 0 (e.g., [(42, 0.5), (43, 1.0)])
7982
8083
Returns
8184
-------
@@ -100,7 +103,7 @@ def stats(
100103
concurrency: int | None = None,
101104
job_id: str | None = None,
102105
relationship_weight_property: str | None = None,
103-
source_nodes: Any | None = None,
106+
source_nodes: int | list[int] | list[tuple[int, float]] | None = None,
104107
) -> ArticleRankStatsResult:
105108
"""
106109
Runs the Article Rank algorithm and returns result statistics without storing the results.
@@ -141,8 +144,11 @@ def stats(
141144
Identifier for the job.
142145
relationship_weight_property : str | None, default=None
143146
Name of the property to be used as weights.
144-
source_nodes : Any | None, default=None
145-
List of node ids to use as starting points. Use a list of list pairs to associate each node with a bias > 0.
147+
source_nodes : int | list[int] | list[tuple[int, float]] | None, default=None
148+
Node ids to use as starting points. Can be:
149+
- a single node id (e.g., 42)
150+
- a list of node ids (e.g., [42, 43, 44])
151+
- a list of tuples to associate each node with a bias > 0 (e.g., [(42, 0.5), (43, 1.0)])
146152
147153
Returns
148154
-------
@@ -167,7 +173,7 @@ def stream(
167173
concurrency: int | None = None,
168174
job_id: str | None = None,
169175
relationship_weight_property: str | None = None,
170-
source_nodes: Any | None = None,
176+
source_nodes: int | list[int] | list[tuple[int, float]] | None = None,
171177
) -> DataFrame:
172178
"""
173179
Executes the ArticleRank algorithm and returns the results as a stream.
@@ -200,8 +206,11 @@ def stream(
200206
An identifier for the job
201207
relationship_weight_property : str | None, default=None
202208
The property name that contains weight
203-
source_nodes : Any | None, default=None
204-
The source nodes for personalized ArticleRank
209+
source_nodes : int | list[int] | list[tuple[int, float]] | None, default=None
210+
Node ids to use as starting points. Can be:
211+
- a single node id (e.g., 42)
212+
- a list of node ids (e.g., [42, 43, 44])
213+
- a list of tuples to associate each node with a bias > 0 (e.g., [(42, 0.5), (43, 1.0)])
205214
206215
Returns
207216
-------
@@ -227,7 +236,7 @@ def write(
227236
concurrency: int | None = None,
228237
job_id: str | None = None,
229238
relationship_weight_property: str | None = None,
230-
source_nodes: Any | None = None,
239+
source_nodes: int | list[int] | list[tuple[int, float]] | None = None,
231240
write_concurrency: int | None = None,
232241
) -> ArticleRankWriteResult:
233242
"""
@@ -271,8 +280,11 @@ def write(
271280
Identifier for the job.
272281
relationship_weight_property : str | None, default=None
273282
Name of the property to be used as weights.
274-
source_nodes : Any | None, default=None
275-
List of node ids to use as starting points. Use a list of list pairs to associate each node with a bias > 0.
283+
source_nodes : int | list[int] | list[tuple[int, float]] | None, default=None
284+
Node ids to use as starting points. Can be:
285+
- a single node id (e.g., 42)
286+
- a list of node ids (e.g., [42, 43, 44])
287+
- a list of tuples to associate each node with a bias > 0 (e.g., [(42, 0.5), (43, 1.0)])
276288
write_concurrency : int | None, default=None
277289
The number of concurrent threads used for writing
278290
@@ -295,7 +307,7 @@ def estimate(
295307
node_labels: list[str] = ALL_LABELS,
296308
concurrency: int | None = None,
297309
relationship_weight_property: str | None = None,
298-
source_nodes: Any | None = None,
310+
source_nodes: int | list[int] | list[tuple[int, float]] | None = None,
299311
) -> EstimationResult:
300312
"""
301313
Estimate the memory consumption of an algorithm run.
@@ -320,8 +332,11 @@ def estimate(
320332
The number of concurrent threads
321333
relationship_weight_property : str | None, default=None
322334
The property name that contains weight
323-
source_nodes : Any | None, default=None
324-
The source nodes for personalized ArticleRank
335+
source_nodes : int | list[int] | list[tuple[int, float]] | None, default=None
336+
Node ids to use as starting points. Can be:
337+
- a single node id (e.g., 42)
338+
- a list of node ids (e.g., [42, 43, 44])
339+
- a list of tuples to associate each node with a bias > 0 (e.g., [(42, 0.5), (43, 1.0)])
325340
326341
Returns
327342
-------

graphdatascience/procedure_surface/api/centrality/degree_endpoints.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ def mutate(
4141
The graph to run the algorithm on
4242
mutate_property : str
4343
Name of the node property to store the results in.
44-
orientation : Any | None, default=None
44+
orientation : str | None, default=None
4545
The orientation of relationships to consider. Can be 'NATURAL', 'REVERSE', or 'UNDIRECTED'.
4646
relationship_types : list[str]
4747
Filter the graph using the given relationship types. Relationships with any of the given types will be included.
@@ -93,7 +93,7 @@ def stats(
9393
----------
9494
G : GraphV2
9595
The graph to run the algorithm on
96-
orientation : Any | None, default=None
96+
orientation : str | None, default=None
9797
The orientation of relationships to consider. Can be 'NATURAL', 'REVERSE', or 'UNDIRECTED'.
9898
relationship_types : list[str]
9999
Filter the graph using the given relationship types. Relationships with any of the given types will be included.
@@ -140,7 +140,7 @@ def stream(
140140
----------
141141
G : GraphV2
142142
The graph to run the algorithm on
143-
orientation : Any | None, default=None
143+
orientation : str | None, default=None
144144
The orientation of relationships to consider. Can be 'NATURAL', 'REVERSE', or 'UNDIRECTED'.
145145
'NATURAL' (default) respects the direction of relationships as they are stored in the graph.
146146
'REVERSE' treats each relationship as if it were directed in the opposite direction.
@@ -203,7 +203,7 @@ def write(
203203
The graph to run the algorithm on
204204
write_property : str
205205
The property name to store the degree centrality score for each node in the database
206-
orientation : Any | None, default=None
206+
orientation : str | None, default=None
207207
The orientation of relationships to consider. Can be 'NATURAL', 'REVERSE', or 'UNDIRECTED'.
208208
relationship_types : list[str]
209209
Filter the graph using the given relationship types. Relationships with any of the given types will be included.
@@ -248,7 +248,7 @@ def estimate(
248248
----------
249249
G : GraphV2 | dict[str, Any]
250250
The graph to run the algorithm on or a dictionary representing the graph.
251-
orientation : Any | None, default=None
251+
orientation : str | None, default=None
252252
The orientation of relationships to consider. Can be 'NATURAL', 'REVERSE', or 'UNDIRECTED'.
253253
relationship_types : list[str]
254254
The relationship types used to select relationships for this algorithm run

graphdatascience/procedure_surface/api/centrality/eigenvector_endpoints.py

Lines changed: 30 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@ def mutate(
2020
mutate_property: str,
2121
max_iterations: int = 20,
2222
tolerance: float = 1.0e-7,
23-
source_nodes: Any | None = None,
23+
source_nodes: int | list[int] | list[tuple[int, float]] | None = None,
2424
scaler: str | dict[str, str | int | float] | ScalerConfig = "NONE",
2525
relationship_weight_property: str | None = None,
2626
relationship_types: list[str] = ALL_TYPES,
@@ -49,8 +49,11 @@ def mutate(
4949
The maximum number of iterations to run the algorithm
5050
tolerance : float
5151
The tolerance for convergence detection
52-
source_nodes : Any | None, default=None
53-
The source nodes to start the computation from
52+
source_nodes : int | list[int] | list[tuple[int, float]] | None, default=None
53+
Node ids to use as starting points. Can be:
54+
- a single node id (e.g., 42)
55+
- a list of node ids (e.g., [42, 43, 44])
56+
- a list of tuples to associate each node with a bias > 0 (e.g., [(42, 0.5), (43, 1.0)])
5457
scaler : str | dict[str, str | int | float] | ScalerConfig, default="NONE"
5558
The scaler to use. Can be:
5659
- A string (e.g., 'MinMax', 'Mean', 'Max', 'Log', 'StdScore', 'Center', 'NONE')
@@ -88,7 +91,7 @@ def stats(
8891
G: GraphV2,
8992
max_iterations: int = 20,
9093
tolerance: float = 1.0e-7,
91-
source_nodes: Any | None = None,
94+
source_nodes: int | list[int] | list[tuple[int, float]] | None = None,
9295
scaler: str | dict[str, str | int | float] | ScalerConfig = "NONE",
9396
relationship_weight_property: str | None = None,
9497
relationship_types: list[str] = ALL_TYPES,
@@ -115,8 +118,11 @@ def stats(
115118
The maximum number of iterations to run the algorithm
116119
tolerance : float
117120
The tolerance for convergence detection
118-
source_nodes : Any | None, default=None
119-
The source nodes to start the computation from
121+
source_nodes : int | list[int] | list[tuple[int, float]] | None, default=None
122+
Node ids to use as starting points. Can be:
123+
- a single node id (e.g., 42)
124+
- a list of node ids (e.g., [42, 43, 44])
125+
- a list of tuples to associate each node with a bias > 0 (e.g., [(42, 0.5), (43, 1.0)])
120126
scaler : str | dict[str, str | int | float] | ScalerConfig, default="NONE"
121127
The scaler to use. Can be:
122128
- A string (e.g., 'MinMax', 'Mean', 'Max', 'Log', 'StdScore', 'Center', 'NONE')
@@ -154,7 +160,7 @@ def stream(
154160
G: GraphV2,
155161
max_iterations: int = 20,
156162
tolerance: float = 1.0e-7,
157-
source_nodes: Any | None = None,
163+
source_nodes: int | list[int] | list[tuple[int, float]] | None = None,
158164
scaler: str | dict[str, str | int | float] | ScalerConfig = "NONE",
159165
relationship_weight_property: str | None = None,
160166
relationship_types: list[str] = ALL_TYPES,
@@ -176,8 +182,11 @@ def stream(
176182
The maximum number of iterations to run the algorithm
177183
tolerance : float
178184
The tolerance for convergence detection
179-
source_nodes : Any | None, default=None
180-
The source nodes to start the computation from
185+
source_nodes : int | list[int] | list[tuple[int, float]] | None, default=None
186+
Node ids to use as starting points. Can be:
187+
- a single node id (e.g., 42)
188+
- a list of node ids (e.g., [42, 43, 44])
189+
- a list of tuples to associate each node with a bias > 0 (e.g., [(42, 0.5), (43, 1.0)])
181190
scaler : str | dict[str, str | int | float] | ScalerConfig, default="NONE"
182191
The scaler to use. Can be:
183192
- A string (e.g., 'MinMax', 'Mean', 'Max', 'Log', 'StdScore', 'Center', 'NONE')
@@ -216,7 +225,7 @@ def write(
216225
write_property: str,
217226
max_iterations: int = 20,
218227
tolerance: float = 1.0e-7,
219-
source_nodes: Any | None = None,
228+
source_nodes: int | list[int] | list[tuple[int, float]] | None = None,
220229
scaler: str | dict[str, str | int | float] | ScalerConfig = "NONE",
221230
relationship_weight_property: str | None = None,
222231
relationship_types: list[str] = ALL_TYPES,
@@ -246,8 +255,11 @@ def write(
246255
The maximum number of iterations to run the algorithm
247256
tolerance : float
248257
The tolerance for convergence detection
249-
source_nodes : Any | None, default=None
250-
The source nodes to start the computation from
258+
source_nodes : int | list[int] | list[tuple[int, float]] | None, default=None
259+
Node ids to use as starting points. Can be:
260+
- a single node id (e.g., 42)
261+
- a list of node ids (e.g., [42, 43, 44])
262+
- a list of tuples to associate each node with a bias > 0 (e.g., [(42, 0.5), (43, 1.0)])
251263
scaler : str | dict[str, str | int | float] | ScalerConfig, default="NONE"
252264
The scaler to use. Can be:
253265
- A string (e.g., 'MinMax', 'Mean', 'Max', 'Log', 'StdScore', 'Center', 'NONE')
@@ -287,7 +299,7 @@ def estimate(
287299
G: GraphV2 | dict[str, Any],
288300
max_iterations: int = 20,
289301
tolerance: float = 1.0e-7,
290-
source_nodes: Any | None = None,
302+
source_nodes: int | list[int] | list[tuple[int, float]] | None = None,
291303
scaler: str | dict[str, str | int | float] | ScalerConfig = "NONE",
292304
relationship_weight_property: str | None = None,
293305
relationship_types: list[str] = ALL_TYPES,
@@ -305,8 +317,11 @@ def estimate(
305317
The maximum number of iterations to run the algorithm
306318
tolerance : float
307319
The tolerance for convergence detection
308-
source_nodes : Any | None, default=None
309-
The source nodes to start the computation from
320+
source_nodes : int | list[int] | list[tuple[int, float]] | None, default=None
321+
Node ids to use as starting points. Can be:
322+
- a single node id (e.g., 42)
323+
- a list of node ids (e.g., [42, 43, 44])
324+
- a list of tuples to associate each node with a bias > 0 (e.g., [(42, 0.5), (43, 1.0)])
310325
scaler : str | dict[str, str | int | float] | ScalerConfig, default="NONE"
311326
The scaler to use. Can be:
312327
- A string (e.g., 'MinMax', 'Mean', 'Max', 'Log', 'StdScore', 'Center', 'NONE')

graphdatascience/procedure_surface/api/centrality/pagerank_endpoints.py

Lines changed: 20 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@ def mutate(
3030
concurrency: int | None = None,
3131
job_id: str | None = None,
3232
relationship_weight_property: str | None = None,
33-
source_nodes: Any | None = None,
33+
source_nodes: int | list[int] | list[tuple[int, float]] | None = None,
3434
) -> PageRankMutateResult:
3535
"""
3636
Runs the PageRank algorithm and stores the results in the graph catalog as a new node property.
@@ -72,8 +72,11 @@ def mutate(
7272
Identifier for the job.
7373
relationship_weight_property : str | None, default=None
7474
Name of the property to be used as weights.
75-
source_nodes : Any | None, default=None
76-
List of node ids to use as starting points. Use a list of list pairs to associate each node with a bias > 0.
75+
source_nodes : int | list[int] | list[tuple[int, float]] | None, default=None
76+
Node ids to use as starting points. Can be:
77+
- a single node id (e.g., 42)
78+
- a list of node ids (e.g., [42, 43, 44])
79+
- a list of tuples to associate each node with a bias > 0 (e.g., [(42, 0.5), (43, 1.0)])
7780
7881
Returns
7982
-------
@@ -98,7 +101,7 @@ def stats(
98101
concurrency: int | None = None,
99102
job_id: str | None = None,
100103
relationship_weight_property: str | None = None,
101-
source_nodes: Any | None = None,
104+
source_nodes: int | list[int] | list[tuple[int, float]] | None = None,
102105
) -> PageRankStatsResult:
103106
"""
104107
Runs the PageRank algorithm and returns result statistics without storing the results.
@@ -138,8 +141,11 @@ def stats(
138141
Identifier for the job.
139142
relationship_weight_property : str | None, default=None
140143
Name of the property to be used as weights.
141-
source_nodes : Any | None, default=None
142-
List of node ids to use as starting points. Use a list of list pairs to associate each node with a bias > 0.
144+
source_nodes : int | list[int] | list[tuple[int, float]] | None, default=None
145+
Node ids to use as starting points. Can be:
146+
- a single node id (e.g., 42)
147+
- a list of node ids (e.g., [42, 43, 44])
148+
- a list of tuples to associate each node with a bias > 0 (e.g., [(42, 0.5), (43, 1.0)])
143149
144150
Returns
145151
-------
@@ -164,7 +170,7 @@ def stream(
164170
concurrency: int | None = None,
165171
job_id: str | None = None,
166172
relationship_weight_property: str | None = None,
167-
source_nodes: Any | None = None,
173+
source_nodes: int | list[int] | list[tuple[int, float]] | None = None,
168174
) -> DataFrame:
169175
"""
170176
Executes the PageRank algorithm and returns a stream of results.
@@ -228,7 +234,7 @@ def write(
228234
concurrency: int | None = None,
229235
job_id: str | None = None,
230236
relationship_weight_property: str | None = None,
231-
source_nodes: Any | None = None,
237+
source_nodes: int | list[int] | list[tuple[int, float]] | None = None,
232238
write_concurrency: int | None = None,
233239
) -> PageRankWriteResult:
234240
"""
@@ -271,8 +277,11 @@ def write(
271277
Identifier for the job.
272278
relationship_weight_property : str | None, default=None
273279
Name of the property to be used as weights.
274-
source_nodes : Any | None, default=None
275-
List of node ids to use as starting points. Use a list of list pairs to associate each node with a bias > 0.
280+
source_nodes : int | list[int] | list[tuple[int, float]] | None, default=None
281+
Node ids to use as starting points. Can be:
282+
- a single node id (e.g., 42)
283+
- a list of node ids (e.g., [42, 43, 44])
284+
- a list of tuples to associate each node with a bias > 0 (e.g., [(42, 0.5), (43, 1.0)])
276285
write_concurrency : int | None, default=None
277286
The number of concurrent threads used for writing
278287
@@ -295,7 +304,7 @@ def estimate(
295304
node_labels: list[str] = ALL_LABELS,
296305
concurrency: int | None = None,
297306
relationship_weight_property: str | None = None,
298-
source_nodes: Any | None = None,
307+
source_nodes: int | list[int] | list[tuple[int, float]] | None = None,
299308
) -> EstimationResult:
300309
"""
301310
Estimate the memory consumption of an algorithm run.

0 commit comments

Comments
 (0)