18 | 18 |
19 | 19 | from pymongo import common, periodic_executor |
20 | 20 | from pymongo.errors import OperationFailure |
21 | | -from pymongo.server_type import SERVER_TYPE |
22 | 21 | from pymongo.monotonic import time as _time |
23 | 22 | from pymongo.read_preferences import MovingAverage |
24 | 23 | from pymongo.server_description import ServerDescription |
| 24 | +from pymongo.server_type import SERVER_TYPE |
| 25 | +from pymongo.srv_resolver import _SrvResolver |
| 26 | + |
| 27 | + |
| 28 | +class MonitorBase(object): |
| 29 | + def __init__(self, *args, **kwargs): |
| 30 | + """Override this method to create an executor.""" |
| 31 | + raise NotImplementedError |
| 32 | + |
| 33 | + def open(self): |
| 34 | + """Start monitoring, or restart after a fork. |
| 35 | +
| 36 | + Multiple calls have no effect. |
| 37 | + """ |
| 38 | + self._executor.open() |
| 39 | + |
| 40 | + def close(self): |
| 41 | + """Close and stop monitoring. |
| 42 | +
| 43 | + open() restarts the monitor after closing. |
| 44 | + """ |
| 45 | + self._executor.close() |
| 46 | + |
| 47 | + def join(self, timeout=None): |
| 48 | + """Wait for the monitor to stop.""" |
| 49 | + self._executor.join(timeout) |
| 50 | + |
| 51 | + def request_check(self): |
| 52 | + """If the monitor is sleeping, wake it soon.""" |
| 53 | + self._executor.wake() |
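
The new MonitorBase leaves executor construction entirely to subclasses (its __init__ simply raises NotImplementedError), while open(), close(), join() and request_check() all delegate to whatever self._executor the subclass built. As a rough illustration of that contract only, not code from this patch, a hypothetical subclass could look like the sketch below; the class name and the _check_once helper are invented, and a real subclass would hold a weak reference to itself inside target() the way Monitor and SrvMonitor do.

from pymongo import periodic_executor
from pymongo.monitor import MonitorBase  # the base class added in this patch (module path assumed)


class ExampleMonitor(MonitorBase):
    """Hypothetical subclass: its only obligation is to create self._executor."""

    def __init__(self, interval=10, min_interval=0.5):
        def target():
            self._check_once()  # invented periodic work
            return True         # returning True keeps the periodic executor running

        # The inherited open()/close()/join()/request_check() all operate on
        # this executor; nothing else is required of the subclass.
        self._executor = periodic_executor.PeriodicExecutor(
            interval=interval,
            min_interval=min_interval,
            target=target,
            name="example_monitor_thread")

    def _check_once(self):
        pass
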
25 | 54 |
26 | 55 |
27 | | -class Monitor(object): |
| 56 | +class Monitor(MonitorBase): |
28 | 57 | def __init__( |
29 | 58 | self, |
30 | 59 | server_description, |
@@ -68,31 +97,13 @@ def target(): |
68 | 97 | self_ref = weakref.ref(self, executor.close) |
69 | 98 | self._topology = weakref.proxy(topology, executor.close) |
70 | 99 |
71 | | - def open(self): |
72 | | - """Start monitoring, or restart after a fork. |
73 | | -
74 | | - Multiple calls have no effect. |
75 | | - """ |
76 | | - self._executor.open() |
77 | | - |
78 | 100 | def close(self): |
79 | | - """Close and stop monitoring. |
80 | | -
81 | | - open() restarts the monitor after closing. |
82 | | - """ |
83 | | - self._executor.close() |
| 101 | + super(Monitor, self).close() |
84 | 102 |
85 | 103 | # Increment the pool_id and maybe close the socket. If the executor |
86 | 104 | # thread has the socket checked out, it will be closed when checked in. |
87 | 105 | self._pool.reset() |
88 | 106 |
89 | | - def join(self, timeout=None): |
90 | | - self._executor.join(timeout) |
91 | | - |
92 | | - def request_check(self): |
93 | | - """If the monitor is sleeping, wake and check the server soon.""" |
94 | | - self._executor.wake() |
95 | | - |
96 | 107 | def _run(self): |
97 | 108 | try: |
98 | 109 | self._server_description = self._check_with_retry() |
@@ -182,3 +193,66 @@ def _check_with_socket(self, sock_info): |
182 | 193 | self._topology.receive_cluster_time( |
183 | 194 | exc.details.get('$clusterTime')) |
184 | 195 | raise |
| 196 | + |
| 197 | + |
| 198 | +class SrvMonitor(MonitorBase): |
| 199 | + def __init__(self, topology, topology_settings): |
| 200 | + """Class to poll SRV records on a background thread. |
| 201 | +
| 202 | + Pass a Topology and a TopologySettings. |
| 203 | +
| 204 | + The Topology is weakly referenced. |
| 205 | + """ |
| 206 | + self._settings = topology_settings |
| 207 | + self._fqdn = self._settings.fqdn |
| 208 | + |
| 209 | + # We strongly reference the executor and it weakly references us via |
| 210 | + # this closure. When the monitor is freed, stop the executor soon. |
| 211 | + def target(): |
| 212 | + monitor = self_ref() |
| 213 | + if monitor is None: |
| 214 | + return False # Stop the executor. |
| 215 | + SrvMonitor._run(monitor) |
| 216 | + return True |
| 217 | + |
| 218 | + executor = periodic_executor.PeriodicExecutor( |
| 219 | + interval=common.MIN_SRV_RESCAN_INTERVAL, |
| 220 | + min_interval=self._settings.heartbeat_frequency, |
| 221 | + target=target, |
| 222 | + name="pymongo_srv_polling_thread") |
| 223 | + |
| 224 | + self._executor = executor |
| 225 | + |
| 226 | + # Avoid cycles. When self or topology is freed, stop executor soon. |
| 227 | + self_ref = weakref.ref(self, executor.close) |
| 228 | + self._topology = weakref.proxy(topology, executor.close) |
| 229 | + |
| 230 | + def _run(self): |
| 231 | + try: |
| 232 | + self._seedlist = self._get_seedlist() |
| 233 | + self._topology.on_srv_update(self._seedlist) |
| 234 | + except ReferenceError: |
| 235 | + # Topology was garbage-collected. |
| 236 | + self.close() |
| 237 | + |
| 238 | + def _get_seedlist(self): |
| 239 | + """Poll SRV records for a seedlist. |
| 240 | +
| 241 | + Returns a list of ServerDescriptions. |
| 242 | + """ |
| 243 | + try: |
| 244 | + seedlist, ttl = _SrvResolver(self._fqdn).get_hosts_and_min_ttl() |
| 245 | + if len(seedlist) == 0: |
| 246 | + # As per the spec: this should be treated as a failure. |
| 247 | + raise Exception |
| 248 | + except Exception: |
| 249 | + # As per the spec, upon encountering an error: |
| 250 | + # - An error must not be raised |
| 251 | + # - SRV records must be rescanned every heartbeatFrequencyMS |
| 252 | + # - Topology must be left unchanged |
| 253 | + self.request_check() |
| 254 | + return self._seedlist |
| 255 | + else: |
| 256 | + self._executor.update_interval( |
| 257 | + max(ttl, common.MIN_SRV_RESCAN_INTERVAL)) |
| 258 | + return seedlist |
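
Both Monitor and SrvMonitor rely on the lifetime pattern spelled out in the comments: the monitor strongly references its executor, the executor's target closure holds only a weak reference back to the monitor, and weakref.ref(self, executor.close) / weakref.proxy(topology, executor.close) arrange for the executor to be closed as soon as the monitor or the topology is garbage collected, without creating a reference cycle. A stripped-down, self-contained sketch of that mechanism follows; the Executor class is a stand-in for periodic_executor.PeriodicExecutor, not pymongo code, and the print call only marks when the weakref callback fires.

import weakref


class Executor(object):
    """Stand-in for periodic_executor.PeriodicExecutor."""

    def __init__(self, target):
        self._target = target  # the executor keeps the target closure alive

    def close(self, dummy=None):
        # A weakref callback receives the dead reference object as its argument,
        # which is why executor.close can be passed directly as the callback.
        print("executor stopped")


class Owner(object):
    """Plays the role of Monitor / SrvMonitor in this sketch."""

    def __init__(self):
        def target():
            owner = self_ref()  # resolve the weak reference on every run
            if owner is None:
                return False    # the owner was freed: stop the executor
            return True

        executor = Executor(target)
        self._executor = executor                     # strong: owner -> executor
        self_ref = weakref.ref(self, executor.close)  # weak: target -> owner


owner = Owner()
del owner  # the last strong reference goes away and executor.close runs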
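
The error handling and interval arithmetic in _get_seedlist follow the SRV polling rules quoted in the comments: on any failure, including an empty record set, the previous seedlist is returned so the topology is left unchanged, and request_check() wakes the executor so DNS is retried after min_interval (the topology's heartbeat_frequency) rather than the full rescan interval; on success the polling interval becomes the DNS TTL, clamped from below by MIN_SRV_RESCAN_INTERVAL so a very short TTL cannot cause rapid re-polling. A small sketch of just the clamping rule, with the 60-second floor assumed here rather than read from pymongo.common:

def next_srv_rescan_interval(ttl_seconds, min_rescan_interval=60):
    # Mirrors executor.update_interval(max(ttl, common.MIN_SRV_RESCAN_INTERVAL)).
    return max(ttl_seconds, min_rescan_interval)


print(next_srv_rescan_interval(15))   # 60: short TTLs are raised to the floor
print(next_srv_rescan_interval(300))  # 300: longer TTLs are honored as-is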