Merge ~sylvain-pineau/checkbox-ng:remote-gee-up into checkbox-ng:master
- Git
- lp:~sylvain-pineau/checkbox-ng
- remote-gee-up
- Merge into master
Status: | Merged | ||||
---|---|---|---|---|---|
Approved by: | Sylvain Pineau | ||||
Approved revision: | 5a7483655bc48605f7a83f865e7e4e4c8a9228b7 | ||||
Merged at revision: | 57b6dacb5c2e4c45c66d6ba1491ff0e701db35ff | ||||
Proposed branch: | ~sylvain-pineau/checkbox-ng:remote-gee-up | ||||
Merge into: | checkbox-ng:master | ||||
Diff against target: |
5072 lines (+1341/-944) 29 files modified
checkbox_ng/launcher/master.py (+35/-11) dev/null (+0/-247) plainbox/impl/session/remote_assistant.py (+24/-14) plainbox/vendor/rpyc/__init__.py (+5/-4) plainbox/vendor/rpyc/core/__init__.py (+2/-2) plainbox/vendor/rpyc/core/async.py (+1/-1) plainbox/vendor/rpyc/core/async_.py (+6/-1) plainbox/vendor/rpyc/core/brine.py (+140/-79) plainbox/vendor/rpyc/core/channel.py (+22/-8) plainbox/vendor/rpyc/core/consts.py (+28/-29) plainbox/vendor/rpyc/core/netref.py (+143/-77) plainbox/vendor/rpyc/core/protocol.py (+242/-199) plainbox/vendor/rpyc/core/reactor.py (+7/-3) plainbox/vendor/rpyc/core/service.py (+38/-16) plainbox/vendor/rpyc/core/stream.py (+171/-45) plainbox/vendor/rpyc/core/vinegar.py (+36/-11) plainbox/vendor/rpyc/lib/__init__.py (+117/-6) plainbox/vendor/rpyc/lib/colls.py (+28/-5) plainbox/vendor/rpyc/lib/compat.py (+28/-10) plainbox/vendor/rpyc/utils/__init__.py (+0/-1) plainbox/vendor/rpyc/utils/authenticators.py (+6/-8) plainbox/vendor/rpyc/utils/classic.py (+60/-34) plainbox/vendor/rpyc/utils/factory.py (+27/-11) plainbox/vendor/rpyc/utils/helpers.py (+24/-4) plainbox/vendor/rpyc/utils/registry.py (+32/-27) plainbox/vendor/rpyc/utils/server.py (+48/-55) plainbox/vendor/rpyc/utils/teleportation.py (+46/-26) plainbox/vendor/rpyc/utils/zerodeploy.py (+23/-8) plainbox/vendor/rpyc/version.py (+2/-2) |
||||
Related bugs: |
|
Reviewer | Review Type | Date Requested | Status |
---|---|---|---|
Sylvain Pineau (community) | Approve | ||
Maciej Kisielewski (community) | Approve | ||
Review via email:
|
Commit message
Description of the change
Two patches for two checkbox RPyC issues:
1. slowness when dealing with ultra verbose I/O logs
It could take 5 to 20 minutes on a slow device to process jobs like the sysfs-attachment. The mechanism relying on Python Queues was the bottleneck. The patch still buffers the I/O logs, but this time in an io.StringIO memory buffer, which is much more efficient. The benchmarking provider on my i7 reports pretty much the same timings as the local run for the remote-templatey launcher.
2. slowness when generating reports
The solution is to generate reports on the slave. Master is using the exported stream filehandle to read from it instead of offering a filehandle to the slave for writing (much much slower). A new dependency (tqdm) will help monitoring the submission reports transfer from slave to master (see below).
New dependency: python3-tqdm
Xenial backport:
https:/
Checkbox core snap: master & s16 rebased:
https:/
![](/+icing/build/overlay/assets/skins/sam/images/close.gif)
Sylvain Pineau (sylvain-pineau) wrote : | # |
Landing after testing various types of encodings for the slave (including ASCII). Seems to work well. Fixed a few pep8 errors and bumped the remote API to 10.
Preview Diff
1 | diff --git a/checkbox_ng/launcher/master.py b/checkbox_ng/launcher/master.py |
2 | index fd14a54..8c29e91 100644 |
3 | --- a/checkbox_ng/launcher/master.py |
4 | +++ b/checkbox_ng/launcher/master.py |
5 | @@ -49,6 +49,7 @@ from checkbox_ng.urwid_ui import resume_dialog |
6 | from checkbox_ng.launcher.run import NormalUI, ReRunJob |
7 | from checkbox_ng.launcher.stages import MainLoopStage |
8 | from checkbox_ng.launcher.stages import ReportsStage |
9 | +from tqdm import tqdm |
10 | _ = gettext.gettext |
11 | _logger = logging.getLogger("master") |
12 | |
13 | @@ -77,10 +78,10 @@ class SimpleUI(NormalUI, MainLoopStage): |
14 | print(SimpleUI.C.header(header, fill='-')) |
15 | |
16 | def green_text(text, end='\n'): |
17 | - print(SimpleUI.C.GREEN(text), end=end) |
18 | + print(SimpleUI.C.GREEN(text), end=end, file=sys.stdout) |
19 | |
20 | def red_text(text, end='\n'): |
21 | - print(SimpleUI.C.RED(text), end=end) |
22 | + print(SimpleUI.C.RED(text), end=end, file=sys.stderr) |
23 | |
24 | def horiz_line(): |
25 | print(SimpleUI.C.WHITE('-' * 80)) |
26 | @@ -160,7 +161,7 @@ class RemoteMaster(ReportsStage, MainLoopStage): |
27 | def connect_and_run(self, host, port=18871): |
28 | config = rpyc.core.protocol.DEFAULT_CONFIG.copy() |
29 | config['allow_all_attrs'] = True |
30 | - config['sync_request_timeout'] = 60 |
31 | + config['sync_request_timeout'] = 120 |
32 | keep_running = False |
33 | self._prepare_transports() |
34 | interrupted = False |
35 | @@ -416,11 +417,11 @@ class RemoteMaster(ReportsStage, MainLoopStage): |
36 | while True: |
37 | state, payload = self.sa.monitor_job() |
38 | if payload and not self._is_bootstrapping: |
39 | - for stream, line in payload: |
40 | - if stream == 'stderr': |
41 | - SimpleUI.red_text(line, end='') |
42 | + for line in payload.splitlines(): |
43 | + if line.startswith('stderr'): |
44 | + SimpleUI.red_text(line[6:]) |
45 | else: |
46 | - SimpleUI.green_text(line, end='') |
47 | + SimpleUI.green_text(line[6:]) |
48 | if state == 'running': |
49 | time.sleep(0.5) |
50 | while True: |
51 | @@ -452,13 +453,36 @@ class RemoteMaster(ReportsStage, MainLoopStage): |
52 | self.abandon() |
53 | self.new_session() |
54 | |
55 | + def _download_file(conn, remotepath, localpath, chunk_size=16384): |
56 | + try: |
57 | + rf = conn.root.open(remotepath, "rb") |
58 | + with tqdm(total=conn.root.getsize(remotepath), unit='B', unit_scale=True, unit_divisor=1024) as pbar: |
59 | + with open(localpath, "wb") as lf: |
60 | + while True: |
61 | + buf = rf.read(chunk_size) |
62 | + pbar.set_postfix(file=remotepath, refresh=False) |
63 | + pbar.update(chunk_size) |
64 | + if not buf: |
65 | + break |
66 | + #time.sleep(0.01) |
67 | + lf.write(buf) |
68 | + finally: |
69 | + rf.close() |
70 | + |
71 | def local_export(self, exporter_id, transport, options=()): |
72 | _logger.info("master: Exporting locally'") |
73 | - exporter = self._sa.manager.create_exporter(exporter_id, options) |
74 | + rf = self.sa.cache_report(exporter_id, options) |
75 | exported_stream = SpooledTemporaryFile(max_size=102400, mode='w+b') |
76 | - async_dump = rpyc.async_(exporter.dump_from_session_manager) |
77 | - res = async_dump(self._sa.manager, exported_stream) |
78 | - res.wait() |
79 | + chunk_size=16384 |
80 | + with tqdm(total=rf.tell(), unit='B', unit_scale=True, unit_divisor=1024) as pbar: |
81 | + rf.seek(0) |
82 | + while True: |
83 | + buf = rf.read(chunk_size) |
84 | + pbar.set_postfix(file=transport.url, refresh=False) |
85 | + pbar.update(chunk_size) |
86 | + if not buf: |
87 | + break |
88 | + exported_stream.write(buf) |
89 | exported_stream.seek(0) |
90 | result = transport.send(exported_stream) |
91 | return result |
92 | diff --git a/plainbox/impl/session/remote_assistant.py b/plainbox/impl/session/remote_assistant.py |
93 | index e1652f0..7185bb9 100644 |
94 | --- a/plainbox/impl/session/remote_assistant.py |
95 | +++ b/plainbox/impl/session/remote_assistant.py |
96 | @@ -16,14 +16,14 @@ |
97 | # You should have received a copy of the GNU General Public License |
98 | # along with Checkbox. If not, see <http://www.gnu.org/licenses/>. |
99 | import fnmatch |
100 | +import io |
101 | import json |
102 | import gettext |
103 | import logging |
104 | import os |
105 | -import queue |
106 | import time |
107 | -import sys |
108 | from collections import namedtuple |
109 | +from tempfile import SpooledTemporaryFile |
110 | from threading import Thread, Lock |
111 | from subprocess import CalledProcessError, check_output |
112 | |
113 | @@ -69,22 +69,25 @@ Finalizing = 'finalizing' |
114 | class BufferedUI(SilentUI): |
115 | """UI type that queues the output for later reading.""" |
116 | |
117 | - # XXX: using as string as a buffer and one lock over it |
118 | - # might be a cleaner approach than those queues |
119 | def __init__(self): |
120 | super().__init__() |
121 | - self._queue = queue.Queue() |
122 | + self.lock = Lock() |
123 | + self._output = io.StringIO() |
124 | |
125 | def got_program_output(self, stream_name, line): |
126 | - self._queue.put( |
127 | - (stream_name, line.decode(sys.stdout.encoding, 'replace'))) |
128 | + with self.lock: |
129 | + try: |
130 | + self._output.write(stream_name + line.decode("UTF-8")) |
131 | + except UnicodeDecodeError: |
132 | + # Don't start a slave->master transfer for binary attachments |
133 | + pass |
134 | |
135 | def get_output(self): |
136 | """Returns all the output queued up since previous call.""" |
137 | - output = [] |
138 | - while not self._queue.empty(): |
139 | - output.append(self._queue.get()) |
140 | - return output |
141 | + with self.lock: |
142 | + output = self._output.getvalue() |
143 | + self._output = io.StringIO() |
144 | + return output |
145 | |
146 | |
147 | class BackgroundExecutor(Thread): |
148 | @@ -119,7 +122,7 @@ class BackgroundExecutor(Thread): |
149 | class RemoteSessionAssistant(): |
150 | """Remote execution enabling wrapper for the SessionAssistant""" |
151 | |
152 | - REMOTE_API_VERSION = 9 |
153 | + REMOTE_API_VERSION = 10 |
154 | |
155 | def __init__(self, cmd_callback): |
156 | _logger.debug("__init__()") |
157 | @@ -212,8 +215,8 @@ class RemoteSessionAssistant(): |
158 | # psutil < 4.0.0 doesn't provide Process.environ() |
159 | return self._prepare_display_without_psutil() |
160 | except psutil.NoSuchProcess: |
161 | - # quietly ignore the process that died before we had a chance to |
162 | - # read the environment from them |
163 | + # quietly ignore the process that died before we had a chance |
164 | + # to read the environment from them |
165 | continue |
166 | if ("DISPLAY" in p_environ and p_user != 'gdm'): # gdm uses :1024 |
167 | return {'DISPLAY': p_environ['DISPLAY']} |
168 | @@ -635,3 +638,10 @@ class RemoteSessionAssistant(): |
169 | @property |
170 | def sideloaded_providers(self): |
171 | return self._sa.sideloaded_providers |
172 | + |
173 | + def exposed_cache_report(self, exporter_id, options): |
174 | + exporter = self._sa._manager.create_exporter(exporter_id, options) |
175 | + exported_stream = SpooledTemporaryFile(max_size=102400, mode='w+b') |
176 | + exporter.dump_from_session_manager(self._sa._manager, exported_stream) |
177 | + exported_stream.flush() |
178 | + return exported_stream |
179 | diff --git a/plainbox/vendor/rpyc/__init__.py b/plainbox/vendor/rpyc/__init__.py |
180 | index 7daf69c..9cd71d5 100644 |
181 | --- a/plainbox/vendor/rpyc/__init__.py |
182 | +++ b/plainbox/vendor/rpyc/__init__.py |
183 | @@ -41,12 +41,13 @@ Classic-style usage:: |
184 | ... |
185 | |
186 | """ |
187 | +# flake8: noqa: F401 |
188 | from plainbox.vendor.rpyc.core import (SocketStream, TunneledSocketStream, PipeStream, Channel, |
189 | - Connection, Service, BaseNetref, AsyncResult, GenericException, |
190 | - AsyncResultTimeout, VoidService, SlaveService, MasterService, ClassicService) |
191 | + Connection, Service, BaseNetref, AsyncResult, GenericException, |
192 | + AsyncResultTimeout, VoidService, SlaveService, MasterService, ClassicService) |
193 | from plainbox.vendor.rpyc.utils.factory import (connect_stream, connect_channel, connect_pipes, |
194 | - connect_stdpipes, connect, ssl_connect, discover, connect_by_service, connect_subproc, |
195 | - connect_thread, ssh_connect) |
196 | + connect_stdpipes, connect, ssl_connect, discover, connect_by_service, connect_subproc, |
197 | + connect_thread, ssh_connect) |
198 | from plainbox.vendor.rpyc.utils.helpers import async_, timed, buffiter, BgServingThread, restricted |
199 | from plainbox.vendor.rpyc.utils import classic |
200 | from plainbox.vendor.rpyc.version import version as __version__ |
201 | diff --git a/plainbox/vendor/rpyc/core/__init__.py b/plainbox/vendor/rpyc/core/__init__.py |
202 | index fb52c51..4155f1c 100644 |
203 | --- a/plainbox/vendor/rpyc/core/__init__.py |
204 | +++ b/plainbox/vendor/rpyc/core/__init__.py |
205 | @@ -1,8 +1,8 @@ |
206 | +# flake8: noqa: F401 |
207 | from plainbox.vendor.rpyc.core.stream import SocketStream, TunneledSocketStream, PipeStream |
208 | from plainbox.vendor.rpyc.core.channel import Channel |
209 | from plainbox.vendor.rpyc.core.protocol import Connection |
210 | from plainbox.vendor.rpyc.core.netref import BaseNetref |
211 | from plainbox.vendor.rpyc.core.async_ import AsyncResult, AsyncResultTimeout |
212 | -from plainbox.vendor.rpyc.core.service import (Service, VoidService, SlaveService, MasterService, |
213 | - ClassicService) |
214 | +from plainbox.vendor.rpyc.core.service import Service, VoidService, SlaveService, MasterService, ClassicService |
215 | from plainbox.vendor.rpyc.core.vinegar import GenericException |
216 | diff --git a/plainbox/vendor/rpyc/core/async.py b/plainbox/vendor/rpyc/core/async.py |
217 | index 96706b3..36dd63a 100644 |
218 | --- a/plainbox/vendor/rpyc/core/async.py |
219 | +++ b/plainbox/vendor/rpyc/core/async.py |
220 | @@ -1,2 +1,2 @@ |
221 | """Backward compatibility alias on py<=3.6.""" |
222 | -from plainbox.vendor.rpyc.core.async_ import * |
223 | +from plainbox.vendor.rpyc.core.async_ import * # noqa |
224 | diff --git a/plainbox/vendor/rpyc/core/async_.py b/plainbox/vendor/rpyc/core/async_.py |
225 | index 7812a37..fd69027 100644 |
226 | --- a/plainbox/vendor/rpyc/core/async_.py |
227 | +++ b/plainbox/vendor/rpyc/core/async_.py |
228 | @@ -1,4 +1,4 @@ |
229 | -import time |
230 | +import time # noqa: F401 |
231 | from plainbox.vendor.rpyc.lib import Timeout |
232 | from plainbox.vendor.rpyc.lib.compat import TimeoutError as AsyncResultTimeout |
233 | |
234 | @@ -9,6 +9,7 @@ class AsyncResult(object): |
235 | result (which will block if the result has not yet arrived). |
236 | """ |
237 | __slots__ = ["_conn", "_is_ready", "_is_exc", "_callbacks", "_obj", "_ttl"] |
238 | + |
239 | def __init__(self, conn): |
240 | self._conn = conn |
241 | self._is_ready = False |
242 | @@ -16,6 +17,7 @@ class AsyncResult(object): |
243 | self._obj = None |
244 | self._callbacks = [] |
245 | self._ttl = Timeout(None) |
246 | + |
247 | def __repr__(self): |
248 | if self._is_ready: |
249 | state = "ready" |
250 | @@ -58,6 +60,7 @@ class AsyncResult(object): |
251 | func(self) |
252 | else: |
253 | self._callbacks.append(func) |
254 | + |
255 | def set_expiry(self, timeout): |
256 | """Sets the expiry time (in seconds, relative to now) or ``None`` for |
257 | unlimited time |
258 | @@ -75,10 +78,12 @@ class AsyncResult(object): |
259 | return False |
260 | self._conn.poll_all() |
261 | return self._is_ready |
262 | + |
263 | @property |
264 | def error(self): |
265 | """Indicates whether the returned result is an exception""" |
266 | return self.ready and self._is_exc |
267 | + |
268 | @property |
269 | def expired(self): |
270 | """Indicates whether the AsyncResult has expired""" |
271 | diff --git a/plainbox/vendor/rpyc/core/brine.py b/plainbox/vendor/rpyc/core/brine.py |
272 | index 30f4135..0545cb3 100644 |
273 | --- a/plainbox/vendor/rpyc/core/brine.py |
274 | +++ b/plainbox/vendor/rpyc/core/brine.py |
275 | @@ -1,56 +1,55 @@ |
276 | -""" |
277 | -**Brine** is a simple, fast and secure object serializer for **immutable** objects. |
278 | +"""*Brine* is a simple, fast and secure object serializer for **immutable** objects. |
279 | + |
280 | The following types are supported: ``int``, ``long``, ``bool``, ``str``, ``float``, |
281 | ``unicode``, ``bytes``, ``slice``, ``complex``, ``tuple`` (of simple types), |
282 | ``frozenset`` (of simple types) as well as the following singletons: ``None``, |
283 | ``NotImplemented``, and ``Ellipsis``. |
284 | |
285 | Example:: |
286 | - |
287 | - >>> x = ("he", 7, u"llo", 8, (), 900, None, True, Ellipsis, 18.2, 18.2j + 13, |
288 | - ... slice(1,2,3), frozenset([5,6,7]), NotImplemented) |
289 | - >>> dumpable(x) |
290 | - True |
291 | - >>> y = dump(x) |
292 | - >>> y.encode("hex") |
293 | - '140e0b686557080c6c6c6f580216033930300003061840323333333333331b402a000000000000403233333333333319125152531a1255565705' |
294 | - >>> z = load(y) |
295 | - >>> x == z |
296 | - True |
297 | + >>> x = ("he", 7, u"llo", 8, (), 900, None, True, Ellipsis, 18.2, 18.2j + 13, |
298 | + ... slice(1,2,3), frozenset([5,6,7]), NotImplemented) |
299 | + >>> dumpable(x) |
300 | + True |
301 | + >>> y = dump(x) |
302 | + >>> y.encode("hex") |
303 | + '140e0b686557080c6c6c6f580216033930300003061840323333333333331b402a000000000000403233333333333319125152531a1255565705' |
304 | + >>> z = load(y) |
305 | + >>> x == z |
306 | + True |
307 | """ |
308 | -from plainbox.vendor.rpyc.lib.compat import Struct, BytesIO, is_py3k, BYTES_LITERAL |
309 | +from plainbox.vendor.rpyc.lib.compat import Struct, BytesIO, is_py_3k, BYTES_LITERAL |
310 | |
311 | |
312 | # singletons |
313 | -TAG_NONE = b"\x00" |
314 | -TAG_EMPTY_STR = b"\x01" |
315 | -TAG_EMPTY_TUPLE = b"\x02" |
316 | -TAG_TRUE = b"\x03" |
317 | -TAG_FALSE = b"\x04" |
318 | +TAG_NONE = b"\x00" |
319 | +TAG_EMPTY_STR = b"\x01" |
320 | +TAG_EMPTY_TUPLE = b"\x02" |
321 | +TAG_TRUE = b"\x03" |
322 | +TAG_FALSE = b"\x04" |
323 | TAG_NOT_IMPLEMENTED = b"\x05" |
324 | -TAG_ELLIPSIS = b"\x06" |
325 | +TAG_ELLIPSIS = b"\x06" |
326 | # types |
327 | -TAG_UNICODE = b"\x08" |
328 | -TAG_LONG = b"\x09" |
329 | -TAG_STR1 = b"\x0a" |
330 | -TAG_STR2 = b"\x0b" |
331 | -TAG_STR3 = b"\x0c" |
332 | -TAG_STR4 = b"\x0d" |
333 | -TAG_STR_L1 = b"\x0e" |
334 | -TAG_STR_L4 = b"\x0f" |
335 | -TAG_TUP1 = b"\x10" |
336 | -TAG_TUP2 = b"\x11" |
337 | -TAG_TUP3 = b"\x12" |
338 | -TAG_TUP4 = b"\x13" |
339 | -TAG_TUP_L1 = b"\x14" |
340 | -TAG_TUP_L4 = b"\x15" |
341 | -TAG_INT_L1 = b"\x16" |
342 | -TAG_INT_L4 = b"\x17" |
343 | -TAG_FLOAT = b"\x18" |
344 | -TAG_SLICE = b"\x19" |
345 | -TAG_FSET = b"\x1a" |
346 | -TAG_COMPLEX = b"\x1b" |
347 | -if is_py3k: |
348 | +TAG_UNICODE = b"\x08" |
349 | +TAG_LONG = b"\x09" |
350 | +TAG_STR1 = b"\x0a" |
351 | +TAG_STR2 = b"\x0b" |
352 | +TAG_STR3 = b"\x0c" |
353 | +TAG_STR4 = b"\x0d" |
354 | +TAG_STR_L1 = b"\x0e" |
355 | +TAG_STR_L4 = b"\x0f" |
356 | +TAG_TUP1 = b"\x10" |
357 | +TAG_TUP2 = b"\x11" |
358 | +TAG_TUP3 = b"\x12" |
359 | +TAG_TUP4 = b"\x13" |
360 | +TAG_TUP_L1 = b"\x14" |
361 | +TAG_TUP_L4 = b"\x15" |
362 | +TAG_INT_L1 = b"\x16" |
363 | +TAG_INT_L4 = b"\x17" |
364 | +TAG_FLOAT = b"\x18" |
365 | +TAG_SLICE = b"\x19" |
366 | +TAG_FSET = b"\x1a" |
367 | +TAG_COMPLEX = b"\x1b" |
368 | +if is_py_3k: |
369 | IMM_INTS = dict((i, bytes([i + 0x50])) for i in range(-0x30, 0xa0)) |
370 | else: |
371 | IMM_INTS = dict((i, chr(i + 0x50)) for i in range(-0x30, 0xa0)) |
372 | @@ -64,27 +63,31 @@ _dump_registry = {} |
373 | _load_registry = {} |
374 | IMM_INTS_LOADER = dict((v, k) for k, v in IMM_INTS.items()) |
375 | |
376 | + |
377 | def register(coll, key): |
378 | def deco(func): |
379 | coll[key] = func |
380 | return func |
381 | return deco |
382 | |
383 | -#=============================================================================== |
384 | +# =============================================================================== |
385 | # dumping |
386 | -#=============================================================================== |
387 | +# =============================================================================== |
388 | @register(_dump_registry, type(None)) |
389 | def _dump_none(obj, stream): |
390 | stream.append(TAG_NONE) |
391 | |
392 | + |
393 | @register(_dump_registry, type(NotImplemented)) |
394 | def _dump_notimplemeted(obj, stream): |
395 | stream.append(TAG_NOT_IMPLEMENTED) |
396 | |
397 | + |
398 | @register(_dump_registry, type(Ellipsis)) |
399 | def _dump_ellipsis(obj, stream): |
400 | stream.append(TAG_ELLIPSIS) |
401 | |
402 | + |
403 | @register(_dump_registry, bool) |
404 | def _dump_bool(obj, stream): |
405 | if obj: |
406 | @@ -92,53 +95,60 @@ def _dump_bool(obj, stream): |
407 | else: |
408 | stream.append(TAG_FALSE) |
409 | |
410 | + |
411 | @register(_dump_registry, slice) |
412 | def _dump_slice(obj, stream): |
413 | stream.append(TAG_SLICE) |
414 | _dump((obj.start, obj.stop, obj.step), stream) |
415 | |
416 | + |
417 | @register(_dump_registry, frozenset) |
418 | def _dump_frozenset(obj, stream): |
419 | stream.append(TAG_FSET) |
420 | _dump(tuple(obj), stream) |
421 | |
422 | + |
423 | @register(_dump_registry, int) |
424 | def _dump_int(obj, stream): |
425 | if obj in IMM_INTS: |
426 | stream.append(IMM_INTS[obj]) |
427 | else: |
428 | obj = BYTES_LITERAL(str(obj)) |
429 | - l = len(obj) |
430 | - if l < 256: |
431 | - stream.append(TAG_INT_L1 + I1.pack(l) + obj) |
432 | + lenobj = len(obj) |
433 | + if lenobj < 256: |
434 | + stream.append(TAG_INT_L1 + I1.pack(lenobj) + obj) |
435 | else: |
436 | - stream.append(TAG_INT_L4 + I4.pack(l) + obj) |
437 | + stream.append(TAG_INT_L4 + I4.pack(lenobj) + obj) |
438 | + |
439 | |
440 | @register(_dump_registry, float) |
441 | def _dump_float(obj, stream): |
442 | stream.append(TAG_FLOAT + F8.pack(obj)) |
443 | |
444 | + |
445 | @register(_dump_registry, complex) |
446 | def _dump_complex(obj, stream): |
447 | stream.append(TAG_COMPLEX + C16.pack(obj.real, obj.imag)) |
448 | |
449 | + |
450 | @register(_dump_registry, bytes) |
451 | def _dump_bytes(obj, stream): |
452 | - l = len(obj) |
453 | - if l == 0: |
454 | + lenobj = len(obj) |
455 | + if lenobj == 0: |
456 | stream.append(TAG_EMPTY_STR) |
457 | - elif l == 1: |
458 | + elif lenobj == 1: |
459 | stream.append(TAG_STR1 + obj) |
460 | - elif l == 2: |
461 | + elif lenobj == 2: |
462 | stream.append(TAG_STR2 + obj) |
463 | - elif l == 3: |
464 | + elif lenobj == 3: |
465 | stream.append(TAG_STR3 + obj) |
466 | - elif l == 4: |
467 | + elif lenobj == 4: |
468 | stream.append(TAG_STR4 + obj) |
469 | - elif l < 256: |
470 | - stream.append(TAG_STR_L1 + I1.pack(l) + obj) |
471 | + elif lenobj < 256: |
472 | + stream.append(TAG_STR_L1 + I1.pack(lenobj) + obj) |
473 | else: |
474 | - stream.append(TAG_STR_L4 + I4.pack(l) + obj) |
475 | + stream.append(TAG_STR_L4 + I4.pack(lenobj) + obj) |
476 | + |
477 | |
478 | @register(_dump_registry, type(u"")) |
479 | def _dump_str(obj, stream): |
480 | @@ -146,8 +156,8 @@ def _dump_str(obj, stream): |
481 | _dump_bytes(obj.encode("utf8"), stream) |
482 | |
483 | |
484 | -if not is_py3k: |
485 | - @register(_dump_registry, long) |
486 | +if not is_py_3k: |
487 | + @register(_dump_registry, long) # noqa: F821 |
488 | def _dump_long(obj, stream): |
489 | stream.append(TAG_LONG) |
490 | _dump_int(obj, stream) |
491 | @@ -155,57 +165,71 @@ if not is_py3k: |
492 | |
493 | @register(_dump_registry, tuple) |
494 | def _dump_tuple(obj, stream): |
495 | - l = len(obj) |
496 | - if l == 0: |
497 | + lenobj = len(obj) |
498 | + if lenobj == 0: |
499 | stream.append(TAG_EMPTY_TUPLE) |
500 | - elif l == 1: |
501 | + elif lenobj == 1: |
502 | stream.append(TAG_TUP1) |
503 | - elif l == 2: |
504 | + elif lenobj == 2: |
505 | stream.append(TAG_TUP2) |
506 | - elif l == 3: |
507 | + elif lenobj == 3: |
508 | stream.append(TAG_TUP3) |
509 | - elif l == 4: |
510 | + elif lenobj == 4: |
511 | stream.append(TAG_TUP4) |
512 | - elif l < 256: |
513 | - stream.append(TAG_TUP_L1 + I1.pack(l)) |
514 | + elif lenobj < 256: |
515 | + stream.append(TAG_TUP_L1 + I1.pack(lenobj)) |
516 | else: |
517 | - stream.append(TAG_TUP_L4 + I4.pack(l)) |
518 | + stream.append(TAG_TUP_L4 + I4.pack(lenobj)) |
519 | for item in obj: |
520 | _dump(item, stream) |
521 | |
522 | + |
523 | def _undumpable(obj, stream): |
524 | raise TypeError("cannot dump %r" % (obj,)) |
525 | |
526 | + |
527 | def _dump(obj, stream): |
528 | _dump_registry.get(type(obj), _undumpable)(obj, stream) |
529 | |
530 | -#=============================================================================== |
531 | +# =============================================================================== |
532 | # loading |
533 | -#=============================================================================== |
534 | +# =============================================================================== |
535 | @register(_load_registry, TAG_NONE) |
536 | def _load_none(stream): |
537 | return None |
538 | + |
539 | + |
540 | @register(_load_registry, TAG_NOT_IMPLEMENTED) |
541 | def _load_nonimp(stream): |
542 | return NotImplemented |
543 | + |
544 | + |
545 | @register(_load_registry, TAG_ELLIPSIS) |
546 | def _load_elipsis(stream): |
547 | return Ellipsis |
548 | + |
549 | + |
550 | @register(_load_registry, TAG_TRUE) |
551 | def _load_true(stream): |
552 | return True |
553 | + |
554 | + |
555 | @register(_load_registry, TAG_FALSE) |
556 | def _load_false(stream): |
557 | return False |
558 | + |
559 | + |
560 | @register(_load_registry, TAG_EMPTY_TUPLE) |
561 | def _load_empty_tuple(stream): |
562 | return () |
563 | |
564 | + |
565 | @register(_load_registry, TAG_EMPTY_STR) |
566 | def _load_empty_str(stream): |
567 | return b"" |
568 | |
569 | -if is_py3k: |
570 | + |
571 | +if is_py_3k: |
572 | @register(_load_registry, TAG_LONG) |
573 | def _load_long(stream): |
574 | obj = _load(stream) |
575 | @@ -214,60 +238,85 @@ else: |
576 | @register(_load_registry, TAG_LONG) |
577 | def _load_long(stream): |
578 | obj = _load(stream) |
579 | - return long(obj) |
580 | + return long(obj) # noqa: F821 |
581 | + |
582 | |
583 | @register(_load_registry, TAG_FLOAT) |
584 | def _load_float(stream): |
585 | return F8.unpack(stream.read(8))[0] |
586 | + |
587 | + |
588 | @register(_load_registry, TAG_COMPLEX) |
589 | def _load_complex(stream): |
590 | real, imag = C16.unpack(stream.read(16)) |
591 | return complex(real, imag) |
592 | |
593 | + |
594 | @register(_load_registry, TAG_STR1) |
595 | def _load_str1(stream): |
596 | return stream.read(1) |
597 | + |
598 | + |
599 | @register(_load_registry, TAG_STR2) |
600 | def _load_str2(stream): |
601 | return stream.read(2) |
602 | + |
603 | + |
604 | @register(_load_registry, TAG_STR3) |
605 | def _load_str3(stream): |
606 | return stream.read(3) |
607 | + |
608 | + |
609 | @register(_load_registry, TAG_STR4) |
610 | def _load_str4(stream): |
611 | return stream.read(4) |
612 | + |
613 | + |
614 | @register(_load_registry, TAG_STR_L1) |
615 | def _load_str_l1(stream): |
616 | l, = I1.unpack(stream.read(1)) |
617 | return stream.read(l) |
618 | + |
619 | + |
620 | @register(_load_registry, TAG_STR_L4) |
621 | def _load_str_l4(stream): |
622 | l, = I4.unpack(stream.read(4)) |
623 | return stream.read(l) |
624 | |
625 | + |
626 | @register(_load_registry, TAG_UNICODE) |
627 | def _load_unicode(stream): |
628 | obj = _load(stream) |
629 | return obj.decode("utf-8") |
630 | |
631 | + |
632 | @register(_load_registry, TAG_TUP1) |
633 | def _load_tup1(stream): |
634 | return (_load(stream),) |
635 | + |
636 | + |
637 | @register(_load_registry, TAG_TUP2) |
638 | def _load_tup2(stream): |
639 | return (_load(stream), _load(stream)) |
640 | + |
641 | + |
642 | @register(_load_registry, TAG_TUP3) |
643 | def _load_tup3(stream): |
644 | return (_load(stream), _load(stream), _load(stream)) |
645 | + |
646 | + |
647 | @register(_load_registry, TAG_TUP4) |
648 | def _load_tup4(stream): |
649 | return (_load(stream), _load(stream), _load(stream), _load(stream)) |
650 | + |
651 | + |
652 | @register(_load_registry, TAG_TUP_L1) |
653 | def _load_tup_l1(stream): |
654 | l, = I1.unpack(stream.read(1)) |
655 | return tuple(_load(stream) for i in range(l)) |
656 | |
657 | -if is_py3k: |
658 | + |
659 | +if is_py_3k: |
660 | @register(_load_registry, TAG_TUP_L4) |
661 | def _load_tup_l4(stream): |
662 | l, = I4.unpack(stream.read(4)) |
663 | @@ -276,34 +325,43 @@ else: |
664 | @register(_load_registry, TAG_TUP_L4) |
665 | def _load_tup_l4(stream): |
666 | l, = I4.unpack(stream.read(4)) |
667 | - return tuple(_load(stream) for i in xrange(l)) |
668 | + return tuple(_load(stream) for i in xrange(l)) # noqa |
669 | + |
670 | |
671 | @register(_load_registry, TAG_SLICE) |
672 | def _load_slice(stream): |
673 | start, stop, step = _load(stream) |
674 | return slice(start, stop, step) |
675 | + |
676 | + |
677 | @register(_load_registry, TAG_FSET) |
678 | def _load_frozenset(stream): |
679 | return frozenset(_load(stream)) |
680 | |
681 | + |
682 | @register(_load_registry, TAG_INT_L1) |
683 | def _load_int_l1(stream): |
684 | l, = I1.unpack(stream.read(1)) |
685 | return int(stream.read(l)) |
686 | + |
687 | + |
688 | @register(_load_registry, TAG_INT_L4) |
689 | def _load_int_l4(stream): |
690 | l, = I4.unpack(stream.read(4)) |
691 | return int(stream.read(l)) |
692 | |
693 | + |
694 | def _load(stream): |
695 | tag = stream.read(1) |
696 | if tag in IMM_INTS_LOADER: |
697 | return IMM_INTS_LOADER[tag] |
698 | return _load_registry.get(tag)(stream) |
699 | |
700 | -#=============================================================================== |
701 | +# =============================================================================== |
702 | # API |
703 | -#=============================================================================== |
704 | +# =============================================================================== |
705 | + |
706 | + |
707 | def dump(obj): |
708 | """Converts (dumps) the given object to a byte-string representation |
709 | |
710 | @@ -315,6 +373,7 @@ def dump(obj): |
711 | _dump(obj, stream) |
712 | return b"".join(stream) |
713 | |
714 | + |
715 | def load(data): |
716 | """Recreates (loads) an object from its byte-string representation |
717 | |
718 | @@ -325,12 +384,14 @@ def load(data): |
719 | stream = BytesIO(data) |
720 | return _load(stream) |
721 | |
722 | -if is_py3k: |
723 | + |
724 | +if is_py_3k: |
725 | simple_types = frozenset([type(None), int, bool, float, bytes, str, complex, |
726 | - type(NotImplemented), type(Ellipsis)]) |
727 | + type(NotImplemented), type(Ellipsis)]) |
728 | else: |
729 | - simple_types = frozenset([type(None), int, long, bool, float, bytes, unicode, complex, |
730 | - type(NotImplemented), type(Ellipsis)]) |
731 | + simple_types = frozenset([type(None), int, long, bool, float, bytes, unicode, complex, # noqa: F821 |
732 | + type(NotImplemented), type(Ellipsis)]) |
733 | + |
734 | |
735 | def dumpable(obj): |
736 | """Indicates whether the given object is *dumpable* by brine |
737 | diff --git a/plainbox/vendor/rpyc/core/channel.py b/plainbox/vendor/rpyc/core/channel.py |
738 | index 4d23e24..e0c9bfd 100644 |
739 | --- a/plainbox/vendor/rpyc/core/channel.py |
740 | +++ b/plainbox/vendor/rpyc/core/channel.py |
741 | @@ -1,5 +1,4 @@ |
742 | -""" |
743 | -*Channel* is an abstraction layer over streams that works with *packets of data*, |
744 | +"""*Channel* is an abstraction layer over streams that works with *packets of data*, |
745 | rather than an endless stream of bytes, and adds support for compression. |
746 | """ |
747 | from plainbox.vendor.rpyc.lib import safe_import |
748 | @@ -10,6 +9,7 @@ zlib = safe_import("zlib") |
749 | # * separate \n into a FlushingChannel subclass? |
750 | # * add thread safety as a subclass? |
751 | |
752 | + |
753 | class Channel(object): |
754 | """Channel implementation. |
755 | |
756 | @@ -20,27 +20,32 @@ class Channel(object): |
757 | COMPRESSION_THRESHOLD = 3000 |
758 | COMPRESSION_LEVEL = 1 |
759 | FRAME_HEADER = Struct("!LB") |
760 | - FLUSHER = BYTES_LITERAL("\n") # cause any line-buffered layers below us to flush |
761 | + FLUSHER = BYTES_LITERAL("\n") # cause any line-buffered layers below us to flush |
762 | __slots__ = ["stream", "compress"] |
763 | |
764 | - def __init__(self, stream, compress = True): |
765 | + def __init__(self, stream, compress=True): |
766 | self.stream = stream |
767 | if not zlib: |
768 | compress = False |
769 | self.compress = compress |
770 | + |
771 | def close(self): |
772 | """closes the channel and underlying stream""" |
773 | self.stream.close() |
774 | + |
775 | @property |
776 | def closed(self): |
777 | """indicates whether the underlying stream has been closed""" |
778 | return self.stream.closed |
779 | + |
780 | def fileno(self): |
781 | """returns the file descriptor of the underlying stream""" |
782 | return self.stream.fileno() |
783 | + |
784 | def poll(self, timeout): |
785 | """polls the underlying steam for data, waiting up to *timeout* seconds""" |
786 | return self.stream.poll(timeout) |
787 | + |
788 | def recv(self): |
789 | """Receives the next packet (or *frame*) from the underlying stream. |
790 | This method will block until the packet has been read completely |
791 | @@ -53,6 +58,7 @@ class Channel(object): |
792 | if compressed: |
793 | data = zlib.decompress(data) |
794 | return data |
795 | + |
796 | def send(self, data): |
797 | """Sends the given string of data as a packet over the underlying |
798 | stream. Blocks until the packet has been sent. |
799 | @@ -64,7 +70,15 @@ class Channel(object): |
800 | data = zlib.compress(data, self.COMPRESSION_LEVEL) |
801 | else: |
802 | compressed = 0 |
803 | - header = self.FRAME_HEADER.pack(len(data), compressed) |
804 | - buf = header + data + self.FLUSHER |
805 | - self.stream.write(buf) |
806 | - |
807 | + data_size = len(data) |
808 | + header = self.FRAME_HEADER.pack(data_size, compressed) |
809 | + flush_size = len(self.FLUSHER) |
810 | + if self.FRAME_HEADER.size + data_size + flush_size <= self.stream.MAX_IO_CHUNK: |
811 | + # avoid overhead from socket writes requiring GIL to be held |
812 | + self.stream.write(header + data + self.FLUSHER) |
813 | + else: |
814 | + # Data larger than 64KB, the extra writes are negligible |
815 | + part1 = self.stream.MAX_IO_CHUNK - self.FRAME_HEADER.size |
816 | + self.stream.write(header + data[:part1]) |
817 | + self.stream.write(data[part1:]) |
818 | + self.stream.write(self.FLUSHER) |
819 | diff --git a/plainbox/vendor/rpyc/core/consts.py b/plainbox/vendor/rpyc/core/consts.py |
820 | index 877999b..31a532d 100644 |
821 | --- a/plainbox/vendor/rpyc/core/consts.py |
822 | +++ b/plainbox/vendor/rpyc/core/consts.py |
823 | @@ -1,43 +1,42 @@ |
824 | -""" |
825 | -Constants used by the protocol |
826 | +"""Constants used by the protocol |
827 | """ |
828 | |
829 | # messages |
830 | -MSG_REQUEST = 1 |
831 | -MSG_REPLY = 2 |
832 | -MSG_EXCEPTION = 3 |
833 | +MSG_REQUEST = 1 |
834 | +MSG_REPLY = 2 |
835 | +MSG_EXCEPTION = 3 |
836 | |
837 | # boxing |
838 | -LABEL_VALUE = 1 |
839 | -LABEL_TUPLE = 2 |
840 | -LABEL_LOCAL_REF = 3 |
841 | +LABEL_VALUE = 1 |
842 | +LABEL_TUPLE = 2 |
843 | +LABEL_LOCAL_REF = 3 |
844 | LABEL_REMOTE_REF = 4 |
845 | |
846 | # action handlers |
847 | -HANDLE_PING = 1 |
848 | -HANDLE_CLOSE = 2 |
849 | -HANDLE_GETROOT = 3 |
850 | -HANDLE_GETATTR = 4 |
851 | -HANDLE_DELATTR = 5 |
852 | -HANDLE_SETATTR = 6 |
853 | -HANDLE_CALL = 7 |
854 | -HANDLE_CALLATTR = 8 |
855 | -HANDLE_REPR = 9 |
856 | -HANDLE_STR = 10 |
857 | -HANDLE_CMP = 11 |
858 | -HANDLE_HASH = 12 |
859 | -HANDLE_DIR = 13 |
860 | -HANDLE_PICKLE = 14 |
861 | -HANDLE_DEL = 15 |
862 | -HANDLE_INSPECT = 16 |
863 | -HANDLE_BUFFITER = 17 |
864 | -HANDLE_OLDSLICING = 18 |
865 | -HANDLE_CTXEXIT = 19 |
866 | +HANDLE_PING = 1 |
867 | +HANDLE_CLOSE = 2 |
868 | +HANDLE_GETROOT = 3 |
869 | +HANDLE_GETATTR = 4 |
870 | +HANDLE_DELATTR = 5 |
871 | +HANDLE_SETATTR = 6 |
872 | +HANDLE_CALL = 7 |
873 | +HANDLE_CALLATTR = 8 |
874 | +HANDLE_REPR = 9 |
875 | +HANDLE_STR = 10 |
876 | +HANDLE_CMP = 11 |
877 | +HANDLE_HASH = 12 |
878 | +HANDLE_DIR = 13 |
879 | +HANDLE_PICKLE = 14 |
880 | +HANDLE_DEL = 15 |
881 | +HANDLE_INSPECT = 16 |
882 | +HANDLE_BUFFITER = 17 |
883 | +HANDLE_OLDSLICING = 18 |
884 | +HANDLE_CTXEXIT = 19 |
885 | +HANDLE_INSTANCECHECK = 20 |
886 | |
887 | # optimized exceptions |
888 | EXC_STOP_ITERATION = 1 |
889 | |
890 | # DEBUG |
891 | -#for k in globals().keys(): |
892 | +# for k in globals().keys(): |
893 | # globals()[k] = k |
894 | - |
895 | diff --git a/plainbox/vendor/rpyc/core/netref.py b/plainbox/vendor/rpyc/core/netref.py |
896 | index 0c43535..fd15ef1 100644 |
897 | --- a/plainbox/vendor/rpyc/core/netref.py |
898 | +++ b/plainbox/vendor/rpyc/core/netref.py |
899 | @@ -1,27 +1,29 @@ |
900 | -""" |
901 | -**NetRef**: a transparent *network reference*. This module contains quite a lot |
902 | +"""*NetRef*: a transparent *network reference*. This module contains quite a lot |
903 | of *magic*, so beware. |
904 | """ |
905 | import sys |
906 | -import inspect |
907 | import types |
908 | -from plainbox.vendor.rpyc.lib.compat import pickle, is_py3k, maxint, with_metaclass |
909 | +from plainbox.vendor.rpyc.lib import get_methods, get_id_pack |
910 | +from plainbox.vendor.rpyc.lib.compat import pickle, is_py_3k, maxint, with_metaclass |
911 | from plainbox.vendor.rpyc.core import consts |
912 | |
913 | |
914 | +builtin_id_pack_cache = {} # name_pack -> id_pack |
915 | +builtin_classes_cache = {} # id_pack -> class |
916 | # If these can be accessed, numpy will try to load the array from local memory, |
917 | # resulting in exceptions and/or segfaults, see #236: |
918 | -_deleted_netref_attrs = frozenset([ |
919 | +DELETED_ATTRS = frozenset([ |
920 | '__array_struct__', '__array_interface__', |
921 | ]) |
922 | |
923 | -_local_netref_attrs = frozenset([ |
924 | - '____conn__', '____oid__', '____refcount__', '__class__', '__cmp__', '__del__', '__delattr__', |
925 | - '__dir__', '__doc__', '__getattr__', '__getattribute__', '__methods__', |
926 | +LOCAL_ATTRS = frozenset([ |
927 | + '____conn__', '____id_pack__', '____refcount__', '__class__', '__cmp__', '__del__', '__delattr__', |
928 | + '__dir__', '__doc__', '__getattr__', '__getattribute__', '__hash__', '__instancecheck__', |
929 | '__init__', '__metaclass__', '__module__', '__new__', '__reduce__', |
930 | '__reduce_ex__', '__repr__', '__setattr__', '__slots__', '__str__', |
931 | - '__weakref__', '__dict__', '__members__', '__exit__', |
932 | -]) | _deleted_netref_attrs |
933 | + '__weakref__', '__dict__', '__methods__', '__exit__', |
934 | + '__eq__', '__ne__', '__lt__', '__gt__', '__le__', '__ge__', |
935 | +]) | DELETED_ATTRS |
936 | """the set of attributes that are local to the netref object""" |
937 | |
938 | _builtin_types = [ |
939 | @@ -45,19 +47,18 @@ except NameError: |
940 | else: |
941 | _builtin_types.append(BaseException) |
942 | |
943 | -if is_py3k: |
944 | +if is_py_3k: |
945 | _builtin_types.extend([ |
946 | bytes, bytearray, type(iter(range(10))), memoryview, |
947 | ]) |
948 | xrange = range |
949 | else: |
950 | _builtin_types.extend([ |
951 | - basestring, unicode, long, xrange, type(iter(xrange(10))), file, |
952 | + basestring, unicode, long, xrange, type(iter(xrange(10))), file, # noqa |
953 | types.InstanceType, types.ClassType, types.DictProxyType, |
954 | ]) |
955 | +_normalized_builtin_types = {} |
956 | |
957 | -_normalized_builtin_types = dict(((t.__name__, t.__module__), t) |
958 | - for t in _builtin_types) |
959 | |
960 | def syncreq(proxy, handler, *args): |
961 | """Performs a synchronous request on the given proxy object. |
962 | @@ -74,6 +75,7 @@ def syncreq(proxy, handler, *args): |
963 | conn = object.__getattribute__(proxy, "____conn__") |
964 | return conn.sync_request(handler, proxy, *args) |
965 | |
966 | + |
967 | def asyncreq(proxy, handler, *args): |
968 | """Performs an asynchronous request on the given proxy object. |
969 | Not intended to be invoked directly. |
970 | @@ -89,18 +91,21 @@ def asyncreq(proxy, handler, *args): |
971 | conn = object.__getattribute__(proxy, "____conn__") |
972 | return conn.async_request(handler, proxy, *args) |
973 | |
974 | + |
975 | class NetrefMetaclass(type): |
976 | """A *metaclass* used to customize the ``__repr__`` of ``netref`` classes. |
977 | It is quite useless, but it makes debugging and interactive programming |
978 | easier""" |
979 | |
980 | __slots__ = () |
981 | + |
982 | def __repr__(self): |
983 | if self.__module__: |
984 | return "<netref class '%s.%s'>" % (self.__module__, self.__name__) |
985 | else: |
986 | return "<netref class '%s'>" % (self.__name__,) |
987 | |
988 | + |
989 | class BaseNetref(with_metaclass(NetrefMetaclass, object)): |
990 | """The base netref class, from which all netref classes derive. Some netref |
991 | classes are "pre-generated" and cached upon importing this module (those |
992 | @@ -113,12 +118,17 @@ class BaseNetref(with_metaclass(NetrefMetaclass, object)): |
993 | Do not use this class directly; use :func:`class_factory` instead. |
994 | |
995 | :param conn: the :class:`rpyc.core.protocol.Connection` instance |
996 | - :param oid: the unique object ID of the remote object |
997 | + :param id_pack: id tuple for an object ~ (name_pack, remote-class-id, remote-instance-id) |
998 | + (cont.) name_pack := __module__.__name__ (hits or misses on builtin cache and sys.module) |
999 | + remote-class-id := id of object class (hits or misses on netref classes cache and instance checks) |
1000 | + remote-instance-id := id object instance (hits or misses on proxy cache) |
1001 | + id_pack is usually created by rpyc.lib.get_id_pack |
1002 | """ |
1003 | - __slots__ = ["____conn__", "____oid__", "__weakref__", "____refcount__"] |
1004 | - def __init__(self, conn, oid): |
1005 | + __slots__ = ["____conn__", "____id_pack__", "__weakref__", "____refcount__"] |
1006 | + |
1007 | + def __init__(self, conn, id_pack): |
1008 | self.____conn__ = conn |
1009 | - self.____oid__ = oid |
1010 | + self.____id_pack__ = id_pack |
1011 | self.____refcount__ = 1 |
1012 | |
1013 | def __del__(self): |
1014 | @@ -131,7 +141,7 @@ class BaseNetref(with_metaclass(NetrefMetaclass, object)): |
1015 | pass |
1016 | |
1017 | def __getattribute__(self, name): |
1018 | - if name in _local_netref_attrs: |
1019 | + if name in LOCAL_ATTRS: |
1020 | if name == "__class__": |
1021 | cls = object.__getattribute__(self, "__class__") |
1022 | if cls is None: |
1023 | @@ -139,58 +149,102 @@ class BaseNetref(with_metaclass(NetrefMetaclass, object)): |
1024 | return cls |
1025 | elif name == "__doc__": |
1026 | return self.__getattr__("__doc__") |
1027 | - elif name == "__members__": # for Python < 2.6 |
1028 | - return self.__dir__() |
1029 | - elif name in _deleted_netref_attrs: |
1030 | + elif name in DELETED_ATTRS: |
1031 | raise AttributeError() |
1032 | else: |
1033 | return object.__getattribute__(self, name) |
1034 | - elif name == "__hash__": |
1035 | - return object.__getattribute__(self, "__hash__") |
1036 | elif name == "__call__": # IronPython issue #10 |
1037 | return object.__getattribute__(self, "__call__") |
1038 | elif name == "__array__": |
1039 | return object.__getattribute__(self, "__array__") |
1040 | else: |
1041 | return syncreq(self, consts.HANDLE_GETATTR, name) |
1042 | + |
1043 | def __getattr__(self, name): |
1044 | - if name in _deleted_netref_attrs: |
1045 | + if name in DELETED_ATTRS: |
1046 | raise AttributeError() |
1047 | return syncreq(self, consts.HANDLE_GETATTR, name) |
1048 | + |
1049 | def __delattr__(self, name): |
1050 | - if name in _local_netref_attrs: |
1051 | + if name in LOCAL_ATTRS: |
1052 | object.__delattr__(self, name) |
1053 | else: |
1054 | syncreq(self, consts.HANDLE_DELATTR, name) |
1055 | + |
1056 | def __setattr__(self, name, value): |
1057 | - if name in _local_netref_attrs: |
1058 | + if name in LOCAL_ATTRS: |
1059 | object.__setattr__(self, name, value) |
1060 | else: |
1061 | syncreq(self, consts.HANDLE_SETATTR, name, value) |
1062 | + |
1063 | def __dir__(self): |
1064 | return list(syncreq(self, consts.HANDLE_DIR)) |
1065 | |
1066 | # support for metaclasses |
1067 | def __hash__(self): |
1068 | return syncreq(self, consts.HANDLE_HASH) |
1069 | + |
1070 | def __cmp__(self, other): |
1071 | - return syncreq(self, consts.HANDLE_CMP, other) |
1072 | + return syncreq(self, consts.HANDLE_CMP, other, '__cmp__') |
1073 | + |
1074 | + def __eq__(self, other): |
1075 | + return syncreq(self, consts.HANDLE_CMP, other, '__eq__') |
1076 | + |
1077 | + def __ne__(self, other): |
1078 | + return syncreq(self, consts.HANDLE_CMP, other, '__ne__') |
1079 | + |
1080 | + def __lt__(self, other): |
1081 | + return syncreq(self, consts.HANDLE_CMP, other, '__lt__') |
1082 | + |
1083 | + def __gt__(self, other): |
1084 | + return syncreq(self, consts.HANDLE_CMP, other, '__gt__') |
1085 | + |
1086 | + def __le__(self, other): |
1087 | + return syncreq(self, consts.HANDLE_CMP, other, '__le__') |
1088 | + |
1089 | + def __ge__(self, other): |
1090 | + return syncreq(self, consts.HANDLE_CMP, other, '__ge__') |
1091 | + |
1092 | def __repr__(self): |
1093 | return syncreq(self, consts.HANDLE_REPR) |
1094 | + |
1095 | def __str__(self): |
1096 | return syncreq(self, consts.HANDLE_STR) |
1097 | + |
1098 | def __exit__(self, exc, typ, tb): |
1099 | return syncreq(self, consts.HANDLE_CTXEXIT, exc) # can't pass type nor traceback |
1100 | - # support for pickling netrefs |
1101 | + |
1102 | def __reduce_ex__(self, proto): |
1103 | + # support for pickling netrefs |
1104 | return pickle.loads, (syncreq(self, consts.HANDLE_PICKLE, proto),) |
1105 | |
1106 | + def __instancecheck__(self, other): |
1107 | + # support for checking cached instances across connections |
1108 | + if isinstance(other, BaseNetref): |
1109 | + if self.____id_pack__[2] != 0: |
1110 | + raise TypeError("isinstance() arg 2 must be a class, type, or tuple of classes and types") |
1111 | + elif self.____id_pack__[1] == other.____id_pack__[1]: |
1112 | + if other.____id_pack__[2] == 0: |
1113 | + return False |
1114 | + elif other.____id_pack__[2] != 0: |
1115 | + return True |
1116 | + else: |
1117 | + # seems dubious if each netref proxies to a different address spaces |
1118 | + return syncreq(self, consts.HANDLE_INSTANCECHECK, other.____id_pack__) |
1119 | + else: |
1120 | + if self.____id_pack__[2] == 0: |
1121 | + # outside the context of `__instancecheck__`, `__class__` is expected to be type(self) |
1122 | + # within the context of `__instancecheck__`, `other` should be compared to the proxied class |
1123 | + return isinstance(other, type(self).__dict__['__class__'].instance) |
1124 | + else: |
1125 | + raise TypeError("isinstance() arg 2 must be a class, type, or tuple of classes and types") |
1126 | + |
1127 | |
1128 | def _make_method(name, doc): |
1129 | """creates a method with the given name and docstring that invokes |
1130 | :func:`syncreq` on its `self` argument""" |
1131 | |
1132 | - slicers = {"__getslice__" : "__getitem__", "__delslice__" : "__delitem__", "__setslice__" : "__setitem__"} |
1133 | + slicers = {"__getslice__": "__getitem__", "__delslice__": "__delitem__", "__setslice__": "__setitem__"} |
1134 | |
1135 | name = str(name) # IronPython issue #10 |
1136 | if name == "__call__": |
1137 | @@ -222,62 +276,74 @@ def _make_method(name, doc): |
1138 | method.__doc__ = doc |
1139 | return method |
1140 | |
1141 | -def inspect_methods(obj): |
1142 | - """introspects the given (local) object, returning a list of all of its |
1143 | - methods (going up the MRO). |
1144 | |
1145 | - :param obj: any local (not proxy) python object |
1146 | +class NetrefClass(object): |
1147 | + """a descriptor of the class being proxied |
1148 | |
1149 | - :returns: a list of ``(method name, docstring)`` tuples of all the methods |
1150 | - of the given object |
1151 | + Future considerations: |
1152 | + + there may be a cleaner alternative but lib.compat.with_metaclass prevented using __new__ |
1153 | + + consider using __slot__ for this class |
1154 | + + revisit the design choice to use properties here |
1155 | """ |
1156 | - methods = {} |
1157 | - attrs = {} |
1158 | - if isinstance(obj, type): |
1159 | - # don't forget the darn metaclass |
1160 | - mros = list(reversed(type(obj).__mro__)) + list(reversed(obj.__mro__)) |
1161 | - else: |
1162 | - mros = reversed(type(obj).__mro__) |
1163 | - for basecls in mros: |
1164 | - attrs.update(basecls.__dict__) |
1165 | - for name, attr in attrs.items(): |
1166 | - if name not in _local_netref_attrs and hasattr(attr, "__call__"): |
1167 | - methods[name] = inspect.getdoc(attr) |
1168 | - return methods.items() |
1169 | - |
1170 | -def class_factory(clsname, modname, methods): |
1171 | + |
1172 | + def __init__(self, class_obj): |
1173 | + self._class_obj = class_obj |
1174 | + |
1175 | + @property |
1176 | + def instance(self): |
1177 | + """accessor to class object for the instance being proxied""" |
1178 | + return self._class_obj |
1179 | + |
1180 | + @property |
1181 | + def owner(self): |
1182 | + """accessor to the class object for the instance owner being proxied""" |
1183 | + return self._class_obj.__class__ |
1184 | + |
1185 | + def __get__(self, netref_instance, netref_owner): |
1186 | + """the value returned when accessing the netref class is dictated by whether or not an instance is proxied""" |
1187 | + return self.owner if netref_instance.____id_pack__[2] == 0 else self.instance |
1188 | + |
1189 | + |
1190 | +def class_factory(id_pack, methods): |
1191 | """Creates a netref class proxying the given class |
1192 | |
1193 | - :param clsname: the class's name |
1194 | - :param modname: the class's module name |
1195 | - :param methods: a list of ``(method name, docstring)`` tuples, of the methods |
1196 | - that the class defines |
1197 | + :param id_pack: the id pack used for proxy communication |
1198 | + :param methods: a list of ``(method name, docstring)`` tuples, of the methods that the class defines |
1199 | |
1200 | :returns: a netref class |
1201 | """ |
1202 | - clsname = str(clsname) # IronPython issue #10 |
1203 | - modname = str(modname) # IronPython issue #10 |
1204 | - ns = {"__slots__" : ()} |
1205 | + ns = {"__slots__": (), "__class__": None} |
1206 | + name_pack = id_pack[0] |
1207 | + class_descriptor = None |
1208 | + if name_pack is not None: |
1209 | + # attempt to resolve __class__ using sys.modules (i.e. builtins and imported modules) |
1210 | + _module = None |
1211 | + cursor = len(name_pack) |
1212 | + while cursor != -1: |
1213 | + _module = sys.modules.get(name_pack[:cursor]) |
1214 | + if _module is None: |
1215 | + cursor = name_pack[:cursor].rfind('.') |
1216 | + continue |
1217 | + _class_name = name_pack[cursor + 1:] |
1218 | + _class = getattr(_module, _class_name, None) |
1219 | + if _class is not None and hasattr(_class, '__class__'): |
1220 | + class_descriptor = NetrefClass(_class) |
1221 | + break |
1222 | + ns['__class__'] = class_descriptor |
1223 | + netref_name = class_descriptor.owner.__name__ if class_descriptor is not None else name_pack |
1224 | + # create methods that must perform a syncreq |
1225 | for name, doc in methods: |
1226 | - name = str(name) # IronPython issue #10 |
1227 | - if name not in _local_netref_attrs: |
1228 | + name = str(name) # IronPython issue #10 |
1229 | + # only create methods that wont shadow BaseNetref during merge for mro |
1230 | + if name not in LOCAL_ATTRS: # i.e. `name != __class__` |
1231 | ns[name] = _make_method(name, doc) |
1232 | - ns["__module__"] = modname |
1233 | - if modname in sys.modules and hasattr(sys.modules[modname], clsname): |
1234 | - ns["__class__"] = getattr(sys.modules[modname], clsname) |
1235 | - elif (clsname, modname) in _normalized_builtin_types: |
1236 | - ns["__class__"] = _normalized_builtin_types[clsname, modname] |
1237 | - else: |
1238 | - # to be resolved by the instance |
1239 | - ns["__class__"] = None |
1240 | - return type(clsname, (BaseNetref,), ns) |
1241 | - |
1242 | -builtin_classes_cache = {} |
1243 | -"""The cache of built-in netref classes (each of the types listed in |
1244 | -:data:`_builtin_types`). These are shared between all RPyC connections""" |
1245 | + return type(netref_name, (BaseNetref,), ns) |
1246 | |
1247 | -# init the builtin_classes_cache |
1248 | -for cls in _builtin_types: |
1249 | - builtin_classes_cache[cls.__name__, cls.__module__] = class_factory( |
1250 | - cls.__name__, cls.__module__, inspect_methods(cls)) |
1251 | |
1252 | +for _builtin in _builtin_types: |
1253 | + _id_pack = get_id_pack(_builtin) |
1254 | + _name_pack = _id_pack[0] |
1255 | + _normalized_builtin_types[_name_pack] = _builtin |
1256 | + _builtin_methods = get_methods(LOCAL_ATTRS, _builtin) |
1257 | + # assume all normalized builtins are classes |
1258 | + builtin_classes_cache[_name_pack] = class_factory(_id_pack, _builtin_methods) |
1259 | diff --git a/plainbox/vendor/rpyc/core/protocol.py b/plainbox/vendor/rpyc/core/protocol.py |
1260 | index a3be171..56cd9a9 100644 |
1261 | --- a/plainbox/vendor/rpyc/core/protocol.py |
1262 | +++ b/plainbox/vendor/rpyc/core/protocol.py |
1263 | @@ -1,64 +1,65 @@ |
1264 | -""" |
1265 | -The RPyC protocol |
1266 | +"""The RPyC protocol |
1267 | """ |
1268 | import sys |
1269 | import itertools |
1270 | import socket |
1271 | -import time |
1272 | -import gc |
1273 | +import time # noqa: F401 |
1274 | +import gc # noqa: F401 |
1275 | |
1276 | from threading import Lock, Condition |
1277 | -from plainbox.vendor.rpyc.lib import spawn, Timeout |
1278 | -from plainbox.vendor.rpyc.lib.compat import (pickle, next, is_py3k, maxint, select_error, |
1279 | - acquire_lock) |
1280 | +from plainbox.vendor.rpyc.lib import spawn, Timeout, get_methods, get_id_pack |
1281 | +from plainbox.vendor.rpyc.lib.compat import pickle, next, is_py_3k, maxint, select_error, acquire_lock # noqa: F401 |
1282 | from plainbox.vendor.rpyc.lib.colls import WeakValueDict, RefCountingColl |
1283 | from plainbox.vendor.rpyc.core import consts, brine, vinegar, netref |
1284 | from plainbox.vendor.rpyc.core.async_ import AsyncResult |
1285 | |
1286 | + |
1287 | class PingError(Exception): |
1288 | """The exception raised should :func:`Connection.ping` fail""" |
1289 | pass |
1290 | |
1291 | + |
1292 | DEFAULT_CONFIG = dict( |
1293 | # ATTRIBUTES |
1294 | - allow_safe_attrs = True, |
1295 | - allow_exposed_attrs = True, |
1296 | - allow_public_attrs = False, |
1297 | - allow_all_attrs = False, |
1298 | - safe_attrs = set(['__abs__', '__add__', '__and__', '__bool__', '__cmp__', '__contains__', |
1299 | - '__delitem__', '__delslice__', '__div__', '__divmod__', '__doc__', |
1300 | - '__eq__', '__float__', '__floordiv__', '__ge__', '__getitem__', |
1301 | - '__getslice__', '__gt__', '__hash__', '__hex__', '__iadd__', '__iand__', |
1302 | - '__idiv__', '__ifloordiv__', '__ilshift__', '__imod__', '__imul__', |
1303 | - '__index__', '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__', |
1304 | - '__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', '__len__', |
1305 | - '__long__', '__lshift__', '__lt__', '__mod__', '__mul__', '__ne__', |
1306 | - '__neg__', '__new__', '__nonzero__', '__oct__', '__or__', '__pos__', |
1307 | - '__pow__', '__radd__', '__rand__', '__rdiv__', '__rdivmod__', '__repr__', |
1308 | - '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__', '__ror__', |
1309 | - '__rpow__', '__rrshift__', '__rshift__', '__rsub__', '__rtruediv__', |
1310 | - '__rxor__', '__setitem__', '__setslice__', '__str__', '__sub__', |
1311 | - '__truediv__', '__xor__', 'next', '__length_hint__', '__enter__', |
1312 | - '__exit__', '__next__',]), |
1313 | - exposed_prefix = "exposed_", |
1314 | - allow_getattr = True, |
1315 | - allow_setattr = False, |
1316 | - allow_delattr = False, |
1317 | + allow_safe_attrs=True, |
1318 | + allow_exposed_attrs=True, |
1319 | + allow_public_attrs=False, |
1320 | + allow_all_attrs=False, |
1321 | + safe_attrs=set(['__abs__', '__add__', '__and__', '__bool__', '__cmp__', '__contains__', |
1322 | + '__delitem__', '__delslice__', '__div__', '__divmod__', '__doc__', |
1323 | + '__eq__', '__float__', '__floordiv__', '__ge__', '__getitem__', |
1324 | + '__getslice__', '__gt__', '__hash__', '__hex__', '__iadd__', '__iand__', |
1325 | + '__idiv__', '__ifloordiv__', '__ilshift__', '__imod__', '__imul__', |
1326 | + '__index__', '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__', |
1327 | + '__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', '__len__', |
1328 | + '__long__', '__lshift__', '__lt__', '__mod__', '__mul__', '__ne__', |
1329 | + '__neg__', '__new__', '__nonzero__', '__oct__', '__or__', '__pos__', |
1330 | + '__pow__', '__radd__', '__rand__', '__rdiv__', '__rdivmod__', '__repr__', |
1331 | + '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__', '__ror__', |
1332 | + '__rpow__', '__rrshift__', '__rshift__', '__rsub__', '__rtruediv__', |
1333 | + '__rxor__', '__setitem__', '__setslice__', '__str__', '__sub__', |
1334 | + '__truediv__', '__xor__', 'next', '__length_hint__', '__enter__', |
1335 | + '__exit__', '__next__', ]), |
1336 | + exposed_prefix="exposed_", |
1337 | + allow_getattr=True, |
1338 | + allow_setattr=False, |
1339 | + allow_delattr=False, |
1340 | # EXCEPTIONS |
1341 | - include_local_traceback = True, |
1342 | - instantiate_custom_exceptions = False, |
1343 | - import_custom_exceptions = False, |
1344 | - instantiate_oldstyle_exceptions = False, # which don't derive from Exception |
1345 | - propagate_SystemExit_locally = False, # whether to propagate SystemExit locally or to the other party |
1346 | - propagate_KeyboardInterrupt_locally = True, # whether to propagate KeyboardInterrupt locally or to the other party |
1347 | - log_exceptions = True, |
1348 | + include_local_traceback=True, |
1349 | + include_local_version=True, |
1350 | + instantiate_custom_exceptions=False, |
1351 | + import_custom_exceptions=False, |
1352 | + instantiate_oldstyle_exceptions=False, # which don't derive from Exception |
1353 | + propagate_SystemExit_locally=False, # whether to propagate SystemExit locally or to the other party |
1354 | + propagate_KeyboardInterrupt_locally=True, # whether to propagate KeyboardInterrupt locally or to the other party |
1355 | + log_exceptions=True, |
1356 | # MISC |
1357 | - allow_pickle = False, |
1358 | - connid = None, |
1359 | - credentials = None, |
1360 | - endpoints = None, |
1361 | - logger = None, |
1362 | - sync_request_timeout = 30, |
1363 | + allow_pickle=False, |
1364 | + connid=None, |
1365 | + credentials=None, |
1366 | + endpoints=None, |
1367 | + logger=None, |
1368 | + sync_request_timeout=30, |
1369 | ) |
1370 | """ |
1371 | The default configuration dictionary of the protocol. You can override these parameters |
1372 | @@ -157,18 +158,18 @@ class Connection(object): |
1373 | |
1374 | def __del__(self): |
1375 | self.close() |
1376 | + |
1377 | def __enter__(self): |
1378 | return self |
1379 | + |
1380 | def __exit__(self, t, v, tb): |
1381 | self.close() |
1382 | + |
1383 | def __repr__(self): |
1384 | a, b = object.__repr__(self).split(" object ") |
1385 | return "%s %r object %s" % (a, self._config["connid"], b) |
1386 | |
1387 | - # |
1388 | - # IO |
1389 | - # |
1390 | - def _cleanup(self, _anyway = True): |
1391 | + def _cleanup(self, _anyway=True): # IO |
1392 | if self._closed and not _anyway: |
1393 | return |
1394 | self._closed = True |
1395 | @@ -181,11 +182,11 @@ class Connection(object): |
1396 | self._last_traceback = None |
1397 | self._remote_root = None |
1398 | self._local_root = None |
1399 | - #self._seqcounter = None |
1400 | - #self._config.clear() |
1401 | + # self._seqcounter = None |
1402 | + # self._config.clear() |
1403 | del self._HANDLERS |
1404 | |
1405 | - def close(self, _catchall = True): |
1406 | + def close(self, _catchall=True): # IO |
1407 | """closes the connection, releasing all held resources""" |
1408 | if self._closed: |
1409 | return |
1410 | @@ -198,20 +199,19 @@ class Connection(object): |
1411 | if not _catchall: |
1412 | raise |
1413 | finally: |
1414 | - self._cleanup(_anyway = True) |
1415 | + self._cleanup(_anyway=True) |
1416 | |
1417 | @property |
1418 | - def closed(self): |
1419 | + def closed(self): # IO |
1420 | """Indicates whether the connection has been closed or not""" |
1421 | return self._closed |
1422 | |
1423 | - def fileno(self): |
1424 | + def fileno(self): # IO |
1425 | """Returns the connectin's underlying file descriptor""" |
1426 | return self._channel.fileno() |
1427 | |
1428 | - def ping(self, data = None, timeout = 3): |
1429 | - """ |
1430 | - Asserts that the other party is functioning properly, by making sure |
1431 | + def ping(self, data=None, timeout=3): # IO |
1432 | + """Asserts that the other party is functioning properly, by making sure |
1433 | the *data* is echoed back before the *timeout* expires |
1434 | |
1435 | :param data: the data to send (leave ``None`` for the default buffer) |
1436 | @@ -222,14 +222,14 @@ class Connection(object): |
1437 | """ |
1438 | if data is None: |
1439 | data = "abcdefghijklmnopqrstuvwxyz" * 20 |
1440 | - res = self.async_request(consts.HANDLE_PING, data, timeout = timeout) |
1441 | + res = self.async_request(consts.HANDLE_PING, data, timeout=timeout) |
1442 | if res.value != data: |
1443 | raise PingError("echo mismatches sent data") |
1444 | |
1445 | - def _get_seq_id(self): |
1446 | + def _get_seq_id(self): # IO |
1447 | return next(self._seqcounter) |
1448 | |
1449 | - def _send(self, msg, seq, args): |
1450 | + def _send(self, msg, seq, args): # IO |
1451 | data = brine.dump((msg, seq, args)) |
1452 | # GC might run while sending data |
1453 | # if so, a BaseNetref.__del__ might be called |
1454 | @@ -260,10 +260,7 @@ class Connection(object): |
1455 | finally: |
1456 | self._sendlock.release() |
1457 | |
1458 | - # |
1459 | - # boxing |
1460 | - # |
1461 | - def _box(self, obj): |
1462 | + def _box(self, obj): # boxing |
1463 | """store a local object in such a way that it could be recreated on |
1464 | the remote party either by-value or by-reference""" |
1465 | if brine.dumpable(obj): |
1466 | @@ -271,19 +268,13 @@ class Connection(object): |
1467 | if type(obj) is tuple: |
1468 | return consts.LABEL_TUPLE, tuple(self._box(item) for item in obj) |
1469 | elif isinstance(obj, netref.BaseNetref) and obj.____conn__ is self: |
1470 | - return consts.LABEL_LOCAL_REF, obj.____oid__ |
1471 | + return consts.LABEL_LOCAL_REF, obj.____id_pack__ |
1472 | else: |
1473 | - self._local_objects.add(obj) |
1474 | - try: |
1475 | - cls = obj.__class__ |
1476 | - except Exception: |
1477 | - # see issue #16 |
1478 | - cls = type(obj) |
1479 | - if not isinstance(cls, type): |
1480 | - cls = type(obj) |
1481 | - return consts.LABEL_REMOTE_REF, (id(obj), cls.__name__, cls.__module__) |
1482 | + id_pack = get_id_pack(obj) |
1483 | + self._local_objects.add(id_pack, obj) |
1484 | + return consts.LABEL_REMOTE_REF, id_pack |
1485 | |
1486 | - def _unbox(self, package): |
1487 | + def _unbox(self, package): # boxing |
1488 | """recreate a local object representation of the remote object: if the |
1489 | object is passed by value, just return it; if the object is passed by |
1490 | reference, create a netref to it""" |
1491 | @@ -295,39 +286,39 @@ class Connection(object): |
1492 | if label == consts.LABEL_LOCAL_REF: |
1493 | return self._local_objects[value] |
1494 | if label == consts.LABEL_REMOTE_REF: |
1495 | - oid, clsname, modname = value |
1496 | - if oid in self._proxy_cache: |
1497 | - proxy = self._proxy_cache[oid] |
1498 | - proxy.____refcount__ += 1 # other side increased refcount on boxing, |
1499 | - # if I'm returning from cache instead of new object, |
1500 | - # must increase refcount to match |
1501 | - return proxy |
1502 | - proxy = self._netref_factory(oid, clsname, modname) |
1503 | - self._proxy_cache[oid] = proxy |
1504 | + id_pack = (str(value[0]), value[1], value[2]) # so value is a id_pack |
1505 | + if id_pack in self._proxy_cache: |
1506 | + proxy = self._proxy_cache[id_pack] |
1507 | + proxy.____refcount__ += 1 # if cached then remote incremented refcount, so sync refcount |
1508 | + else: |
1509 | + proxy = self._netref_factory(id_pack) |
1510 | + self._proxy_cache[id_pack] = proxy |
1511 | return proxy |
1512 | raise ValueError("invalid label %r" % (label,)) |
1513 | |
1514 | - def _netref_factory(self, oid, clsname, modname): |
1515 | - typeinfo = (clsname, modname) |
1516 | - if typeinfo in self._netref_classes_cache: |
1517 | - cls = self._netref_classes_cache[typeinfo] |
1518 | - elif typeinfo in netref.builtin_classes_cache: |
1519 | - cls = netref.builtin_classes_cache[typeinfo] |
1520 | - else: |
1521 | - info = self.sync_request(consts.HANDLE_INSPECT, oid) |
1522 | - cls = netref.class_factory(clsname, modname, info) |
1523 | - self._netref_classes_cache[typeinfo] = cls |
1524 | - return cls(self, oid) |
1525 | - |
1526 | - # |
1527 | - # dispatching |
1528 | - # |
1529 | - def _dispatch_request(self, seq, raw_args): |
1530 | + def _netref_factory(self, id_pack): # boxing |
1531 | +        """Create a netref instance for the remote object identified by id_pack, building (or reusing a cached) netref class""" |
1532 | + cls = None |
1533 | + if id_pack[2] == 0 and id_pack in self._netref_classes_cache: |
1534 | + cls = self._netref_classes_cache[id_pack] |
1535 | + elif id_pack[0] in netref.builtin_classes_cache: |
1536 | + cls = netref.builtin_classes_cache[id_pack[0]] |
1537 | + if cls is None: |
1538 | + # in the future, it could see if a sys.module cache/lookup hits first |
1539 | + cls_methods = self.sync_request(consts.HANDLE_INSPECT, id_pack) |
1540 | + cls = netref.class_factory(id_pack, cls_methods) |
1541 | + if id_pack[2] == 0: |
1542 | + # only use cached netrefs for classes |
1543 | + # ... instance caching after gc of a proxy will take some mental gymnastics |
1544 | + self._netref_classes_cache[id_pack] = cls |
1545 | + return cls(self, id_pack) |
1546 | + |
1547 | + def _dispatch_request(self, seq, raw_args): # dispatch |
1548 | try: |
1549 | handler, args = raw_args |
1550 | args = self._unbox(args) |
1551 | res = self._HANDLERS[handler](self, *args) |
1552 | - except: |
1553 | +        except:  # TODO: revisit how to catch and handle locally, this should simplify when py2 is dropped |
1554 | # need to catch old style exceptions too |
1555 | t, v, tb = sys.exc_info() |
1556 | self._last_traceback = tb |
1557 | @@ -342,34 +333,39 @@ class Connection(object): |
1558 | else: |
1559 | self._send(consts.MSG_REPLY, seq, self._box(res)) |
1560 | |
1561 | - def _box_exc(self, typ, val, tb): |
1562 | - return vinegar.dump(typ, val, tb, include_local_traceback= |
1563 | - self._config["include_local_traceback"]) |
1564 | + def _box_exc(self, typ, val, tb): # dispatch? |
1565 | + return vinegar.dump(typ, val, tb, |
1566 | + include_local_traceback=self._config["include_local_traceback"], |
1567 | + include_local_version=self._config["include_local_version"]) |
1568 | |
1569 | - def _unbox_exc(self, raw): |
1570 | + def _unbox_exc(self, raw): # dispatch? |
1571 | return vinegar.load(raw, |
1572 | - import_custom_exceptions = self._config["import_custom_exceptions"], |
1573 | - instantiate_custom_exceptions = self._config["instantiate_custom_exceptions"], |
1574 | - instantiate_oldstyle_exceptions = self._config["instantiate_oldstyle_exceptions"]) |
1575 | - |
1576 | - # |
1577 | - # serving |
1578 | - # |
1579 | - |
1580 | - def _dispatch(self, data): |
1581 | + import_custom_exceptions=self._config["import_custom_exceptions"], |
1582 | + instantiate_custom_exceptions=self._config["instantiate_custom_exceptions"], |
1583 | + instantiate_oldstyle_exceptions=self._config["instantiate_oldstyle_exceptions"]) |
1584 | + |
1585 | + def _seq_request_callback(self, msg, seq, is_exc, obj): |
1586 | + _callback = self._request_callbacks.pop(seq, None) |
1587 | + if _callback is not None: |
1588 | + _callback(is_exc, obj) |
1589 | + elif self._config["logger"] is not None: |
1590 | +            debug_msg = 'Received {} seq {} and a related request callback did not exist' |
1591 | + self._config["logger"].debug(debug_msg.format(msg, seq)) |
1592 | + |
1593 | + def _dispatch(self, data): # serving---dispatch? |
1594 | msg, seq, args = brine.load(data) |
1595 | if msg == consts.MSG_REQUEST: |
1596 | self._dispatch_request(seq, args) |
1597 | elif msg == consts.MSG_REPLY: |
1598 | obj = self._unbox(args) |
1599 | - self._request_callbacks.pop(seq)(False, obj) |
1600 | + self._seq_request_callback(msg, seq, False, obj) |
1601 | elif msg == consts.MSG_EXCEPTION: |
1602 | obj = self._unbox_exc(args) |
1603 | - self._request_callbacks.pop(seq)(True, obj) |
1604 | + self._seq_request_callback(msg, seq, True, obj) |
1605 | else: |
1606 | raise ValueError("invalid message type: %r" % (msg,)) |
1607 | |
1608 | - def serve(self, timeout=1, wait_for_lock=True): |
1609 | + def serve(self, timeout=1, wait_for_lock=True): # serving |
1610 | """Serves a single request or reply that arrives within the given |
1611 | time frame (default is 1 sec). Note that the dispatching of a request |
1612 | might trigger multiple (nested) requests, thus this function may be |
1613 | @@ -381,8 +377,7 @@ class Connection(object): |
1614 | timeout = Timeout(timeout) |
1615 | with self._recv_event: |
1616 | if not self._recvlock.acquire(False): |
1617 | - return (wait_for_lock and |
1618 | - self._recv_event.wait(timeout.timeleft())) |
1619 | + return wait_for_lock and self._recv_event.wait(timeout.timeleft()) |
1620 | try: |
1621 | data = self._channel.poll(timeout) and self._channel.recv() |
1622 | if not data: |
1623 | @@ -397,7 +392,7 @@ class Connection(object): |
1624 | self._dispatch(data) |
1625 | return True |
1626 | |
1627 | - def poll(self, timeout = 0): |
1628 | + def poll(self, timeout=0): # serving |
1629 | """Serves a single transaction, should one arrives in the given |
1630 | interval. Note that handling a request/reply may trigger nested |
1631 | requests, which are all part of a single transaction. |
1632 | @@ -405,11 +400,11 @@ class Connection(object): |
1633 | :returns: ``True`` if a transaction was served, ``False`` otherwise""" |
1634 | return self.serve(timeout, False) |
1635 | |
1636 | - def serve_all(self): |
1637 | + def serve_all(self): # serving |
1638 | """Serves all requests and replies for as long as the connection is |
1639 | alive.""" |
1640 | try: |
1641 | - while True: |
1642 | + while not self.closed: |
1643 | self.serve(None) |
1644 | except (socket.error, select_error, IOError): |
1645 | if not self.closed: |
1646 | @@ -419,9 +414,15 @@ class Connection(object): |
1647 | finally: |
1648 | self.close() |
1649 | |
1650 | - def serve_threaded(self, thread_count=10): |
1651 | - """Serves all requests and replies for as long as the connection is |
1652 | - alive.""" |
1653 | + def serve_threaded(self, thread_count=10): # serving |
1654 | + """Serves all requests and replies for as long as the connection is alive. |
1655 | + |
1656 | + CAVEAT: using non-immutable types that require a netref to be constructed to serve a request, |
1657 | + or invoking anything else that performs a sync_request, may timeout due to the sync_request reply being |
1658 | + received by another thread serving the connection. A more conventional approach where each client thread |
1659 | + opens a new connection would allow `ThreadedServer` to naturally avoid such multiplexing issues and |
1660 | + is the preferred approach for threading procedures that invoke sync_request. See issue #345 |
1661 | + """ |
1662 | def _thread_target(): |
1663 | try: |
1664 | while True: |
1665 | @@ -441,7 +442,7 @@ class Connection(object): |
1666 | finally: |
1667 | self.close() |
1668 | |
1669 | - def poll_all(self, timeout=0): |
1670 | + def poll_all(self, timeout=0): # serving |
1671 | """Serves all requests and replies that arrive within the given interval. |
1672 | |
1673 | :returns: ``True`` if at least a single transaction was served, ``False`` otherwise |
1674 | @@ -458,11 +459,8 @@ class Connection(object): |
1675 | pass |
1676 | return at_least_once |
1677 | |
1678 | - # |
1679 | - # requests |
1680 | - # |
1681 | - def sync_request(self, handler, *args): |
1682 | - """Sends a synchronous request (waits for the reply to arrive) |
1683 | + def sync_request(self, handler, *args): # serving |
1684 | + """requests, sends a synchronous request (waits for the reply to arrive) |
1685 | |
1686 | :raises: any exception that the requets may be generated |
1687 | :returns: the result of the request |
1688 | @@ -470,16 +468,19 @@ class Connection(object): |
1689 | timeout = self._config["sync_request_timeout"] |
1690 | return self.async_request(handler, *args, timeout=timeout).value |
1691 | |
1692 | - def _async_request(self, handler, args = (), callback = (lambda a, b: None)): |
1693 | + def _async_request(self, handler, args=(), callback=(lambda a, b: None)): # serving |
1694 | seq = self._get_seq_id() |
1695 | self._request_callbacks[seq] = callback |
1696 | try: |
1697 | self._send(consts.MSG_REQUEST, seq, (handler, self._box(args))) |
1698 | - except: |
1699 | + except Exception: |
1700 | + # TODO: review test_remote_exception, logging exceptions show attempt to write on closed stream |
1701 | + # depending on the case, the MSG_REQUEST may or may not have been sent completely |
1702 | +            # so, pop the callback and raise to keep response integrity consistent |
1703 | self._request_callbacks.pop(seq, None) |
1704 | raise |
1705 | |
1706 | - def async_request(self, handler, *args, **kwargs): |
1707 | + def async_request(self, handler, *args, **kwargs): # serving |
1708 | """Send an asynchronous request (does not wait for it to finish) |
1709 | |
1710 | :returns: an :class:`rpyc.core.async_.AsyncResult` object, which will |
1711 | @@ -495,128 +496,170 @@ class Connection(object): |
1712 | return res |
1713 | |
1714 | @property |
1715 | - def root(self): |
1716 | + def root(self): # serving |
1717 | """Fetches the root object (service) of the other party""" |
1718 | if self._remote_root is None: |
1719 | self._remote_root = self.sync_request(consts.HANDLE_GETROOT) |
1720 | return self._remote_root |
1721 | |
1722 | - # |
1723 | - # attribute access |
1724 | - # |
1725 | - def _check_attr(self, obj, name, perm): |
1726 | + def _check_attr(self, obj, name, perm): # attribute access |
1727 | config = self._config |
1728 | if not config[perm]: |
1729 | raise AttributeError("cannot access %r" % (name,)) |
1730 | prefix = config["allow_exposed_attrs"] and config["exposed_prefix"] |
1731 | - plain = (config["allow_all_attrs"] or |
1732 | - config["allow_exposed_attrs"] and name.startswith(prefix) or |
1733 | - config["allow_safe_attrs"] and name in config["safe_attrs"] or |
1734 | - config["allow_public_attrs"] and not name.startswith("_")) |
1735 | - has_exposed = prefix and hasattr(obj, prefix+name) |
1736 | + plain = config["allow_all_attrs"] |
1737 | + plain |= config["allow_exposed_attrs"] and name.startswith(prefix) |
1738 | + plain |= config["allow_safe_attrs"] and name in config["safe_attrs"] |
1739 | + plain |= config["allow_public_attrs"] and not name.startswith("_") |
1740 | + has_exposed = prefix and hasattr(obj, prefix + name) |
1741 | if plain and (not has_exposed or hasattr(obj, name)): |
1742 | return name |
1743 | if has_exposed: |
1744 | - return prefix+name |
1745 | + return prefix + name |
1746 | if plain: |
1747 | - return name # chance for better traceback |
1748 | + return name # chance for better traceback |
1749 | raise AttributeError("cannot access %r" % (name,)) |
1750 | |
1751 | - def _access_attr(self, obj, name, args, overrider, param, default): |
1752 | - if is_py3k: |
1753 | + def _access_attr(self, obj, name, args, overrider, param, default): # attribute access |
1754 | + if is_py_3k: |
1755 | if type(name) is bytes: |
1756 | name = str(name, "utf8") |
1757 | elif type(name) is not str: |
1758 | raise TypeError("name must be a string") |
1759 | else: |
1760 | - if type(name) not in (str, unicode): |
1761 | + if type(name) not in (str, unicode): # noqa |
1762 | raise TypeError("name must be a string") |
1763 | - name = str(name) # IronPython issue #10 + py3k issue |
1764 | + name = str(name) # IronPython issue #10 + py3k issue |
1765 | accessor = getattr(type(obj), overrider, None) |
1766 | if accessor is None: |
1767 | accessor = default |
1768 | name = self._check_attr(obj, name, param) |
1769 | return accessor(obj, name, *args) |
1770 | |
1771 | - # |
1772 | - # request handlers |
1773 | - # |
1774 | @classmethod |
1775 | - def _request_handlers(cls): |
1776 | + def _request_handlers(cls): # request handlers |
1777 | return { |
1778 | - consts.HANDLE_PING: cls._handle_ping, |
1779 | - consts.HANDLE_CLOSE: cls._handle_close, |
1780 | - consts.HANDLE_GETROOT: cls._handle_getroot, |
1781 | - consts.HANDLE_GETATTR: cls._handle_getattr, |
1782 | - consts.HANDLE_DELATTR: cls._handle_delattr, |
1783 | - consts.HANDLE_SETATTR: cls._handle_setattr, |
1784 | - consts.HANDLE_CALL: cls._handle_call, |
1785 | - consts.HANDLE_CALLATTR: cls._handle_callattr, |
1786 | - consts.HANDLE_REPR: cls._handle_repr, |
1787 | - consts.HANDLE_STR: cls._handle_str, |
1788 | - consts.HANDLE_CMP: cls._handle_cmp, |
1789 | - consts.HANDLE_HASH: cls._handle_hash, |
1790 | - consts.HANDLE_DIR: cls._handle_dir, |
1791 | - consts.HANDLE_PICKLE: cls._handle_pickle, |
1792 | - consts.HANDLE_DEL: cls._handle_del, |
1793 | - consts.HANDLE_INSPECT: cls._handle_inspect, |
1794 | - consts.HANDLE_BUFFITER: cls._handle_buffiter, |
1795 | - consts.HANDLE_OLDSLICING: cls._handle_oldslicing, |
1796 | - consts.HANDLE_CTXEXIT: cls._handle_ctxexit, |
1797 | + consts.HANDLE_PING: cls._handle_ping, |
1798 | + consts.HANDLE_CLOSE: cls._handle_close, |
1799 | + consts.HANDLE_GETROOT: cls._handle_getroot, |
1800 | + consts.HANDLE_GETATTR: cls._handle_getattr, |
1801 | + consts.HANDLE_DELATTR: cls._handle_delattr, |
1802 | + consts.HANDLE_SETATTR: cls._handle_setattr, |
1803 | + consts.HANDLE_CALL: cls._handle_call, |
1804 | + consts.HANDLE_CALLATTR: cls._handle_callattr, |
1805 | + consts.HANDLE_REPR: cls._handle_repr, |
1806 | + consts.HANDLE_STR: cls._handle_str, |
1807 | + consts.HANDLE_CMP: cls._handle_cmp, |
1808 | + consts.HANDLE_HASH: cls._handle_hash, |
1809 | + consts.HANDLE_INSTANCECHECK: cls._handle_instancecheck, |
1810 | + consts.HANDLE_DIR: cls._handle_dir, |
1811 | + consts.HANDLE_PICKLE: cls._handle_pickle, |
1812 | + consts.HANDLE_DEL: cls._handle_del, |
1813 | + consts.HANDLE_INSPECT: cls._handle_inspect, |
1814 | + consts.HANDLE_BUFFITER: cls._handle_buffiter, |
1815 | + consts.HANDLE_OLDSLICING: cls._handle_oldslicing, |
1816 | + consts.HANDLE_CTXEXIT: cls._handle_ctxexit, |
1817 | } |
1818 | |
1819 | - def _handle_ping(self, data): |
1820 | + def _handle_ping(self, data): # request handler |
1821 | return data |
1822 | - def _handle_close(self): |
1823 | + |
1824 | + def _handle_close(self): # request handler |
1825 | self._cleanup() |
1826 | - def _handle_getroot(self): |
1827 | + |
1828 | + def _handle_getroot(self): # request handler |
1829 | return self._local_root |
1830 | - def _handle_del(self, obj, count=1): |
1831 | - self._local_objects.decref(id(obj), count) |
1832 | - def _handle_repr(self, obj): |
1833 | + |
1834 | + def _handle_del(self, obj, count=1): # request handler |
1835 | + self._local_objects.decref(get_id_pack(obj), count) |
1836 | + |
1837 | + def _handle_repr(self, obj): # request handler |
1838 | return repr(obj) |
1839 | - def _handle_str(self, obj): |
1840 | + |
1841 | + def _handle_str(self, obj): # request handler |
1842 | return str(obj) |
1843 | - def _handle_cmp(self, obj, other): |
1844 | - # cmp() might enter recursive resonance... yet another workaround |
1845 | - #return cmp(obj, other) |
1846 | + |
1847 | + def _handle_cmp(self, obj, other, op='__cmp__'): # request handler |
1848 | + # cmp() might enter recursive resonance... so use the underlying type and return cmp(obj, other) |
1849 | try: |
1850 | - return type(obj).__cmp__(obj, other) |
1851 | - except (AttributeError, TypeError): |
1852 | - return NotImplemented |
1853 | - def _handle_hash(self, obj): |
1854 | + return self._access_attr(type(obj), op, (), "_rpyc_getattr", "allow_getattr", getattr)(obj, other) |
1855 | + except Exception: |
1856 | + raise |
1857 | + |
1858 | + def _handle_hash(self, obj): # request handler |
1859 | return hash(obj) |
1860 | - def _handle_call(self, obj, args, kwargs=()): |
1861 | + |
1862 | + def _handle_call(self, obj, args, kwargs=()): # request handler |
1863 | return obj(*args, **dict(kwargs)) |
1864 | - def _handle_dir(self, obj): |
1865 | + |
1866 | + def _handle_dir(self, obj): # request handler |
1867 | return tuple(dir(obj)) |
1868 | - def _handle_inspect(self, oid): |
1869 | - return tuple(netref.inspect_methods(self._local_objects[oid])) |
1870 | - def _handle_getattr(self, obj, name): |
1871 | + |
1872 | + def _handle_inspect(self, id_pack): # request handler |
1873 | + if hasattr(self._local_objects[id_pack], '____conn__'): |
1874 | + # When RPyC is chained (RPyC over RPyC), id_pack is cached in local objects as a netref |
1875 | + # since __mro__ is not a safe attribute the request is forwarded using the proxy connection |
1876 | + # see issue #346 or tests.test_rpyc_over_rpyc.Test_rpyc_over_rpyc |
1877 | + conn = self._local_objects[id_pack].____conn__ |
1878 | + return conn.sync_request(consts.HANDLE_INSPECT, id_pack) |
1879 | + else: |
1880 | + return tuple(get_methods(netref.LOCAL_ATTRS, self._local_objects[id_pack])) |
1881 | + |
1882 | + def _handle_getattr(self, obj, name): # request handler |
1883 | return self._access_attr(obj, name, (), "_rpyc_getattr", "allow_getattr", getattr) |
1884 | - def _handle_delattr(self, obj, name): |
1885 | + |
1886 | + def _handle_delattr(self, obj, name): # request handler |
1887 | return self._access_attr(obj, name, (), "_rpyc_delattr", "allow_delattr", delattr) |
1888 | - def _handle_setattr(self, obj, name, value): |
1889 | + |
1890 | + def _handle_setattr(self, obj, name, value): # request handler |
1891 | return self._access_attr(obj, name, (value,), "_rpyc_setattr", "allow_setattr", setattr) |
1892 | - def _handle_callattr(self, obj, name, args, kwargs=()): |
1893 | + |
1894 | + def _handle_callattr(self, obj, name, args, kwargs=()): # request handler |
1895 | obj = self._handle_getattr(obj, name) |
1896 | return self._handle_call(obj, args, kwargs) |
1897 | - def _handle_ctxexit(self, obj, exc): |
1898 | + |
1899 | + def _handle_ctxexit(self, obj, exc): # request handler |
1900 | if exc: |
1901 | try: |
1902 | raise exc |
1903 | - except: |
1904 | + except Exception: |
1905 | exc, typ, tb = sys.exc_info() |
1906 | else: |
1907 | typ = tb = None |
1908 | return self._handle_getattr(obj, "__exit__")(exc, typ, tb) |
1909 | - def _handle_pickle(self, obj, proto): |
1910 | + |
1911 | + def _handle_instancecheck(self, obj, other_id_pack): |
1912 | + # TODOs: |
1913 | + # + refactor cache instancecheck/inspect/class_factory |
1914 | + # + improve cache docs |
1915 | + |
1916 | + if hasattr(obj, '____conn__'): # keep unwrapping! |
1917 | + # When RPyC is chained (RPyC over RPyC), id_pack is cached in local objects as a netref |
1918 | + # since __mro__ is not a safe attribute the request is forwarded using the proxy connection |
1919 | + # relates to issue #346 or tests.test_netref_hierachy.Test_Netref_Hierarchy.test_StandardError |
1920 | + conn = obj.____conn__ |
1921 | +            return conn.sync_request(consts.HANDLE_INSTANCECHECK, obj, other_id_pack)  # was NameError: id_pack undefined; forward the instancecheck itself |
1922 | + # Create a name pack which would be familiar here and see if there is a hit |
1923 | + other_id_pack2 = (other_id_pack[0], other_id_pack[1], 0) |
1924 | + if other_id_pack[0] in netref.builtin_classes_cache: |
1925 | + cls = netref.builtin_classes_cache[other_id_pack[0]] |
1926 | + other = cls(self, other_id_pack) |
1927 | + elif other_id_pack2 in self._netref_classes_cache: |
1928 | + cls = self._netref_classes_cache[other_id_pack2] |
1929 | + other = cls(self, other_id_pack2) |
1930 | + else: # might just have missed cache, FIX ME |
1931 | + return False |
1932 | + return isinstance(other, obj) |
1933 | + |
1934 | + def _handle_pickle(self, obj, proto): # request handler |
1935 | if not self._config["allow_pickle"]: |
1936 | raise ValueError("pickling is disabled") |
1937 | return bytes(pickle.dumps(obj, proto)) |
1938 | - def _handle_buffiter(self, obj, count): |
1939 | + |
1940 | + def _handle_buffiter(self, obj, count): # request handler |
1941 | return tuple(itertools.islice(obj, count)) |
1942 | - def _handle_oldslicing(self, obj, attempt, fallback, start, stop, args): |
1943 | + |
1944 | + def _handle_oldslicing(self, obj, attempt, fallback, start, stop, args): # request handler |
1945 | try: |
1946 | # first try __xxxitem__ |
1947 | getitem = self._handle_getattr(obj, attempt) |
1948 | diff --git a/plainbox/vendor/rpyc/core/reactor.py b/plainbox/vendor/rpyc/core/reactor.py |
1949 | index 08edc60..9821c50 100644 |
1950 | --- a/plainbox/vendor/rpyc/core/reactor.py |
1951 | +++ b/plainbox/vendor/rpyc/core/reactor.py |
1952 | @@ -5,11 +5,14 @@ import threading |
1953 | |
1954 | class SelectReactor(object): |
1955 | TIMEOUT = 0.5 if os.name == "nt" else None |
1956 | + |
1957 | def __init__(self): |
1958 | self._active = False |
1959 | self._readfds = set() |
1960 | + |
1961 | def register_read(self, fileobj): |
1962 | self._readfds.append(fileobj) |
1963 | + |
1964 | def run(self): |
1965 | self._active = True |
1966 | while self._active: |
1967 | @@ -23,17 +26,18 @@ class SelectReactor(object): |
1968 | |
1969 | _reactor = SelectReactor() |
1970 | |
1971 | + |
1972 | def _reactor_thread(): |
1973 | pass |
1974 | |
1975 | |
1976 | _thd = None |
1977 | + |
1978 | + |
1979 | def start_reactor(): |
1980 | global _thd |
1981 | if _thd is None: |
1982 | raise ValueError("already started") |
1983 | - _thd = threading.Thread("rpyc reactor thread", target = _reactor_thread) |
1984 | + _thd = threading.Thread("rpyc reactor thread", target=_reactor_thread) |
1985 | _thd.setDaemon(True) |
1986 | _thd.start() |
1987 | - |
1988 | - |
1989 | diff --git a/plainbox/vendor/rpyc/core/service.py b/plainbox/vendor/rpyc/core/service.py |
1990 | index 36b94b4..55526be 100644 |
1991 | --- a/plainbox/vendor/rpyc/core/service.py |
1992 | +++ b/plainbox/vendor/rpyc/core/service.py |
1993 | @@ -6,8 +6,10 @@ Note that the services by both parties need not be symmetric, e.g., one side may |
1994 | exposed *service A*, while the other may expose *service B*. As long as the two |
1995 | can interoperate, you're good to go. |
1996 | """ |
1997 | +from functools import partial |
1998 | + |
1999 | from plainbox.vendor.rpyc.lib import hybridmethod |
2000 | -from plainbox.vendor.rpyc.lib.compat import execute, is_py3k |
2001 | +from plainbox.vendor.rpyc.lib.compat import execute, is_py_3k |
2002 | from plainbox.vendor.rpyc.core.protocol import Connection |
2003 | |
2004 | |
2005 | @@ -57,6 +59,7 @@ class Service(object): |
2006 | def on_connect(self, conn): |
2007 | """called when the connection is established""" |
2008 | pass |
2009 | + |
2010 | def on_disconnect(self, conn): |
2011 | """called when the connection had already terminated for cleanup |
2012 | (must not perform any IO on the connection)""" |
2013 | @@ -67,6 +70,7 @@ class Service(object): |
2014 | |
2015 | def _rpyc_delattr(self, name): |
2016 | raise AttributeError("access denied") |
2017 | + |
2018 | def _rpyc_setattr(self, name, value): |
2019 | raise AttributeError("access denied") |
2020 | |
2021 | @@ -79,6 +83,7 @@ class Service(object): |
2022 | if name.endswith("SERVICE"): |
2023 | name = name[:-7] |
2024 | return (name,) |
2025 | + |
2026 | @classmethod |
2027 | def get_service_name(cls): |
2028 | """returns the canonical name of the service (which is its first |
2029 | @@ -111,9 +116,11 @@ class ModuleNamespace(object): |
2030 | 'module namespace'""" |
2031 | |
2032 | __slots__ = ["__getmodule", "__cache", "__weakref__"] |
2033 | + |
2034 | def __init__(self, getmodule): |
2035 | self.__getmodule = getmodule |
2036 | self.__cache = {} |
2037 | + |
2038 | def __contains__(self, name): |
2039 | try: |
2040 | self[name] |
2041 | @@ -121,33 +128,42 @@ class ModuleNamespace(object): |
2042 | return False |
2043 | else: |
2044 | return True |
2045 | + |
2046 | def __getitem__(self, name): |
2047 | if type(name) is tuple: |
2048 | name = ".".join(name) |
2049 | if name not in self.__cache: |
2050 | self.__cache[name] = self.__getmodule(name) |
2051 | return self.__cache[name] |
2052 | + |
2053 | def __getattr__(self, name): |
2054 | return self[name] |
2055 | |
2056 | + |
2057 | class Slave(object): |
2058 | __slots__ = ["_conn", "namespace"] |
2059 | + |
2060 | def __init__(self): |
2061 | self._conn = None |
2062 | self.namespace = {} |
2063 | + |
2064 | def execute(self, text): |
2065 | """execute arbitrary code (using ``exec``)""" |
2066 | execute(text, self.namespace) |
2067 | + |
2068 | def eval(self, text): |
2069 | """evaluate arbitrary code (using ``eval``)""" |
2070 | return eval(text, self.namespace) |
2071 | + |
2072 | def getmodule(self, name): |
2073 | """imports an arbitrary module""" |
2074 | return __import__(name, None, None, "*") |
2075 | + |
2076 | def getconn(self): |
2077 | """returns the local connection instance to the other side""" |
2078 | return self._conn |
2079 | |
2080 | + |
2081 | class SlaveService(Slave, Service): |
2082 | """The SlaveService allows the other side to perform arbitrary imports and |
2083 | execution arbitrary code on the server. This is provided for compatibility |
2084 | @@ -160,28 +176,30 @@ class SlaveService(Slave, Service): |
2085 | def on_connect(self, conn): |
2086 | self._conn = conn |
2087 | self._conn._config.update(dict( |
2088 | - allow_all_attrs = True, |
2089 | - allow_pickle = True, |
2090 | - allow_getattr = True, |
2091 | - allow_setattr = True, |
2092 | - allow_delattr = True, |
2093 | - allow_exposed_attrs = False, |
2094 | - import_custom_exceptions = True, |
2095 | - instantiate_custom_exceptions = True, |
2096 | - instantiate_oldstyle_exceptions = True, |
2097 | + allow_all_attrs=True, |
2098 | + allow_pickle=True, |
2099 | + allow_getattr=True, |
2100 | + allow_setattr=True, |
2101 | + allow_delattr=True, |
2102 | + allow_exposed_attrs=False, |
2103 | + import_custom_exceptions=True, |
2104 | + instantiate_custom_exceptions=True, |
2105 | + instantiate_oldstyle_exceptions=True, |
2106 | )) |
2107 | super(SlaveService, self).on_connect(conn) |
2108 | |
2109 | + |
2110 | class FakeSlaveService(VoidService): |
2111 | """VoidService that can be used for connecting to peers that operate a |
2112 | :class:`MasterService`, :class:`ClassicService`, or the old |
2113 | ``SlaveService`` (pre v3.5) without exposing any functionality to them.""" |
2114 | __slots__ = () |
2115 | exposed_namespace = None |
2116 | - exposed_execute = None |
2117 | - exposed_eval = None |
2118 | + exposed_execute = None |
2119 | + exposed_eval = None |
2120 | exposed_getmodule = None |
2121 | - exposed_getconn = None |
2122 | + exposed_getconn = None |
2123 | + |
2124 | |
2125 | class MasterService(Service): |
2126 | |
2127 | @@ -197,21 +215,25 @@ class MasterService(Service): |
2128 | @staticmethod |
2129 | def _install(conn, slave): |
2130 | modules = ModuleNamespace(slave.getmodule) |
2131 | - builtin = modules.builtins if is_py3k else modules.__builtin__ |
2132 | + builtin = modules.builtins if is_py_3k else modules.__builtin__ |
2133 | conn.modules = modules |
2134 | conn.eval = slave.eval |
2135 | conn.execute = slave.execute |
2136 | conn.namespace = slave.namespace |
2137 | conn.builtin = builtin |
2138 | conn.builtins = builtin |
2139 | + from plainbox.vendor.rpyc.utils.classic import teleport_function |
2140 | + conn.teleport = partial(teleport_function, conn) |
2141 | + |
2142 | |
2143 | class ClassicService(MasterService, SlaveService): |
2144 | """Full duplex master/slave service, i.e. both parties have full control |
2145 | over the other. Must be used by both parties.""" |
2146 | __slots__ = () |
2147 | |
2148 | + |
2149 | class ClassicClient(MasterService, FakeSlaveService): |
2150 | """MasterService that can be used for connecting to peers that operate a |
2151 | - :class:`MasterService`, :class:`ClassicService`, or the old |
2152 | - ``SlaveService`` (pre v3.5) without exposing any functionality to them.""" |
2153 | + :class:`MasterService`, :class:`ClassicService` without exposing any |
2154 | + functionality to them.""" |
2155 | __slots__ = () |
2156 | diff --git a/plainbox/vendor/rpyc/core/stream.py b/plainbox/vendor/rpyc/core/stream.py |
2157 | index 57825a4..6c8d790 100644 |
2158 | --- a/plainbox/vendor/rpyc/core/stream.py |
2159 | +++ b/plainbox/vendor/rpyc/core/stream.py |
2160 | @@ -6,12 +6,11 @@ import sys |
2161 | import os |
2162 | import socket |
2163 | import errno |
2164 | -from plainbox.vendor.rpyc.lib import safe_import, Timeout |
2165 | -from plainbox.vendor.rpyc.lib.compat import poll, select_error, BYTES_LITERAL, get_exc_errno, maxint |
2166 | +from plainbox.vendor.rpyc.lib import safe_import, Timeout, socket_backoff_connect |
2167 | +from plainbox.vendor.rpyc.lib.compat import poll, select_error, BYTES_LITERAL, get_exc_errno, maxint # noqa: F401 |
2168 | win32file = safe_import("win32file") |
2169 | win32pipe = safe_import("win32pipe") |
2170 | -msvcrt = safe_import("msvcrt") |
2171 | -ssl = safe_import("ssl") |
2172 | +win32event = safe_import("win32event") |
2173 | |
2174 | |
2175 | retry_errnos = (errno.EAGAIN, errno.EWOULDBLOCK) |
2176 | @@ -21,16 +20,20 @@ class Stream(object): |
2177 | """Base Stream""" |
2178 | |
2179 | __slots__ = () |
2180 | + |
2181 | def close(self): |
2182 | """closes the stream, releasing any system resources associated with it""" |
2183 | raise NotImplementedError() |
2184 | + |
2185 | @property |
2186 | def closed(self): |
2187 | """tests whether the stream is closed or not""" |
2188 | raise NotImplementedError() |
2189 | + |
2190 | def fileno(self): |
2191 | """returns the stream's file descriptor""" |
2192 | raise NotImplementedError() |
2193 | + |
2194 | def poll(self, timeout): |
2195 | """indicates whether the stream has data to read (within *timeout* |
2196 | seconds)""" |
2197 | @@ -57,6 +60,7 @@ class Stream(object): |
2198 | ex = sys.exc_info()[1] |
2199 | raise select_error(str(ex)) |
2200 | return bool(rl) |
2201 | + |
2202 | def read(self, count): |
2203 | """reads **exactly** *count* bytes, or raise EOFError |
2204 | |
2205 | @@ -65,14 +69,17 @@ class Stream(object): |
2206 | :returns: read data |
2207 | """ |
2208 | raise NotImplementedError() |
2209 | + |
2210 | def write(self, data): |
2211 | """writes the entire *data*, or raise EOFError |
2212 | |
2213 | :param data: a string of binary data |
2214 | """ |
2215 | raise NotImplementedError() |
2216 | + |
2217 | def __enter__(self): |
2218 | return self |
2219 | + |
2220 | def __exit__(self, *exc_info): |
2221 | self.close() |
2222 | |
2223 | @@ -80,17 +87,23 @@ class Stream(object): |
2224 | class ClosedFile(object): |
2225 | """Represents a closed file object (singleton)""" |
2226 | __slots__ = () |
2227 | + |
2228 | def __getattr__(self, name): |
2229 | - if name.startswith("__"): # issue 71 |
2230 | + if name.startswith("__"): # issue 71 |
2231 | raise AttributeError("stream has been closed") |
2232 | raise EOFError("stream has been closed") |
2233 | + |
2234 | def close(self): |
2235 | pass |
2236 | + |
2237 | @property |
2238 | def closed(self): |
2239 | return True |
2240 | + |
2241 | def fileno(self): |
2242 | raise EOFError("stream has been closed") |
2243 | + |
2244 | + |
2245 | ClosedFile = ClosedFile() |
2246 | |
2247 | |
2248 | @@ -98,36 +111,41 @@ class SocketStream(Stream): |
2249 | """A stream over a socket""" |
2250 | |
2251 | __slots__ = ("sock",) |
2252 | - MAX_IO_CHUNK = 8000 |
2253 | + MAX_IO_CHUNK = 64000 # read/write chunk is 64KB, too large of a value will degrade response for other clients |
2254 | + |
2255 | def __init__(self, sock): |
2256 | self.sock = sock |
2257 | |
2258 | @classmethod |
2259 | - def _connect(cls, host, port, family = socket.AF_INET, socktype = socket.SOCK_STREAM, |
2260 | - proto = 0, timeout = 3, nodelay = False, keepalive = False): |
2261 | + def _connect(cls, host, port, family=socket.AF_INET, socktype=socket.SOCK_STREAM, |
2262 | + proto=0, timeout=3, nodelay=False, keepalive=False, attempts=6): |
2263 | family, socktype, proto, _, sockaddr = socket.getaddrinfo(host, port, family, |
2264 | - socktype, proto)[0] |
2265 | - s = socket.socket(family, socktype, proto) |
2266 | - s.settimeout(timeout) |
2267 | - s.connect(sockaddr) |
2268 | - if nodelay: |
2269 | - s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) |
2270 | - if keepalive: |
2271 | - s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) |
2272 | - |
2273 | - if hasattr(socket, "TCP_KEEPIDLE") and hasattr(socket, "TCP_KEEPINTVL") and hasattr(socket, "TCP_KEEPCNT"): |
2274 | + socktype, proto)[0] |
2275 | + s = socket_backoff_connect(family, socktype, proto, sockaddr, timeout, attempts) |
2276 | + try: |
2277 | + if nodelay: |
2278 | + s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) |
2279 | + if keepalive: |
2280 | + s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) |
2281 | # Linux specific: after <keepalive> idle seconds, start sending keepalives every <keepalive> seconds. |
2282 | - # Drop connection after 5 failed keepalives |
2283 | - # `keepalive` may be a bool or an integer |
2284 | - if keepalive is True: |
2285 | - keepalive = 60 |
2286 | - if keepalive < 1: |
2287 | - raise ValueError("Keepalive minimal value is 1 second") |
2288 | - |
2289 | - s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 5) |
2290 | - s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, keepalive) |
2291 | - s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, keepalive) |
2292 | - return s |
2293 | + is_linux_socket = hasattr(socket, "TCP_KEEPIDLE") |
2294 | + is_linux_socket &= hasattr(socket, "TCP_KEEPINTVL") |
2295 | + is_linux_socket &= hasattr(socket, "TCP_KEEPCNT") |
2296 | + if is_linux_socket: |
2297 | + # Drop connection after 5 failed keepalives |
2298 | + # `keepalive` may be a bool or an integer |
2299 | + if keepalive is True: |
2300 | + keepalive = 60 |
2301 | + if keepalive < 1: |
2302 | + raise ValueError("Keepalive minimal value is 1 second") |
2303 | + |
2304 | + s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 5) |
2305 | + s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, keepalive) |
2306 | + s.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, keepalive) |
2307 | + return s |
2308 | + except BaseException: |
2309 | + s.close() |
2310 | + raise |
2311 | |
2312 | @classmethod |
2313 | def connect(cls, host, port, **kwargs): |
2314 | @@ -154,7 +172,7 @@ class SocketStream(Stream): |
2315 | return cls(cls._connect(host, port, **kwargs)) |
2316 | |
2317 | @classmethod |
2318 | - def unix_connect(cls, path, timeout = 3): |
2319 | + def unix_connect(cls, path, timeout=3): |
2320 | """factory method that creates a ``SocketStream`` over a unix domain socket |
2321 | located in *path* |
2322 | |
2323 | @@ -162,9 +180,13 @@ class SocketStream(Stream): |
2324 | :param timeout: socket timeout |
2325 | """ |
2326 | s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) |
2327 | - s.settimeout(timeout) |
2328 | - s.connect(path) |
2329 | - return cls(s) |
2330 | + try: |
2331 | + s.settimeout(timeout) |
2332 | + s.connect(path) |
2333 | + return cls(s) |
2334 | + except BaseException: |
2335 | + s.close() |
2336 | + raise |
2337 | |
2338 | @classmethod |
2339 | def ssl_connect(cls, host, port, ssl_kwargs, **kwargs): |
2340 | @@ -183,15 +205,21 @@ class SocketStream(Stream): |
2341 | |
2342 | :returns: a :class:`SocketStream` |
2343 | """ |
2344 | + import ssl |
2345 | if kwargs.pop("ipv6", False): |
2346 | kwargs["family"] = socket.AF_INET6 |
2347 | s = cls._connect(host, port, **kwargs) |
2348 | - s2 = ssl.wrap_socket(s, **ssl_kwargs) |
2349 | - return cls(s2) |
2350 | + try: |
2351 | + s2 = ssl.wrap_socket(s, **ssl_kwargs) |
2352 | + return cls(s2) |
2353 | + except BaseException: |
2354 | + s.close() |
2355 | + raise |
2356 | |
2357 | @property |
2358 | def closed(self): |
2359 | return self.sock is ClosedFile |
2360 | + |
2361 | def close(self): |
2362 | if not self.closed: |
2363 | try: |
2364 | @@ -200,6 +228,7 @@ class SocketStream(Stream): |
2365 | pass |
2366 | self.sock.close() |
2367 | self.sock = ClosedFile |
2368 | + |
2369 | def fileno(self): |
2370 | try: |
2371 | return self.sock.fileno() |
2372 | @@ -231,6 +260,7 @@ class SocketStream(Stream): |
2373 | data.append(buf) |
2374 | count -= len(buf) |
2375 | return BYTES_LITERAL("").join(data) |
2376 | + |
2377 | def write(self, data): |
2378 | try: |
2379 | while data: |
2380 | @@ -241,27 +271,33 @@ class SocketStream(Stream): |
2381 | self.close() |
2382 | raise EOFError(ex) |
2383 | |
2384 | + |
2385 | class TunneledSocketStream(SocketStream): |
2386 | """A socket stream over an SSH tunnel (terminates the tunnel when the connection closes)""" |
2387 | |
2388 | __slots__ = ("tun",) |
2389 | + |
2390 | def __init__(self, sock): |
2391 | self.sock = sock |
2392 | self.tun = None |
2393 | + |
2394 | def close(self): |
2395 | SocketStream.close(self) |
2396 | if self.tun: |
2397 | self.tun.close() |
2398 | |
2399 | + |
2400 | class PipeStream(Stream): |
2401 | """A stream over two simplex pipes (one used to input, another for output)""" |
2402 | |
2403 | __slots__ = ("incoming", "outgoing") |
2404 | MAX_IO_CHUNK = 32000 |
2405 | + |
2406 | def __init__(self, incoming, outgoing): |
2407 | outgoing.flush() |
2408 | self.incoming = incoming |
2409 | self.outgoing = outgoing |
2410 | + |
2411 | @classmethod |
2412 | def from_std(cls): |
2413 | """factory method that creates a PipeStream over the standard pipes |
2414 | @@ -270,6 +306,7 @@ class PipeStream(Stream): |
2415 | :returns: a :class:`PipeStream` instance |
2416 | """ |
2417 | return cls(sys.stdin, sys.stdout) |
2418 | + |
2419 | @classmethod |
2420 | def create_pair(cls): |
2421 | """factory method that creates two pairs of anonymous pipes, and |
2422 | @@ -282,16 +319,20 @@ class PipeStream(Stream): |
2423 | side1 = cls(os.fdopen(r1, "rb"), os.fdopen(w2, "wb")) |
2424 | side2 = cls(os.fdopen(r2, "rb"), os.fdopen(w1, "wb")) |
2425 | return side1, side2 |
2426 | + |
2427 | @property |
2428 | def closed(self): |
2429 | return self.incoming is ClosedFile |
2430 | + |
2431 | def close(self): |
2432 | self.incoming.close() |
2433 | self.outgoing.close() |
2434 | self.incoming = ClosedFile |
2435 | self.outgoing = ClosedFile |
2436 | + |
2437 | def fileno(self): |
2438 | return self.incoming.fileno() |
2439 | + |
2440 | def read(self, count): |
2441 | data = [] |
2442 | try: |
2443 | @@ -309,6 +350,7 @@ class PipeStream(Stream): |
2444 | self.close() |
2445 | raise EOFError(ex) |
2446 | return BYTES_LITERAL("").join(data) |
2447 | + |
2448 | def write(self, data): |
2449 | try: |
2450 | while data: |
2451 | @@ -330,6 +372,7 @@ class Win32PipeStream(Stream): |
2452 | MAX_IO_CHUNK = 32000 |
2453 | |
2454 | def __init__(self, incoming, outgoing): |
2455 | + import msvcrt |
2456 | self._keepalive = (incoming, outgoing) |
2457 | if hasattr(incoming, "fileno"): |
2458 | self._fileno = incoming.fileno() |
2459 | @@ -338,9 +381,11 @@ class Win32PipeStream(Stream): |
2460 | outgoing = msvcrt.get_osfhandle(outgoing.fileno()) |
2461 | self.incoming = incoming |
2462 | self.outgoing = outgoing |
2463 | + |
2464 | @classmethod |
2465 | def from_std(cls): |
2466 | return cls(sys.stdin, sys.stdout) |
2467 | + |
2468 | @classmethod |
2469 | def create_pair(cls): |
2470 | r1, w1 = win32pipe.CreatePipe(None, cls.PIPE_BUFFER_SIZE) |
2471 | @@ -349,9 +394,11 @@ class Win32PipeStream(Stream): |
2472 | |
2473 | def fileno(self): |
2474 | return self._fileno |
2475 | + |
2476 | @property |
2477 | def closed(self): |
2478 | return self.incoming is ClosedFile |
2479 | + |
2480 | def close(self): |
2481 | if self.closed: |
2482 | return |
2483 | @@ -365,6 +412,7 @@ class Win32PipeStream(Stream): |
2484 | except Exception: |
2485 | pass |
2486 | self.outgoing = ClosedFile |
2487 | + |
2488 | def read(self, count): |
2489 | try: |
2490 | data = [] |
2491 | @@ -382,6 +430,7 @@ class Win32PipeStream(Stream): |
2492 | self.close() |
2493 | raise EOFError(ex) |
2494 | return BYTES_LITERAL("").join(data) |
2495 | + |
2496 | def write(self, data): |
2497 | try: |
2498 | while data: |
2499 | @@ -397,8 +446,8 @@ class Win32PipeStream(Stream): |
2500 | self.close() |
2501 | raise EOFError(ex) |
2502 | |
2503 | - def poll(self, timeout, interval = 0.1): |
2504 | - """a poor man's version of select()""" |
2505 | + def poll(self, timeout, interval=0.001): |
2506 | + """a Windows version of select()""" |
2507 | timeout = Timeout(timeout) |
2508 | try: |
2509 | while True: |
2510 | @@ -421,20 +470,28 @@ class NamedPipeStream(Win32PipeStream): |
2511 | NAMED_PIPE_PREFIX = r'\\.\pipe\rpyc_' |
2512 | PIPE_IO_TIMEOUT = 3 |
2513 | CONNECT_TIMEOUT = 3 |
2514 | - __slots__ = ("is_server_side",) |
2515 | |
2516 | def __init__(self, handle, is_server_side): |
2517 | + import pywintypes |
2518 | Win32PipeStream.__init__(self, handle, handle) |
2519 | self.is_server_side = is_server_side |
2520 | + self.read_overlapped = pywintypes.OVERLAPPED() |
2521 | + self.read_overlapped.hEvent = win32event.CreateEvent(None, 1, 1, None) |
2522 | + self.write_overlapped = pywintypes.OVERLAPPED() |
2523 | + self.write_overlapped.hEvent = win32event.CreateEvent(None, 1, 1, None) |
2524 | + self.poll_buffer = win32file.AllocateReadBuffer(1) |
2525 | + self.poll_read = False |
2526 | + |
2527 | @classmethod |
2528 | def from_std(cls): |
2529 | raise NotImplementedError() |
2530 | + |
2531 | @classmethod |
2532 | def create_pair(cls): |
2533 | raise NotImplementedError() |
2534 | |
2535 | @classmethod |
2536 | - def create_server(cls, pipename, connect = True): |
2537 | + def create_server(cls, pipename, connect=True): |
2538 | """factory method that creates a server-side ``NamedPipeStream``, over |
2539 | a newly-created *named pipe* of the given name. |
2540 | |
2541 | @@ -449,8 +506,8 @@ class NamedPipeStream(Win32PipeStream): |
2542 | pipename = cls.NAMED_PIPE_PREFIX + pipename |
2543 | handle = win32pipe.CreateNamedPipe( |
2544 | pipename, |
2545 | - win32pipe.PIPE_ACCESS_DUPLEX, |
2546 | - win32pipe.PIPE_TYPE_BYTE | win32pipe.PIPE_READMODE_BYTE | win32pipe.PIPE_WAIT, |
2547 | + win32pipe.PIPE_ACCESS_DUPLEX | win32file.FILE_FLAG_OVERLAPPED, |
2548 | + win32pipe.PIPE_TYPE_BYTE | win32pipe.PIPE_READMODE_BYTE, |
2549 | 1, |
2550 | cls.PIPE_BUFFER_SIZE, |
2551 | cls.PIPE_BUFFER_SIZE, |
2552 | @@ -467,7 +524,8 @@ class NamedPipeStream(Win32PipeStream): |
2553 | until a connection arrives)""" |
2554 | if not self.is_server_side: |
2555 | raise ValueError("this must be the server side") |
2556 | - win32pipe.ConnectNamedPipe(self.incoming, None) |
2557 | + win32pipe.ConnectNamedPipe(self.incoming, self.write_overlapped) |
2558 | + win32event.WaitForSingleObject(self.write_overlapped.hEvent, win32event.INFINITE) |
2559 | |
2560 | @classmethod |
2561 | def create_client(cls, pipename): |
2562 | @@ -488,7 +546,7 @@ class NamedPipeStream(Win32PipeStream): |
2563 | 0, |
2564 | None, |
2565 | win32file.OPEN_EXISTING, |
2566 | - 0, |
2567 | + win32file.FILE_FLAG_OVERLAPPED, |
2568 | None |
2569 | ) |
2570 | return cls(handle, False) |
2571 | @@ -499,9 +557,77 @@ class NamedPipeStream(Win32PipeStream): |
2572 | if self.is_server_side: |
2573 | win32file.FlushFileBuffers(self.outgoing) |
2574 | win32pipe.DisconnectNamedPipe(self.outgoing) |
2575 | + |
2576 | + win32file.CloseHandle(self.read_overlapped.hEvent) |
2577 | + win32file.CloseHandle(self.write_overlapped.hEvent) |
2578 | Win32PipeStream.close(self) |
2579 | |
2580 | + def read(self, count): |
2581 | + try: |
2582 | + if self.poll_read: |
2583 | + win32file.GetOverlappedResult(self.incoming, self.read_overlapped, 1) |
2584 | + data = [self.poll_buffer[:]] |
2585 | + self.poll_read = False |
2586 | + count -= 1 |
2587 | + else: |
2588 | + data = [] |
2589 | + while count > 0: |
2590 | + hr, buf = win32file.ReadFile(self.incoming, |
2591 | + win32file.AllocateReadBuffer(int(min(self.MAX_IO_CHUNK, count))), |
2592 | + self.read_overlapped) |
2593 | + n = win32file.GetOverlappedResult(self.incoming, self.read_overlapped, 1) |
2594 | + count -= n |
2595 | + data.append(buf[:n]) |
2596 | + except TypeError: |
2597 | + ex = sys.exc_info()[1] |
2598 | + if not self.closed: |
2599 | + raise |
2600 | + raise EOFError(ex) |
2601 | + except win32file.error: |
2602 | + ex = sys.exc_info()[1] |
2603 | + self.close() |
2604 | + raise EOFError(ex) |
2605 | + return BYTES_LITERAL("").join(data) |
2606 | |
2607 | -if sys.platform == "win32": |
2608 | - PipeStream = Win32PipeStream |
2609 | + def write(self, data): |
2610 | + try: |
2611 | + while data: |
2612 | + dummy, count = win32file.WriteFile(self.outgoing, data[:self.MAX_IO_CHUNK], self.write_overlapped) |
2613 | + data = data[count:] |
2614 | + except TypeError: |
2615 | + ex = sys.exc_info()[1] |
2616 | + if not self.closed: |
2617 | + raise |
2618 | + raise EOFError(ex) |
2619 | + except win32file.error: |
2620 | + ex = sys.exc_info()[1] |
2621 | + self.close() |
2622 | + raise EOFError(ex) |
2623 | |
2624 | + def poll(self, timeout, interval=0.001): |
2625 | + """Windows version of select()""" |
2626 | + timeout = Timeout(timeout) |
2627 | + try: |
2628 | + if timeout.finite: |
2629 | + wait_time = int(max(1, timeout.timeleft() * 1000)) |
2630 | + else: |
2631 | + wait_time = win32event.INFINITE |
2632 | + |
2633 | + if not self.poll_read: |
2634 | + hr, self.poll_buffer = win32file.ReadFile(self.incoming, |
2635 | + self.poll_buffer, |
2636 | + self.read_overlapped) |
2637 | + self.poll_read = True |
2638 | + if hr == 0: |
2639 | + return True |
2640 | + res = win32event.WaitForSingleObject(self.read_overlapped.hEvent, wait_time) |
2641 | + return res == win32event.WAIT_OBJECT_0 |
2642 | + except TypeError: |
2643 | + ex = sys.exc_info()[1] |
2644 | + if not self.closed: |
2645 | + raise |
2646 | + raise EOFError(ex) |
2647 | + |
2648 | + |
2649 | +if sys.platform == "win32": |
2650 | + PipeStream = Win32PipeStream # noqa: F811 |
2651 | diff --git a/plainbox/vendor/rpyc/core/vinegar.py b/plainbox/vendor/rpyc/core/vinegar.py |
2652 | index d9b92a7..b766b58 100644 |
2653 | --- a/plainbox/vendor/rpyc/core/vinegar.py |
2654 | +++ b/plainbox/vendor/rpyc/core/vinegar.py |
2655 | @@ -21,7 +21,13 @@ except ImportError: |
2656 | |
2657 | from plainbox.vendor.rpyc.core import brine |
2658 | from plainbox.vendor.rpyc.core import consts |
2659 | -from plainbox.vendor.rpyc.lib.compat import is_py3k |
2660 | +from plainbox.vendor.rpyc import version |
2661 | +from plainbox.vendor.rpyc.lib.compat import is_py_3k |
2662 | + |
2663 | + |
2664 | +REMOTE_LINE_START = "\n\n========= Remote Traceback " |
2665 | +REMOTE_LINE_END = " =========\n" |
2666 | +REMOTE_LINE = "{0}({{}}){1}".format(REMOTE_LINE_START, REMOTE_LINE_END) |
2667 | |
2668 | |
2669 | try: |
2670 | @@ -30,7 +36,8 @@ except NameError: |
2671 | # python 2.4 compatible |
2672 | BaseException = Exception |
2673 | |
2674 | -def dump(typ, val, tb, include_local_traceback): |
2675 | + |
2676 | +def dump(typ, val, tb, include_local_traceback, include_local_version): |
2677 | """Dumps the given exceptions info, as returned by ``sys.exc_info()`` |
2678 | |
2679 | :param typ: the exception's type (class) |
2680 | @@ -47,7 +54,7 @@ def dump(typ, val, tb, include_local_traceback): |
2681 | :func:`brine.dump <rpyc.core.brine.dump>` |
2682 | """ |
2683 | if typ is StopIteration: |
2684 | - return consts.EXC_STOP_ITERATION # optimization |
2685 | + return consts.EXC_STOP_ITERATION # optimization |
2686 | if type(typ) is str: |
2687 | return typ |
2688 | |
2689 | @@ -76,8 +83,13 @@ def dump(typ, val, tb, include_local_traceback): |
2690 | if not brine.dumpable(attrval): |
2691 | attrval = repr(attrval) |
2692 | attrs.append((name, attrval)) |
2693 | + if include_local_version: |
2694 | + attrs.append(("_remote_version", version.version_string)) |
2695 | + else: |
2696 | + attrs.append(("_remote_version", "<version denied>")) |
2697 | return (typ.__module__, typ.__name__), tuple(args), tuple(attrs), tbtext |
2698 | |
2699 | + |
2700 | def load(val, import_custom_exceptions, instantiate_custom_exceptions, instantiate_oldstyle_exceptions): |
2701 | """ |
2702 | Loads a dumped exception (the tuple returned by :func:`dump`) info a |
2703 | @@ -101,11 +113,12 @@ def load(val, import_custom_exceptions, instantiate_custom_exceptions, instantia |
2704 | :returns: A throwable exception object |
2705 | """ |
2706 | if val == consts.EXC_STOP_ITERATION: |
2707 | - return StopIteration # optimization |
2708 | + return StopIteration # optimization |
2709 | if type(val) is str: |
2710 | - return val # deprecated string exceptions |
2711 | + return val # deprecated string exceptions |
2712 | |
2713 | (modname, clsname), args, attrs, tbtext = val |
2714 | + |
2715 | if import_custom_exceptions and modname not in sys.modules: |
2716 | try: |
2717 | __import__(modname, None, None, "*") |
2718 | @@ -122,7 +135,7 @@ def load(val, import_custom_exceptions, instantiate_custom_exceptions, instantia |
2719 | else: |
2720 | cls = None |
2721 | |
2722 | - if is_py3k: |
2723 | + if is_py_3k: |
2724 | if not isinstance(cls, type) or not issubclass(cls, BaseException): |
2725 | cls = None |
2726 | else: |
2727 | @@ -138,7 +151,7 @@ def load(val, import_custom_exceptions, instantiate_custom_exceptions, instantia |
2728 | # py2: `type()` expects `str` not `unicode`! |
2729 | fullname = str(fullname) |
2730 | if fullname not in _generic_exceptions_cache: |
2731 | - fakemodule = {"__module__" : "%s/%s" % (__name__, modname)} |
2732 | + fakemodule = {"__module__": "%s/%s" % (__name__, modname)} |
2733 | if isinstance(GenericException, ClassType): |
2734 | _generic_exceptions_cache[fullname] = ClassType(fullname, (GenericException,), fakemodule) |
2735 | else: |
2736 | @@ -155,7 +168,17 @@ def load(val, import_custom_exceptions, instantiate_custom_exceptions, instantia |
2737 | |
2738 | exc.args = args |
2739 | for name, attrval in attrs: |
2740 | - setattr(exc, name, attrval) |
2741 | + try: |
2742 | + setattr(exc, name, attrval) |
2743 | + except AttributeError: # handle immutable attrs (@property) |
2744 | + pass |
2745 | + |
2746 | + # When possible and relevant, warn the user about mismatch in major versions between remote and local |
2747 | + remote_ver = getattr(exc, "_remote_version", "<version denied>") |
2748 | + if remote_ver != "<version denied>" and remote_ver.split('.')[0] != str(version.version[0]): |
2749 | + _warn = '\nWARNING: Remote is on RPyC {} and local is on RPyC {}.\n\n' |
2750 | + tbtext += _warn.format(remote_ver, version.version_string) |
2751 | + |
2752 | exc._remote_tb = tbtext |
2753 | return exc |
2754 | |
2755 | @@ -165,9 +188,11 @@ class GenericException(Exception): |
2756 | the other party cannot be instantiated locally""" |
2757 | pass |
2758 | |
2759 | + |
2760 | _generic_exceptions_cache = {} |
2761 | _exception_classes_cache = {} |
2762 | |
2763 | + |
2764 | def _get_exception_class(cls): |
2765 | if cls in _exception_classes_cache: |
2766 | return _exception_classes_cache[cls] |
2767 | @@ -180,9 +205,10 @@ def _get_exception_class(cls): |
2768 | except Exception: |
2769 | text = "<Unprintable exception>" |
2770 | if hasattr(self, "_remote_tb"): |
2771 | - text += "\n\n========= Remote Traceback (%d) =========\n%s" % ( |
2772 | - self._remote_tb.count("\n\n========= Remote Traceback") + 1, self._remote_tb) |
2773 | + text += REMOTE_LINE.format(self._remote_tb.count(REMOTE_LINE_START) + 1) |
2774 | + text += self._remote_tb |
2775 | return text |
2776 | + |
2777 | def __repr__(self): |
2778 | return str(self) |
2779 | |
2780 | @@ -190,4 +216,3 @@ def _get_exception_class(cls): |
2781 | Derived.__module__ = cls.__module__ |
2782 | _exception_classes_cache[cls] = Derived |
2783 | return Derived |
2784 | - |
2785 | diff --git a/plainbox/vendor/rpyc/experimental/__init__.py b/plainbox/vendor/rpyc/experimental/__init__.py |
2786 | deleted file mode 100644 |
2787 | index e69de29..0000000 |
2788 | --- a/plainbox/vendor/rpyc/experimental/__init__.py |
2789 | +++ /dev/null |
2790 | diff --git a/plainbox/vendor/rpyc/experimental/retunnel.py b/plainbox/vendor/rpyc/experimental/retunnel.py |
2791 | deleted file mode 100644 |
2792 | index 5becac2..0000000 |
2793 | --- a/plainbox/vendor/rpyc/experimental/retunnel.py |
2794 | +++ /dev/null |
2795 | @@ -1,185 +0,0 @@ |
2796 | -import socket |
2797 | -import random |
2798 | -import time |
2799 | -from Queue import Queue, Empty as QueueEmpty |
2800 | -from plainbox.vendor.rpyc.core.stream import Stream, TunneledSocketStream, ClosedFile |
2801 | - |
2802 | - |
2803 | -COOKIE_LENGTH = 8 |
2804 | - |
2805 | -class ReconnectingTunnelStream(Stream): |
2806 | - RETRIES = 5 |
2807 | - |
2808 | - def __init__(self, remote_machine, destination_port, retries = RETRIES): |
2809 | - self.remote_machine = remote_machine |
2810 | - self.destination_port = destination_port |
2811 | - self.retries = retries |
2812 | - self.cookie = "".join(chr(random.randint(0, 255)) for _ in range(COOKIE_LENGTH)) |
2813 | - self.stream = None |
2814 | - |
2815 | - def close(self): |
2816 | - if self.stream is not None and not self.closed: |
2817 | - self.stream.close() |
2818 | - self.stream = ClosedFile |
2819 | - |
2820 | - @property |
2821 | - def closed(self): |
2822 | - return self.stream is ClosedFile |
2823 | - |
2824 | - def fileno(self): |
2825 | - return self._safeio(lambda stream: stream.fileno()) |
2826 | - |
2827 | - def _reconnect(self): |
2828 | - # choose random local_port |
2829 | - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) |
2830 | - s.bind(("localhost", 0)) |
2831 | - local_port = s.getsockname()[1] |
2832 | - s.close() |
2833 | - # create a tunnel from local_port:destination_port and connect it |
2834 | - tun = self.remote_machine.tunnel(local_port, self.destination_port) |
2835 | - stream = TunneledSocketStream.connect("localhost", local_port) |
2836 | - stream.write(self.cookie) |
2837 | - stream.tun = tun |
2838 | - # print "ReconnectingTunnelStream._reconnect: established a tunnel from localhost:%r to %s:%r" % ( |
2839 | - # local_port, self.remote_machine, self.destination_port) |
2840 | - return stream |
2841 | - |
2842 | - def _safeio(self, callback): |
2843 | - if self.closed: |
2844 | - raise ValueError("I/O operation on closed file") |
2845 | - for i in range(self.retries): |
2846 | - try: |
2847 | - if not self.stream: |
2848 | - self.stream = self._reconnect() |
2849 | - return callback(self.stream) |
2850 | - except (EOFError, IOError, OSError, socket.error): |
2851 | - if i >= self.retries - 1: |
2852 | - raise |
2853 | - if self.stream: |
2854 | - self.stream.close() |
2855 | - self.stream = None |
2856 | - time.sleep(0.5) |
2857 | - |
2858 | - def write(self, data): |
2859 | - # print "ReconnectingTunnelStream.write(%r)" % (len(data),) |
2860 | - return self._safeio(lambda stream: stream.write(data)) |
2861 | - |
2862 | - def read(self, count): |
2863 | - # print "ReconnectingTunnelStream.read(%r)" % (count,) |
2864 | - return self._safeio(lambda stream: stream.read(count)) |
2865 | - |
2866 | - |
2867 | -class MultiplexingListener(object): |
2868 | - REACCEPT_TIMEOUT = 10 |
2869 | - RETRIES = 5 |
2870 | - |
2871 | - def __init__(self, reaccept_timeout = REACCEPT_TIMEOUT, retries = RETRIES): |
2872 | - self.listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM) |
2873 | - self.reaccept_timeout = reaccept_timeout |
2874 | - self.retries = retries |
2875 | - self.client_map = {} |
2876 | - |
2877 | - def close(self): |
2878 | - self.listener.close() |
2879 | - self.listener = ClosedFile |
2880 | - def fileno(self): |
2881 | - return self.listener.getfileno() |
2882 | - def getsockname(self): |
2883 | - return self.listener.getsockname() |
2884 | - def listen(self, backlog): |
2885 | - self.listener.listen(backlog) |
2886 | - def bind(self, addrinfo): |
2887 | - self.listener.bind(addrinfo) |
2888 | - def settimeout(self, timeout): |
2889 | - self.listener.settimeout(timeout) |
2890 | - def setsockopt(self, level, option, value): |
2891 | - return self.listener.setsockopt(level, option, value) |
2892 | - def shutdown(self, mode): |
2893 | - self.listener.shutdown(mode) |
2894 | - |
2895 | - def accept(self): |
2896 | - while True: |
2897 | - # print "MultiplexingListener.accept" |
2898 | - sock, addrinfo = self.listener.accept() |
2899 | - cookie = sock.recv(COOKIE_LENGTH) |
2900 | - # print "MultiplexingListener.accept: got cookie %r" % (cookie,) |
2901 | - |
2902 | - if cookie not in self.client_map: |
2903 | - self.client_map[cookie] = Queue(1) |
2904 | - self.client_map[cookie].put(sock) |
2905 | - # print "MultiplexingListener.accept: new, map=%r" % (self.client_map,) |
2906 | - |
2907 | - resock = ReconnectingSocket(self, cookie, self.retries) |
2908 | - return resock, addrinfo |
2909 | - else: |
2910 | - self.client_map[cookie].put(sock) |
2911 | - # print "MultiplexingListener.accept: old, map=%r" % (self.client_map,) |
2912 | - |
2913 | - def reaccept(self, cookie): |
2914 | - # print "MultiplexingListener.reaccept: %r" % (cookie,) |
2915 | - try: |
2916 | - return self.client_map[cookie].get(self.reaccept_timeout) |
2917 | - except QueueEmpty: |
2918 | - raise EOFError("Client did not reconnect within the timeout") |
2919 | - |
2920 | - |
2921 | -class ReconnectingSocket(object): |
2922 | - def __init__(self, listener, cookie, retries): |
2923 | - self.listener = listener |
2924 | - self.cookie = cookie |
2925 | - self.sock = None |
2926 | - self.retries = retries |
2927 | - self.blocking_mode = None |
2928 | - |
2929 | - def close(self): |
2930 | - if self.sock: |
2931 | - self.sock.close() |
2932 | - self.sock = ClosedFile |
2933 | - |
2934 | - def fileno(self): |
2935 | - return self._safeio(lambda sock: sock.fileno()) |
2936 | - def getsockname(self): |
2937 | - return self._safeio(lambda sock: sock.getsockname()) |
2938 | - def getpeername(self): |
2939 | - return self._safeio(lambda sock: sock.getpeername()) |
2940 | - def shutdown(self, mode): |
2941 | - if self.sock: |
2942 | - self.sock.shutdown(mode) |
2943 | - |
2944 | - def setblocking(self, mode): |
2945 | - self.blocking_mode = mode |
2946 | - if self.sock: |
2947 | - self.sock.setblocking(mode) |
2948 | - |
2949 | - def _safeio(self, callback): |
2950 | - for i in range(self.retries): |
2951 | - if self.sock is None: |
2952 | - self.sock = self.listener.reaccept(self.cookie) |
2953 | - if self.blocking_mode is not None: |
2954 | - self.sock.setblocking(self.blocking_mode) |
2955 | - |
2956 | - try: |
2957 | - return callback(self.sock) |
2958 | - except (EOFError, IOError, OSError, socket.error): |
2959 | - if i >= self.retries - 1: |
2960 | - raise |
2961 | - if self.sock: |
2962 | - self.sock.close() |
2963 | - self.sock = None |
2964 | - |
2965 | - def recv(self, count): |
2966 | - # print "ReconnectingSocket.recv(%r)" % (count,) |
2967 | - return self._safeio(lambda sock: sock.recv(count)) |
2968 | - |
2969 | - def send(self, data): |
2970 | - # print "ReconnectingSocket.send(%r)" % (len(data),) |
2971 | - return self._safeio(lambda sock: sock.send(data)) |
2972 | - |
2973 | - |
2974 | - |
2975 | - |
2976 | - |
2977 | - |
2978 | - |
2979 | - |
2980 | - |
2981 | diff --git a/plainbox/vendor/rpyc/experimental/splitbrain.py b/plainbox/vendor/rpyc/experimental/splitbrain.py |
2982 | deleted file mode 100644 |
2983 | index 3c89cf3..0000000 |
2984 | --- a/plainbox/vendor/rpyc/experimental/splitbrain.py |
2985 | +++ /dev/null |
2986 | @@ -1,247 +0,0 @@ |
2987 | -""" |
2988 | -The Magnificent Splitbrain |
2989 | -.. versionadded:: 3.3 |
2990 | - |
2991 | -""" |
2992 | -import sys |
2993 | -import atexit |
2994 | -import threading |
2995 | -from contextlib import contextmanager |
2996 | -import functools |
2997 | -import gc |
2998 | -try: |
2999 | - import __builtin__ as builtins |
3000 | -except ImportError: |
3001 | - import builtins # python 3+ |
3002 | -from types import ModuleType |
3003 | - |
3004 | -router = threading.local() |
3005 | - |
3006 | -routed_modules = set(["os", "os.path", "platform", "ntpath", "posixpath", "zipimport", "genericpath", |
3007 | - "posix", "nt", "signal", "time", "sysconfig", "_locale", "locale", "socket", "_socket", "ssl", "_ssl", |
3008 | - "struct", "_struct", "_symtable", "errno", "fcntl", "grp", "pwd", "select", "spwd", "syslog", "thread", |
3009 | - "_io", "io", "subprocess", "_subprocess", "datetime", "mmap", "msvcrt", "pdb", "bdb", "glob", "fnmatch", |
3010 | - #"_frozen_importlib", "imp", "exceptions" |
3011 | - ]) |
3012 | - |
3013 | -class RoutedModule(ModuleType): |
3014 | - def __init__(self, realmod): |
3015 | - ModuleType.__init__(self, realmod.__name__, getattr(realmod, "__doc__", None)) |
3016 | - object.__setattr__(self, "__realmod__", realmod) |
3017 | - object.__setattr__(self, "__file__", getattr(realmod, "__file__", None)) |
3018 | - def __repr__(self): |
3019 | - if self.__file__: |
3020 | - return "<module %r from %r>" % (self.__name__, self.__file__) |
3021 | - else: |
3022 | - return "<module %r (built-in)>" % (self.__name__,) |
3023 | - def __dir__(self): |
3024 | - return dir(self.__currmod__) |
3025 | - def __getattribute__(self, name): |
3026 | - if name == "__realmod__": |
3027 | - return object.__getattribute__(self, "__realmod__") |
3028 | - elif name == "__name__": |
3029 | - return object.__getattribute__(self, "__name__") |
3030 | - elif name == "__currmod__": |
3031 | - modname = object.__getattribute__(self, "__name__") |
3032 | - if hasattr(router, "conn"): |
3033 | - return router.conn.modules[modname] |
3034 | - else: |
3035 | - return object.__getattribute__(self, "__realmod__") |
3036 | - else: |
3037 | - return getattr(self.__currmod__, name) |
3038 | - def __delattr__(self, name, val): |
3039 | - return setattr(self.__currmod__, name, val) |
3040 | - def __setattr__(self, name, val): |
3041 | - return setattr(self.__currmod__, name, val) |
3042 | - |
3043 | -routed_sys_attrs = set(["byteorder", "platform", "getfilesystemencoding", "getdefaultencoding", "settrace", |
3044 | - "setprofile", "setrecursionlimit", "getprofile", "getrecursionlimit", "getsizeof", "gettrace", |
3045 | - "exc_clear", "exc_info", "exc_type", "last_type", "last_value", "last_traceback", |
3046 | - ]) |
3047 | - |
3048 | -class RoutedSysModule(ModuleType): |
3049 | - def __init__(self): |
3050 | - ModuleType.__init__(self, "sys", sys.__doc__) |
3051 | - def __dir__(self): |
3052 | - return dir(sys) |
3053 | - def __getattribute__(self, name): |
3054 | - if name in routed_sys_attrs and hasattr(router, "conn"): |
3055 | - return getattr(router.conn.modules["sys"], name) |
3056 | - else: |
3057 | - return getattr(sys, name) |
3058 | - def __setattr__(self, name, value): |
3059 | - if name in routed_sys_attrs and hasattr(router, "conn"): |
3060 | - setattr(router.conn.modules["sys"], name, value) |
3061 | - else: |
3062 | - setattr(sys, name, value) |
3063 | - |
3064 | -rsys = RoutedSysModule() |
3065 | - |
3066 | -class RemoteModule(ModuleType): |
3067 | - def __init__(self, realmod): |
3068 | - ModuleType.__init__(self, realmod.__name__, getattr(realmod, "__doc__", None)) |
3069 | - object.__setattr__(self, "__file__", getattr(realmod, "__file__", None)) |
3070 | - def __repr__(self): |
3071 | - try: |
3072 | - self.__currmod__ |
3073 | - except (AttributeError, ImportError): |
3074 | - return "<module %r (stale)>" % (self.__name__,) |
3075 | - if self.__file__: |
3076 | - return "<module %r from %r>" % (self.__name__, self.__file__) |
3077 | - else: |
3078 | - return "<module %r (built-in)>" % (self.__name__,) |
3079 | - def __dir__(self): |
3080 | - return dir(self.__currmod__) |
3081 | - |
3082 | - def __getattribute__(self, name): |
3083 | - if name == "__name__": |
3084 | - return object.__getattribute__(self, "__name__") |
3085 | - elif name == "__currmod__": |
3086 | - modname = object.__getattribute__(self, "__name__") |
3087 | - if not hasattr(router, "conn"): |
3088 | - raise AttributeError("Module %r is not available in this context" % (modname,)) |
3089 | - mod = router.conn.modules._ModuleNamespace__cache.get(modname) |
3090 | - if not mod: |
3091 | - raise AttributeError("Module %r is not available in this context" % (modname,)) |
3092 | - return mod |
3093 | - else: |
3094 | - return getattr(self.__currmod__, name) |
3095 | - def __delattr__(self, name, val): |
3096 | - return setattr(self.__currmod__, name, val) |
3097 | - def __setattr__(self, name, val): |
3098 | - return setattr(self.__currmod__, name, val) |
3099 | - |
3100 | - |
3101 | -_orig_import = builtins.__import__ |
3102 | - |
3103 | -def _importer(modname, *args, **kwargs): |
3104 | - if not hasattr(router, "conn"): |
3105 | - return _orig_import(modname, *args, **kwargs) |
3106 | - existing = sys.modules.get(modname, None) |
3107 | - if type(existing) is RoutedModule: |
3108 | - return existing |
3109 | - |
3110 | - mod = router.conn.modules[modname] |
3111 | - if existing and type(existing) is RemoteModule: |
3112 | - return existing |
3113 | - rmod = RemoteModule(mod) |
3114 | - sys.modules[modname] = rmod |
3115 | - return rmod |
3116 | - |
3117 | -_enabled = False |
3118 | -_prev_builtins = {} |
3119 | - |
3120 | -def enable_splitbrain(): |
3121 | - """Enables (activates) the Splitbrain machinery; must be called before entering |
3122 | - ``splitbrain`` or ``localbrain`` contexts""" |
3123 | - global _enabled |
3124 | - if _enabled: |
3125 | - return |
3126 | - sys.modules["sys"] = rsys |
3127 | - for modname in routed_modules: |
3128 | - try: |
3129 | - realmod = _orig_import(modname, [], [], "*") |
3130 | - except ImportError: |
3131 | - continue |
3132 | - rmod = RoutedModule(realmod) |
3133 | - sys.modules[modname] = rmod |
3134 | - for ref in gc.get_referrers(realmod): |
3135 | - if not isinstance(ref, dict) or "__name__" not in ref or ref.get("__file__") is None: |
3136 | - continue |
3137 | - n = ref["__name__"] |
3138 | - if n in routed_modules or n.startswith("rpyc") or n.startswith("importlib") or n.startswith("imp"): |
3139 | - continue |
3140 | - for k, v in ref.items(): |
3141 | - if v is realmod: |
3142 | - #print ("## %s.%s = %s" % (ref["__name__"], ref[k], modname)) |
3143 | - ref[k] = rmod |
3144 | - |
3145 | - builtins.__import__ = _importer |
3146 | - for funcname in ["open", "execfile", "file"]: |
3147 | - if not hasattr(builtins, funcname): |
3148 | - continue |
3149 | - def mkfunc(funcname, origfunc): |
3150 | - @functools.wraps(getattr(builtins, funcname)) |
3151 | - def tlbuiltin(*args, **kwargs): |
3152 | - if hasattr(router, "conn"): |
3153 | - func = getattr(router.conn.builtins, funcname) |
3154 | - else: |
3155 | - func = origfunc |
3156 | - return func(*args, **kwargs) |
3157 | - return tlbuiltin |
3158 | - origfunc = getattr(builtins, funcname) |
3159 | - _prev_builtins[funcname] = origfunc |
3160 | - setattr(builtins, funcname, mkfunc(funcname, origfunc)) |
3161 | - |
3162 | - _enabled = True |
3163 | - |
3164 | -def disable_splitbrain(): |
3165 | - """Disables (deactivates) the Splitbrain machinery""" |
3166 | - global _enabled |
3167 | - if not _enabled: |
3168 | - return |
3169 | - _enabled = False |
3170 | - for funcname, origfunc in _prev_builtins.items(): |
3171 | - setattr(builtins, funcname, origfunc) |
3172 | - for modname, mod in sys.modules.items(): |
3173 | - if isinstance(mod, RoutedModule): |
3174 | - sys.modules[modname] = mod.__realmod__ |
3175 | - for ref in gc.get_referrers(mod): |
3176 | - if isinstance(ref, dict) and "__name__" in ref and ref.get("__file__") is not None: |
3177 | - for k, v in ref.items(): |
3178 | - if v is mod: |
3179 | - ref[k] = mod.__realmod__ |
3180 | - sys.modules["sys"] = sys |
3181 | - builtins.__import__ = _orig_import |
3182 | - |
3183 | -atexit.register(disable_splitbrain) |
3184 | - |
3185 | -@contextmanager |
3186 | -def splitbrain(conn): |
3187 | - """Enter a splitbrain context in which imports take place over the given RPyC connection (expected to |
3188 | - be a SlaveService). You can enter this context only after calling ``enable()``""" |
3189 | - if not _enabled: |
3190 | - enable_splitbrain() |
3191 | - #raise ValueError("Splitbrain not enabled") |
3192 | - prev_conn = getattr(router, "conn", None) |
3193 | - prev_modules = sys.modules.copy() |
3194 | - router.conn = conn |
3195 | - prev_stdin = conn.modules.sys.stdin |
3196 | - prev_stdout = conn.modules.sys.stdout |
3197 | - prev_stderr = conn.modules.sys.stderr |
3198 | - conn.modules["sys"].stdin = sys.stdin |
3199 | - conn.modules["sys"].stdout = sys.stdout |
3200 | - conn.modules["sys"].stderr = sys.stderr |
3201 | - try: |
3202 | - yield |
3203 | - finally: |
3204 | - conn.modules["sys"].stdin = prev_stdin |
3205 | - conn.modules["sys"].stdout = prev_stdout |
3206 | - conn.modules["sys"].stderr = prev_stderr |
3207 | - sys.modules.clear() |
3208 | - sys.modules.update(prev_modules) |
3209 | - router.conn = prev_conn |
3210 | - if not router.conn: |
3211 | - del router.conn |
3212 | - |
3213 | -@contextmanager |
3214 | -def localbrain(): |
3215 | - """Return to operate on the local machine. You can enter this context only after calling ``enable()``""" |
3216 | - if not _enabled: |
3217 | - raise ValueError("Splitbrain not enabled") |
3218 | - prev_conn = getattr(router, "conn", None) |
3219 | - prev_modules = sys.modules.copy() |
3220 | - if hasattr(router, "conn"): |
3221 | - del router.conn |
3222 | - try: |
3223 | - yield |
3224 | - finally: |
3225 | - sys.modules.clear() |
3226 | - sys.modules.update(prev_modules) |
3227 | - router.conn = prev_conn |
3228 | - if not router.conn: |
3229 | - del router.conn |
3230 | - |
3231 | - |
3232 | - |
3233 | - |
3234 | diff --git a/plainbox/vendor/rpyc/lib/__init__.py b/plainbox/vendor/rpyc/lib/__init__.py |
3235 | index 9895f1e..d96656b 100644 |
3236 | --- a/plainbox/vendor/rpyc/lib/__init__.py |
3237 | +++ b/plainbox/vendor/rpyc/lib/__init__.py |
3238 | @@ -1,25 +1,32 @@ |
3239 | """ |
3240 | A library of various helpers functions and classes |
3241 | """ |
3242 | +import inspect |
3243 | import sys |
3244 | +import socket |
3245 | import logging |
3246 | import threading |
3247 | import time |
3248 | -from plainbox.vendor.rpyc.lib.compat import maxint |
3249 | +import random |
3250 | +from plainbox.vendor.rpyc.lib.compat import maxint # noqa: F401 |
3251 | |
3252 | |
3253 | class MissingModule(object): |
3254 | __slots__ = ["__name"] |
3255 | + |
3256 | def __init__(self, name): |
3257 | self.__name = name |
3258 | + |
3259 | def __getattr__(self, name): |
3260 | - if name.startswith("__"): # issue 71 |
3261 | + if name.startswith("__"): # issue 71 |
3262 | raise AttributeError("module %r not found" % (self.__name,)) |
3263 | raise ImportError("module %r not found" % (self.__name,)) |
3264 | + |
3265 | def __bool__(self): |
3266 | return False |
3267 | __nonzero__ = __bool__ |
3268 | |
3269 | + |
3270 | def safe_import(name): |
3271 | try: |
3272 | mod = __import__(name, None, None, "*") |
3273 | @@ -27,13 +34,14 @@ def safe_import(name): |
3274 | mod = MissingModule(name) |
3275 | except Exception: |
3276 | # issue 72: IronPython on Mono |
3277 | - if sys.platform == "cli" and name == "signal": #os.name == "posix": |
3278 | + if sys.platform == "cli" and name == "signal": # os.name == "posix": |
3279 | mod = MissingModule(name) |
3280 | else: |
3281 | raise |
3282 | return mod |
3283 | |
3284 | -def setup_logger(quiet = False, logfile = None): |
3285 | + |
3286 | +def setup_logger(quiet=False, logfile=None): |
3287 | opts = {} |
3288 | if quiet: |
3289 | opts['level'] = logging.ERROR |
3290 | @@ -48,10 +56,13 @@ class hybridmethod(object): |
3291 | """Decorator for hybrid instance/class methods that will act like a normal |
3292 | method if accessed via an instance, but act like classmethod if accessed |
3293 | via the class.""" |
3294 | + |
3295 | def __init__(self, func): |
3296 | self.func = func |
3297 | + |
3298 | def __get__(self, obj, cls): |
3299 | return self.func.__get__(cls if obj is None else obj, obj) |
3300 | + |
3301 | def __set__(self, obj, val): |
3302 | raise AttributeError("Cannot overwrite method") |
3303 | |
3304 | @@ -74,7 +85,8 @@ def spawn_waitready(init, main): |
3305 | """ |
3306 | event = threading.Event() |
3307 | stack = [event] # used to exchange arguments with thread, so `event` |
3308 | - # can be deleted when it has fulfilled its purpose. |
3309 | + # can be deleted when it has fulfilled its purpose. |
3310 | + |
3311 | def start(): |
3312 | stack.append(init()) |
3313 | stack.pop(0).set() |
3314 | @@ -92,7 +104,7 @@ class Timeout: |
3315 | self.tmax = timeout.tmax |
3316 | else: |
3317 | self.finite = timeout is not None and timeout >= 0 |
3318 | - self.tmax = time.time()+timeout if self.finite else None |
3319 | + self.tmax = time.time() + timeout if self.finite else None |
3320 | |
3321 | def expired(self): |
3322 | return self.finite and time.time() >= self.tmax |
3323 | @@ -102,3 +114,102 @@ class Timeout: |
3324 | |
3325 | def sleep(self, interval): |
3326 | time.sleep(min(interval, self.timeleft()) if self.finite else interval) |
3327 | + |
3328 | + |
3329 | +def socket_backoff_connect(family, socktype, proto, addr, timeout, attempts): |
3330 | +    """Connect with retry: on each timeout, back off for a pseudo-random delay greater than zero and less than |
3331 | +    51e-6, 153e-6, 358e-6, 768e-6, 1587e-6, 3225e-6, 6502e-6, 13056e-6, 26163e-6, 52377e-6 seconds, per attempt; |
3332 | +    this should help avoid congestion. |
3333 | + """ |
3334 | + sock = socket.socket(family, socktype, proto) |
3335 | + collision = 0 |
3336 | + connecting = True |
3337 | + while connecting: |
3338 | + collision += 1 |
3339 | + try: |
3340 | + sock.settimeout(timeout) |
3341 | + sock.connect(addr) |
3342 | + connecting = False |
3343 | + except socket.timeout: |
3344 | + if collision == attempts or attempts < 1: |
3345 | + raise |
3346 | + else: |
3347 | + sock.close() |
3348 | + sock = socket.socket(family, socktype, proto) |
3349 | + time.sleep(exp_backoff(collision)) |
3350 | + return sock |
3351 | + |
3352 | + |
3353 | +def exp_backoff(collision): |
3354 | + """ Exponential backoff algorithm from |
3355 | + Peterson, L.L., and Davie, B.S. Computer Networks: a systems approach. 5th ed. pp. 127 |
3356 | + """ |
3357 | + n = min(collision, 10) |
3358 | + supremum_adjustment = 1 if n > 3 else 0 |
3359 | + k = random.uniform(0, 2**n - supremum_adjustment) |
3360 | + return k * 0.0000512 |
3361 | + |
3362 | + |
3363 | +def get_id_pack(obj): |
3364 | + """introspects the given "local" object, returns id_pack as expected by BaseNetref |
3365 | + |
3366 | + The given object is "local" in the sense that it is from the local cache. Any object in the local cache exists |
3367 | + in the current address space or is a netref. A netref in the local cache could be from a chained-connection. |
3368 | + To handle type related behavior properly, the attribute `__class__` is a descriptor for netrefs. |
3369 | + |
3370 | + So, check thy assumptions regarding the given object when creating `id_pack`. |
3371 | + """ |
3372 | + if hasattr(obj, '____id_pack__'): |
3373 | + # netrefs are handled first since __class__ is a descriptor |
3374 | + return obj.____id_pack__ |
3375 | + elif inspect.ismodule(obj) or getattr(obj, '__name__', None) == 'module': |
3376 | + # TODO: not sure about this, need to enumerate cases in units |
3377 | + if isinstance(obj, type): # module |
3378 | + obj_cls = type(obj) |
3379 | + name_pack = '{0}.{1}'.format(obj_cls.__module__, obj_cls.__name__) |
3380 | + return (name_pack, id(type(obj)), id(obj)) |
3381 | + else: |
3382 | + if inspect.ismodule(obj) and obj.__name__ != 'module': |
3383 | + if obj.__name__ in sys.modules: |
3384 | + name_pack = obj.__name__ |
3385 | + else: |
3386 | + name_pack = '{0}.{1}'.format(obj.__class__.__module__, obj.__name__) |
3387 | +        elif inspect.ismodule(obj): |
3388 | +            name_pack = '{0}.{1}'.format(obj.__module__, obj.__name__) |
3389 | +            # NOTE(review): removed leftover debug output (was: print(name_pack)) |
3390 | + elif hasattr(obj, '__module__'): |
3391 | + name_pack = '{0}.{1}'.format(obj.__module__, obj.__name__) |
3392 | + else: |
3393 | + obj_cls = type(obj) |
3394 | + name_pack = '{0}'.format(obj.__name__) |
3395 | + return (name_pack, id(type(obj)), id(obj)) |
3396 | + elif not inspect.isclass(obj): |
3397 | + name_pack = '{0}.{1}'.format(obj.__class__.__module__, obj.__class__.__name__) |
3398 | + return (name_pack, id(type(obj)), id(obj)) |
3399 | + else: |
3400 | + name_pack = '{0}.{1}'.format(obj.__module__, obj.__name__) |
3401 | + return (name_pack, id(obj), 0) |
3402 | + |
3403 | + |
3404 | +def get_methods(obj_attrs, obj): |
3405 | + """introspects the given (local) object, returning a list of all of its |
3406 | + methods (going up the MRO). |
3407 | + |
3408 | + :param obj: any local (not proxy) python object |
3409 | + |
3410 | +    :returns: an iterable of ``(method name, docstring)`` pairs for all the methods |
3411 | +    of the given object |
3412 | + """ |
3413 | + methods = {} |
3414 | + attrs = {} |
3415 | + if isinstance(obj, type): |
3416 | + # don't forget the darn metaclass |
3417 | + mros = list(reversed(type(obj).__mro__)) + list(reversed(obj.__mro__)) |
3418 | + else: |
3419 | + mros = reversed(type(obj).__mro__) |
3420 | + for basecls in mros: |
3421 | + attrs.update(basecls.__dict__) |
3422 | + for name, attr in attrs.items(): |
3423 | + if name not in obj_attrs and hasattr(attr, "__call__"): |
3424 | + methods[name] = inspect.getdoc(attr) |
3425 | + return methods.items() |
3426 | diff --git a/plainbox/vendor/rpyc/lib/colls.py b/plainbox/vendor/rpyc/lib/colls.py |
3427 | index 1301862..e3b2be9 100644 |
3428 | --- a/plainbox/vendor/rpyc/lib/colls.py |
3429 | +++ b/plainbox/vendor/rpyc/lib/colls.py |
3430 | @@ -2,17 +2,23 @@ from __future__ import with_statement |
3431 | import weakref |
3432 | from threading import Lock |
3433 | |
3434 | + |
3435 | class WeakValueDict(object): |
3436 | """a light-weight version of weakref.WeakValueDictionary""" |
3437 | __slots__ = ("_dict",) |
3438 | + |
3439 | def __init__(self): |
3440 | self._dict = {} |
3441 | + |
3442 | def __repr__(self): |
3443 | return repr(self._dict) |
3444 | + |
3445 | def __iter__(self): |
3446 | return self.iterkeys() |
3447 | + |
3448 | def __len__(self): |
3449 | return len(self._dict) |
3450 | + |
3451 | def __contains__(self, key): |
3452 | try: |
3453 | self[key] |
3454 | @@ -20,59 +26,76 @@ class WeakValueDict(object): |
3455 | return False |
3456 | else: |
3457 | return True |
3458 | - def get(self, key, default = None): |
3459 | + |
3460 | + def get(self, key, default=None): |
3461 | try: |
3462 | return self[key] |
3463 | except KeyError: |
3464 | return default |
3465 | + |
3466 | def __getitem__(self, key): |
3467 | obj = self._dict[key]() |
3468 | if obj is None: |
3469 | raise KeyError(key) |
3470 | return obj |
3471 | + |
3472 | def __setitem__(self, key, value): |
3473 | - def remover(wr, _dict = self._dict, key = key): |
3474 | + def remover(wr, _dict=self._dict, key=key): |
3475 | _dict.pop(key, None) |
3476 | self._dict[key] = weakref.ref(value, remover) |
3477 | + |
3478 | def __delitem__(self, key): |
3479 | del self._dict[key] |
3480 | + |
3481 | def iterkeys(self): |
3482 | return self._dict.keys() |
3483 | + |
3484 | def keys(self): |
3485 | return self._dict.keys() |
3486 | + |
3487 | def itervalues(self): |
3488 | for k in self: |
3489 | yield self[k] |
3490 | + |
3491 | def values(self): |
3492 | return list(self.itervalues()) |
3493 | + |
3494 | def iteritems(self): |
3495 | for k in self: |
3496 | yield k, self[k] |
3497 | + |
3498 | def items(self): |
3499 | return list(self.iteritems()) |
3500 | + |
3501 | def clear(self): |
3502 | self._dict.clear() |
3503 | |
3504 | + |
3505 | class RefCountingColl(object): |
3506 | """a set-like object that implements refcounting on its contained objects""" |
3507 | __slots__ = ("_lock", "_dict") |
3508 | + |
3509 | def __init__(self): |
3510 | self._lock = Lock() |
3511 | self._dict = {} |
3512 | + |
3513 | def __repr__(self): |
3514 | return repr(self._dict) |
3515 | - def add(self, obj): |
3516 | + |
3517 | + def add(self, key, obj): |
3518 | + """Add object to refcounting coll.""" |
3519 | with self._lock: |
3520 | - key = id(obj) |
3521 | slot = self._dict.get(key, None) |
3522 | if slot is None: |
3523 | slot = [obj, 0] |
3524 | else: |
3525 | slot[1] += 1 |
3526 | self._dict[key] = slot |
3527 | + |
3528 | def clear(self): |
3529 | with self._lock: |
3530 | self._dict.clear() |
3531 | + |
3532 | def decref(self, key, count=1): |
3533 | with self._lock: |
3534 | slot = self._dict[key] |
3535 | @@ -81,7 +104,7 @@ class RefCountingColl(object): |
3536 | else: |
3537 | slot[1] -= count |
3538 | self._dict[key] = slot |
3539 | + |
3540 | def __getitem__(self, key): |
3541 | with self._lock: |
3542 | return self._dict[key][0] |
3543 | - |
3544 | diff --git a/plainbox/vendor/rpyc/lib/compat.py b/plainbox/vendor/rpyc/lib/compat.py |
3545 | index e941ec5..35be8fd 100644 |
3546 | --- a/plainbox/vendor/rpyc/lib/compat.py |
3547 | +++ b/plainbox/vendor/rpyc/lib/compat.py |
3548 | @@ -5,38 +5,46 @@ and various platforms (posix/windows) |
3549 | import sys |
3550 | import time |
3551 | |
3552 | -is_py3k = (sys.version_info[0] >= 3) |
3553 | +is_py_3k = (sys.version_info[0] >= 3) |
3554 | +is_py_gte38 = is_py_3k and (sys.version_info[1] >= 8) |
3555 | |
3556 | -if is_py3k: |
3557 | + |
3558 | +if is_py_3k: |
3559 | exec("execute = exec") |
3560 | + |
3561 | def BYTES_LITERAL(text): |
3562 | return bytes(text, "utf8") |
3563 | maxint = sys.maxsize |
3564 | else: |
3565 | exec("""def execute(code, globals = None, locals = None): |
3566 | exec code in globals, locals""") |
3567 | + |
3568 | def BYTES_LITERAL(text): |
3569 | return text |
3570 | maxint = sys.maxint |
3571 | |
3572 | try: |
3573 | - from struct import Struct #@UnusedImport |
3574 | + from struct import Struct # noqa: F401 |
3575 | except ImportError: |
3576 | import struct |
3577 | + |
3578 | class Struct(object): |
3579 | __slots__ = ["format", "size"] |
3580 | + |
3581 | def __init__(self, format): |
3582 | self.format = format |
3583 | self.size = struct.calcsize(format) |
3584 | + |
3585 | def pack(self, *args): |
3586 | return struct.pack(self.format, *args) |
3587 | + |
3588 | def unpack(self, data): |
3589 | return struct.unpack(self.format, data) |
3590 | |
3591 | try: |
3592 | from cStringIO import StringIO as BytesIO |
3593 | except ImportError: |
3594 | - from io import BytesIO #@UnusedImport |
3595 | + from io import BytesIO # noqa: F401 |
3596 | |
3597 | try: |
3598 | next = next |
3599 | @@ -47,7 +55,7 @@ except NameError: |
3600 | try: |
3601 | import cPickle as pickle |
3602 | except ImportError: |
3603 | - import pickle #@UnusedImport |
3604 | + import pickle # noqa: F401 |
3605 | |
3606 | try: |
3607 | callable = callable |
3608 | @@ -59,6 +67,7 @@ try: |
3609 | import select as select_module |
3610 | except ImportError: |
3611 | select_module = None |
3612 | + |
3613 | def select(*args): |
3614 | raise ImportError("select not supported on this platform") |
3615 | else: |
3616 | @@ -68,12 +77,14 @@ else: |
3617 | else: |
3618 | from select import select |
3619 | |
3620 | + |
3621 | def get_exc_errno(exc): |
3622 | if hasattr(exc, "errno"): |
3623 | return exc.errno |
3624 | else: |
3625 | return exc[0] |
3626 | |
3627 | + |
3628 | if select_module: |
3629 | select_error = select_module.error |
3630 | else: |
3631 | @@ -83,6 +94,7 @@ if hasattr(select_module, "poll"): |
3632 | class PollingPoll(object): |
3633 | def __init__(self): |
3634 | self._poll = select_module.poll() |
3635 | + |
3636 | def register(self, fd, mode): |
3637 | flags = 0 |
3638 | if "r" in mode: |
3639 | @@ -98,12 +110,14 @@ if hasattr(select_module, "poll"): |
3640 | flags |= select_module.POLLHUP | select_module.POLLNVAL | POLLRDHUP |
3641 | self._poll.register(fd, flags) |
3642 | modify = register |
3643 | + |
3644 | def unregister(self, fd): |
3645 | self._poll.unregister(fd) |
3646 | - def poll(self, timeout = None): |
3647 | + |
3648 | + def poll(self, timeout=None): |
3649 | if timeout: |
3650 | # the real poll takes milliseconds while we have seconds here |
3651 | - timeout = 1000*timeout |
3652 | + timeout = 1000 * timeout |
3653 | events = self._poll.poll(timeout) |
3654 | processed = [] |
3655 | for fd, evt in events: |
3656 | @@ -127,16 +141,19 @@ else: |
3657 | def __init__(self): |
3658 | self.rlist = set() |
3659 | self.wlist = set() |
3660 | + |
3661 | def register(self, fd, mode): |
3662 | if "r" in mode: |
3663 | self.rlist.add(fd) |
3664 | if "w" in mode: |
3665 | self.wlist.add(fd) |
3666 | modify = register |
3667 | + |
3668 | def unregister(self, fd): |
3669 | self.rlist.discard(fd) |
3670 | self.wlist.discard(fd) |
3671 | - def poll(self, timeout = None): |
3672 | + |
3673 | + def poll(self, timeout=None): |
3674 | if not self.rlist and not self.wlist: |
3675 | time.sleep(timeout) |
3676 | return [] # need to return an empty array in this case |
3677 | @@ -157,10 +174,11 @@ def with_metaclass(meta, *bases): |
3678 | return meta(name, bases, d) |
3679 | return type.__new__(metaclass, 'temporary_class', (), {}) |
3680 | |
3681 | + |
3682 | if sys.version_info >= (3, 3): |
3683 | - TimeoutError = TimeoutError |
3684 | + TimeoutError = TimeoutError # noqa: F821 |
3685 | else: |
3686 | - class TimeoutError(Exception): |
3687 | + class TimeoutError(Exception): # noqa: F821 |
3688 | pass |
3689 | |
3690 | if sys.version_info >= (3, 2): |
3691 | diff --git a/plainbox/vendor/rpyc/utils/__init__.py b/plainbox/vendor/rpyc/utils/__init__.py |
3692 | index bdc7f07..6a9f9ef 100644 |
3693 | --- a/plainbox/vendor/rpyc/utils/__init__.py |
3694 | +++ b/plainbox/vendor/rpyc/utils/__init__.py |
3695 | @@ -1,4 +1,3 @@ |
3696 | """ |
3697 | Utilities (not part of the core protocol) |
3698 | """ |
3699 | - |
3700 | diff --git a/plainbox/vendor/rpyc/utils/authenticators.py b/plainbox/vendor/rpyc/utils/authenticators.py |
3701 | index 226cd02..0d97882 100644 |
3702 | --- a/plainbox/vendor/rpyc/utils/authenticators.py |
3703 | +++ b/plainbox/vendor/rpyc/utils/authenticators.py |
3704 | @@ -27,6 +27,7 @@ import sys |
3705 | from plainbox.vendor.rpyc.lib import safe_import |
3706 | ssl = safe_import("ssl") |
3707 | |
3708 | + |
3709 | class AuthenticationError(Exception): |
3710 | """raised to signal a failed authentication attempt""" |
3711 | pass |
3712 | @@ -56,8 +57,8 @@ class SSLAuthenticator(object): |
3713 | service parameters. |
3714 | """ |
3715 | |
3716 | - def __init__(self, keyfile, certfile, ca_certs = None, cert_reqs = None, |
3717 | - ssl_version = None, ciphers = None): |
3718 | + def __init__(self, keyfile, certfile, ca_certs=None, cert_reqs=None, |
3719 | + ssl_version=None, ciphers=None): |
3720 | self.keyfile = str(keyfile) |
3721 | self.certfile = str(certfile) |
3722 | self.ca_certs = str(ca_certs) if ca_certs else None |
3723 | @@ -75,9 +76,9 @@ class SSLAuthenticator(object): |
3724 | self.ssl_version = ssl_version |
3725 | |
3726 | def __call__(self, sock): |
3727 | - kwargs = dict(keyfile = self.keyfile, certfile = self.certfile, |
3728 | - server_side = True, ca_certs = self.ca_certs, cert_reqs = self.cert_reqs, |
3729 | - ssl_version = self.ssl_version) |
3730 | + kwargs = dict(keyfile=self.keyfile, certfile=self.certfile, |
3731 | + server_side=True, ca_certs=self.ca_certs, cert_reqs=self.cert_reqs, |
3732 | + ssl_version=self.ssl_version) |
3733 | if self.ciphers is not None: |
3734 | kwargs["ciphers"] = self.ciphers |
3735 | try: |
3736 | @@ -86,6 +87,3 @@ class SSLAuthenticator(object): |
3737 | ex = sys.exc_info()[1] |
3738 | raise AuthenticationError(str(ex)) |
3739 | return sock2, sock2.getpeercert() |
3740 | - |
3741 | - |
3742 | - |
3743 | diff --git a/plainbox/vendor/rpyc/utils/classic.py b/plainbox/vendor/rpyc/utils/classic.py |
3744 | index 7699c06..2f97dcf 100644 |
3745 | --- a/plainbox/vendor/rpyc/utils/classic.py |
3746 | +++ b/plainbox/vendor/rpyc/utils/classic.py |
3747 | @@ -2,22 +2,23 @@ from __future__ import with_statement |
3748 | import sys |
3749 | import os |
3750 | import inspect |
3751 | -from plainbox.vendor.rpyc.lib.compat import pickle, execute, is_py3k |
3752 | +from plainbox.vendor.rpyc.lib.compat import pickle, execute, is_py_3k # noqa: F401 |
3753 | from plainbox.vendor.rpyc.core.service import ClassicService, Slave |
3754 | from plainbox.vendor.rpyc.utils import factory |
3755 | -from plainbox.vendor.rpyc.core.service import ModuleNamespace |
3756 | +from plainbox.vendor.rpyc.core.service import ModuleNamespace # noqa: F401 |
3757 | from contextlib import contextmanager |
3758 | |
3759 | |
3760 | DEFAULT_SERVER_PORT = 18812 |
3761 | DEFAULT_SERVER_SSL_PORT = 18821 |
3762 | |
3763 | -SlaveService = ClassicService # avoid renaming SlaveService in this module |
3764 | - # for now |
3765 | +SlaveService = ClassicService # avoid renaming SlaveService in this module for now |
3766 | |
3767 | -#=============================================================================== |
3768 | +# =============================================================================== |
3769 | # connecting |
3770 | -#=============================================================================== |
3771 | +# =============================================================================== |
3772 | + |
3773 | + |
3774 | def connect_channel(channel): |
3775 | """ |
3776 | Creates an RPyC connection over the given ``channel`` |
3777 | @@ -28,6 +29,7 @@ def connect_channel(channel): |
3778 | """ |
3779 | return factory.connect_channel(channel, SlaveService) |
3780 | |
3781 | + |
3782 | def connect_stream(stream): |
3783 | """ |
3784 | Creates an RPyC connection over the given stream |
3785 | @@ -38,6 +40,7 @@ def connect_stream(stream): |
3786 | """ |
3787 | return factory.connect_stream(stream, SlaveService) |
3788 | |
3789 | + |
3790 | def connect_stdpipes(): |
3791 | """ |
3792 | Creates an RPyC connection over the standard pipes (``stdin`` and ``stdout``) |
3793 | @@ -46,6 +49,7 @@ def connect_stdpipes(): |
3794 | """ |
3795 | return factory.connect_stdpipes(SlaveService) |
3796 | |
3797 | + |
3798 | def connect_pipes(input, output): |
3799 | """ |
3800 | Creates an RPyC connection over two pipes |
3801 | @@ -57,7 +61,8 @@ def connect_pipes(input, output): |
3802 | """ |
3803 | return factory.connect_pipes(input, output, SlaveService) |
3804 | |
3805 | -def connect(host, port = DEFAULT_SERVER_PORT, ipv6 = False, keepalive = False): |
3806 | + |
3807 | +def connect(host, port=DEFAULT_SERVER_PORT, ipv6=False, keepalive=False): |
3808 | """ |
3809 | Creates a socket connection to the given host and port. |
3810 | |
3811 | @@ -67,7 +72,8 @@ def connect(host, port = DEFAULT_SERVER_PORT, ipv6 = False, keepalive = False): |
3812 | |
3813 | :returns: an RPyC connection exposing ``SlaveService`` |
3814 | """ |
3815 | - return factory.connect(host, port, SlaveService, ipv6 = ipv6, keepalive = keepalive) |
3816 | + return factory.connect(host, port, SlaveService, ipv6=ipv6, keepalive=keepalive) |
3817 | + |
3818 | |
3819 | def unix_connect(path): |
3820 | """ |
3821 | @@ -79,9 +85,10 @@ def unix_connect(path): |
3822 | """ |
3823 | return factory.unix_connect(path, SlaveService) |
3824 | |
3825 | -def ssl_connect(host, port = DEFAULT_SERVER_SSL_PORT, keyfile = None, |
3826 | - certfile = None, ca_certs = None, cert_reqs = None, ssl_version = None, |
3827 | - ciphers = None, ipv6 = False): |
3828 | + |
3829 | +def ssl_connect(host, port=DEFAULT_SERVER_SSL_PORT, keyfile=None, |
3830 | + certfile=None, ca_certs=None, cert_reqs=None, ssl_version=None, |
3831 | + ciphers=None, ipv6=False): |
3832 | """Creates a secure (``SSL``) socket connection to the given host and port, |
3833 | authenticating with the given certfile and CA file. |
3834 | |
3835 | @@ -105,9 +112,10 @@ def ssl_connect(host, port = DEFAULT_SERVER_SSL_PORT, keyfile = None, |
3836 | |
3837 | .. _wrap_socket: |
3838 | """ |
3839 | - return factory.ssl_connect(host, port, keyfile = keyfile, certfile = certfile, |
3840 | - ssl_version = ssl_version, ca_certs = ca_certs, service = SlaveService, |
3841 | - ipv6 = ipv6) |
3842 | + return factory.ssl_connect(host, port, keyfile=keyfile, certfile=certfile, |
3843 | + ssl_version=ssl_version, ca_certs=ca_certs, service=SlaveService, |
3844 | + ipv6=ipv6) |
3845 | + |
3846 | |
3847 | def ssh_connect(remote_machine, remote_port): |
3848 | """Connects to the remote server over an SSH tunnel. See |
3849 | @@ -120,7 +128,8 @@ def ssh_connect(remote_machine, remote_port): |
3850 | """ |
3851 | return factory.ssh_connect(remote_machine, remote_port, SlaveService) |
3852 | |
3853 | -def connect_subproc(server_file = None): |
3854 | + |
3855 | +def connect_subproc(server_file=None): |
3856 | """Runs an RPyC classic server as a subprocess and returns an RPyC |
3857 | connection to it over stdio |
3858 | |
3859 | @@ -134,7 +143,8 @@ def connect_subproc(server_file = None): |
3860 | if not server_file: |
3861 | raise ValueError("server_file not given and could not be inferred") |
3862 | return factory.connect_subproc([sys.executable, "-u", server_file, "-q", "-m", "stdio"], |
3863 | - SlaveService) |
3864 | + SlaveService) |
3865 | + |
3866 | |
3867 | def connect_thread(): |
3868 | """ |
3869 | @@ -143,9 +153,10 @@ def connect_thread(): |
3870 | |
3871 | :returns: an RPyC connection exposing ``SlaveService`` |
3872 | """ |
3873 | - return factory.connect_thread(SlaveService, remote_service = SlaveService) |
3874 | + return factory.connect_thread(SlaveService, remote_service=SlaveService) |
3875 | |
3876 | -def connect_multiprocess(args = {}): |
3877 | + |
3878 | +def connect_multiprocess(args={}): |
3879 | """ |
3880 | Starts a SlaveService on a multiprocess process and connects to it. |
3881 | Useful for testing purposes and running multicore code thats uses shared |
3882 | @@ -153,14 +164,14 @@ def connect_multiprocess(args = {}): |
3883 | |
3884 | :returns: an RPyC connection exposing ``SlaveService`` |
3885 | """ |
3886 | - return factory.connect_multiprocess(SlaveService, remote_service = SlaveService, args=args) |
3887 | + return factory.connect_multiprocess(SlaveService, remote_service=SlaveService, args=args) |
3888 | |
3889 | |
3890 | -#=============================================================================== |
3891 | +# =============================================================================== |
3892 | # remoting utilities |
3893 | -#=============================================================================== |
3894 | +# =============================================================================== |
3895 | |
3896 | -def upload(conn, localpath, remotepath, filter = None, ignore_invalid = False, chunk_size = 16000): |
3897 | +def upload(conn, localpath, remotepath, filter=None, ignore_invalid=False, chunk_size=16000): |
3898 | """uploads a file or a directory to the given remote path |
3899 | |
3900 | :param localpath: the local file or directory |
3901 | @@ -177,7 +188,8 @@ def upload(conn, localpath, remotepath, filter = None, ignore_invalid = False, c |
3902 | if not ignore_invalid: |
3903 | raise ValueError("cannot upload %r" % (localpath,)) |
3904 | |
3905 | -def upload_file(conn, localpath, remotepath, chunk_size = 16000): |
3906 | + |
3907 | +def upload_file(conn, localpath, remotepath, chunk_size=16000): |
3908 | with open(localpath, "rb") as lf: |
3909 | with conn.builtin.open(remotepath, "wb") as rf: |
3910 | while True: |
3911 | @@ -186,16 +198,18 @@ def upload_file(conn, localpath, remotepath, chunk_size = 16000): |
3912 | break |
3913 | rf.write(buf) |
3914 | |
3915 | -def upload_dir(conn, localpath, remotepath, filter = None, chunk_size = 16000): |
3916 | + |
3917 | +def upload_dir(conn, localpath, remotepath, filter=None, chunk_size=16000): |
3918 | if not conn.modules.os.path.isdir(remotepath): |
3919 | conn.modules.os.makedirs(remotepath) |
3920 | for fn in os.listdir(localpath): |
3921 | if not filter or filter(fn): |
3922 | lfn = os.path.join(localpath, fn) |
3923 | rfn = conn.modules.os.path.join(remotepath, fn) |
3924 | - upload(conn, lfn, rfn, filter = filter, ignore_invalid = True, chunk_size = chunk_size) |
3925 | + upload(conn, lfn, rfn, filter=filter, ignore_invalid=True, chunk_size=chunk_size) |
3926 | |
3927 | -def download(conn, remotepath, localpath, filter = None, ignore_invalid = False, chunk_size = 16000): |
3928 | + |
3929 | +def download(conn, remotepath, localpath, filter=None, ignore_invalid=False, chunk_size=16000): |
3930 | """ |
3931 | download a file or a directory to the given remote path |
3932 | |
3933 | @@ -213,7 +227,8 @@ def download(conn, remotepath, localpath, filter = None, ignore_invalid = False, |
3934 | if not ignore_invalid: |
3935 | raise ValueError("cannot download %r" % (remotepath,)) |
3936 | |
3937 | -def download_file(conn, remotepath, localpath, chunk_size = 16000): |
3938 | + |
3939 | +def download_file(conn, remotepath, localpath, chunk_size=16000): |
3940 | with conn.builtin.open(remotepath, "rb") as rf: |
3941 | with open(localpath, "wb") as lf: |
3942 | while True: |
3943 | @@ -222,16 +237,18 @@ def download_file(conn, remotepath, localpath, chunk_size = 16000): |
3944 | break |
3945 | lf.write(buf) |
3946 | |
3947 | -def download_dir(conn, remotepath, localpath, filter = None, chunk_size = 16000): |
3948 | + |
3949 | +def download_dir(conn, remotepath, localpath, filter=None, chunk_size=16000): |
3950 | if not os.path.isdir(localpath): |
3951 | os.makedirs(localpath) |
3952 | for fn in conn.modules.os.listdir(remotepath): |
3953 | if not filter or filter(fn): |
3954 | rfn = conn.modules.os.path.join(remotepath, fn) |
3955 | lfn = os.path.join(localpath, fn) |
3956 | - download(conn, rfn, lfn, filter = filter, ignore_invalid = True) |
3957 | + download(conn, rfn, lfn, filter=filter, ignore_invalid=True) |
3958 | + |
3959 | |
3960 | -def upload_package(conn, module, remotepath = None, chunk_size = 16000): |
3961 | +def upload_package(conn, module, remotepath=None, chunk_size=16000): |
3962 | """ |
3963 | uploads a module or a package to the remote party |
3964 | |
3965 | @@ -255,10 +272,12 @@ def upload_package(conn, module, remotepath = None, chunk_size = 16000): |
3966 | site = conn.modules["distutils.sysconfig"].get_python_lib() |
3967 | remotepath = conn.modules.os.path.join(site, module.__name__) |
3968 | localpath = os.path.dirname(os.path.abspath(inspect.getsourcefile(module))) |
3969 | - upload(conn, localpath, remotepath, chunk_size = chunk_size) |
3970 | + upload(conn, localpath, remotepath, chunk_size=chunk_size) |
3971 | + |
3972 | |
3973 | upload_module = upload_package |
3974 | |
3975 | + |
3976 | def obtain(proxy): |
3977 | """obtains (copies) a remote object from a proxy object. the object is |
3978 | ``pickled`` on the remote side and ``unpickled`` locally, thus moved |
3979 | @@ -272,6 +291,7 @@ def obtain(proxy): |
3980 | """ |
3981 | return pickle.loads(pickle.dumps(proxy)) |
3982 | |
3983 | + |
3984 | def deliver(conn, localobj): |
3985 | """delivers (recreates) a local object on the other party. the object is |
3986 | ``pickled`` locally and ``unpickled`` on the remote side, thus moved |
3987 | @@ -288,6 +308,7 @@ def deliver(conn, localobj): |
3988 | return conn.modules["rpyc.lib.compat"].pickle.loads( |
3989 | bytes(pickle.dumps(localobj))) |
3990 | |
3991 | + |
3992 | @contextmanager |
3993 | def redirected_stdio(conn): |
3994 | r""" |
3995 | @@ -313,16 +334,18 @@ def redirected_stdio(conn): |
3996 | conn.modules.sys.stdout = orig_stdout |
3997 | conn.modules.sys.stderr = orig_stderr |
3998 | |
3999 | + |
4000 | def pm(conn): |
4001 | """same as ``pdb.pm()`` but on a remote exception |
4002 | |
4003 | :param conn: the RPyC connection |
4004 | """ |
4005 | - #pdb.post_mortem(conn.root.getconn()._last_traceback) |
4006 | + # pdb.post_mortem(conn.root.getconn()._last_traceback) |
4007 | with redirected_stdio(conn): |
4008 | conn.modules.pdb.post_mortem(conn.root.getconn()._last_traceback) |
4009 | |
4010 | -def interact(conn, namespace = None): |
4011 | + |
4012 | +def interact(conn, namespace=None): |
4013 | """remote interactive interpreter |
4014 | |
4015 | :param conn: the RPyC connection |
4016 | @@ -337,13 +360,16 @@ def interact(conn, namespace = None): |
4017 | code.interact(local = dict(ns))""") |
4018 | conn.namespace["_rinteract"](namespace) |
4019 | |
4020 | + |
4021 | class MockClassicConnection(object): |
4022 | """Mock classic RPyC connection object. Useful when you want the same code to run remotely or locally. |
4023 | """ |
4024 | + |
4025 | def __init__(self): |
4026 | self.root = Slave() |
4027 | ClassicService._install(self, self.root) |
4028 | |
4029 | + |
4030 | def teleport_function(conn, func, globals=None, def_=True): |
4031 | """ |
4032 | "Teleports" a function (including nested functions/closures) over the RPyC connection. |
4033 | @@ -363,7 +389,7 @@ def teleport_function(conn, func, globals=None, def_=True): |
4034 | """ |
4035 | if globals is None: |
4036 | globals = conn.namespace |
4037 | - from rpyc.utils.teleportation import export_function |
4038 | + from plainbox.vendor.rpyc.utils.teleportation import export_function |
4039 | exported = export_function(func) |
4040 | return conn.modules["rpyc.utils.teleportation"].import_function( |
4041 | exported, globals, def_) |
4042 | diff --git a/plainbox/vendor/rpyc/utils/factory.py b/plainbox/vendor/rpyc/utils/factory.py |
4043 | index 38cfdf0..23acc1f 100644 |
4044 | --- a/plainbox/vendor/rpyc/utils/factory.py |
4045 | +++ b/plainbox/vendor/rpyc/utils/factory.py |
4046 | @@ -29,9 +29,9 @@ class DiscoveryError(Exception): |
4047 | pass |
4048 | |
4049 | |
4050 | -#------------------------------------------------------------------------------ |
4051 | +# ------------------------------------------------------------------------------ |
4052 | # API |
4053 | -#------------------------------------------------------------------------------ |
4054 | +# ------------------------------------------------------------------------------ |
4055 | def connect_channel(channel, service=VoidService, config={}): |
4056 | """creates a connection over a given channel |
4057 | |
4058 | @@ -43,6 +43,7 @@ def connect_channel(channel, service=VoidService, config={}): |
4059 | """ |
4060 | return service._connect(channel, config) |
4061 | |
4062 | + |
4063 | def connect_stream(stream, service=VoidService, config={}): |
4064 | """creates a connection over a given stream |
4065 | |
4066 | @@ -54,6 +55,7 @@ def connect_stream(stream, service=VoidService, config={}): |
4067 | """ |
4068 | return connect_channel(Channel(stream), service=service, config=config) |
4069 | |
4070 | + |
4071 | def connect_pipes(input, output, service=VoidService, config={}): |
4072 | """ |
4073 | creates a connection over the given input/output pipes |
4074 | @@ -67,6 +69,7 @@ def connect_pipes(input, output, service=VoidService, config={}): |
4075 | """ |
4076 | return connect_stream(PipeStream(input, output), service=service, config=config) |
4077 | |
4078 | + |
4079 | def connect_stdpipes(service=VoidService, config={}): |
4080 | """ |
4081 | creates a connection over the standard input/output pipes |
4082 | @@ -78,6 +81,7 @@ def connect_stdpipes(service=VoidService, config={}): |
4083 | """ |
4084 | return connect_stream(PipeStream.from_std(), service=service, config=config) |
4085 | |
4086 | + |
4087 | def connect(host, port, service=VoidService, config={}, ipv6=False, keepalive=False): |
4088 | """ |
4089 | creates a socket-connection to the given host and port |
4090 | @@ -86,16 +90,18 @@ def connect(host, port, service=VoidService, config={}, ipv6=False, keepalive=Fa |
4091 | :param port: the TCP port to use |
4092 | :param service: the local service to expose (defaults to Void) |
4093 | :param config: configuration dict |
4094 | - :param ipv6: whether to use IPv6 or not |
4095 | + :param ipv6: whether to create an IPv6 socket (defaults to ``False``) |
4096 | + :param keepalive: whether to set TCP keepalive on the socket (defaults to ``False``) |
4097 | |
4098 | :returns: an RPyC connection |
4099 | """ |
4100 | s = SocketStream.connect(host, port, ipv6=ipv6, keepalive=keepalive) |
4101 | return connect_stream(s, service, config) |
4102 | |
4103 | + |
4104 | def unix_connect(path, service=VoidService, config={}): |
4105 | """ |
4106 | - creates a socket-connection to the given host and port |
4107 | + creates a socket-connection to the given unix domain socket |
4108 | |
4109 | :param path: the path to the unix domain socket |
4110 | :param service: the local service to expose (defaults to Void) |
4111 | @@ -106,6 +112,7 @@ def unix_connect(path, service=VoidService, config={}): |
4112 | s = SocketStream.unix_connect(path) |
4113 | return connect_stream(s, service, config) |
4114 | |
4115 | + |
4116 | def ssl_connect(host, port, keyfile=None, certfile=None, ca_certs=None, |
4117 | cert_reqs=None, ssl_version=None, ciphers=None, |
4118 | service=VoidService, config={}, ipv6=False, keepalive=False): |
4119 | @@ -117,7 +124,8 @@ def ssl_connect(host, port, keyfile=None, certfile=None, ca_certs=None, |
4120 | :param port: the TCP port to use |
4121 | :param service: the local service to expose (defaults to Void) |
4122 | :param config: configuration dict |
4123 | - :param ipv6: whether to create an IPv6 socket or an IPv4 one |
4124 | + :param ipv6: whether to create an IPv6 socket or an IPv4 one(defaults to ``False``) |
4125 | + :param keepalive: whether to set TCP keepalive on the socket (defaults to ``False``) |
4126 | |
4127 | The following arguments are passed directly to |
4128 | `ssl.wrap_socket <http://docs.python.org/dev/library/ssl.html#ssl.wrap_socket>`_: |
4129 | @@ -133,7 +141,7 @@ def ssl_connect(host, port, keyfile=None, certfile=None, ca_certs=None, |
4130 | |
4131 | :returns: an RPyC connection |
4132 | """ |
4133 | - ssl_kwargs = {"server_side" : False} |
4134 | + ssl_kwargs = {"server_side": False} |
4135 | if keyfile is not None: |
4136 | ssl_kwargs["keyfile"] = keyfile |
4137 | if certfile is not None: |
4138 | @@ -152,6 +160,7 @@ def ssl_connect(host, port, keyfile=None, certfile=None, ca_certs=None, |
4139 | s = SocketStream.ssl_connect(host, port, ssl_kwargs, ipv6=ipv6, keepalive=keepalive) |
4140 | return connect_stream(s, service, config) |
4141 | |
4142 | + |
4143 | def _get_free_port(): |
4144 | """attempts to find a free port""" |
4145 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) |
4146 | @@ -159,8 +168,10 @@ def _get_free_port(): |
4147 | s.bind(("localhost", 0)) |
4148 | return s.getsockname()[1] |
4149 | |
4150 | + |
4151 | _ssh_connect_lock = threading.Lock() |
4152 | |
4153 | + |
4154 | def ssh_connect(remote_machine, remote_port, service=VoidService, config={}): |
4155 | """ |
4156 | Connects to an RPyC server over an SSH tunnel (created by plumbum). |
4157 | @@ -186,6 +197,7 @@ def ssh_connect(remote_machine, remote_port, service=VoidService, config={}): |
4158 | stream.tun = tun |
4159 | return service._connect(Channel(stream), config=config) |
4160 | |
4161 | + |
4162 | def discover(service_name, host=None, registrar=None, timeout=2): |
4163 | """ |
4164 | discovers hosts running the given service |
4165 | @@ -212,6 +224,7 @@ def discover(service_name, host=None, registrar=None, timeout=2): |
4166 | raise DiscoveryError("no servers exposing %r were found on %r" % (service_name, host)) |
4167 | return addrs |
4168 | |
4169 | + |
4170 | def connect_by_service(service_name, host=None, service=VoidService, config={}): |
4171 | """create a connection to an arbitrary server that exposes the requested service |
4172 | |
4173 | @@ -235,6 +248,7 @@ def connect_by_service(service_name, host=None, service=VoidService, config={}): |
4174 | pass |
4175 | raise DiscoveryError("All services are down: %s" % (addrs,)) |
4176 | |
4177 | + |
4178 | def connect_subproc(args, service=VoidService, config={}): |
4179 | """runs an rpyc server on a child process that and connects to it over |
4180 | the stdio pipes. uses the subprocess module. |
4181 | @@ -246,17 +260,18 @@ def connect_subproc(args, service=VoidService, config={}): |
4182 | from subprocess import Popen, PIPE |
4183 | proc = Popen(args, stdin=PIPE, stdout=PIPE) |
4184 | conn = connect_pipes(proc.stdout, proc.stdin, service=service, config=config) |
4185 | - conn.proc = proc # just so you can have control over the processs |
4186 | + conn.proc = proc # just so you can have control over the processs |
4187 | return conn |
4188 | |
4189 | + |
4190 | def connect_thread(service=VoidService, config={}, remote_service=VoidService, remote_config={}): |
4191 | """starts an rpyc server on a new thread, bound to an arbitrary port, |
4192 | and connects to it over a socket. |
4193 | |
4194 | :param service: the local service to expose (defaults to Void) |
4195 | :param config: configuration dict |
4196 | - :param server_service: the remote service to expose (of the server; defaults to Void) |
4197 | - :param server_config: remote configuration dict (of the server) |
4198 | + :param remote_service: the remote service to expose (of the server; defaults to Void) |
4199 | + :param remote_config: remote configuration dict (of the server) |
4200 | """ |
4201 | listener = socket.socket() |
4202 | listener.bind(("localhost", 0)) |
4203 | @@ -276,6 +291,7 @@ def connect_thread(service=VoidService, config={}, remote_service=VoidService, r |
4204 | host, port = listener.getsockname() |
4205 | return connect(host, port, service=service, config=config) |
4206 | |
4207 | + |
4208 | def connect_multiprocess(service=VoidService, config={}, remote_service=VoidService, remote_config={}, args={}): |
4209 | """starts an rpyc server on a new process, bound to an arbitrary port, |
4210 | and connects to it over a socket. Basically a copy of connect_thread(). |
4211 | @@ -284,8 +300,8 @@ def connect_multiprocess(service=VoidService, config={}, remote_service=VoidServ |
4212 | |
4213 | :param service: the local service to expose (defaults to Void) |
4214 | :param config: configuration dict |
4215 | - :param server_service: the remote service to expose (of the server; defaults to Void) |
4216 | - :param server_config: remote configuration dict (of the server) |
4217 | + :param remote_service: the remote service to expose (of the server; defaults to Void) |
4218 | + :param remote_config: remote configuration dict (of the server) |
4219 | :param args: dict of local vars to pass to new connection, form {'name':var} |
4220 | |
4221 | Contributed by *@tvanzyl* |
4222 | diff --git a/plainbox/vendor/rpyc/utils/helpers.py b/plainbox/vendor/rpyc/utils/helpers.py |
4223 | index 221f171..521e226 100644 |
4224 | --- a/plainbox/vendor/rpyc/utils/helpers.py |
4225 | +++ b/plainbox/vendor/rpyc/utils/helpers.py |
4226 | @@ -9,7 +9,7 @@ from plainbox.vendor.rpyc.core.consts import HANDLE_BUFFITER, HANDLE_CALL |
4227 | from plainbox.vendor.rpyc.core.netref import syncreq, asyncreq |
4228 | |
4229 | |
4230 | -def buffiter(obj, chunk = 10, max_chunk = 1000, factor = 2): |
4231 | +def buffiter(obj, chunk=10, max_chunk=1000, factor=2): |
4232 | """Buffered iterator - reads the remote iterator in chunks starting with |
4233 | *chunk*, multiplying the chunk size by *factor* every time, as an |
4234 | exponential-backoff, up to a chunk of *max_chunk* size. |
4235 | @@ -45,7 +45,8 @@ def buffiter(obj, chunk = 10, max_chunk = 1000, factor = 2): |
4236 | for elem in items: |
4237 | yield elem |
4238 | |
4239 | -def restricted(obj, attrs, wattrs = None): |
4240 | + |
4241 | +def restricted(obj, attrs, wattrs=None): |
4242 | """Returns a 'restricted' version of an object, i.e., allowing access only to a subset of its |
4243 | attributes. This is useful when returning a "broad" or "dangerous" object, where you don't |
4244 | want the other party to have access to all of its attributes. |
4245 | @@ -71,12 +72,14 @@ def restricted(obj, attrs, wattrs = None): |
4246 | """ |
4247 | if wattrs is None: |
4248 | wattrs = attrs |
4249 | + |
4250 | class Restricted(object): |
4251 | def _rpyc_getattr(self, name): |
4252 | if name not in attrs: |
4253 | raise AttributeError(name) |
4254 | return getattr(obj, name) |
4255 | __getattr__ = _rpyc_getattr |
4256 | + |
4257 | def _rpyc_setattr(self, name, value): |
4258 | if name not in wattrs: |
4259 | raise AttributeError(name) |
4260 | @@ -84,20 +87,27 @@ def restricted(obj, attrs, wattrs = None): |
4261 | __setattr__ = _rpyc_setattr |
4262 | return Restricted() |
4263 | |
4264 | + |
4265 | class _Async(object): |
4266 | """Creates an async proxy wrapper over an existing proxy. Async proxies |
4267 | are cached. Invoking an async proxy will return an AsyncResult instead of |
4268 | blocking""" |
4269 | |
4270 | __slots__ = ("proxy", "__weakref__") |
4271 | + |
4272 | def __init__(self, proxy): |
4273 | self.proxy = proxy |
4274 | + |
4275 | def __call__(self, *args, **kwargs): |
4276 | return asyncreq(self.proxy, HANDLE_CALL, args, tuple(kwargs.items())) |
4277 | + |
4278 | def __repr__(self): |
4279 | return "async_(%r)" % (self.proxy,) |
4280 | |
4281 | + |
4282 | _async_proxies_cache = WeakValueDict() |
4283 | + |
4284 | + |
4285 | def async_(proxy): |
4286 | """ |
4287 | Returns an asynchronous "version" of the given proxy. Invoking the returned |
4288 | @@ -134,7 +144,7 @@ def async_(proxy): |
4289 | pid = id(proxy) |
4290 | if pid in _async_proxies_cache: |
4291 | return _async_proxies_cache[pid] |
4292 | - if not hasattr(proxy, "____conn__") or not hasattr(proxy, "____oid__"): |
4293 | + if not hasattr(proxy, "____conn__") or not hasattr(proxy, "____id_pack__"): |
4294 | raise TypeError("'proxy' must be a Netref: %r", (proxy,)) |
4295 | if not callable(proxy): |
4296 | raise TypeError("'proxy' must be callable: %r" % (proxy,)) |
4297 | @@ -142,6 +152,7 @@ def async_(proxy): |
4298 | _async_proxies_cache[id(caller)] = _async_proxies_cache[pid] = caller |
4299 | return caller |
4300 | |
4301 | + |
4302 | async_.__doc__ = _Async.__doc__ |
4303 | globals()['async'] = async_ # backward compatibility alias |
4304 | |
4305 | @@ -164,16 +175,20 @@ class timed(object): |
4306 | """ |
4307 | |
4308 | __slots__ = ("__weakref__", "proxy", "timeout") |
4309 | + |
4310 | def __init__(self, proxy, timeout): |
4311 | self.proxy = async_(proxy) |
4312 | self.timeout = timeout |
4313 | + |
4314 | def __call__(self, *args, **kwargs): |
4315 | res = self.proxy(*args, **kwargs) |
4316 | res.set_expiry(self.timeout) |
4317 | return res |
4318 | + |
4319 | def __repr__(self): |
4320 | return "timed(%r, %r)" % (self.proxy.proxy, self.timeout) |
4321 | |
4322 | + |
4323 | class BgServingThread(object): |
4324 | """Runs an RPyC server in the background to serve all requests and replies |
4325 | that arrive on the given RPyC connection. The thread is started upon the |
4326 | @@ -201,20 +216,23 @@ class BgServingThread(object): |
4327 | self._active = True |
4328 | self._callback = callback |
4329 | self._thread = spawn(self._bg_server) |
4330 | + |
4331 | def __del__(self): |
4332 | if self._active: |
4333 | self.stop() |
4334 | + |
4335 | def _bg_server(self): |
4336 | try: |
4337 | while self._active: |
4338 | self._conn.serve(self.SERVE_INTERVAL) |
4339 | - time.sleep(self.SLEEP_INTERVAL) # to reduce contention |
4340 | + time.sleep(self.SLEEP_INTERVAL) # to reduce contention |
4341 | except Exception: |
4342 | if self._active: |
4343 | self._active = False |
4344 | if self._callback is None: |
4345 | raise |
4346 | self._callback() |
4347 | + |
4348 | def stop(self): |
4349 | """stop the server thread. once stopped, it cannot be resumed. you will |
4350 | have to create a new BgServingThread object later.""" |
4351 | @@ -227,8 +245,10 @@ class BgServingThread(object): |
4352 | def classpartial(*args, **kwargs): |
4353 | """Bind arguments to a class's __init__.""" |
4354 | cls, args = args[0], args[1:] |
4355 | + |
4356 | class Partial(cls): |
4357 | __doc__ = cls.__doc__ |
4358 | + |
4359 | def __new__(self): |
4360 | return cls(*args, **kwargs) |
4361 | Partial.__name__ = cls.__name__ |
4362 | diff --git a/plainbox/vendor/rpyc/utils/registry.py b/plainbox/vendor/rpyc/utils/registry.py |
4363 | index 91b4eff..315d9f3 100644 |
4364 | --- a/plainbox/vendor/rpyc/utils/registry.py |
4365 | +++ b/plainbox/vendor/rpyc/utils/registry.py |
4366 | @@ -20,18 +20,18 @@ from plainbox.vendor.rpyc.core import brine |
4367 | |
4368 | |
4369 | DEFAULT_PRUNING_TIMEOUT = 4 * 60 |
4370 | -MAX_DGRAM_SIZE = 1500 |
4371 | -REGISTRY_PORT = 18811 |
4372 | +MAX_DGRAM_SIZE = 1500 |
4373 | +REGISTRY_PORT = 18811 |
4374 | |
4375 | |
4376 | -#------------------------------------------------------------------------------ |
4377 | +# ------------------------------------------------------------------------------ |
4378 | # servers |
4379 | -#------------------------------------------------------------------------------ |
4380 | +# ------------------------------------------------------------------------------ |
4381 | |
4382 | class RegistryServer(object): |
4383 | """Base registry server""" |
4384 | |
4385 | - def __init__(self, listenersock, pruning_timeout = None, logger = None): |
4386 | + def __init__(self, listenersock, pruning_timeout=None, logger=None): |
4387 | self.sock = listenersock |
4388 | self.port = self.sock.getsockname()[1] |
4389 | self.active = False |
4390 | @@ -85,7 +85,7 @@ class RegistryServer(object): |
4391 | return () |
4392 | |
4393 | oldest = time.time() - self.pruning_timeout |
4394 | - all_servers = sorted(self.services[name].items(), key = lambda x: x[1]) |
4395 | + all_servers = sorted(self.services[name].items(), key=lambda x: x[1]) |
4396 | servers = [] |
4397 | for addrinfo, t in all_servers: |
4398 | if t < oldest: |
4399 | @@ -167,20 +167,21 @@ class RegistryServer(object): |
4400 | self.logger.debug("stopping server...") |
4401 | self.active = False |
4402 | |
4403 | + |
4404 | class UDPRegistryServer(RegistryServer): |
4405 | """UDP-based registry server. The server listens to UDP broadcasts and |
4406 | answers them. Useful in local networks, were broadcasts are allowed""" |
4407 | |
4408 | TIMEOUT = 1.0 |
4409 | |
4410 | - def __init__(self, host = "0.0.0.0", port = REGISTRY_PORT, pruning_timeout = None, logger = None): |
4411 | + def __init__(self, host="0.0.0.0", port=REGISTRY_PORT, pruning_timeout=None, logger=None): |
4412 | family, socktype, proto, _, sockaddr = socket.getaddrinfo(host, port, 0, |
4413 | - socket.SOCK_DGRAM)[0] |
4414 | + socket.SOCK_DGRAM)[0] |
4415 | sock = socket.socket(family, socktype, proto) |
4416 | sock.bind(sockaddr) |
4417 | sock.settimeout(self.TIMEOUT) |
4418 | - RegistryServer.__init__(self, sock, pruning_timeout = pruning_timeout, |
4419 | - logger = logger) |
4420 | + RegistryServer.__init__(self, sock, pruning_timeout=pruning_timeout, |
4421 | + logger=logger) |
4422 | |
4423 | def _get_logger(self): |
4424 | return logging.getLogger("REGSRV/UDP/%d" % (self.port,)) |
4425 | @@ -194,6 +195,7 @@ class UDPRegistryServer(RegistryServer): |
4426 | except (socket.error, socket.timeout): |
4427 | pass |
4428 | |
4429 | + |
4430 | class TCPRegistryServer(RegistryServer): |
4431 | """TCP-based registry server. The server listens to a certain TCP port and |
4432 | answers requests. Useful when you need to cross routers in the network, since |
4433 | @@ -201,11 +203,11 @@ class TCPRegistryServer(RegistryServer): |
4434 | |
4435 | TIMEOUT = 3.0 |
4436 | |
4437 | - def __init__(self, host = "0.0.0.0", port = REGISTRY_PORT, pruning_timeout = None, |
4438 | - logger = None, reuse_addr = True): |
4439 | + def __init__(self, host="0.0.0.0", port=REGISTRY_PORT, pruning_timeout=None, |
4440 | + logger=None, reuse_addr=True): |
4441 | |
4442 | family, socktype, proto, _, sockaddr = socket.getaddrinfo(host, port, 0, |
4443 | - socket.SOCK_STREAM)[0] |
4444 | + socket.SOCK_STREAM)[0] |
4445 | sock = socket.socket(family, socktype, proto) |
4446 | if reuse_addr and sys.platform != "win32": |
4447 | # warning: reuseaddr is not what you expect on windows! |
4448 | @@ -213,8 +215,8 @@ class TCPRegistryServer(RegistryServer): |
4449 | sock.bind(sockaddr) |
4450 | sock.listen(10) |
4451 | sock.settimeout(self.TIMEOUT) |
4452 | - RegistryServer.__init__(self, sock, pruning_timeout = pruning_timeout, |
4453 | - logger = logger) |
4454 | + RegistryServer.__init__(self, sock, pruning_timeout=pruning_timeout, |
4455 | + logger=logger) |
4456 | self._connected_sockets = {} |
4457 | |
4458 | def _get_logger(self): |
4459 | @@ -235,15 +237,17 @@ class TCPRegistryServer(RegistryServer): |
4460 | except (socket.error, socket.timeout): |
4461 | pass |
4462 | |
4463 | -#------------------------------------------------------------------------------ |
4464 | +# ------------------------------------------------------------------------------ |
4465 | # clients (registrars) |
4466 | -#------------------------------------------------------------------------------ |
4467 | +# ------------------------------------------------------------------------------ |
4468 | + |
4469 | + |
4470 | class RegistryClient(object): |
4471 | """Base registry client. Also known as **registrar**""" |
4472 | |
4473 | REREGISTER_INTERVAL = 60 |
4474 | |
4475 | - def __init__(self, ip, port, timeout, logger = None): |
4476 | + def __init__(self, ip, port, timeout, logger=None): |
4477 | self.ip = ip |
4478 | self.port = port |
4479 | self.timeout = timeout |
4480 | @@ -280,6 +284,7 @@ class RegistryClient(object): |
4481 | """ |
4482 | raise NotImplementedError() |
4483 | |
4484 | + |
4485 | class UDPRegistryClient(RegistryClient): |
4486 | """UDP-based registry clients. By default, it sends UDP broadcasts (requires |
4487 | special user privileges on certain OS's) and collects the replies. You can |
4488 | @@ -294,10 +299,10 @@ class UDPRegistryClient(RegistryClient): |
4489 | Consider using :func:`rpyc.utils.factory.discover` instead |
4490 | """ |
4491 | |
4492 | - def __init__(self, ip = "255.255.255.255", port = REGISTRY_PORT, timeout = 2, |
4493 | - bcast = None, logger = None, ipv6 = False): |
4494 | - RegistryClient.__init__(self, ip = ip, port = port, timeout = timeout, |
4495 | - logger = logger) |
4496 | + def __init__(self, ip="255.255.255.255", port=REGISTRY_PORT, timeout=2, |
4497 | + bcast=None, logger=None, ipv6=False): |
4498 | + RegistryClient.__init__(self, ip=ip, port=port, timeout=timeout, |
4499 | + logger=logger) |
4500 | |
4501 | if ipv6: |
4502 | self.sock_family = socket.AF_INET6 |
4503 | @@ -329,7 +334,7 @@ class UDPRegistryClient(RegistryClient): |
4504 | servers = brine.load(data) |
4505 | return servers |
4506 | |
4507 | - def register(self, aliases, port, interface = ""): |
4508 | + def register(self, aliases, port, interface=""): |
4509 | self.logger.info("registering on %s:%s", self.ip, self.port) |
4510 | sock = socket.socket(self.sock_family, socket.SOCK_DGRAM) |
4511 | with closing(sock): |
4512 | @@ -384,9 +389,9 @@ class TCPRegistryClient(RegistryClient): |
4513 | Consider using :func:`rpyc.utils.factory.discover` instead |
4514 | """ |
4515 | |
4516 | - def __init__(self, ip, port = REGISTRY_PORT, timeout = 2, logger = None): |
4517 | - RegistryClient.__init__(self, ip = ip, port = port, timeout = timeout, |
4518 | - logger = logger) |
4519 | + def __init__(self, ip, port=REGISTRY_PORT, timeout=2, logger=None): |
4520 | + RegistryClient.__init__(self, ip=ip, port=port, timeout=timeout, |
4521 | + logger=logger) |
4522 | |
4523 | def _get_logger(self): |
4524 | return logging.getLogger('REGCLNT/TCP') |
4525 | @@ -407,7 +412,7 @@ class TCPRegistryClient(RegistryClient): |
4526 | servers = brine.load(data) |
4527 | return servers |
4528 | |
4529 | - def register(self, aliases, port, interface = ""): |
4530 | + def register(self, aliases, port, interface=""): |
4531 | self.logger.info("registering on %s:%s", self.ip, self.port) |
4532 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) |
4533 | with closing(sock): |
4534 | diff --git a/plainbox/vendor/rpyc/utils/server.py b/plainbox/vendor/rpyc/utils/server.py |
4535 | index f8e9c37..d50e118 100644 |
4536 | --- a/plainbox/vendor/rpyc/utils/server.py |
4537 | +++ b/plainbox/vendor/rpyc/utils/server.py |
4538 | @@ -5,7 +5,7 @@ import sys |
4539 | import os |
4540 | import socket |
4541 | import time |
4542 | -import threading |
4543 | +import threading # noqa: F401 |
4544 | import errno |
4545 | import logging |
4546 | from contextlib import closing |
4547 | @@ -22,7 +22,6 @@ signal = safe_import("signal") |
4548 | gevent = safe_import("gevent") |
4549 | |
4550 | |
4551 | - |
4552 | class Server(object): |
4553 | """Base server implementation |
4554 | |
4555 | @@ -48,10 +47,10 @@ class Server(object): |
4556 | on embedded platforms with limited battery) |
4557 | """ |
4558 | |
4559 | - def __init__(self, service, hostname = "", ipv6 = False, port = 0, |
4560 | - backlog = 10, reuse_addr = True, authenticator = None, registrar = None, |
4561 | - auto_register = None, protocol_config = {}, logger = None, listener_timeout = 0.5, |
4562 | - socket_path = None): |
4563 | + def __init__(self, service, hostname="", ipv6=False, port=0, |
4564 | + backlog=socket.SOMAXCONN, reuse_addr=True, authenticator=None, registrar=None, |
4565 | + auto_register=None, protocol_config={}, logger=None, listener_timeout=0.5, |
4566 | + socket_path=None): |
4567 | self.active = False |
4568 | self._closed = False |
4569 | self.service = service |
4570 | @@ -65,7 +64,7 @@ class Server(object): |
4571 | self.clients = set() |
4572 | |
4573 | if socket_path is not None: |
4574 | - if hostname != "" or port != 0 or ipv6 != False: |
4575 | + if hostname != "" or port != 0 or ipv6 is not False: |
4576 | raise ValueError("socket_path is mutually exclusive with: hostname, port, ipv6") |
4577 | self.listener = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) |
4578 | self.listener.bind(socket_path) |
4579 | @@ -99,7 +98,7 @@ class Server(object): |
4580 | if "logger" not in self.protocol_config: |
4581 | self.protocol_config["logger"] = self.logger |
4582 | if registrar is None: |
4583 | - registrar = UDPRegistryClient(logger = self.logger) |
4584 | + registrar = UDPRegistryClient(logger=self.logger) |
4585 | self.registrar = registrar |
4586 | |
4587 | def close(self): |
4588 | @@ -187,7 +186,7 @@ class Server(object): |
4589 | sock.shutdown(socket.SHUT_RDWR) |
4590 | except Exception: |
4591 | pass |
4592 | - sock.close() |
4593 | + closing(sock) |
4594 | self.clients.discard(sock) |
4595 | |
4596 | def _serve_client(self, sock, credentials): |
4597 | @@ -197,8 +196,8 @@ class Server(object): |
4598 | else: |
4599 | self.logger.info("welcome %s", addrinfo) |
4600 | try: |
4601 | - config = dict(self.protocol_config, credentials = credentials, |
4602 | - endpoints = (sock.getsockname(), addrinfo), logger = self.logger) |
4603 | + config = dict(self.protocol_config, credentials=credentials, |
4604 | + endpoints=(sock.getsockname(), addrinfo), logger=self.logger) |
4605 | conn = self.service._connect(Channel(SocketStream(sock)), config) |
4606 | self._handle_connection(conn) |
4607 | finally: |
4608 | @@ -211,7 +210,7 @@ class Server(object): |
4609 | def _bg_register(self): |
4610 | interval = self.registrar.REREGISTER_INTERVAL |
4611 | self.logger.info("started background auto-register thread " |
4612 | - "(interval = %s)", interval) |
4613 | + "(interval = %s)", interval) |
4614 | tnext = 0 |
4615 | try: |
4616 | while self.active: |
4617 | @@ -220,7 +219,7 @@ class Server(object): |
4618 | did_register = False |
4619 | aliases = self.service.get_service_aliases() |
4620 | try: |
4621 | - did_register = self.registrar.register(aliases, self.port, interface = self.host) |
4622 | + did_register = self.registrar.register(aliases, self.port, interface=self.host) |
4623 | except Exception: |
4624 | self.logger.exception("error registering services") |
4625 | |
4626 | @@ -262,7 +261,7 @@ class Server(object): |
4627 | while self.active: |
4628 | self.accept() |
4629 | except EOFError: |
4630 | - pass # server closed by another thread |
4631 | + pass # server closed by another thread |
4632 | except KeyboardInterrupt: |
4633 | print("") |
4634 | self.logger.warn("keyboard interrupt!") |
4635 | @@ -285,9 +284,13 @@ class OneShotServer(Server): |
4636 | |
4637 | Parameters: see :class:`Server` |
4638 | """ |
4639 | + |
4640 | def _accept_method(self, sock): |
4641 | - with closing(sock): |
4642 | + try: |
4643 | self._authenticate_and_serve_client(sock) |
4644 | + finally: |
4645 | + self.close() |
4646 | + |
4647 | |
4648 | class ThreadedServer(Server): |
4649 | """ |
4650 | @@ -296,6 +299,7 @@ class ThreadedServer(Server): |
4651 | |
4652 | Parameters: see :class:`Server` |
4653 | """ |
4654 | + |
4655 | def _accept_method(self, sock): |
4656 | spawn(self._authenticate_and_serve_client, sock) |
4657 | |
4658 | @@ -317,31 +321,27 @@ class ThreadPoolServer(Server): |
4659 | def __init__(self, *args, **kwargs): |
4660 | '''Initializes a ThreadPoolServer. In particular, instantiate the thread pool.''' |
4661 | # get the number of threads in the pool |
4662 | - nbthreads = 20 |
4663 | - if 'nbThreads' in kwargs: |
4664 | - nbthreads = kwargs['nbThreads'] |
4665 | - del kwargs['nbThreads'] |
4666 | - # get the request batch size |
4667 | - self.request_batch_size = 10 |
4668 | - if 'requestBatchSize' in kwargs: |
4669 | - self.request_batch_size = kwargs['requestBatchSize'] |
4670 | - del kwargs['requestBatchSize'] |
4671 | + self.nbthreads = kwargs.pop('nbThreads', 20) |
4672 | + self.request_batch_size = kwargs.pop('requestBatchSize', 10) |
4673 | # init the parent |
4674 | Server.__init__(self, *args, **kwargs) |
4675 | # a queue of connections having something to process |
4676 | self._active_connection_queue = Queue.Queue() |
4677 | - # declare the pool as already active |
4678 | - self.active = True |
4679 | + # a dictionary fd -> connection |
4680 | + self.fd_to_conn = {} |
4681 | + # a polling object to be used be the polling thread |
4682 | + self.poll_object = poll() |
4683 | + |
4684 | + def _listen(self): |
4685 | + if self.active: |
4686 | + return |
4687 | + super(ThreadPoolServer, self)._listen() |
4688 | # setup the thread pool for handling requests |
4689 | self.workers = [] |
4690 | - for i in range(nbthreads): |
4691 | + for i in range(self.nbthreads): |
4692 | t = spawn(self._serve_clients) |
4693 | t.setName('Worker%i' % i) |
4694 | self.workers.append(t) |
4695 | - # a polling object to be used be the polling thread |
4696 | - self.poll_object = poll() |
4697 | - # a dictionary fd -> connection |
4698 | - self.fd_to_conn = {} |
4699 | # setup a thread for polling inactive connections |
4700 | self.polling_thread = spawn(self._poll_inactive_clients) |
4701 | self.polling_thread.setName('PollingThread') |
4702 | @@ -428,7 +428,7 @@ class ThreadPoolServer(Server): |
4703 | # serve a maximum of RequestBatchSize requests for this connection |
4704 | for _ in range(self.request_batch_size): |
4705 | try: |
4706 | - if not self.fd_to_conn[fd].poll(): # note that poll serves the request |
4707 | + if not self.fd_to_conn[fd].poll(): # note that poll serves the request |
4708 | # we could not find a request, so we put this connection back to the inactive set |
4709 | self._add_inactive_connection(fd) |
4710 | return |
4711 | @@ -472,40 +472,32 @@ class ThreadPoolServer(Server): |
4712 | changed if rpyc evolves''' |
4713 | # authenticate |
4714 | if self.authenticator: |
4715 | - h, p = sock.getpeername() |
4716 | - try: |
4717 | - sock, credentials = self.authenticator(sock) |
4718 | - except AuthenticationError: |
4719 | - self.logger.warning("%s:%s failed to authenticate, rejecting connection", h, p) |
4720 | - return None |
4721 | + sock, credentials = self.authenticator(sock) |
4722 | else: |
4723 | credentials = None |
4724 | # build a connection |
4725 | h, p = sock.getpeername() |
4726 | - config = dict(self.protocol_config, credentials=credentials, connid="%s:%d"%(h, p), |
4727 | + config = dict(self.protocol_config, credentials=credentials, connid="%s:%d" % (h, p), |
4728 | endpoints=(sock.getsockname(), (h, p))) |
4729 | - return self.service._connect(Channel(SocketStream(sock)), config) |
4730 | + return sock, self.service._connect(Channel(SocketStream(sock)), config) |
4731 | |
4732 | def _accept_method(self, sock): |
4733 | '''Implementation of the accept method : only pushes the work to the internal queue. |
4734 | In case the queue is full, raises an AsynResultTimeout error''' |
4735 | try: |
4736 | + h, p = None, None |
4737 | # authenticate and build connection object |
4738 | - conn = self._authenticate_and_build_connection(sock) |
4739 | + sock, conn = self._authenticate_and_build_connection(sock) |
4740 | # put the connection in the active queue |
4741 | - if conn: |
4742 | - h, p = sock.getpeername() |
4743 | - fd = conn.fileno() |
4744 | - self.logger.debug("Created connection to %s:%d with fd %d", h, p, fd) |
4745 | - self.fd_to_conn[fd] = conn |
4746 | - self._add_inactive_connection(fd) |
4747 | - self.clients.clear() |
4748 | - else: |
4749 | - self.logger.warning("Failed to authenticate and build connection, closing %s:%d", h, p) |
4750 | - sock.close() |
4751 | - except Exception: |
4752 | h, p = sock.getpeername() |
4753 | - self.logger.exception("Failed to serve client for %s:%d, caught exception", h, p) |
4754 | + fd = conn.fileno() |
4755 | + self.logger.debug("Created connection to %s:%d with fd %d", h, p, fd) |
4756 | + self.fd_to_conn[fd] = conn |
4757 | + self._add_inactive_connection(fd) |
4758 | + self.clients.clear() |
4759 | + except Exception: |
4760 | + err_msg = "Failed to serve client for {}:{}, caught exception".format(h, p) |
4761 | + self.logger.exception(err_msg) |
4762 | sock.close() |
4763 | |
4764 | |
4765 | @@ -547,12 +539,12 @@ class ForkingServer(Server): |
4766 | try: |
4767 | self.logger.debug("child process created") |
4768 | signal.signal(signal.SIGCHLD, self._prevhandler) |
4769 | - #76: call signal.siginterrupt(False) in forked child |
4770 | + # 76: call signal.siginterrupt(False) in forked child |
4771 | signal.siginterrupt(signal.SIGCHLD, False) |
4772 | self.listener.close() |
4773 | self.clients.clear() |
4774 | self._authenticate_and_serve_client(sock) |
4775 | - except: |
4776 | + except Exception: |
4777 | self.logger.exception("child process terminated abnormally") |
4778 | else: |
4779 | self.logger.debug("child process terminated") |
4780 | @@ -562,6 +554,7 @@ class ForkingServer(Server): |
4781 | else: |
4782 | # parent |
4783 | sock.close() |
4784 | + self.clients.discard(sock) |
4785 | |
4786 | |
4787 | class GeventServer(Server): |
4788 | diff --git a/plainbox/vendor/rpyc/utils/teleportation.py b/plainbox/vendor/rpyc/utils/teleportation.py |
4789 | index 36aafe4..451c93d 100644 |
4790 | --- a/plainbox/vendor/rpyc/utils/teleportation.py |
4791 | +++ b/plainbox/vendor/rpyc/utils/teleportation.py |
4792 | @@ -3,8 +3,8 @@ import sys |
4793 | try: |
4794 | import __builtin__ |
4795 | except ImportError: |
4796 | - import builtins as __builtin__ |
4797 | -from plainbox.vendor.rpyc.lib.compat import is_py3k |
4798 | + import builtins as __builtin__ # noqa: F401 |
4799 | +from plainbox.vendor.rpyc.lib.compat import is_py_3k, is_py_gte38 |
4800 | from types import CodeType, FunctionType |
4801 | from plainbox.vendor.rpyc.core import brine |
4802 | from plainbox.vendor.rpyc.core import netref |
4803 | @@ -14,7 +14,7 @@ CODEOBJ_MAGIC = "MAg1c J0hNNzo0hn ZqhuBP17LQk8" |
4804 | |
4805 | # NOTE: dislike this kind of hacking on the level of implementation details, |
4806 | # should search for a more reliable/future-proof way: |
4807 | -CODE_HAVEARG_SIZE = 2 if sys.version_info >= (3,6) else 3 |
4808 | +CODE_HAVEARG_SIZE = 2 if sys.version_info >= (3, 6) else 3 |
4809 | try: |
4810 | from dis import _unpack_opargs |
4811 | except ImportError: |
4812 | @@ -26,20 +26,20 @@ except ImportError: |
4813 | while i < n: |
4814 | op = code[i] |
4815 | offset = i |
4816 | - i = i+1 |
4817 | + i = i + 1 |
4818 | arg = None |
4819 | if op >= opcode.HAVE_ARGUMENT: |
4820 | - arg = code[i] + code[i+1]*256 + extended_arg |
4821 | + arg = code[i] + code[i + 1] * 256 + extended_arg |
4822 | extended_arg = 0 |
4823 | - i = i+2 |
4824 | + i = i + 2 |
4825 | if op == opcode.EXTENDED_ARG: |
4826 | - extended_arg = arg*65536 |
4827 | + extended_arg = arg * 65536 |
4828 | yield (offset, op, arg) |
4829 | |
4830 | |
4831 | def decode_codeobj(codeobj): |
4832 | # adapted from dis.dis |
4833 | - if is_py3k: |
4834 | + if is_py_3k: |
4835 | codestr = codeobj.co_code |
4836 | else: |
4837 | codestr = [ord(ch) for ch in codeobj.co_code] |
4838 | @@ -64,6 +64,7 @@ def decode_codeobj(codeobj): |
4839 | |
4840 | yield (opname, argval) |
4841 | |
4842 | + |
4843 | def _export_codeobj(cobj): |
4844 | consts2 = [] |
4845 | for const in cobj.co_consts: |
4846 | @@ -74,20 +75,27 @@ def _export_codeobj(cobj): |
4847 | else: |
4848 | raise TypeError("Cannot export a function with non-brinable constants: %r" % (const,)) |
4849 | |
4850 | - if is_py3k: |
4851 | + if is_py_gte38: |
4852 | + # Constructor was changed in 3.8 to support "advanced" programming styles |
4853 | + exported = (cobj.co_argcount, cobj.co_posonlyargcount, cobj.co_kwonlyargcount, cobj.co_nlocals, |
4854 | + cobj.co_stacksize, cobj.co_flags, cobj.co_code, tuple(consts2), cobj.co_names, cobj.co_varnames, |
4855 | + cobj.co_filename, cobj.co_name, cobj.co_firstlineno, cobj.co_lnotab, cobj.co_freevars, |
4856 | + cobj.co_cellvars) |
4857 | + elif is_py_3k: |
4858 | exported = (cobj.co_argcount, cobj.co_kwonlyargcount, cobj.co_nlocals, cobj.co_stacksize, cobj.co_flags, |
4859 | - cobj.co_code, tuple(consts2), cobj.co_names, cobj.co_varnames, cobj.co_filename, |
4860 | - cobj.co_name, cobj.co_firstlineno, cobj.co_lnotab, cobj.co_freevars, cobj.co_cellvars) |
4861 | + cobj.co_code, tuple(consts2), cobj.co_names, cobj.co_varnames, cobj.co_filename, |
4862 | + cobj.co_name, cobj.co_firstlineno, cobj.co_lnotab, cobj.co_freevars, cobj.co_cellvars) |
4863 | else: |
4864 | exported = (cobj.co_argcount, cobj.co_nlocals, cobj.co_stacksize, cobj.co_flags, |
4865 | - cobj.co_code, tuple(consts2), cobj.co_names, cobj.co_varnames, cobj.co_filename, |
4866 | - cobj.co_name, cobj.co_firstlineno, cobj.co_lnotab, cobj.co_freevars, cobj.co_cellvars) |
4867 | + cobj.co_code, tuple(consts2), cobj.co_names, cobj.co_varnames, cobj.co_filename, |
4868 | + cobj.co_name, cobj.co_firstlineno, cobj.co_lnotab, cobj.co_freevars, cobj.co_cellvars) |
4869 | |
4870 | assert brine.dumpable(exported) |
4871 | return (CODEOBJ_MAGIC, exported) |
4872 | |
4873 | + |
4874 | def export_function(func): |
4875 | - if is_py3k: |
4876 | + if is_py_3k: |
4877 | func_closure = func.__closure__ |
4878 | func_code = func.__code__ |
4879 | func_defaults = func.__defaults__ |
4880 | @@ -103,13 +111,20 @@ def export_function(func): |
4881 | |
4882 | return func.__name__, func.__module__, func_defaults, _export_codeobj(func_code)[1] |
4883 | |
4884 | + |
4885 | def _import_codetup(codetup): |
4886 | - if is_py3k: |
4887 | - (argcnt, kwargcnt, nloc, stk, flg, codestr, consts, names, varnames, filename, name, |
4888 | - firstlineno, lnotab, freevars, cellvars) = codetup |
4889 | + if is_py_3k: |
4890 | + # Handle tuples sent from 3.8 as well as 3 < version < 3.8. |
4891 | + if len(codetup) == 16: |
4892 | + (argcount, posonlyargcount, kwonlyargcount, nlocals, stacksize, flags, code, consts, names, varnames, |
4893 | + filename, name, firstlineno, lnotab, freevars, cellvars) = codetup |
4894 | + else: |
4895 | + (argcount, kwonlyargcount, nlocals, stacksize, flags, code, consts, names, varnames, |
4896 | + filename, name, firstlineno, lnotab, freevars, cellvars) = codetup |
4897 | + posonlyargcount = 0 |
4898 | else: |
4899 | - (argcnt, nloc, stk, flg, codestr, consts, names, varnames, filename, name, |
4900 | - firstlineno, lnotab, freevars, cellvars) = codetup |
4901 | + (argcount, nlocals, stacksize, flags, code, consts, names, varnames, |
4902 | + filename, name, firstlineno, lnotab, freevars, cellvars) = codetup |
4903 | |
4904 | consts2 = [] |
4905 | for const in consts: |
4906 | @@ -117,13 +132,18 @@ def _import_codetup(codetup): |
4907 | consts2.append(_import_codetup(const[1])) |
4908 | else: |
4909 | consts2.append(const) |
4910 | - |
4911 | - if is_py3k: |
4912 | - return CodeType(argcnt, kwargcnt, nloc, stk, flg, codestr, tuple(consts2), names, varnames, filename, name, |
4913 | - firstlineno, lnotab, freevars, cellvars) |
4914 | + consts = tuple(consts2) |
4915 | + if is_py_gte38: |
4916 | + codetup = (argcount, posonlyargcount, kwonlyargcount, nlocals, stacksize, flags, code, consts, names, varnames, |
4917 | + filename, name, firstlineno, lnotab, freevars, cellvars) |
4918 | + elif is_py_3k: |
4919 | + codetup = (argcount, kwonlyargcount, nlocals, stacksize, flags, code, consts, names, varnames, filename, name, |
4920 | + firstlineno, lnotab, freevars, cellvars) |
4921 | else: |
4922 | - return CodeType(argcnt, nloc, stk, flg, codestr, tuple(consts2), names, varnames, filename, name, |
4923 | - firstlineno, lnotab, freevars, cellvars) |
4924 | + codetup = (argcount, nlocals, stacksize, flags, code, consts, names, varnames, filename, name, firstlineno, |
4925 | + lnotab, freevars, cellvars) |
4926 | + return CodeType(*codetup) |
4927 | + |
4928 | |
4929 | def import_function(functup, globals=None, def_=True): |
4930 | name, modname, defaults, codetup = functup |
4931 | @@ -135,7 +155,7 @@ def import_function(functup, globals=None, def_=True): |
4932 | globals = mod.__dict__ |
4933 | # function globals must be real dicts, sadly: |
4934 | if isinstance(globals, netref.BaseNetref): |
4935 | - from rpyc.utils.classic import obtain |
4936 | + from plainbox.vendor.rpyc.utils.classic import obtain |
4937 | globals = obtain(globals) |
4938 | globals.setdefault('__builtins__', __builtins__) |
4939 | codeobj = _import_codetup(codetup) |
4940 | diff --git a/plainbox/vendor/rpyc/utils/zerodeploy.py b/plainbox/vendor/rpyc/utils/zerodeploy.py |
4941 | index bbe48bc..bbc4922 100644 |
4942 | --- a/plainbox/vendor/rpyc/utils/zerodeploy.py |
4943 | +++ b/plainbox/vendor/rpyc/utils/zerodeploy.py |
4944 | @@ -5,7 +5,7 @@ Requires [plumbum](http://plumbum.readthedocs.org/) |
4945 | """ |
4946 | from __future__ import with_statement |
4947 | import sys |
4948 | -import socket |
4949 | +import socket # noqa: F401 |
4950 | from plainbox.vendor.rpyc.lib.compat import BYTES_LITERAL |
4951 | from plainbox.vendor.rpyc.core.service import VoidService |
4952 | from plainbox.vendor.rpyc.core.stream import SocketStream |
4953 | @@ -13,6 +13,7 @@ import rpyc.utils.factory |
4954 | import rpyc.utils.classic |
4955 | try: |
4956 | from plumbum import local, ProcessExecutionError, CommandNotFound |
4957 | + from plumbum.commands.base import BoundCommand |
4958 | from plumbum.path import copy |
4959 | except ImportError: |
4960 | import inspect |
4961 | @@ -64,6 +65,7 @@ finally: |
4962 | thd.join(2) |
4963 | """ |
4964 | |
4965 | + |
4966 | class DeployedServer(object): |
4967 | """ |
4968 | Sets up a temporary, short-lived RPyC deployment on the given remote machine. It will: |
4969 | @@ -85,7 +87,8 @@ class DeployedServer(object): |
4970 | :param extra_setup: any extra code to add to the script |
4971 | """ |
4972 | |
4973 | - def __init__(self, remote_machine, server_class = "rpyc.utils.server.ThreadedServer", extra_setup = "", python_executable=None): |
4974 | + def __init__(self, remote_machine, server_class="rpyc.utils.server.ThreadedServer", |
4975 | + extra_setup="", python_executable=None): |
4976 | self.proc = None |
4977 | self.tun = None |
4978 | self.remote_machine = remote_machine |
4979 | @@ -98,8 +101,11 @@ class DeployedServer(object): |
4980 | |
4981 | script = (tmp / "deployed-rpyc.py") |
4982 | modname, clsname = server_class.rsplit(".", 1) |
4983 | - script.write(SERVER_SCRIPT.replace("$MODULE$", modname).replace("$SERVER$", clsname).replace("$EXTRA_SETUP$", extra_setup)) |
4984 | - if python_executable: |
4985 | + script.write(SERVER_SCRIPT.replace("$MODULE$", modname).replace( |
4986 | + "$SERVER$", clsname).replace("$EXTRA_SETUP$", extra_setup)) |
4987 | + if isinstance(python_executable, BoundCommand): |
4988 | + cmd = python_executable |
4989 | + elif python_executable: |
4990 | cmd = remote_machine[python_executable] |
4991 | else: |
4992 | major = sys.version_info[0] |
4993 | @@ -115,7 +121,7 @@ class DeployedServer(object): |
4994 | if not cmd: |
4995 | cmd = remote_machine.python |
4996 | |
4997 | - self.proc = cmd.popen(script, new_session = True) |
4998 | + self.proc = cmd.popen(script, new_session=True) |
4999 | |
5000 | line = "" |
Wonderful patchset. Landable as is, but see my comments for some potential improvements in fit and finish.