diff -Nru mapproxy-1.11.0/debian/changelog mapproxy-1.11.0/debian/changelog --- mapproxy-1.11.0/debian/changelog 2017-11-20 16:07:46.000000000 +0000 +++ mapproxy-1.11.0/debian/changelog 2019-07-14 14:00:00.000000000 +0000 @@ -1,3 +1,42 @@ +mapproxy (1.11.0-3~bionic2) bionic; urgency=medium + + * No change rebuild for GDAL 2.4.2 transition. + + -- Angelos Tzotsos <gcpp.kalxas@gmail.com>  Sun, 14 Jul 2019 16:00:00 +0200 + +mapproxy (1.11.0-3~bionic1) bionic; urgency=medium + + * No change rebuild for GDAL 2.4.0 transition. + + -- Angelos Tzotsos <gcpp.kalxas@gmail.com>  Wed, 27 Feb 2019 21:00:00 +0200 + +mapproxy (1.11.0-3~bionic0) bionic; urgency=medium + + * No change rebuild for Bionic. + + -- Angelos Tzotsos <gcpp.kalxas@gmail.com>  Sat, 17 Nov 2018 19:00:00 +0200 + +mapproxy (1.11.0-3) unstable; urgency=medium + + * Drop autopkgtest to test installability. + * Add lintian override for testsuite-autopkgtest-missing. + * Add lintian override for python-module-in-wrong-location. + + -- Bas Couwenberg <sebastic@debian.org>  Tue, 31 Jul 2018 21:42:22 +0200 + +mapproxy (1.11.0-2) unstable; urgency=medium + + * Update Vcs-* URLs for Salsa. + * Bump Standards-Version to 4.1.5, no changes. + * Drop ancient X-Python-Version field. + * Strip trailing whitespace from control & rules files. + * Add patch to rename async.py to async_.py for Python 3.7 compatibility. + * Add lintian overrides for embedded JS & fonts. + * Remove documentation outside usr/share/doc. + * Fix 'every time' typo. + + -- Bas Couwenberg <sebastic@debian.org>  Fri, 20 Jul 2018 19:11:25 +0200 + mapproxy (1.11.0-1) unstable; urgency=medium * New upstream release. 
diff -Nru mapproxy-1.11.0/debian/control mapproxy-1.11.0/debian/control --- mapproxy-1.11.0/debian/control 2017-11-20 15:50:01.000000000 +0000 +++ mapproxy-1.11.0/debian/control 2018-07-20 17:53:56.000000000 +0000 @@ -36,11 +36,10 @@ docbook-xsl, docbook-xml, xsltproc -Standards-Version: 4.1.1 -Vcs-Browser: https://anonscm.debian.org/cgit/pkg-grass/mapproxy.git -Vcs-Git: https://anonscm.debian.org/git/pkg-grass/mapproxy.git +Standards-Version: 4.1.5 +Vcs-Browser: https://salsa.debian.org/debian-gis-team/mapproxy +Vcs-Git: https://salsa.debian.org/debian-gis-team/mapproxy.git Homepage: http://mapproxy.org/ -X-Python-Version: >= 2.7 Package: mapproxy Architecture: all @@ -126,4 +125,3 @@ fully compliant WMS server supporting any WMS client (desktop and web). . This package provides the MapProxy documentation. - diff -Nru mapproxy-1.11.0/debian/man/mapproxy-util-autoconfig.1.xml mapproxy-1.11.0/debian/man/mapproxy-util-autoconfig.1.xml --- mapproxy-1.11.0/debian/man/mapproxy-util-autoconfig.1.xml 2016-08-22 12:38:54.000000000 +0000 +++ mapproxy-1.11.0/debian/man/mapproxy-util-autoconfig.1.xml 2018-07-20 17:55:29.000000000 +0000 @@ -164,7 +164,7 @@ define another coverage, disable featureinfo, etc. You can do this by editing the output file of course, or you can modify the output by defining all changes to an overwrite file. - Overwrite files are applied everytime you call + Overwrite files are applied every time you call mapproxy-util autoconfig. 
diff -Nru mapproxy-1.11.0/debian/mapproxy-doc.lintian-overrides mapproxy-1.11.0/debian/mapproxy-doc.lintian-overrides --- mapproxy-1.11.0/debian/mapproxy-doc.lintian-overrides 1970-01-01 00:00:00.000000000 +0000 +++ mapproxy-1.11.0/debian/mapproxy-doc.lintian-overrides 2018-07-20 17:53:56.000000000 +0000 @@ -0,0 +1,8 @@ +# libjs-twitter-bootstrap is not compatible +embedded-javascript-library usr/share/doc/mapproxy/html/_static/bootstrap-*/js/bootstrap.js please use libjs-twitter-bootstrap +font-in-non-font-package usr/share/doc/mapproxy/html/_static/boot*/fonts/* +font-outside-font-dir usr/share/doc/mapproxy/html/_static/boots*/fonts/* + +# libjs-jquery is not compatible +embedded-javascript-library usr/share/doc/mapproxy/html/_static/js/jquery* please use libjs-jquery + diff -Nru mapproxy-1.11.0/debian/patches/python3.7-async.patch mapproxy-1.11.0/debian/patches/python3.7-async.patch --- mapproxy-1.11.0/debian/patches/python3.7-async.patch 1970-01-01 00:00:00.000000000 +0000 +++ mapproxy-1.11.0/debian/patches/python3.7-async.patch 2018-07-23 12:47:55.000000000 +0000 @@ -0,0 +1,827 @@ +Description: Rename async.py to async_.py to support Python 3.7. + async became a reserved keyword in Python 3.7. +Author: Bas Couwenberg +Forwarded: https://github.com/mapproxy/mapproxy/pull/372 +Applied-Upstream: https://github.com/mapproxy/mapproxy/commit/ee5c9589274610e43e34f31ac067e949531d9d8e + +--- a/mapproxy/util/async.py ++++ /dev/null +@@ -1,343 +0,0 @@ +-# This file is part of the MapProxy project. +-# Copyright (C) 2011 Omniscale +-# +-# Licensed under the Apache License, Version 2.0 (the "License"); +-# you may not use this file except in compliance with the License. 
+-# You may obtain a copy of the License at +-# +-# http://www.apache.org/licenses/LICENSE-2.0 +-# +-# Unless required by applicable law or agreed to in writing, software +-# distributed under the License is distributed on an "AS IS" BASIS, +-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +-# See the License for the specific language governing permissions and +-# limitations under the License. +- +- +-MAX_MAP_ASYNC_THREADS = 20 +- +-try: +- import Queue +-except ImportError: +- import queue as Queue +- +-import sys +-import threading +- +-try: +- import eventlet +- import eventlet.greenpool +- import eventlet.tpool +- import eventlet.patcher +- _has_eventlet = True +- +- import eventlet.debug +- eventlet.debug.hub_exceptions(False) +- +-except ImportError: +- _has_eventlet = False +- +-from mapproxy.config import base_config +-from mapproxy.config import local_base_config +-from mapproxy.compat import PY2 +- +-import logging +-log_system = logging.getLogger('mapproxy.system') +- +-class AsyncResult(object): +- def __init__(self, result=None, exception=None): +- self.result = result +- self.exception = exception +- +- def __repr__(self): +- return "" % ( +- self.result, self.exception) +- +- +-def _result_iter(results, use_result_objects=False): +- for result in results: +- if use_result_objects: +- exception = None +- if (isinstance(result, tuple) and len(result) == 3 and +- isinstance(result[1], Exception)): +- exception = result +- result = None +- yield AsyncResult(result, exception) +- else: +- yield result +- +-class EventletPool(object): +- def __init__(self, size=100): +- self.size = size +- self.base_config = base_config() +- +- def shutdown(self, force=False): +- # there is not way to stop a GreenPool +- pass +- +- def map(self, func, *args, **kw): +- return list(self.imap(func, *args, **kw)) +- +- def imap(self, func, *args, **kw): +- use_result_objects = kw.get('use_result_objects', False) +- def call(*args): +- with 
local_base_config(self.base_config): +- try: +- return func(*args) +- except Exception: +- if use_result_objects: +- return sys.exc_info() +- else: +- raise +- if len(args[0]) == 1: +- eventlet.sleep() +- return _result_iter([call(*list(zip(*args))[0])], use_result_objects) +- pool = eventlet.greenpool.GreenPool(self.size) +- return _result_iter(pool.imap(call, *args), use_result_objects) +- +- def starmap(self, func, args, **kw): +- use_result_objects = kw.get('use_result_objects', False) +- def call(*args): +- with local_base_config(self.base_config): +- try: +- return func(*args) +- except Exception: +- if use_result_objects: +- return sys.exc_info() +- else: +- raise +- if len(args) == 1: +- eventlet.sleep() +- return _result_iter([call(*args[0])], use_result_objects) +- pool = eventlet.greenpool.GreenPool(self.size) +- return _result_iter(pool.starmap(call, args), use_result_objects) +- +- def starcall(self, args, **kw): +- use_result_objects = kw.get('use_result_objects', False) +- def call(func, *args): +- with local_base_config(self.base_config): +- try: +- return func(*args) +- except Exception: +- if use_result_objects: +- return sys.exc_info() +- else: +- raise +- if len(args) == 1: +- eventlet.sleep() +- return _result_iter([call(args[0][0], *args[0][1:])], use_result_objects) +- pool = eventlet.greenpool.GreenPool(self.size) +- return _result_iter(pool.starmap(call, args), use_result_objects) +- +- +-class ThreadWorker(threading.Thread): +- def __init__(self, task_queue, result_queue): +- threading.Thread.__init__(self) +- self.task_queue = task_queue +- self.result_queue = result_queue +- self.base_config = base_config() +- def run(self): +- with local_base_config(self.base_config): +- while True: +- task = self.task_queue.get() +- if task is None: +- self.task_queue.task_done() +- break +- exec_id, func, args = task +- try: +- result = func(*args) +- except Exception: +- result = sys.exc_info() +- self.result_queue.put((exec_id, result)) +- 
self.task_queue.task_done() +- +- +-def _consume_queue(queue): +- """ +- Get all items from queue. +- """ +- while not queue.empty(): +- try: +- queue.get(block=False) +- queue.task_done() +- except Queue.Empty: +- pass +- +- +-class ThreadPool(object): +- def __init__(self, size=4): +- self.pool_size = size +- self.task_queue = Queue.Queue() +- self.result_queue = Queue.Queue() +- self.pool = None +- def map_each(self, func_args, raise_exceptions): +- """ +- args should be a list of function arg tuples. +- map_each calls each function with the given arg. +- """ +- if self.pool_size < 2: +- for func, arg in func_args: +- try: +- yield func(*arg) +- except Exception: +- yield sys.exc_info() +- raise StopIteration() +- +- self.pool = self._init_pool() +- +- i = 0 +- for i, (func, arg) in enumerate(func_args): +- self.task_queue.put((i, func, arg)) +- +- results = {} +- +- next_result = 0 +- for value in self._get_results(next_result, results, raise_exceptions): +- yield value +- next_result += 1 +- +- self.task_queue.join() +- for value in self._get_results(next_result, results, raise_exceptions): +- yield value +- next_result += 1 +- +- self.shutdown() +- +- def _single_call(self, func, args, use_result_objects): +- try: +- result = func(*args) +- except Exception: +- if not use_result_objects: +- raise +- result = sys.exc_info() +- return _result_iter([result], use_result_objects) +- +- def map(self, func, *args, **kw): +- return list(self.imap(func, *args, **kw)) +- +- def imap(self, func, *args, **kw): +- use_result_objects = kw.get('use_result_objects', False) +- if len(args[0]) == 1: +- return self._single_call(func, next(iter(zip(*args))), use_result_objects) +- return _result_iter(self.map_each([(func, arg) for arg in zip(*args)], raise_exceptions=not use_result_objects), +- use_result_objects) +- +- def starmap(self, func, args, **kw): +- use_result_objects = kw.get('use_result_objects', False) +- if len(args[0]) == 1: +- return self._single_call(func, 
args[0], use_result_objects) +- +- return _result_iter(self.map_each([(func, arg) for arg in args], raise_exceptions=not use_result_objects), +- use_result_objects) +- +- def starcall(self, args, **kw): +- def call(func, *args): +- return func(*args) +- return self.starmap(call, args, **kw) +- +- def _get_results(self, next_result, results, raise_exceptions): +- for i, value in self._fetch_results(raise_exceptions): +- if i == next_result: +- yield value +- next_result += 1 +- while next_result in results: +- yield results.pop(next_result) +- next_result += 1 +- else: +- results[i] = value +- +- def _fetch_results(self, raise_exceptions): +- while not self.task_queue.empty() or not self.result_queue.empty(): +- task_result = self.result_queue.get() +- if (raise_exceptions and isinstance(task_result[1], tuple) and +- len(task_result[1]) == 3 and +- isinstance(task_result[1][1], Exception)): +- self.shutdown(force=True) +- exc_class, exc, tb = task_result[1] +- if PY2: +- exec('raise exc_class, exc, tb') +- else: +- raise exc.with_traceback(tb) +- yield task_result +- +- def shutdown(self, force=False): +- """ +- Send shutdown sentinel to all executor threads. If `force` is True, +- clean task_queue and result_queue. 
+- """ +- if force: +- _consume_queue(self.task_queue) +- _consume_queue(self.result_queue) +- for _ in range(self.pool_size): +- self.task_queue.put(None) +- +- def _init_pool(self): +- if self.pool_size < 2: +- return [] +- pool = [] +- for _ in range(self.pool_size): +- t = ThreadWorker(self.task_queue, self.result_queue) +- t.daemon = True +- t.start() +- pool.append(t) +- return pool +- +- +-def imap_async_eventlet(func, *args): +- pool = EventletPool() +- return pool.imap(func, *args) +- +-def imap_async_threaded(func, *args): +- pool = ThreadPool(min(len(args[0]), MAX_MAP_ASYNC_THREADS)) +- return pool.imap(func, *args) +- +-def starmap_async_eventlet(func, args): +- pool = EventletPool() +- return pool.starmap(func, args) +- +-def starmap_async_threaded(func, args): +- pool = ThreadPool(min(len(args[0]), MAX_MAP_ASYNC_THREADS)) +- return pool.starmap(func, args) +- +-def starcall_async_eventlet(args): +- pool = EventletPool() +- return pool.starcall(args) +- +-def starcall_async_threaded(args): +- pool = ThreadPool(min(len(args[0]), MAX_MAP_ASYNC_THREADS)) +- return pool.starcall(args) +- +- +-def run_non_blocking_eventlet(func, args, kw={}): +- return eventlet.tpool.execute(func, *args, **kw) +- +-def run_non_blocking_threaded(func, args, kw={}): +- return func(*args, **kw) +- +- +-def import_module(module): +- """ +- Import ``module``. Import patched version if eventlet is used. 
+- """ +- if uses_eventlet: +- return eventlet.import_patched(module) +- else: +- return __import__(module) +- +-uses_eventlet = False +- +-# socket should be monkey patched when MapProxy runs inside eventlet +-if _has_eventlet and eventlet.patcher.is_monkey_patched('socket'): +- uses_eventlet = True +- log_system.info('using eventlet for asynchronous operations') +- imap = imap_async_eventlet +- starmap = starmap_async_eventlet +- starcall = starcall_async_eventlet +- Pool = EventletPool +- run_non_blocking = run_non_blocking_eventlet +-else: +- imap = imap_async_threaded +- starmap = starmap_async_threaded +- starcall = starcall_async_threaded +- Pool = ThreadPool +- run_non_blocking = run_non_blocking_threaded +--- /dev/null ++++ b/mapproxy/util/async_.py +@@ -0,0 +1,343 @@ ++# This file is part of the MapProxy project. ++# Copyright (C) 2011 Omniscale ++# ++# Licensed under the Apache License, Version 2.0 (the "License"); ++# you may not use this file except in compliance with the License. ++# You may obtain a copy of the License at ++# ++# http://www.apache.org/licenses/LICENSE-2.0 ++# ++# Unless required by applicable law or agreed to in writing, software ++# distributed under the License is distributed on an "AS IS" BASIS, ++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++# See the License for the specific language governing permissions and ++# limitations under the License. 
++ ++ ++MAX_MAP_ASYNC_THREADS = 20 ++ ++try: ++ import Queue ++except ImportError: ++ import queue as Queue ++ ++import sys ++import threading ++ ++try: ++ import eventlet ++ import eventlet.greenpool ++ import eventlet.tpool ++ import eventlet.patcher ++ _has_eventlet = True ++ ++ import eventlet.debug ++ eventlet.debug.hub_exceptions(False) ++ ++except ImportError: ++ _has_eventlet = False ++ ++from mapproxy.config import base_config ++from mapproxy.config import local_base_config ++from mapproxy.compat import PY2 ++ ++import logging ++log_system = logging.getLogger('mapproxy.system') ++ ++class AsyncResult(object): ++ def __init__(self, result=None, exception=None): ++ self.result = result ++ self.exception = exception ++ ++ def __repr__(self): ++ return "" % ( ++ self.result, self.exception) ++ ++ ++def _result_iter(results, use_result_objects=False): ++ for result in results: ++ if use_result_objects: ++ exception = None ++ if (isinstance(result, tuple) and len(result) == 3 and ++ isinstance(result[1], Exception)): ++ exception = result ++ result = None ++ yield AsyncResult(result, exception) ++ else: ++ yield result ++ ++class EventletPool(object): ++ def __init__(self, size=100): ++ self.size = size ++ self.base_config = base_config() ++ ++ def shutdown(self, force=False): ++ # there is not way to stop a GreenPool ++ pass ++ ++ def map(self, func, *args, **kw): ++ return list(self.imap(func, *args, **kw)) ++ ++ def imap(self, func, *args, **kw): ++ use_result_objects = kw.get('use_result_objects', False) ++ def call(*args): ++ with local_base_config(self.base_config): ++ try: ++ return func(*args) ++ except Exception: ++ if use_result_objects: ++ return sys.exc_info() ++ else: ++ raise ++ if len(args[0]) == 1: ++ eventlet.sleep() ++ return _result_iter([call(*list(zip(*args))[0])], use_result_objects) ++ pool = eventlet.greenpool.GreenPool(self.size) ++ return _result_iter(pool.imap(call, *args), use_result_objects) ++ ++ def starmap(self, func, args, **kw): 
++ use_result_objects = kw.get('use_result_objects', False) ++ def call(*args): ++ with local_base_config(self.base_config): ++ try: ++ return func(*args) ++ except Exception: ++ if use_result_objects: ++ return sys.exc_info() ++ else: ++ raise ++ if len(args) == 1: ++ eventlet.sleep() ++ return _result_iter([call(*args[0])], use_result_objects) ++ pool = eventlet.greenpool.GreenPool(self.size) ++ return _result_iter(pool.starmap(call, args), use_result_objects) ++ ++ def starcall(self, args, **kw): ++ use_result_objects = kw.get('use_result_objects', False) ++ def call(func, *args): ++ with local_base_config(self.base_config): ++ try: ++ return func(*args) ++ except Exception: ++ if use_result_objects: ++ return sys.exc_info() ++ else: ++ raise ++ if len(args) == 1: ++ eventlet.sleep() ++ return _result_iter([call(args[0][0], *args[0][1:])], use_result_objects) ++ pool = eventlet.greenpool.GreenPool(self.size) ++ return _result_iter(pool.starmap(call, args), use_result_objects) ++ ++ ++class ThreadWorker(threading.Thread): ++ def __init__(self, task_queue, result_queue): ++ threading.Thread.__init__(self) ++ self.task_queue = task_queue ++ self.result_queue = result_queue ++ self.base_config = base_config() ++ def run(self): ++ with local_base_config(self.base_config): ++ while True: ++ task = self.task_queue.get() ++ if task is None: ++ self.task_queue.task_done() ++ break ++ exec_id, func, args = task ++ try: ++ result = func(*args) ++ except Exception: ++ result = sys.exc_info() ++ self.result_queue.put((exec_id, result)) ++ self.task_queue.task_done() ++ ++ ++def _consume_queue(queue): ++ """ ++ Get all items from queue. 
++ """ ++ while not queue.empty(): ++ try: ++ queue.get(block=False) ++ queue.task_done() ++ except Queue.Empty: ++ pass ++ ++ ++class ThreadPool(object): ++ def __init__(self, size=4): ++ self.pool_size = size ++ self.task_queue = Queue.Queue() ++ self.result_queue = Queue.Queue() ++ self.pool = None ++ def map_each(self, func_args, raise_exceptions): ++ """ ++ args should be a list of function arg tuples. ++ map_each calls each function with the given arg. ++ """ ++ if self.pool_size < 2: ++ for func, arg in func_args: ++ try: ++ yield func(*arg) ++ except Exception: ++ yield sys.exc_info() ++ raise StopIteration() ++ ++ self.pool = self._init_pool() ++ ++ i = 0 ++ for i, (func, arg) in enumerate(func_args): ++ self.task_queue.put((i, func, arg)) ++ ++ results = {} ++ ++ next_result = 0 ++ for value in self._get_results(next_result, results, raise_exceptions): ++ yield value ++ next_result += 1 ++ ++ self.task_queue.join() ++ for value in self._get_results(next_result, results, raise_exceptions): ++ yield value ++ next_result += 1 ++ ++ self.shutdown() ++ ++ def _single_call(self, func, args, use_result_objects): ++ try: ++ result = func(*args) ++ except Exception: ++ if not use_result_objects: ++ raise ++ result = sys.exc_info() ++ return _result_iter([result], use_result_objects) ++ ++ def map(self, func, *args, **kw): ++ return list(self.imap(func, *args, **kw)) ++ ++ def imap(self, func, *args, **kw): ++ use_result_objects = kw.get('use_result_objects', False) ++ if len(args[0]) == 1: ++ return self._single_call(func, next(iter(zip(*args))), use_result_objects) ++ return _result_iter(self.map_each([(func, arg) for arg in zip(*args)], raise_exceptions=not use_result_objects), ++ use_result_objects) ++ ++ def starmap(self, func, args, **kw): ++ use_result_objects = kw.get('use_result_objects', False) ++ if len(args[0]) == 1: ++ return self._single_call(func, args[0], use_result_objects) ++ ++ return _result_iter(self.map_each([(func, arg) for arg in args], 
raise_exceptions=not use_result_objects), ++ use_result_objects) ++ ++ def starcall(self, args, **kw): ++ def call(func, *args): ++ return func(*args) ++ return self.starmap(call, args, **kw) ++ ++ def _get_results(self, next_result, results, raise_exceptions): ++ for i, value in self._fetch_results(raise_exceptions): ++ if i == next_result: ++ yield value ++ next_result += 1 ++ while next_result in results: ++ yield results.pop(next_result) ++ next_result += 1 ++ else: ++ results[i] = value ++ ++ def _fetch_results(self, raise_exceptions): ++ while not self.task_queue.empty() or not self.result_queue.empty(): ++ task_result = self.result_queue.get() ++ if (raise_exceptions and isinstance(task_result[1], tuple) and ++ len(task_result[1]) == 3 and ++ isinstance(task_result[1][1], Exception)): ++ self.shutdown(force=True) ++ exc_class, exc, tb = task_result[1] ++ if PY2: ++ exec('raise exc_class, exc, tb') ++ else: ++ raise exc.with_traceback(tb) ++ yield task_result ++ ++ def shutdown(self, force=False): ++ """ ++ Send shutdown sentinel to all executor threads. If `force` is True, ++ clean task_queue and result_queue. 
++ """ ++ if force: ++ _consume_queue(self.task_queue) ++ _consume_queue(self.result_queue) ++ for _ in range(self.pool_size): ++ self.task_queue.put(None) ++ ++ def _init_pool(self): ++ if self.pool_size < 2: ++ return [] ++ pool = [] ++ for _ in range(self.pool_size): ++ t = ThreadWorker(self.task_queue, self.result_queue) ++ t.daemon = True ++ t.start() ++ pool.append(t) ++ return pool ++ ++ ++def imap_async_eventlet(func, *args): ++ pool = EventletPool() ++ return pool.imap(func, *args) ++ ++def imap_async_threaded(func, *args): ++ pool = ThreadPool(min(len(args[0]), MAX_MAP_ASYNC_THREADS)) ++ return pool.imap(func, *args) ++ ++def starmap_async_eventlet(func, args): ++ pool = EventletPool() ++ return pool.starmap(func, args) ++ ++def starmap_async_threaded(func, args): ++ pool = ThreadPool(min(len(args[0]), MAX_MAP_ASYNC_THREADS)) ++ return pool.starmap(func, args) ++ ++def starcall_async_eventlet(args): ++ pool = EventletPool() ++ return pool.starcall(args) ++ ++def starcall_async_threaded(args): ++ pool = ThreadPool(min(len(args[0]), MAX_MAP_ASYNC_THREADS)) ++ return pool.starcall(args) ++ ++ ++def run_non_blocking_eventlet(func, args, kw={}): ++ return eventlet.tpool.execute(func, *args, **kw) ++ ++def run_non_blocking_threaded(func, args, kw={}): ++ return func(*args, **kw) ++ ++ ++def import_module(module): ++ """ ++ Import ``module``. Import patched version if eventlet is used. 
++ """ ++ if uses_eventlet: ++ return eventlet.import_patched(module) ++ else: ++ return __import__(module) ++ ++uses_eventlet = False ++ ++# socket should be monkey patched when MapProxy runs inside eventlet ++if _has_eventlet and eventlet.patcher.is_monkey_patched('socket'): ++ uses_eventlet = True ++ log_system.info('using eventlet for asynchronous operations') ++ imap = imap_async_eventlet ++ starmap = starmap_async_eventlet ++ starcall = starcall_async_eventlet ++ Pool = EventletPool ++ run_non_blocking = run_non_blocking_eventlet ++else: ++ imap = imap_async_threaded ++ starmap = starmap_async_threaded ++ starcall = starcall_async_threaded ++ Pool = ThreadPool ++ run_non_blocking = run_non_blocking_threaded +--- a/mapproxy/service/wms.py ++++ b/mapproxy/service/wms.py +@@ -33,7 +33,7 @@ from mapproxy.image.opts import ImageOpt + from mapproxy.image.message import attribution_image, message_image + from mapproxy.layer import BlankImage, MapQuery, InfoQuery, LegendQuery, MapError, LimitedLayer + from mapproxy.layer import MapBBOXError, merge_layer_extents, merge_layer_res_ranges +-from mapproxy.util import async ++from mapproxy.util import async_ + from mapproxy.util.py import cached_property, reraise + from mapproxy.util.coverage import load_limited_to + from mapproxy.util.ext.odict import odict +@@ -568,7 +568,7 @@ class LayerRenderer(object): + render_layers = combined_layers(self.layers, self.query) + if not render_layers: return + +- async_pool = async.Pool(size=min(len(render_layers), self.concurrent_rendering)) ++ async_pool = async_.Pool(size=min(len(render_layers), self.concurrent_rendering)) + + if self.raise_source_errors: + return self._render_raise_exceptions(async_pool, render_layers, layer_merger) +--- a/mapproxy/client/cgi.py ++++ b/mapproxy/client/cgi.py +@@ -26,7 +26,7 @@ from mapproxy.source import SourceError + from mapproxy.image import ImageSource + from mapproxy.client.http import HTTPClientError + from mapproxy.client.log import 
log_request +-from mapproxy.util.async import import_module ++from mapproxy.util.async_ import import_module + from mapproxy.compat.modules import urlparse + from mapproxy.compat import BytesIO + +--- a/mapproxy/cache/s3.py ++++ b/mapproxy/cache/s3.py +@@ -22,7 +22,7 @@ import threading + from mapproxy.image import ImageSource + from mapproxy.cache import path + from mapproxy.cache.base import tile_buffer, TileCacheBase +-from mapproxy.util import async ++from mapproxy.util import async_ + from mapproxy.util.py import reraise_exception + + try: +@@ -111,7 +111,7 @@ class S3Cache(TileCacheBase): + return True + + def load_tiles(self, tiles, with_metadata=True): +- p = async.Pool(min(4, len(tiles))) ++ p = async_.Pool(min(4, len(tiles))) + return all(p.map(self.load_tile, tiles)) + + def load_tile(self, tile, with_metadata=True): +@@ -139,7 +139,7 @@ class S3Cache(TileCacheBase): + self.conn().delete_object(Bucket=self.bucket_name, Key=key) + + def store_tiles(self, tiles): +- p = async.Pool(min(self._concurrent_writer, len(tiles))) ++ p = async_.Pool(min(self._concurrent_writer, len(tiles))) + p.map(self.store_tile, tiles) + + def store_tile(self, tile): +--- a/mapproxy/test/unit/test_async.py ++++ b/mapproxy/test/unit/test_async.py +@@ -17,7 +17,7 @@ from __future__ import print_function + + import time + import threading +-from mapproxy.util.async import imap_async_threaded, ThreadPool ++from mapproxy.util.async_ import imap_async_threaded, ThreadPool + + from nose.tools import eq_ + from nose.plugins.skip import SkipTest +@@ -49,7 +49,7 @@ class TestThreaded(object): + + try: + import eventlet +- from mapproxy.util.async import imap_async_eventlet, EventletPool ++ from mapproxy.util.async_ import imap_async_eventlet, EventletPool + _has_eventlet = True + except ImportError: + _has_eventlet = False +--- a/mapproxy/cache/tile.py ++++ b/mapproxy/cache/tile.py +@@ -42,7 +42,7 @@ from mapproxy.grid import MetaGrid + from mapproxy.image.merge import merge_images + from 
mapproxy.image.tile import TileSplitter + from mapproxy.layer import MapQuery, BlankImage +-from mapproxy.util import async ++from mapproxy.util import async_ + from mapproxy.util.py import reraise + + class TileManager(object): +@@ -250,7 +250,7 @@ class TileCreator(object): + + def _create_threaded(self, create_func, tiles): + result = [] +- async_pool = async.Pool(self.tile_mgr.concurrent_tile_creators) ++ async_pool = async_.Pool(self.tile_mgr.concurrent_tile_creators) + for new_tiles in async_pool.imap(create_func, tiles): + result.extend(new_tiles) + return result +@@ -303,7 +303,7 @@ class TileCreator(object): + return (img, source.coverage) + + layers = [] +- for layer in async.imap(get_map_from_source, self.sources): ++ for layer in async_.imap(get_map_from_source, self.sources): + if layer[0] is not None: + layers.append(layer) + +@@ -358,7 +358,7 @@ class TileCreator(object): + main_tile = Tile(meta_tile.main_tile_coord) + with self.tile_mgr.lock(main_tile): + if not all(self.is_cached(t) for t in meta_tile.tiles if t is not None): +- async_pool = async.Pool(self.tile_mgr.concurrent_tile_creators) ++ async_pool = async_.Pool(self.tile_mgr.concurrent_tile_creators) + def query_tile(coord): + try: + query = MapQuery(self.grid.tile_bbox(coord), tile_size, self.grid.srs, self.tile_mgr.request_format, +--- a/mapproxy/source/mapnik.py ++++ b/mapproxy/source/mapnik.py +@@ -26,7 +26,7 @@ from mapproxy.layer import MapExtent, De + from mapproxy.source import SourceError + from mapproxy.client.log import log_request + from mapproxy.util.py import reraise_exception +-from mapproxy.util.async import run_non_blocking ++from mapproxy.util.async_ import run_non_blocking + from mapproxy.compat import BytesIO + + try: diff -Nru mapproxy-1.11.0/debian/patches/series mapproxy-1.11.0/debian/patches/series --- mapproxy-1.11.0/debian/patches/series 2017-05-18 17:12:53.000000000 +0000 +++ mapproxy-1.11.0/debian/patches/series 2018-07-20 17:55:29.000000000 +0000 @@ -1,3 +1,5 @@ 
offline-tests.patch disable-tag_date.patch skip-tests-for-missing-files.patch +python3.7-async.patch +spelling-errors.patch diff -Nru mapproxy-1.11.0/debian/patches/spelling-errors.patch mapproxy-1.11.0/debian/patches/spelling-errors.patch --- mapproxy-1.11.0/debian/patches/spelling-errors.patch 1970-01-01 00:00:00.000000000 +0000 +++ mapproxy-1.11.0/debian/patches/spelling-errors.patch 2018-07-23 12:42:07.000000000 +0000 @@ -0,0 +1,31 @@ +Description: Fix spelling errors + * everytime -> every time +Author: Bas Couwenberg +Forwarded: https://github.com/mapproxy/mapproxy/pull/373 +Applied-Upstream: https://github.com/mapproxy/mapproxy/commit/f8c32bfae05f29b49e9c7b2f4fccdbabdec58ff1 + +--- a/doc/mapproxy_util_autoconfig.rst ++++ b/doc/mapproxy_util_autoconfig.rst +@@ -78,7 +78,7 @@ Write MapProxy configuration with caches + Overwrites + ========== + +-It's likely that you need to tweak the created configuration – e.g. to define another coverage, disable featureinfo, etc. You can do this by editing the output file of course, or you can modify the output by defining all changes to an overwrite file. Overwrite files are applied everytime you call ``mapproxy-util autoconfig``. ++It's likely that you need to tweak the created configuration – e.g. to define another coverage, disable featureinfo, etc. You can do this by editing the output file of course, or you can modify the output by defining all changes to an overwrite file. Overwrite files are applied every time you call ``mapproxy-util autoconfig``. + + Overwrites are YAML files that will be merged with the created configuration file. + +--- a/doc/seed.rst ++++ b/doc/seed.rst +@@ -411,9 +411,9 @@ Example: Background seeding + The ``--duration`` option allows you run MapProxy seeding for a limited time. In combination with the ``--continue`` option, you can resume the seeding process at a later time. + You can use this to call ``mapproxy-seed`` with ``cron`` to seed in the off-hours. 
+ +-However, this will restart the seeding process from the begining everytime the is seeding completed. ++However, this will restart the seeding process from the beginning every time the seeding is completed. + You can prevent this with the ``--reeseed-interval`` and ``--reseed-file`` option. +-The follwing example starts seeding for six hours. It will seed for another six hours, everytime you call this command again. Once all seed and cleanup tasks were proccessed the command will exit immediately everytime you call it within 14 days after the first call. After 14 days, the modification time of the ``reseed.time`` file will be updated and the re-seeding process starts again. ++The following example starts seeding for six hours. It will seed for another six hours, every time you call this command again. Once all seed and cleanup tasks were processed the command will exit immediately every time you call it within 14 days after the first call. After 14 days, the modification time of the ``reseed.time`` file will be updated and the re-seeding process starts again. + + :: + diff -Nru mapproxy-1.11.0/debian/python3-mapproxy.lintian-overrides mapproxy-1.11.0/debian/python3-mapproxy.lintian-overrides --- mapproxy-1.11.0/debian/python3-mapproxy.lintian-overrides 1970-01-01 00:00:00.000000000 +0000 +++ mapproxy-1.11.0/debian/python3-mapproxy.lintian-overrides 2018-07-31 19:42:22.000000000 +0000 @@ -0,0 +1,3 @@ +# False positive? dh_python3 should do the right thing. 
+python-module-in-wrong-location usr/lib/python*/dist-packages/mapproxy/* usr/lib/python3/dist-packages/mapproxy/* + diff -Nru mapproxy-1.11.0/debian/rules mapproxy-1.11.0/debian/rules --- mapproxy-1.11.0/debian/rules 2017-11-20 15:49:37.000000000 +0000 +++ mapproxy-1.11.0/debian/rules 2018-07-20 17:53:56.000000000 +0000 @@ -114,3 +114,4 @@ override_dh_install: dh_install --list-missing + $(RM) debian/*/usr/share/python*-mapproxy/test/schemas/*/*/ReadMe.txt diff -Nru mapproxy-1.11.0/debian/source/lintian-overrides mapproxy-1.11.0/debian/source/lintian-overrides --- mapproxy-1.11.0/debian/source/lintian-overrides 1970-01-01 00:00:00.000000000 +0000 +++ mapproxy-1.11.0/debian/source/lintian-overrides 2018-07-31 19:42:19.000000000 +0000 @@ -0,0 +1,3 @@ +# Not worth the effort +testsuite-autopkgtest-missing + diff -Nru mapproxy-1.11.0/debian/tests/control mapproxy-1.11.0/debian/tests/control --- mapproxy-1.11.0/debian/tests/control 2017-06-21 16:04:19.000000000 +0000 +++ mapproxy-1.11.0/debian/tests/control 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -# Test installability -Depends: @ -Test-Command: /bin/true