Merge lp:~jtaylor/ubuntu/trusty/python-numpy/new-upstream-20140124 into lp:ubuntu/trusty/python-numpy

Proposed by Julian Taylor
Status: Merged
Merge reported by: Martin Pitt
Merged at revision: not available
Proposed branch: lp:~jtaylor/ubuntu/trusty/python-numpy/new-upstream-20140124
Merge into: lp:ubuntu/trusty/python-numpy
Diff against target: 7113 lines (+326/-6020)
52 files modified
.pc/02_build_dotblas.patch/numpy/core/setup.py (+0/-990)
.pc/03_force_f2py_version.patch/numpy/f2py/setup.py (+0/-129)
.pc/10_use_local_python.org_object.inv_sphinx.diff/doc/source/conf.py (+0/-331)
.pc/20_disable-plot-extension.patch/doc/source/conf.py (+0/-331)
.pc/applied-patches (+0/-6)
.pc/ppc64el_cpu_config.patch/numpy/core/include/numpy/npy_cpu.h (+0/-111)
.pc/ppc64el_cpu_config.patch/numpy/core/include/numpy/npy_endian.h (+0/-47)
.pc/ppc64el_cpu_config.patch/numpy/core/setup.py (+0/-990)
.pc/ppc64el_cpu_config.patch/numpy/core/setup_common.py (+0/-308)
.pc/ppc64el_cpu_config.patch/numpy/core/src/npymath/ieee754.c.src (+0/-680)
.pc/ppc64el_cpu_config.patch/numpy/core/src/npymath/npy_math_private.h (+0/-518)
.pc/ppc64el_cpu_config.patch/numpy/core/src/private/npy_fpmath.h (+0/-48)
.pc/python3-soabi.patch/numpy/ctypeslib.py (+0/-426)
.pc/python3-soabi.patch/numpy/tests/test_ctypeslib.py (+0/-102)
PKG-INFO (+1/-1)
debian/changelog (+8/-0)
debian/control (+2/-1)
debian/rules (+3/-0)
doc/scipy-sphinx-theme/_theme/scipy/layout.html (+1/-2)
doc/scipy-sphinx-theme/_theme/scipy/static/js/bootstrap.min.js (+0/-7)
doc/scipy-sphinx-theme/_theme/scipy/static/js/copybutton.js (+60/-0)
doc/scipy-sphinx-theme/_theme/scipy/static/js/jquery.form.js (+0/-911)
doc/scipy-sphinx-theme/_theme/scipy/static/js/jquery.min.js (+0/-5)
doc/source/conf.py (+4/-3)
numpy/add_newdocs.py (+2/-0)
numpy/core/include/numpy/_numpyconfig.h.in (+1/-0)
numpy/core/include/numpy/ndarraytypes.h (+1/-0)
numpy/core/include/numpy/npy_3kcompat.h (+50/-9)
numpy/core/include/numpy/npy_common.h (+36/-1)
numpy/core/include/numpy/npy_cpu.h (+0/-3)
numpy/core/include/numpy/npy_endian.h (+1/-2)
numpy/core/setup.py (+8/-4)
numpy/core/setup_common.py (+2/-6)
numpy/core/src/multiarray/buffer.c (+2/-1)
numpy/core/src/multiarray/conversion_utils.c (+2/-2)
numpy/core/src/multiarray/ctors.c (+3/-3)
numpy/core/src/multiarray/mapping.c (+2/-2)
numpy/core/src/multiarray/methods.c (+3/-2)
numpy/core/src/multiarray/multiarraymodule.c (+20/-13)
numpy/core/src/multiarray/nditer_api.c (+3/-2)
numpy/core/src/npymath/ieee754.c.src (+1/-2)
numpy/core/src/npymath/npy_math_private.h (+1/-2)
numpy/core/src/private/npy_fpmath.h (+1/-4)
numpy/core/tests/test_indexing.py (+11/-0)
numpy/core/tests/test_multiarray.py (+54/-0)
numpy/ctypeslib.py (+0/-8)
numpy/f2py/setup.py (+1/-1)
numpy/lib/tests/test_format.py (+27/-0)
numpy/linalg/lapack_lite/python_xerbla.c (+8/-2)
numpy/random/setup.py (+5/-1)
numpy/tests/test_ctypeslib.py (+0/-1)
numpy/version.py (+2/-2)
To merge this branch: bzr merge lp:~jtaylor/ubuntu/trusty/python-numpy/new-upstream-20140124
Reviewer: Martin Pitt
Status: Approve
Review via email: mp+203159@code.launchpad.net

Description of the change

Bump to a newer snapshot of the maintenance branch, fixing a couple of minor issues and Python 3 file-handling bugs.
It should also fix the pandas build.

43. By Julian Taylor

bump to commit 95f7a469b1e9ce460e31c41e1bd897ceff396f6b

removes embedded jquery and bootstrap

Martin Pitt (pitti) wrote:

Thanks Julian. That certainly makes the pandas build better, although it is not working yet; two tests are still failing:

======================================================================
ERROR: test_gap_upsample (pandas.tseries.tests.test_plotting.TestTSPlot)
----------------------------------------------------------------------
Traceback (most recent call last):
  File "/tmp/pandas-0.12.0/debian/tmp/usr/lib/python2.7/dist-packages/pandas/tseries/tests/test_plotting.py", line 523, in test_gap_upsample
    ax = low.plot()
  File "/tmp/pandas-0.12.0/debian/tmp/usr/lib/python2.7/dist-packages/pandas/tools/plotting.py", line 1735, in plot_series
    plot_obj.generate()
  File "/tmp/pandas-0.12.0/debian/tmp/usr/lib/python2.7/dist-packages/pandas/tools/plotting.py", line 857, in generate
    self._make_plot()
  File "/tmp/pandas-0.12.0/debian/tmp/usr/lib/python2.7/dist-packages/pandas/tools/plotting.py", line 1241, in _make_plot
    self._make_ts_plot(data, **self.kwds)
  File "/tmp/pandas-0.12.0/debian/tmp/usr/lib/python2.7/dist-packages/pandas/tools/plotting.py", line 1312, in _make_ts_plot
    _plot(data, 0, ax, label, self.style, **kwds)
  File "/tmp/pandas-0.12.0/debian/tmp/usr/lib/python2.7/dist-packages/pandas/tools/plotting.py", line 1296, in _plot
    style=style, **kwds)
  File "/tmp/pandas-0.12.0/debian/tmp/usr/lib/python2.7/dist-packages/pandas/tseries/plotting.py", line 77, in tsplot
    lines = plotf(ax, *args, **kwargs)
  File "/usr/lib/pymodules/python2.7/matplotlib/axes.py", line 4137, in plot
    for line in self._get_lines(*args, **kwargs):
  File "/usr/lib/pymodules/python2.7/matplotlib/axes.py", line 317, in _grab_next_args
    for seg in self._plot_args(remaining, kwargs):
  File "/usr/lib/pymodules/python2.7/matplotlib/axes.py", line 295, in _plot_args
    x, y = self._xy_from_xy(x, y)
  File "/usr/lib/pymodules/python2.7/matplotlib/axes.py", line 214, in _xy_from_xy
    by = self.axes.yaxis.update_units(y)
  File "/usr/lib/pymodules/python2.7/matplotlib/axis.py", line 1336, in update_units
    converter = munits.registry.get_converter(data)
  File "/usr/lib/pymodules/python2.7/matplotlib/units.py", line 137, in get_converter
    xravel = x.ravel()
  File "/usr/lib/python2.7/dist-packages/numpy/ma/core.py", line 4025, in ravel
    r._mask = ndarray.ravel(self._mask).reshape(r.shape)
  File "/tmp/pandas-0.12.0/debian/tmp/usr/lib/python2.7/dist-packages/pandas/core/series.py", line 981, in reshape
    return ndarray.reshape(self, newshape, order)
TypeError: an integer is required

======================================================================
ERROR: test_gaps (pandas.tseries.tests.test_plotting.TestTSPlot)
----------------------------------------------------------------------
Traceback (most recent call last):
  File "/tmp/pandas-0.12.0/debian/tmp/usr/lib/python2.7/dist-packages/pandas/tseries/tests/test_plotting.py", line 480, in test_gaps
    ax = ts.plot()
  File "/tmp/pandas-0.12.0/debian/tmp/usr/lib/python2.7/dist-packages/pandas/tools/plotting.py", line 1735, in plot_series
    plot_obj.generate()
  File "/tmp/pandas-0.12.0/debian/tmp/usr/lib/python2.7/dist-packages/pandas/tools/plotting.py", line 857, in generat...


review: Approve
Julian Taylor (jtaylor) wrote:

You used pandas 0.12; pandas 0.13 in trusty-proposed is fixed:
https://jenkins.qa.ubuntu.com/view/Trusty/view/AutoPkgTest/job/trusty-adt-pandas/

What remains now is fixing or ignoring the statsmodels ARM failures.
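
For context, a minimal, hypothetical sketch of the failure mode shown in the traceback above. The BadSeries class below is a made-up stand-in for the old pandas Series (it is not the pandas code), assuming the problem is an ndarray subclass that forwards reshape()'s order argument positionally to ndarray.reshape, which only takes the shape positionally:

import numpy as np

class BadSeries(np.ndarray):
    # Hypothetical stand-in for the pandas 0.12 Series, not the real code.
    def reshape(self, newshape, order='C'):
        # ndarray.reshape treats extra positional arguments as additional
        # shape entries, so forwarding the order string positionally puts
        # it where an integer is expected.
        return np.ndarray.reshape(self, newshape, order)

s = np.arange(4).view(BadSeries)
try:
    # numpy internals (such as the masked-array ravel in the traceback)
    # call .reshape(shape) with a single tuple argument.
    s.reshape((2, 2))
except TypeError as exc:
    # On Python 2 this surfaced as "TypeError: an integer is required".
    print("reshape failed:", exc)

Presumably the pandas 0.13 fix avoids this positional forwarding, which would be why the build against the version in trusty-proposed succeeds.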

Preview Diff

1=== removed directory '.pc/02_build_dotblas.patch'
2=== removed directory '.pc/02_build_dotblas.patch/numpy'
3=== removed directory '.pc/02_build_dotblas.patch/numpy/core'
4=== removed file '.pc/02_build_dotblas.patch/numpy/core/setup.py'
5--- .pc/02_build_dotblas.patch/numpy/core/setup.py 2013-11-02 13:18:24 +0000
6+++ .pc/02_build_dotblas.patch/numpy/core/setup.py 1970-01-01 00:00:00 +0000
7@@ -1,990 +0,0 @@
8-from __future__ import division, print_function
9-
10-import imp
11-import os
12-import sys
13-import shutil
14-import pickle
15-import copy
16-import warnings
17-import re
18-from os.path import join
19-from numpy.distutils import log
20-from distutils.dep_util import newer
21-from distutils.sysconfig import get_config_var
22-
23-from setup_common import *
24-
25-# Set to True to enable multiple file compilations (experimental)
26-ENABLE_SEPARATE_COMPILATION = (os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0")
27-# Set to True to enable relaxed strides checking. This (mostly) means
28-# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.
29-NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "0") != "0")
30-
31-# XXX: ugly, we use a class to avoid calling twice some expensive functions in
32-# config.h/numpyconfig.h. I don't see a better way because distutils force
33-# config.h generation inside an Extension class, and as such sharing
34-# configuration informations between extensions is not easy.
35-# Using a pickled-based memoize does not work because config_cmd is an instance
36-# method, which cPickle does not like.
37-#
38-# Use pickle in all cases, as cPickle is gone in python3 and the difference
39-# in time is only in build. -- Charles Harris, 2013-03-30
40-
41-class CallOnceOnly(object):
42- def __init__(self):
43- self._check_types = None
44- self._check_ieee_macros = None
45- self._check_complex = None
46-
47- def check_types(self, *a, **kw):
48- if self._check_types is None:
49- out = check_types(*a, **kw)
50- self._check_types = pickle.dumps(out)
51- else:
52- out = copy.deepcopy(pickle.loads(self._check_types))
53- return out
54-
55- def check_ieee_macros(self, *a, **kw):
56- if self._check_ieee_macros is None:
57- out = check_ieee_macros(*a, **kw)
58- self._check_ieee_macros = pickle.dumps(out)
59- else:
60- out = copy.deepcopy(pickle.loads(self._check_ieee_macros))
61- return out
62-
63- def check_complex(self, *a, **kw):
64- if self._check_complex is None:
65- out = check_complex(*a, **kw)
66- self._check_complex = pickle.dumps(out)
67- else:
68- out = copy.deepcopy(pickle.loads(self._check_complex))
69- return out
70-
71-PYTHON_HAS_UNICODE_WIDE = True
72-
73-def pythonlib_dir():
74- """return path where libpython* is."""
75- if sys.platform == 'win32':
76- return os.path.join(sys.prefix, "libs")
77- else:
78- return get_config_var('LIBDIR')
79-
80-def is_npy_no_signal():
81- """Return True if the NPY_NO_SIGNAL symbol must be defined in configuration
82- header."""
83- return sys.platform == 'win32'
84-
85-def is_npy_no_smp():
86- """Return True if the NPY_NO_SMP symbol must be defined in public
87- header (when SMP support cannot be reliably enabled)."""
88- # Python 2.3 causes a segfault when
89- # trying to re-acquire the thread-state
90- # which is done in error-handling
91- # ufunc code. NPY_ALLOW_C_API and friends
92- # cause the segfault. So, we disable threading
93- # for now.
94- if sys.version[:5] < '2.4.2':
95- nosmp = 1
96- else:
97- # Perhaps a fancier check is in order here.
98- # so that threads are only enabled if there
99- # are actually multiple CPUS? -- but
100- # threaded code can be nice even on a single
101- # CPU so that long-calculating code doesn't
102- # block.
103- try:
104- nosmp = os.environ['NPY_NOSMP']
105- nosmp = 1
106- except KeyError:
107- nosmp = 0
108- return nosmp == 1
109-
110-def win32_checks(deflist):
111- from numpy.distutils.misc_util import get_build_architecture
112- a = get_build_architecture()
113-
114- # Distutils hack on AMD64 on windows
115- print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %
116- (a, os.name, sys.platform))
117- if a == 'AMD64':
118- deflist.append('DISTUTILS_USE_SDK')
119-
120- # On win32, force long double format string to be 'g', not
121- # 'Lg', since the MS runtime does not support long double whose
122- # size is > sizeof(double)
123- if a == "Intel" or a == "AMD64":
124- deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')
125-
126-def check_math_capabilities(config, moredefs, mathlibs):
127- def check_func(func_name):
128- return config.check_func(func_name, libraries=mathlibs,
129- decl=True, call=True)
130-
131- def check_funcs_once(funcs_name):
132- decl = dict([(f, True) for f in funcs_name])
133- st = config.check_funcs_once(funcs_name, libraries=mathlibs,
134- decl=decl, call=decl)
135- if st:
136- moredefs.extend([(fname2def(f), 1) for f in funcs_name])
137- return st
138-
139- def check_funcs(funcs_name):
140- # Use check_funcs_once first, and if it does not work, test func per
141- # func. Return success only if all the functions are available
142- if not check_funcs_once(funcs_name):
143- # Global check failed, check func per func
144- for f in funcs_name:
145- if check_func(f):
146- moredefs.append((fname2def(f), 1))
147- return 0
148- else:
149- return 1
150-
151- #use_msvc = config.check_decl("_MSC_VER")
152-
153- if not check_funcs_once(MANDATORY_FUNCS):
154- raise SystemError("One of the required function to build numpy is not"
155- " available (the list is %s)." % str(MANDATORY_FUNCS))
156-
157- # Standard functions which may not be available and for which we have a
158- # replacement implementation. Note that some of these are C99 functions.
159-
160- # XXX: hack to circumvent cpp pollution from python: python put its
161- # config.h in the public namespace, so we have a clash for the common
162- # functions we test. We remove every function tested by python's
163- # autoconf, hoping their own test are correct
164- for f in OPTIONAL_STDFUNCS_MAYBE:
165- if config.check_decl(fname2def(f),
166- headers=["Python.h", "math.h"]):
167- OPTIONAL_STDFUNCS.remove(f)
168-
169- check_funcs(OPTIONAL_STDFUNCS)
170-
171- for h in OPTIONAL_HEADERS:
172- if config.check_func("", decl=False, call=False, headers=[h]):
173- moredefs.append((fname2def(h).replace(".", "_"), 1))
174-
175- for tup in OPTIONAL_INTRINSICS:
176- headers = None
177- if len(tup) == 2:
178- f, args = tup
179- else:
180- f, args, headers = tup[0], tup[1], [tup[2]]
181- if config.check_func(f, decl=False, call=True, call_args=args,
182- headers=headers):
183- moredefs.append((fname2def(f), 1))
184-
185- for dec, fn in OPTIONAL_GCC_ATTRIBUTES:
186- if config.check_funcs_once([fn],
187- decl=dict((('%s %s' % (dec, fn), True),)),
188- call=False):
189- moredefs.append((fname2def(fn), 1))
190-
191- # C99 functions: float and long double versions
192- check_funcs(C99_FUNCS_SINGLE)
193- check_funcs(C99_FUNCS_EXTENDED)
194-
195-def check_complex(config, mathlibs):
196- priv = []
197- pub = []
198-
199- try:
200- if os.uname()[0] == "Interix":
201- warnings.warn("Disabling broken complex support. See #1365")
202- return priv, pub
203- except:
204- # os.uname not available on all platforms. blanket except ugly but safe
205- pass
206-
207- # Check for complex support
208- st = config.check_header('complex.h')
209- if st:
210- priv.append(('HAVE_COMPLEX_H', 1))
211- pub.append(('NPY_USE_C99_COMPLEX', 1))
212-
213- for t in C99_COMPLEX_TYPES:
214- st = config.check_type(t, headers=["complex.h"])
215- if st:
216- pub.append(('NPY_HAVE_%s' % type2def(t), 1))
217-
218- def check_prec(prec):
219- flist = [f + prec for f in C99_COMPLEX_FUNCS]
220- decl = dict([(f, True) for f in flist])
221- if not config.check_funcs_once(flist, call=decl, decl=decl,
222- libraries=mathlibs):
223- for f in flist:
224- if config.check_func(f, call=True, decl=True,
225- libraries=mathlibs):
226- priv.append((fname2def(f), 1))
227- else:
228- priv.extend([(fname2def(f), 1) for f in flist])
229-
230- check_prec('')
231- check_prec('f')
232- check_prec('l')
233-
234- return priv, pub
235-
236-def check_ieee_macros(config):
237- priv = []
238- pub = []
239-
240- macros = []
241-
242- def _add_decl(f):
243- priv.append(fname2def("decl_%s" % f))
244- pub.append('NPY_%s' % fname2def("decl_%s" % f))
245-
246- # XXX: hack to circumvent cpp pollution from python: python put its
247- # config.h in the public namespace, so we have a clash for the common
248- # functions we test. We remove every function tested by python's
249- # autoconf, hoping their own test are correct
250- _macros = ["isnan", "isinf", "signbit", "isfinite"]
251- for f in _macros:
252- py_symbol = fname2def("decl_%s" % f)
253- already_declared = config.check_decl(py_symbol,
254- headers=["Python.h", "math.h"])
255- if already_declared:
256- if config.check_macro_true(py_symbol,
257- headers=["Python.h", "math.h"]):
258- pub.append('NPY_%s' % fname2def("decl_%s" % f))
259- else:
260- macros.append(f)
261- # Normally, isnan and isinf are macro (C99), but some platforms only have
262- # func, or both func and macro version. Check for macro only, and define
263- # replacement ones if not found.
264- # Note: including Python.h is necessary because it modifies some math.h
265- # definitions
266- for f in macros:
267- st = config.check_decl(f, headers = ["Python.h", "math.h"])
268- if st:
269- _add_decl(f)
270-
271- return priv, pub
272-
273-def check_types(config_cmd, ext, build_dir):
274- private_defines = []
275- public_defines = []
276-
277- # Expected size (in number of bytes) for each type. This is an
278- # optimization: those are only hints, and an exhaustive search for the size
279- # is done if the hints are wrong.
280- expected = {}
281- expected['short'] = [2]
282- expected['int'] = [4]
283- expected['long'] = [8, 4]
284- expected['float'] = [4]
285- expected['double'] = [8]
286- expected['long double'] = [8, 12, 16]
287- expected['Py_intptr_t'] = [4, 8]
288- expected['PY_LONG_LONG'] = [8]
289- expected['long long'] = [8]
290-
291- # Check we have the python header (-dev* packages on Linux)
292- result = config_cmd.check_header('Python.h')
293- if not result:
294- raise SystemError(
295- "Cannot compile 'Python.h'. Perhaps you need to "\
296- "install python-dev|python-devel.")
297- res = config_cmd.check_header("endian.h")
298- if res:
299- private_defines.append(('HAVE_ENDIAN_H', 1))
300- public_defines.append(('NPY_HAVE_ENDIAN_H', 1))
301-
302- # Check basic types sizes
303- for type in ('short', 'int', 'long'):
304- res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers = ["Python.h"])
305- if res:
306- public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type)))
307- else:
308- res = config_cmd.check_type_size(type, expected=expected[type])
309- if res >= 0:
310- public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
311- else:
312- raise SystemError("Checking sizeof (%s) failed !" % type)
313-
314- for type in ('float', 'double', 'long double'):
315- already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type),
316- headers = ["Python.h"])
317- res = config_cmd.check_type_size(type, expected=expected[type])
318- if res >= 0:
319- public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
320- if not already_declared and not type == 'long double':
321- private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
322- else:
323- raise SystemError("Checking sizeof (%s) failed !" % type)
324-
325- # Compute size of corresponding complex type: used to check that our
326- # definition is binary compatible with C99 complex type (check done at
327- # build time in npy_common.h)
328- complex_def = "struct {%s __x; %s __y;}" % (type, type)
329- res = config_cmd.check_type_size(complex_def, expected=2*expected[type])
330- if res >= 0:
331- public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))
332- else:
333- raise SystemError("Checking sizeof (%s) failed !" % complex_def)
334-
335-
336- for type in ('Py_intptr_t',):
337- res = config_cmd.check_type_size(type, headers=["Python.h"],
338- library_dirs=[pythonlib_dir()],
339- expected=expected[type])
340-
341- if res >= 0:
342- private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
343- public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
344- else:
345- raise SystemError("Checking sizeof (%s) failed !" % type)
346-
347- # We check declaration AND type because that's how distutils does it.
348- if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):
349- res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],
350- library_dirs=[pythonlib_dir()],
351- expected=expected['PY_LONG_LONG'])
352- if res >= 0:
353- private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
354- public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
355- else:
356- raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG')
357-
358- res = config_cmd.check_type_size('long long',
359- expected=expected['long long'])
360- if res >= 0:
361- #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))
362- public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))
363- else:
364- raise SystemError("Checking sizeof (%s) failed !" % 'long long')
365-
366- if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):
367- raise RuntimeError(
368- "Config wo CHAR_BIT is not supported"\
369- ", please contact the maintainers")
370-
371- return private_defines, public_defines
372-
373-def check_mathlib(config_cmd):
374- # Testing the C math library
375- mathlibs = []
376- mathlibs_choices = [[], ['m'], ['cpml']]
377- mathlib = os.environ.get('MATHLIB')
378- if mathlib:
379- mathlibs_choices.insert(0, mathlib.split(','))
380- for libs in mathlibs_choices:
381- if config_cmd.check_func("exp", libraries=libs, decl=True, call=True):
382- mathlibs = libs
383- break
384- else:
385- raise EnvironmentError("math library missing; rerun "
386- "setup.py after setting the "
387- "MATHLIB env variable")
388- return mathlibs
389-
390-def visibility_define(config):
391- """Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
392- string)."""
393- if config.check_compiler_gcc4():
394- return '__attribute__((visibility("hidden")))'
395- else:
396- return ''
397-
398-def configuration(parent_package='',top_path=None):
399- from numpy.distutils.misc_util import Configuration, dot_join
400- from numpy.distutils.system_info import get_info, default_lib_dirs
401-
402- config = Configuration('core', parent_package, top_path)
403- local_dir = config.local_path
404- codegen_dir = join(local_dir, 'code_generators')
405-
406- if is_released(config):
407- warnings.simplefilter('error', MismatchCAPIWarning)
408-
409- # Check whether we have a mismatch between the set C API VERSION and the
410- # actual C API VERSION
411- check_api_version(C_API_VERSION, codegen_dir)
412-
413- generate_umath_py = join(codegen_dir, 'generate_umath.py')
414- n = dot_join(config.name, 'generate_umath')
415- generate_umath = imp.load_module('_'.join(n.split('.')),
416- open(generate_umath_py, 'U'), generate_umath_py,
417- ('.py', 'U', 1))
418-
419- header_dir = 'include/numpy' # this is relative to config.path_in_package
420-
421- cocache = CallOnceOnly()
422-
423- def generate_config_h(ext, build_dir):
424- target = join(build_dir, header_dir, 'config.h')
425- d = os.path.dirname(target)
426- if not os.path.exists(d):
427- os.makedirs(d)
428-
429- if newer(__file__, target):
430- config_cmd = config.get_config_cmd()
431- log.info('Generating %s', target)
432-
433- # Check sizeof
434- moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)
435-
436- # Check math library and C99 math funcs availability
437- mathlibs = check_mathlib(config_cmd)
438- moredefs.append(('MATHLIB', ','.join(mathlibs)))
439-
440- check_math_capabilities(config_cmd, moredefs, mathlibs)
441- moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
442- moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])
443-
444- # Signal check
445- if is_npy_no_signal():
446- moredefs.append('__NPY_PRIVATE_NO_SIGNAL')
447-
448- # Windows checks
449- if sys.platform=='win32' or os.name=='nt':
450- win32_checks(moredefs)
451-
452- # Inline check
453- inline = config_cmd.check_inline()
454-
455- # Check whether we need our own wide character support
456- if not config_cmd.check_decl('Py_UNICODE_WIDE', headers=['Python.h']):
457- PYTHON_HAS_UNICODE_WIDE = True
458- else:
459- PYTHON_HAS_UNICODE_WIDE = False
460-
461- if ENABLE_SEPARATE_COMPILATION:
462- moredefs.append(('ENABLE_SEPARATE_COMPILATION', 1))
463-
464- if NPY_RELAXED_STRIDES_CHECKING:
465- moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
466-
467- # Get long double representation
468- if sys.platform != 'darwin':
469- rep = check_long_double_representation(config_cmd)
470- if rep in ['INTEL_EXTENDED_12_BYTES_LE',
471- 'INTEL_EXTENDED_16_BYTES_LE',
472- 'MOTOROLA_EXTENDED_12_BYTES_BE',
473- 'IEEE_QUAD_LE', 'IEEE_QUAD_BE',
474- 'IEEE_DOUBLE_LE', 'IEEE_DOUBLE_BE',
475- 'DOUBLE_DOUBLE_BE']:
476- moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
477- else:
478- raise ValueError("Unrecognized long double format: %s" % rep)
479-
480- # Py3K check
481- if sys.version_info[0] == 3:
482- moredefs.append(('NPY_PY3K', 1))
483-
484- # Generate the config.h file from moredefs
485- target_f = open(target, 'w')
486- for d in moredefs:
487- if isinstance(d, str):
488- target_f.write('#define %s\n' % (d))
489- else:
490- target_f.write('#define %s %s\n' % (d[0], d[1]))
491-
492- # define inline to our keyword, or nothing
493- target_f.write('#ifndef __cplusplus\n')
494- if inline == 'inline':
495- target_f.write('/* #undef inline */\n')
496- else:
497- target_f.write('#define inline %s\n' % inline)
498- target_f.write('#endif\n')
499-
500- # add the guard to make sure config.h is never included directly,
501- # but always through npy_config.h
502- target_f.write("""
503-#ifndef _NPY_NPY_CONFIG_H_
504-#error config.h should never be included directly, include npy_config.h instead
505-#endif
506-""")
507-
508- target_f.close()
509- print('File:', target)
510- target_f = open(target)
511- print(target_f.read())
512- target_f.close()
513- print('EOF')
514- else:
515- mathlibs = []
516- target_f = open(target)
517- for line in target_f:
518- s = '#define MATHLIB'
519- if line.startswith(s):
520- value = line[len(s):].strip()
521- if value:
522- mathlibs.extend(value.split(','))
523- target_f.close()
524-
525- # Ugly: this can be called within a library and not an extension,
526- # in which case there is no libraries attributes (and none is
527- # needed).
528- if hasattr(ext, 'libraries'):
529- ext.libraries.extend(mathlibs)
530-
531- incl_dir = os.path.dirname(target)
532- if incl_dir not in config.numpy_include_dirs:
533- config.numpy_include_dirs.append(incl_dir)
534-
535- return target
536-
537- def generate_numpyconfig_h(ext, build_dir):
538- """Depends on config.h: generate_config_h has to be called before !"""
539- target = join(build_dir, header_dir, '_numpyconfig.h')
540- d = os.path.dirname(target)
541- if not os.path.exists(d):
542- os.makedirs(d)
543- if newer(__file__, target):
544- config_cmd = config.get_config_cmd()
545- log.info('Generating %s', target)
546-
547- # Check sizeof
548- ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)
549-
550- if is_npy_no_signal():
551- moredefs.append(('NPY_NO_SIGNAL', 1))
552-
553- if is_npy_no_smp():
554- moredefs.append(('NPY_NO_SMP', 1))
555- else:
556- moredefs.append(('NPY_NO_SMP', 0))
557-
558- mathlibs = check_mathlib(config_cmd)
559- moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
560- moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])
561-
562- if ENABLE_SEPARATE_COMPILATION:
563- moredefs.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1))
564-
565- if NPY_RELAXED_STRIDES_CHECKING:
566- moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
567-
568- # Check wether we can use inttypes (C99) formats
569- if config_cmd.check_decl('PRIdPTR', headers = ['inttypes.h']):
570- moredefs.append(('NPY_USE_C99_FORMATS', 1))
571-
572- # visibility check
573- hidden_visibility = visibility_define(config_cmd)
574- moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))
575-
576- # Add the C API/ABI versions
577- moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
578- moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))
579-
580- # Add moredefs to header
581- target_f = open(target, 'w')
582- for d in moredefs:
583- if isinstance(d, str):
584- target_f.write('#define %s\n' % (d))
585- else:
586- target_f.write('#define %s %s\n' % (d[0], d[1]))
587-
588- # Define __STDC_FORMAT_MACROS
589- target_f.write("""
590-#ifndef __STDC_FORMAT_MACROS
591-#define __STDC_FORMAT_MACROS 1
592-#endif
593-""")
594- target_f.close()
595-
596- # Dump the numpyconfig.h header to stdout
597- print('File: %s' % target)
598- target_f = open(target)
599- print(target_f.read())
600- target_f.close()
601- print('EOF')
602- config.add_data_files((header_dir, target))
603- return target
604-
605- def generate_api_func(module_name):
606- def generate_api(ext, build_dir):
607- script = join(codegen_dir, module_name + '.py')
608- sys.path.insert(0, codegen_dir)
609- try:
610- m = __import__(module_name)
611- log.info('executing %s', script)
612- h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))
613- finally:
614- del sys.path[0]
615- config.add_data_files((header_dir, h_file),
616- (header_dir, doc_file))
617- return (h_file,)
618- return generate_api
619-
620- generate_numpy_api = generate_api_func('generate_numpy_api')
621- generate_ufunc_api = generate_api_func('generate_ufunc_api')
622-
623- config.add_include_dirs(join(local_dir, "src", "private"))
624- config.add_include_dirs(join(local_dir, "src"))
625- config.add_include_dirs(join(local_dir))
626-
627- config.add_data_files('include/numpy/*.h')
628- config.add_include_dirs(join('src', 'npymath'))
629- config.add_include_dirs(join('src', 'multiarray'))
630- config.add_include_dirs(join('src', 'umath'))
631- config.add_include_dirs(join('src', 'npysort'))
632-
633- config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
634-
635- config.numpy_include_dirs.extend(config.paths('include'))
636-
637- deps = [join('src', 'npymath', '_signbit.c'),
638- join('include', 'numpy', '*object.h'),
639- 'include/numpy/fenv/fenv.c',
640- 'include/numpy/fenv/fenv.h',
641- join(codegen_dir, 'genapi.py'),
642- ]
643-
644- # Don't install fenv unless we need them.
645- if sys.platform == 'cygwin':
646- config.add_data_dir('include/numpy/fenv')
647-
648- #######################################################################
649- # dummy module #
650- #######################################################################
651-
652- # npymath needs the config.h and numpyconfig.h files to be generated, but
653- # build_clib cannot handle generate_config_h and generate_numpyconfig_h
654- # (don't ask). Because clib are generated before extensions, we have to
655- # explicitly add an extension which has generate_config_h and
656- # generate_numpyconfig_h as sources *before* adding npymath.
657-
658- config.add_extension('_dummy',
659- sources = [join('src', 'dummymodule.c'),
660- generate_config_h,
661- generate_numpyconfig_h,
662- generate_numpy_api]
663- )
664-
665- #######################################################################
666- # npymath library #
667- #######################################################################
668-
669- subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])
670- def get_mathlib_info(*args):
671- # Another ugly hack: the mathlib info is known once build_src is run,
672- # but we cannot use add_installed_pkg_config here either, so we only
673- # update the substition dictionary during npymath build
674- config_cmd = config.get_config_cmd()
675-
676- # Check that the toolchain works, to fail early if it doesn't
677- # (avoid late errors with MATHLIB which are confusing if the
678- # compiler does not work).
679- st = config_cmd.try_link('int main(void) { return 0;}')
680- if not st:
681- raise RuntimeError("Broken toolchain: cannot link a simple C program")
682- mlibs = check_mathlib(config_cmd)
683-
684- posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
685- msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])
686- subst_dict["posix_mathlib"] = posix_mlib
687- subst_dict["msvc_mathlib"] = msvc_mlib
688-
689- config.add_installed_library('npymath',
690- sources=[join('src', 'npymath', 'npy_math.c.src'),
691- join('src', 'npymath', 'ieee754.c.src'),
692- join('src', 'npymath', 'npy_math_complex.c.src'),
693- join('src', 'npymath', 'halffloat.c'),
694- get_mathlib_info],
695- install_dir='lib')
696- config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
697- subst_dict)
698- config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
699- subst_dict)
700-
701- #######################################################################
702- # npysort library #
703- #######################################################################
704-
705- # This library is created for the build but it is not installed
706- config.add_library('npysort',
707- sources = [join('src', 'npysort', 'quicksort.c.src'),
708- join('src', 'npysort', 'mergesort.c.src'),
709- join('src', 'npysort', 'heapsort.c.src'),
710- join('src', 'npysort', 'selection.c.src')])
711-
712-
713- #######################################################################
714- # multiarray module #
715- #######################################################################
716-
717- # Multiarray version: this function is needed to build foo.c from foo.c.src
718- # when foo.c is included in another file and as such not in the src
719- # argument of build_ext command
720- def generate_multiarray_templated_sources(ext, build_dir):
721- from numpy.distutils.misc_util import get_cmd
722-
723- subpath = join('src', 'multiarray')
724- sources = [join(local_dir, subpath, 'scalartypes.c.src'),
725- join(local_dir, subpath, 'arraytypes.c.src'),
726- join(local_dir, subpath, 'nditer_templ.c.src'),
727- join(local_dir, subpath, 'lowlevel_strided_loops.c.src'),
728- join(local_dir, subpath, 'einsum.c.src')]
729-
730- # numpy.distutils generate .c from .c.src in weird directories, we have
731- # to add them there as they depend on the build_dir
732- config.add_include_dirs(join(build_dir, subpath))
733- cmd = get_cmd('build_src')
734- cmd.ensure_finalized()
735- cmd.template_sources(sources, ext)
736-
737- multiarray_deps = [
738- join('src', 'multiarray', 'arrayobject.h'),
739- join('src', 'multiarray', 'arraytypes.h'),
740- join('src', 'multiarray', 'array_assign.h'),
741- join('src', 'multiarray', 'buffer.h'),
742- join('src', 'multiarray', 'calculation.h'),
743- join('src', 'multiarray', 'common.h'),
744- join('src', 'multiarray', 'convert_datatype.h'),
745- join('src', 'multiarray', 'convert.h'),
746- join('src', 'multiarray', 'conversion_utils.h'),
747- join('src', 'multiarray', 'ctors.h'),
748- join('src', 'multiarray', 'descriptor.h'),
749- join('src', 'multiarray', 'getset.h'),
750- join('src', 'multiarray', 'hashdescr.h'),
751- join('src', 'multiarray', 'iterators.h'),
752- join('src', 'multiarray', 'mapping.h'),
753- join('src', 'multiarray', 'methods.h'),
754- join('src', 'multiarray', 'multiarraymodule.h'),
755- join('src', 'multiarray', 'nditer_impl.h'),
756- join('src', 'multiarray', 'numpymemoryview.h'),
757- join('src', 'multiarray', 'number.h'),
758- join('src', 'multiarray', 'numpyos.h'),
759- join('src', 'multiarray', 'refcount.h'),
760- join('src', 'multiarray', 'scalartypes.h'),
761- join('src', 'multiarray', 'sequence.h'),
762- join('src', 'multiarray', 'shape.h'),
763- join('src', 'multiarray', 'ucsnarrow.h'),
764- join('src', 'multiarray', 'usertypes.h'),
765- join('src', 'private', 'lowlevel_strided_loops.h'),
766- join('include', 'numpy', 'arrayobject.h'),
767- join('include', 'numpy', '_neighborhood_iterator_imp.h'),
768- join('include', 'numpy', 'npy_endian.h'),
769- join('include', 'numpy', 'arrayscalars.h'),
770- join('include', 'numpy', 'noprefix.h'),
771- join('include', 'numpy', 'npy_interrupt.h'),
772- join('include', 'numpy', 'oldnumeric.h'),
773- join('include', 'numpy', 'npy_3kcompat.h'),
774- join('include', 'numpy', 'npy_math.h'),
775- join('include', 'numpy', 'halffloat.h'),
776- join('include', 'numpy', 'npy_common.h'),
777- join('include', 'numpy', 'npy_os.h'),
778- join('include', 'numpy', 'utils.h'),
779- join('include', 'numpy', 'ndarrayobject.h'),
780- join('include', 'numpy', 'npy_cpu.h'),
781- join('include', 'numpy', 'numpyconfig.h'),
782- join('include', 'numpy', 'ndarraytypes.h'),
783- join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
784- join('include', 'numpy', '_numpyconfig.h.in'),
785- ]
786-
787- multiarray_src = [
788- join('src', 'multiarray', 'arrayobject.c'),
789- join('src', 'multiarray', 'arraytypes.c.src'),
790- join('src', 'multiarray', 'array_assign.c'),
791- join('src', 'multiarray', 'array_assign_scalar.c'),
792- join('src', 'multiarray', 'array_assign_array.c'),
793- join('src', 'multiarray', 'buffer.c'),
794- join('src', 'multiarray', 'calculation.c'),
795- join('src', 'multiarray', 'common.c'),
796- join('src', 'multiarray', 'convert.c'),
797- join('src', 'multiarray', 'convert_datatype.c'),
798- join('src', 'multiarray', 'conversion_utils.c'),
799- join('src', 'multiarray', 'ctors.c'),
800- join('src', 'multiarray', 'datetime.c'),
801- join('src', 'multiarray', 'datetime_strings.c'),
802- join('src', 'multiarray', 'datetime_busday.c'),
803- join('src', 'multiarray', 'datetime_busdaycal.c'),
804- join('src', 'multiarray', 'descriptor.c'),
805- join('src', 'multiarray', 'dtype_transfer.c'),
806- join('src', 'multiarray', 'einsum.c.src'),
807- join('src', 'multiarray', 'flagsobject.c'),
808- join('src', 'multiarray', 'getset.c'),
809- join('src', 'multiarray', 'hashdescr.c'),
810- join('src', 'multiarray', 'item_selection.c'),
811- join('src', 'multiarray', 'iterators.c'),
812- join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
813- join('src', 'multiarray', 'mapping.c'),
814- join('src', 'multiarray', 'methods.c'),
815- join('src', 'multiarray', 'multiarraymodule.c'),
816- join('src', 'multiarray', 'nditer_templ.c.src'),
817- join('src', 'multiarray', 'nditer_api.c'),
818- join('src', 'multiarray', 'nditer_constr.c'),
819- join('src', 'multiarray', 'nditer_pywrap.c'),
820- join('src', 'multiarray', 'number.c'),
821- join('src', 'multiarray', 'numpymemoryview.c'),
822- join('src', 'multiarray', 'numpyos.c'),
823- join('src', 'multiarray', 'refcount.c'),
824- join('src', 'multiarray', 'sequence.c'),
825- join('src', 'multiarray', 'shape.c'),
826- join('src', 'multiarray', 'scalarapi.c'),
827- join('src', 'multiarray', 'scalartypes.c.src'),
828- join('src', 'multiarray', 'usertypes.c'),
829- join('src', 'multiarray', 'ucsnarrow.c')]
830-
831-
832- if not ENABLE_SEPARATE_COMPILATION:
833- multiarray_deps.extend(multiarray_src)
834- multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')]
835- multiarray_src.append(generate_multiarray_templated_sources)
836-
837- config.add_extension('multiarray',
838- sources = multiarray_src +
839- [generate_config_h,
840- generate_numpyconfig_h,
841- generate_numpy_api,
842- join(codegen_dir, 'generate_numpy_api.py'),
843- join('*.py')],
844- depends = deps + multiarray_deps,
845- libraries = ['npymath', 'npysort'])
846-
847- #######################################################################
848- # umath module #
849- #######################################################################
850-
851- # umath version: this function is needed to build foo.c from foo.c.src
852- # when foo.c is included in another file and as such not in the src
853- # argument of build_ext command
854- def generate_umath_templated_sources(ext, build_dir):
855- from numpy.distutils.misc_util import get_cmd
856-
857- subpath = join('src', 'umath')
858- # NOTE: For manual template conversion of loops.h.src, read the note
859- # in that file.
860- sources = [
861- join(local_dir, subpath, 'loops.c.src'),
862- join(local_dir, subpath, 'simd.inc.src')]
863-
864- # numpy.distutils generate .c from .c.src in weird directories, we have
865- # to add them there as they depend on the build_dir
866- config.add_include_dirs(join(build_dir, subpath))
867- cmd = get_cmd('build_src')
868- cmd.ensure_finalized()
869- cmd.template_sources(sources, ext)
870-
871-
872- def generate_umath_c(ext, build_dir):
873- target = join(build_dir, header_dir, '__umath_generated.c')
874- dir = os.path.dirname(target)
875- if not os.path.exists(dir):
876- os.makedirs(dir)
877- script = generate_umath_py
878- if newer(script, target):
879- f = open(target, 'w')
880- f.write(generate_umath.make_code(generate_umath.defdict,
881- generate_umath.__file__))
882- f.close()
883- return []
884-
885- umath_src = [
886- join('src', 'umath', 'umathmodule.c'),
887- join('src', 'umath', 'reduction.c'),
888- join('src', 'umath', 'funcs.inc.src'),
889- join('src', 'umath', 'simd.inc.src'),
890- join('src', 'umath', 'loops.c.src'),
891- join('src', 'umath', 'ufunc_object.c'),
892- join('src', 'umath', 'ufunc_type_resolution.c')]
893-
894- umath_deps = [
895- generate_umath_py,
896- join('src', 'umath', 'simd.inc.src'),
897- join(codegen_dir, 'generate_ufunc_api.py')]
898-
899- if not ENABLE_SEPARATE_COMPILATION:
900- umath_deps.extend(umath_src)
901- umath_src = [join('src', 'umath', 'umathmodule_onefile.c')]
902- umath_src.append(generate_umath_templated_sources)
903- umath_src.append(join('src', 'umath', 'funcs.inc.src'))
904- umath_src.append(join('src', 'umath', 'simd.inc.src'))
905-
906- config.add_extension('umath',
907- sources = umath_src +
908- [generate_config_h,
909- generate_numpyconfig_h,
910- generate_umath_c,
911- generate_ufunc_api],
912- depends = deps + umath_deps,
913- libraries = ['npymath'],
914- )
915-
916- #######################################################################
917- # scalarmath module #
918- #######################################################################
919-
920- config.add_extension('scalarmath',
921- sources = [join('src', 'scalarmathmodule.c.src'),
922- generate_config_h,
923- generate_numpyconfig_h,
924- generate_numpy_api,
925- generate_ufunc_api],
926- depends = deps,
927- libraries = ['npymath'],
928- )
929-
930- #######################################################################
931- # _dotblas module #
932- #######################################################################
933-
934- # Configure blasdot
935- blas_info = get_info('blas_opt', 0)
936- #blas_info = {}
937- def get_dotblas_sources(ext, build_dir):
938- if blas_info:
939- if ('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', []):
940- return None # dotblas needs ATLAS, Fortran compiled blas will not be sufficient.
941- return ext.depends[:1]
942- return None # no extension module will be built
943-
944- config.add_extension('_dotblas',
945- sources = [get_dotblas_sources],
946- depends = [join('blasdot', '_dotblas.c'),
947- join('blasdot', 'cblas.h'),
948- ],
949- include_dirs = ['blasdot'],
950- extra_info = blas_info
951- )
952-
953- #######################################################################
954- # umath_tests module #
955- #######################################################################
956-
957- config.add_extension('umath_tests',
958- sources = [join('src', 'umath', 'umath_tests.c.src')])
959-
960- #######################################################################
961- # custom rational dtype module #
962- #######################################################################
963-
964- config.add_extension('test_rational',
965- sources = [join('src', 'umath', 'test_rational.c.src')])
966-
967- #######################################################################
968- # struct_ufunc_test module #
969- #######################################################################
970-
971- config.add_extension('struct_ufunc_test',
972- sources = [join('src', 'umath', 'struct_ufunc_test.c.src')])
973-
974- #######################################################################
975- # multiarray_tests module #
976- #######################################################################
977-
978- config.add_extension('multiarray_tests',
979- sources = [join('src', 'multiarray', 'multiarray_tests.c.src')])
980-
981- #######################################################################
982- # operand_flag_tests module #
983- #######################################################################
984-
985- config.add_extension('operand_flag_tests',
986- sources = [join('src', 'umath', 'operand_flag_tests.c.src')])
987-
988- config.add_data_dir('tests')
989- config.add_data_dir('tests/data')
990-
991- config.make_svn_version_py()
992-
993- return config
994-
995-if __name__=='__main__':
996- from numpy.distutils.core import setup
997- setup(configuration=configuration)
998
999=== removed directory '.pc/03_force_f2py_version.patch'
1000=== removed directory '.pc/03_force_f2py_version.patch/numpy'
1001=== removed directory '.pc/03_force_f2py_version.patch/numpy/f2py'
1002=== removed file '.pc/03_force_f2py_version.patch/numpy/f2py/setup.py'
1003--- .pc/03_force_f2py_version.patch/numpy/f2py/setup.py 2013-11-02 13:18:24 +0000
1004+++ .pc/03_force_f2py_version.patch/numpy/f2py/setup.py 1970-01-01 00:00:00 +0000
1005@@ -1,129 +0,0 @@
1006-#!/usr/bin/env python
1007-"""
1008-setup.py for installing F2PY
1009-
1010-Usage:
1011- python setup.py install
1012-
1013-Copyright 2001-2005 Pearu Peterson all rights reserved,
1014-Pearu Peterson <pearu@cens.ioc.ee>
1015-Permission to use, modify, and distribute this software is given under the
1016-terms of the NumPy License.
1017-
1018-NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
1019-$Revision: 1.32 $
1020-$Date: 2005/01/30 17:22:14 $
1021-Pearu Peterson
1022-
1023-"""
1024-from __future__ import division, print_function
1025-
1026-__version__ = "$Id: setup.py,v 1.32 2005/01/30 17:22:14 pearu Exp $"
1027-
1028-import os
1029-import sys
1030-from distutils.dep_util import newer
1031-from numpy.distutils import log
1032-from numpy.distutils.core import setup
1033-from numpy.distutils.misc_util import Configuration
1034-
1035-from __version__ import version
1036-
1037-def configuration(parent_package='',top_path=None):
1038- config = Configuration('f2py', parent_package, top_path)
1039-
1040- config.add_data_dir('docs')
1041- config.add_data_dir('tests')
1042-
1043- config.add_data_files('src/fortranobject.c',
1044- 'src/fortranobject.h',
1045- 'f2py.1'
1046- )
1047-
1048- config.make_svn_version_py()
1049-
1050- def generate_f2py_py(build_dir):
1051- f2py_exe = 'f2py'+os.path.basename(sys.executable)[6:]
1052- if f2py_exe[-4:]=='.exe':
1053- f2py_exe = f2py_exe[:-4] + '.py'
1054- if 'bdist_wininst' in sys.argv and f2py_exe[-3:] != '.py':
1055- f2py_exe = f2py_exe + '.py'
1056- target = os.path.join(build_dir, f2py_exe)
1057- if newer(__file__, target):
1058- log.info('Creating %s', target)
1059- f = open(target, 'w')
1060- f.write('''\
1061-#!%s
1062-# See http://cens.ioc.ee/projects/f2py2e/
1063-import os, sys
1064-for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
1065- try:
1066- i=sys.argv.index("--"+mode)
1067- del sys.argv[i]
1068- break
1069- except ValueError: pass
1070-os.environ["NO_SCIPY_IMPORT"]="f2py"
1071-if mode=="g3-numpy":
1072- sys.stderr.write("G3 f2py support is not implemented, yet.\\n")
1073- sys.exit(1)
1074-elif mode=="2e-numeric":
1075- from f2py2e import main
1076-elif mode=="2e-numarray":
1077- sys.argv.append("-DNUMARRAY")
1078- from f2py2e import main
1079-elif mode=="2e-numpy":
1080- from numpy.f2py import main
1081-else:
1082- sys.stderr.write("Unknown mode: " + repr(mode) + "\\n")
1083- sys.exit(1)
1084-main()
1085-'''%(sys.executable))
1086- f.close()
1087- return target
1088-
1089- config.add_scripts(generate_f2py_py)
1090-
1091- log.info('F2PY Version %s', config.get_version())
1092-
1093- return config
1094-
1095-if __name__ == "__main__":
1096-
1097- config = configuration(top_path='')
1098- version = config.get_version()
1099- print('F2PY Version', version)
1100- config = config.todict()
1101-
1102- if sys.version[:3]>='2.3':
1103- config['download_url'] = "http://cens.ioc.ee/projects/f2py2e/2.x"\
1104- "/F2PY-2-latest.tar.gz"
1105- config['classifiers'] = [
1106- 'Development Status :: 5 - Production/Stable',
1107- 'Intended Audience :: Developers',
1108- 'Intended Audience :: Science/Research',
1109- 'License :: OSI Approved :: NumPy License',
1110- 'Natural Language :: English',
1111- 'Operating System :: OS Independent',
1112- 'Programming Language :: C',
1113- 'Programming Language :: Fortran',
1114- 'Programming Language :: Python',
1115- 'Topic :: Scientific/Engineering',
1116- 'Topic :: Software Development :: Code Generators',
1117- ]
1118- setup(version=version,
1119- description = "F2PY - Fortran to Python Interface Generaton",
1120- author = "Pearu Peterson",
1121- author_email = "pearu@cens.ioc.ee",
1122- maintainer = "Pearu Peterson",
1123- maintainer_email = "pearu@cens.ioc.ee",
1124- license = "BSD",
1125- platforms = "Unix, Windows (mingw|cygwin), Mac OSX",
1126- long_description = """\
1127-The Fortran to Python Interface Generator, or F2PY for short, is a
1128-command line tool (f2py) for generating Python C/API modules for
1129-wrapping Fortran 77/90/95 subroutines, accessing common blocks from
1130-Python, and calling Python functions from Fortran (call-backs).
1131-Interfacing subroutines/data from Fortran 90/95 modules is supported.""",
1132- url = "http://cens.ioc.ee/projects/f2py2e/",
1133- keywords = ['Fortran', 'f2py'],
1134- **config)
1135
1136=== removed directory '.pc/10_use_local_python.org_object.inv_sphinx.diff'
1137=== removed directory '.pc/10_use_local_python.org_object.inv_sphinx.diff/doc'
1138=== removed directory '.pc/10_use_local_python.org_object.inv_sphinx.diff/doc/source'
1139=== removed file '.pc/10_use_local_python.org_object.inv_sphinx.diff/doc/source/conf.py'
1140--- .pc/10_use_local_python.org_object.inv_sphinx.diff/doc/source/conf.py 2013-11-02 13:18:24 +0000
1141+++ .pc/10_use_local_python.org_object.inv_sphinx.diff/doc/source/conf.py 1970-01-01 00:00:00 +0000
1142@@ -1,331 +0,0 @@
1143-# -*- coding: utf-8 -*-
1144-from __future__ import division, absolute_import, print_function
1145-
1146-import sys, os, re
1147-
1148-# Check Sphinx version
1149-import sphinx
1150-if sphinx.__version__ < "1.0.1":
1151- raise RuntimeError("Sphinx 1.0.1 or newer required")
1152-
1153-needs_sphinx = '1.0'
1154-
1155-# -----------------------------------------------------------------------------
1156-# General configuration
1157-# -----------------------------------------------------------------------------
1158-
1159-# Add any Sphinx extension module names here, as strings. They can be extensions
1160-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
1161-
1162-sys.path.insert(0, os.path.abspath('../sphinxext'))
1163-
1164-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc',
1165- 'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
1166- 'sphinx.ext.doctest', 'sphinx.ext.autosummary',
1167- 'matplotlib.sphinxext.plot_directive']
1168-
1169-# Add any paths that contain templates here, relative to this directory.
1170-templates_path = ['_templates']
1171-
1172-# The suffix of source filenames.
1173-source_suffix = '.rst'
1174-
1175-# General substitutions.
1176-project = 'NumPy'
1177-copyright = '2008-2009, The Scipy community'
1178-
1179-# The default replacements for |version| and |release|, also used in various
1180-# other places throughout the built documents.
1181-#
1182-import numpy
1183-# The short X.Y version (including .devXXXX, rcX, b1 suffixes if present)
1184-version = re.sub(r'(\d+\.\d+)\.\d+(.*)', r'\1\2', numpy.__version__)
1185-version = re.sub(r'(\.dev\d+).*?$', r'\1', version)
1186-# The full version, including alpha/beta/rc tags.
1187-release = numpy.__version__
1188-print("%s %s" % (version, release))
1189-
1190-# There are two options for replacing |today|: either, you set today to some
1191-# non-false value, then it is used:
1192-#today = ''
1193-# Else, today_fmt is used as the format for a strftime call.
1194-today_fmt = '%B %d, %Y'
1195-
1196-# List of documents that shouldn't be included in the build.
1197-#unused_docs = []
1198-
1199-# The reST default role (used for this markup: `text`) to use for all documents.
1200-default_role = "autolink"
1201-
1202-# List of directories, relative to source directories, that shouldn't be searched
1203-# for source files.
1204-exclude_dirs = []
1205-
1206-# If true, '()' will be appended to :func: etc. cross-reference text.
1207-add_function_parentheses = False
1208-
1209-# If true, the current module name will be prepended to all description
1210-# unit titles (such as .. function::).
1211-#add_module_names = True
1212-
1213-# If true, sectionauthor and moduleauthor directives will be shown in the
1214-# output. They are ignored by default.
1215-#show_authors = False
1216-
1217-# The name of the Pygments (syntax highlighting) style to use.
1218-pygments_style = 'sphinx'
1219-
1220-
1221-# -----------------------------------------------------------------------------
1222-# HTML output
1223-# -----------------------------------------------------------------------------
1224-
1225-themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
1226-if not os.path.isdir(themedir):
1227- raise RuntimeError("Get the scipy-sphinx-theme first, "
1228- "via git submodule init && git submodule update")
1229-
1230-html_theme = 'scipy'
1231-html_theme_path = [themedir]
1232-
1233-if 'scipyorg' in tags:
1234- # Build for the scipy.org website
1235- html_theme_options = {
1236- "edit_link": True,
1237- "sidebar": "right",
1238- "scipy_org_logo": True,
1239- "rootlinks": [("http://scipy.org/", "Scipy.org"),
1240- ("http://docs.scipy.org/", "Docs")]
1241- }
1242-else:
1243- # Default build
1244- html_theme_options = {
1245- "edit_link": False,
1246- "sidebar": "left",
1247- "scipy_org_logo": False,
1248- "rootlinks": []
1249- }
1250- html_sidebars = {'index': 'indexsidebar.html'}
1251-
1252-html_additional_pages = {
1253- 'index': 'indexcontent.html',
1254-}
1255-
1256-html_title = "%s v%s Manual" % (project, version)
1257-html_static_path = ['_static']
1258-html_last_updated_fmt = '%b %d, %Y'
1259-
1260-html_use_modindex = True
1261-html_copy_source = False
1262-html_domain_indices = False
1263-html_file_suffix = '.html'
1264-
1265-htmlhelp_basename = 'numpy'
1266-
1267-pngmath_use_preview = True
1268-pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent']
1269-
1270-
1271-# -----------------------------------------------------------------------------
1272-# LaTeX output
1273-# -----------------------------------------------------------------------------
1274-
1275-# The paper size ('letter' or 'a4').
1276-#latex_paper_size = 'letter'
1277-
1278-# The font size ('10pt', '11pt' or '12pt').
1279-#latex_font_size = '10pt'
1280-
1281-# Grouping the document tree into LaTeX files. List of tuples
1282-# (source start file, target name, title, author, document class [howto/manual]).
1283-_stdauthor = 'Written by the NumPy community'
1284-latex_documents = [
1285- ('reference/index', 'numpy-ref.tex', 'NumPy Reference',
1286- _stdauthor, 'manual'),
1287- ('user/index', 'numpy-user.tex', 'NumPy User Guide',
1288- _stdauthor, 'manual'),
1289-]
1290-
1291-# The name of an image file (relative to this directory) to place at the top of
1292-# the title page.
1293-#latex_logo = None
1294-
1295-# For "manual" documents, if this is true, then toplevel headings are parts,
1296-# not chapters.
1297-#latex_use_parts = False
1298-
1299-# Additional stuff for the LaTeX preamble.
1300-latex_preamble = r'''
1301-\usepackage{amsmath}
1302-\DeclareUnicodeCharacter{00A0}{\nobreakspace}
1303-
1304-% In the parameters section, place a newline after the Parameters
1305-% header
1306-\usepackage{expdlist}
1307-\let\latexdescription=\description
1308-\def\description{\latexdescription{}{} \breaklabel}
1309-
1310-% Make Examples/etc section headers smaller and more compact
1311-\makeatletter
1312-\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
1313- {\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
1314-\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
1315-\makeatother
1316-
1317-% Fix footer/header
1318-\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
1319-\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
1320-'''
1321-
1322-# Documents to append as an appendix to all manuals.
1323-#latex_appendices = []
1324-
1325-# If false, no module index is generated.
1326-latex_use_modindex = False
1327-
1328-
1329-# -----------------------------------------------------------------------------
1330-# Texinfo output
1331-# -----------------------------------------------------------------------------
1332-
1333-texinfo_documents = [
1334- ("contents", 'numpy', 'Numpy Documentation', _stdauthor, 'Numpy',
1335- "NumPy: array processing for numbers, strings, records, and objects.",
1336- 'Programming',
1337- 1),
1338-]
1339-
1340-
1341-# -----------------------------------------------------------------------------
1342-# Intersphinx configuration
1343-# -----------------------------------------------------------------------------
1344-intersphinx_mapping = {'http://docs.python.org/dev': None}
1345-
1346-
1347-# -----------------------------------------------------------------------------
1348-# Numpy extensions
1349-# -----------------------------------------------------------------------------
1350-
1351-# If we want to do a phantom import from an XML file for all autodocs
1352-phantom_import_file = 'dump.xml'
1353-
1354-# Make numpydoc generate plots for example sections
1355-numpydoc_use_plots = True
1356-
1357-# -----------------------------------------------------------------------------
1358-# Autosummary
1359-# -----------------------------------------------------------------------------
1360-
1361-import glob
1362-autosummary_generate = glob.glob("reference/*.rst")
1363-
1364-# -----------------------------------------------------------------------------
1365-# Coverage checker
1366-# -----------------------------------------------------------------------------
1367-coverage_ignore_modules = r"""
1368- """.split()
1369-coverage_ignore_functions = r"""
1370- test($|_) (some|all)true bitwise_not cumproduct pkgload
1371- generic\.
1372- """.split()
1373-coverage_ignore_classes = r"""
1374- """.split()
1375-
1376-coverage_c_path = []
1377-coverage_c_regexes = {}
1378-coverage_ignore_c_items = {}
1379-
1380-
1381-# -----------------------------------------------------------------------------
1382-# Plots
1383-# -----------------------------------------------------------------------------
1384-plot_pre_code = """
1385-import numpy as np
1386-np.random.seed(0)
1387-"""
1388-plot_include_source = True
1389-plot_formats = [('png', 100), 'pdf']
1390-
1391-import math
1392-phi = (math.sqrt(5) + 1)/2
1393-
1394-plot_rcparams = {
1395- 'font.size': 8,
1396- 'axes.titlesize': 8,
1397- 'axes.labelsize': 8,
1398- 'xtick.labelsize': 8,
1399- 'ytick.labelsize': 8,
1400- 'legend.fontsize': 8,
1401- 'figure.figsize': (3*phi, 3),
1402- 'figure.subplot.bottom': 0.2,
1403- 'figure.subplot.left': 0.2,
1404- 'figure.subplot.right': 0.9,
1405- 'figure.subplot.top': 0.85,
1406- 'figure.subplot.wspace': 0.4,
1407- 'text.usetex': False,
1408-}
1409-
1410-
1411-# -----------------------------------------------------------------------------
1412-# Source code links
1413-# -----------------------------------------------------------------------------
1414-
1415-import inspect
1416-from os.path import relpath, dirname
1417-
1418-for name in ['sphinx.ext.linkcode', 'numpydoc.linkcode']:
1419- try:
1420- __import__(name)
1421- extensions.append(name)
1422- break
1423- except ImportError:
1424- pass
1425-else:
1426- print("NOTE: linkcode extension not found -- no links to source generated")
1427-
1428-def linkcode_resolve(domain, info):
1429- """
1430- Determine the URL corresponding to Python object
1431- """
1432- if domain != 'py':
1433- return None
1434-
1435- modname = info['module']
1436- fullname = info['fullname']
1437-
1438- submod = sys.modules.get(modname)
1439- if submod is None:
1440- return None
1441-
1442- obj = submod
1443- for part in fullname.split('.'):
1444- try:
1445- obj = getattr(obj, part)
1446- except:
1447- return None
1448-
1449- try:
1450- fn = inspect.getsourcefile(obj)
1451- except:
1452- fn = None
1453- if not fn:
1454- return None
1455-
1456- try:
1457- source, lineno = inspect.findsource(obj)
1458- except:
1459- lineno = None
1460-
1461- if lineno:
1462- linespec = "#L%d" % (lineno + 1)
1463- else:
1464- linespec = ""
1465-
1466- fn = relpath(fn, start=dirname(numpy.__file__))
1467-
1468- if 'dev' in numpy.__version__:
1469- return "http://github.com/numpy/numpy/blob/master/numpy/%s%s" % (
1470- fn, linespec)
1471- else:
1472- return "http://github.com/numpy/numpy/blob/v%s/numpy/%s%s" % (
1473- numpy.__version__, fn, linespec)
1474
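The linkcode_resolve() hook at the end of the removed conf.py follows the sphinx.ext.linkcode contract: Sphinx hands it a domain plus an info dict and expects back either a URL string or None. Below is a minimal sketch of that contract run outside Sphinx, using only the standard library; the 'inspect' lookup at the bottom is chosen purely to keep the example self-contained and is not part of the numpy docs build.

    # Sketch of the sphinx.ext.linkcode contract used by linkcode_resolve().
    import inspect
    import sys

    def resolve(domain, info):
        # Sphinx passes e.g. domain='py', info={'module': ..., 'fullname': ...}
        # and expects a URL string or None (meaning: no source link).
        if domain != 'py':
            return None
        mod = sys.modules.get(info['module'])
        if mod is None:
            return None
        obj = mod
        for part in info['fullname'].split('.'):
            obj = getattr(obj, part, None)
            if obj is None:
                return None
        try:
            fn = inspect.getsourcefile(obj)
            _, lineno = inspect.findsource(obj)   # lineno is 0-based
        except (TypeError, IOError, OSError):
            return None
        if not fn:
            return None
        return "%s#L%d" % (fn, lineno + 1)

    print(resolve('py', {'module': 'inspect', 'fullname': 'getsourcefile'}))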
1475=== removed directory '.pc/20_disable-plot-extension.patch'
1476=== removed directory '.pc/20_disable-plot-extension.patch/doc'
1477=== removed directory '.pc/20_disable-plot-extension.patch/doc/source'
1478=== removed file '.pc/20_disable-plot-extension.patch/doc/source/conf.py'
1479--- .pc/20_disable-plot-extension.patch/doc/source/conf.py 2014-01-03 17:10:24 +0000
1480+++ .pc/20_disable-plot-extension.patch/doc/source/conf.py 1970-01-01 00:00:00 +0000
1481@@ -1,331 +0,0 @@
1482-# -*- coding: utf-8 -*-
1483-from __future__ import division, absolute_import, print_function
1484-
1485-import sys, os, re
1486-
1487-# Check Sphinx version
1488-import sphinx
1489-if sphinx.__version__ < "1.0.1":
1490- raise RuntimeError("Sphinx 1.0.1 or newer required")
1491-
1492-needs_sphinx = '1.0'
1493-
1494-# -----------------------------------------------------------------------------
1495-# General configuration
1496-# -----------------------------------------------------------------------------
1497-
1498-# Add any Sphinx extension module names here, as strings. They can be extensions
1499-# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
1500-
1501-sys.path.insert(0, os.path.abspath('../sphinxext'))
1502-
1503-extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc',
1504- 'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
1505- 'sphinx.ext.doctest', 'sphinx.ext.autosummary',
1506- 'matplotlib.sphinxext.plot_directive']
1507-
1508-# Add any paths that contain templates here, relative to this directory.
1509-templates_path = ['_templates']
1510-
1511-# The suffix of source filenames.
1512-source_suffix = '.rst'
1513-
1514-# General substitutions.
1515-project = 'NumPy'
1516-copyright = '2008-2009, The Scipy community'
1517-
1518-# The default replacements for |version| and |release|, also used in various
1519-# other places throughout the built documents.
1520-#
1521-import numpy
1522-# The short X.Y version (including .devXXXX, rcX, b1 suffixes if present)
1523-version = re.sub(r'(\d+\.\d+)\.\d+(.*)', r'\1\2', numpy.__version__)
1524-version = re.sub(r'(\.dev\d+).*?$', r'\1', version)
1525-# The full version, including alpha/beta/rc tags.
1526-release = numpy.__version__
1527-print("%s %s" % (version, release))
1528-
1529-# There are two options for replacing |today|: either, you set today to some
1530-# non-false value, then it is used:
1531-#today = ''
1532-# Else, today_fmt is used as the format for a strftime call.
1533-today_fmt = '%B %d, %Y'
1534-
1535-# List of documents that shouldn't be included in the build.
1536-#unused_docs = []
1537-
1538-# The reST default role (used for this markup: `text`) to use for all documents.
1539-default_role = "autolink"
1540-
1541-# List of directories, relative to source directories, that shouldn't be searched
1542-# for source files.
1543-exclude_dirs = []
1544-
1545-# If true, '()' will be appended to :func: etc. cross-reference text.
1546-add_function_parentheses = False
1547-
1548-# If true, the current module name will be prepended to all description
1549-# unit titles (such as .. function::).
1550-#add_module_names = True
1551-
1552-# If true, sectionauthor and moduleauthor directives will be shown in the
1553-# output. They are ignored by default.
1554-#show_authors = False
1555-
1556-# The name of the Pygments (syntax highlighting) style to use.
1557-pygments_style = 'sphinx'
1558-
1559-
1560-# -----------------------------------------------------------------------------
1561-# HTML output
1562-# -----------------------------------------------------------------------------
1563-
1564-themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
1565-if not os.path.isdir(themedir):
1566- raise RuntimeError("Get the scipy-sphinx-theme first, "
1567- "via git submodule init && git submodule update")
1568-
1569-html_theme = 'scipy'
1570-html_theme_path = [themedir]
1571-
1572-if 'scipyorg' in tags:
1573- # Build for the scipy.org website
1574- html_theme_options = {
1575- "edit_link": True,
1576- "sidebar": "right",
1577- "scipy_org_logo": True,
1578- "rootlinks": [("http://scipy.org/", "Scipy.org"),
1579- ("http://docs.scipy.org/", "Docs")]
1580- }
1581-else:
1582- # Default build
1583- html_theme_options = {
1584- "edit_link": False,
1585- "sidebar": "left",
1586- "scipy_org_logo": False,
1587- "rootlinks": []
1588- }
1589- html_sidebars = {'index': 'indexsidebar.html'}
1590-
1591-html_additional_pages = {
1592- 'index': 'indexcontent.html',
1593-}
1594-
1595-html_title = "%s v%s Manual" % (project, version)
1596-html_static_path = ['_static']
1597-html_last_updated_fmt = '%b %d, %Y'
1598-
1599-html_use_modindex = True
1600-html_copy_source = False
1601-html_domain_indices = False
1602-html_file_suffix = '.html'
1603-
1604-htmlhelp_basename = 'numpy'
1605-
1606-pngmath_use_preview = True
1607-pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent']
1608-
1609-
1610-# -----------------------------------------------------------------------------
1611-# LaTeX output
1612-# -----------------------------------------------------------------------------
1613-
1614-# The paper size ('letter' or 'a4').
1615-#latex_paper_size = 'letter'
1616-
1617-# The font size ('10pt', '11pt' or '12pt').
1618-#latex_font_size = '10pt'
1619-
1620-# Grouping the document tree into LaTeX files. List of tuples
1621-# (source start file, target name, title, author, document class [howto/manual]).
1622-_stdauthor = 'Written by the NumPy community'
1623-latex_documents = [
1624- ('reference/index', 'numpy-ref.tex', 'NumPy Reference',
1625- _stdauthor, 'manual'),
1626- ('user/index', 'numpy-user.tex', 'NumPy User Guide',
1627- _stdauthor, 'manual'),
1628-]
1629-
1630-# The name of an image file (relative to this directory) to place at the top of
1631-# the title page.
1632-#latex_logo = None
1633-
1634-# For "manual" documents, if this is true, then toplevel headings are parts,
1635-# not chapters.
1636-#latex_use_parts = False
1637-
1638-# Additional stuff for the LaTeX preamble.
1639-latex_preamble = r'''
1640-\usepackage{amsmath}
1641-\DeclareUnicodeCharacter{00A0}{\nobreakspace}
1642-
1643-% In the parameters section, place a newline after the Parameters
1644-% header
1645-\usepackage{expdlist}
1646-\let\latexdescription=\description
1647-\def\description{\latexdescription{}{} \breaklabel}
1648-
1649-% Make Examples/etc section headers smaller and more compact
1650-\makeatletter
1651-\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
1652- {\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
1653-\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
1654-\makeatother
1655-
1656-% Fix footer/header
1657-\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
1658-\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
1659-'''
1660-
1661-# Documents to append as an appendix to all manuals.
1662-#latex_appendices = []
1663-
1664-# If false, no module index is generated.
1665-latex_use_modindex = False
1666-
1667-
1668-# -----------------------------------------------------------------------------
1669-# Texinfo output
1670-# -----------------------------------------------------------------------------
1671-
1672-texinfo_documents = [
1673- ("contents", 'numpy', 'Numpy Documentation', _stdauthor, 'Numpy',
1674- "NumPy: array processing for numbers, strings, records, and objects.",
1675- 'Programming',
1676- 1),
1677-]
1678-
1679-
1680-# -----------------------------------------------------------------------------
1681-# Intersphinx configuration
1682-# -----------------------------------------------------------------------------
1683-intersphinx_mapping = {'http://docs.python.org/dev': '../../debian/python.org_objects.inv'}
1684-
1685-
1686-# -----------------------------------------------------------------------------
1687-# Numpy extensions
1688-# -----------------------------------------------------------------------------
1689-
1690-# If we want to do a phantom import from an XML file for all autodocs
1691-phantom_import_file = 'dump.xml'
1692-
1693-# Make numpydoc generate plots for example sections
1694-numpydoc_use_plots = True
1695-
1696-# -----------------------------------------------------------------------------
1697-# Autosummary
1698-# -----------------------------------------------------------------------------
1699-
1700-import glob
1701-autosummary_generate = glob.glob("reference/*.rst")
1702-
1703-# -----------------------------------------------------------------------------
1704-# Coverage checker
1705-# -----------------------------------------------------------------------------
1706-coverage_ignore_modules = r"""
1707- """.split()
1708-coverage_ignore_functions = r"""
1709- test($|_) (some|all)true bitwise_not cumproduct pkgload
1710- generic\.
1711- """.split()
1712-coverage_ignore_classes = r"""
1713- """.split()
1714-
1715-coverage_c_path = []
1716-coverage_c_regexes = {}
1717-coverage_ignore_c_items = {}
1718-
1719-
1720-# -----------------------------------------------------------------------------
1721-# Plots
1722-# -----------------------------------------------------------------------------
1723-plot_pre_code = """
1724-import numpy as np
1725-np.random.seed(0)
1726-"""
1727-plot_include_source = True
1728-plot_formats = [('png', 100), 'pdf']
1729-
1730-import math
1731-phi = (math.sqrt(5) + 1)/2
1732-
1733-plot_rcparams = {
1734- 'font.size': 8,
1735- 'axes.titlesize': 8,
1736- 'axes.labelsize': 8,
1737- 'xtick.labelsize': 8,
1738- 'ytick.labelsize': 8,
1739- 'legend.fontsize': 8,
1740- 'figure.figsize': (3*phi, 3),
1741- 'figure.subplot.bottom': 0.2,
1742- 'figure.subplot.left': 0.2,
1743- 'figure.subplot.right': 0.9,
1744- 'figure.subplot.top': 0.85,
1745- 'figure.subplot.wspace': 0.4,
1746- 'text.usetex': False,
1747-}
1748-
1749-
1750-# -----------------------------------------------------------------------------
1751-# Source code links
1752-# -----------------------------------------------------------------------------
1753-
1754-import inspect
1755-from os.path import relpath, dirname
1756-
1757-for name in ['sphinx.ext.linkcode', 'numpydoc.linkcode']:
1758- try:
1759- __import__(name)
1760- extensions.append(name)
1761- break
1762- except ImportError:
1763- pass
1764-else:
1765- print("NOTE: linkcode extension not found -- no links to source generated")
1766-
1767-def linkcode_resolve(domain, info):
1768- """
1769- Determine the URL corresponding to Python object
1770- """
1771- if domain != 'py':
1772- return None
1773-
1774- modname = info['module']
1775- fullname = info['fullname']
1776-
1777- submod = sys.modules.get(modname)
1778- if submod is None:
1779- return None
1780-
1781- obj = submod
1782- for part in fullname.split('.'):
1783- try:
1784- obj = getattr(obj, part)
1785- except:
1786- return None
1787-
1788- try:
1789- fn = inspect.getsourcefile(obj)
1790- except:
1791- fn = None
1792- if not fn:
1793- return None
1794-
1795- try:
1796- source, lineno = inspect.findsource(obj)
1797- except:
1798- lineno = None
1799-
1800- if lineno:
1801- linespec = "#L%d" % (lineno + 1)
1802- else:
1803- linespec = ""
1804-
1805- fn = relpath(fn, start=dirname(numpy.__file__))
1806-
1807- if 'dev' in numpy.__version__:
1808- return "http://github.com/numpy/numpy/blob/master/numpy/%s%s" % (
1809- fn, linespec)
1810- else:
1811- return "http://github.com/numpy/numpy/blob/v%s/numpy/%s%s" % (
1812- numpy.__version__, fn, linespec)
1813
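As far as the content shown here goes, what separates this backup copy from the one above is the intersphinx target: the Debian patch points Sphinx at a shipped python.org_objects.inv so the documentation can build without network access. A minimal sketch of the two forms; the local path is the one from the diff and is assumed to exist only inside the package build tree.

    # None makes Sphinx fetch objects.inv from the URL at build time; a
    # string makes it read that inventory file instead.
    intersphinx_online = {'http://docs.python.org/dev': None}
    intersphinx_offline = {
        'http://docs.python.org/dev': '../../debian/python.org_objects.inv',
    }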
1814=== removed file '.pc/applied-patches'
1815--- .pc/applied-patches 2014-01-03 17:10:24 +0000
1816+++ .pc/applied-patches 1970-01-01 00:00:00 +0000
1817@@ -1,6 +0,0 @@
1818-02_build_dotblas.patch
1819-03_force_f2py_version.patch
1820-10_use_local_python.org_object.inv_sphinx.diff
1821-20_disable-plot-extension.patch
1822-ppc64el_cpu_config.patch
1823-python3-soabi.patch
1824
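The whole .pc/ tree being removed here is quilt's bookkeeping area: applied-patches lists the patches currently pushed, in order, and .pc/<patch>/ keeps a pristine copy of every file that patch touches, so quilt regenerates it when the patches are applied at build time. A small sketch of how the two state files relate, assuming only the conventional quilt paths in an unpacked Debian source tree (nothing below is defined by this package's build scripts):

    # Compare quilt's series file with its applied-patches state file.
    import os

    def unapplied_patches(tree='.'):
        def read(path):
            try:
                with open(path) as fh:
                    return [l.strip() for l in fh
                            if l.strip() and not l.startswith('#')]
            except (IOError, OSError):
                return []
        series = read(os.path.join(tree, 'debian', 'patches', 'series'))
        applied = set(read(os.path.join(tree, '.pc', 'applied-patches')))
        return [p for p in series if p not in applied]

    print(unapplied_patches())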
1825=== removed directory '.pc/ppc64el_cpu_config.patch'
1826=== removed directory '.pc/ppc64el_cpu_config.patch/numpy'
1827=== removed directory '.pc/ppc64el_cpu_config.patch/numpy/core'
1828=== removed directory '.pc/ppc64el_cpu_config.patch/numpy/core/include'
1829=== removed directory '.pc/ppc64el_cpu_config.patch/numpy/core/include/numpy'
1830=== removed file '.pc/ppc64el_cpu_config.patch/numpy/core/include/numpy/npy_cpu.h'
1831--- .pc/ppc64el_cpu_config.patch/numpy/core/include/numpy/npy_cpu.h 2013-12-15 14:33:37 +0000
1832+++ .pc/ppc64el_cpu_config.patch/numpy/core/include/numpy/npy_cpu.h 1970-01-01 00:00:00 +0000
1833@@ -1,111 +0,0 @@
1834-/*
1835- * This sets the (target) CPU specific macros:
1836- * - Possible values:
1837- * NPY_CPU_X86
1838- * NPY_CPU_AMD64
1839- * NPY_CPU_PPC
1840- * NPY_CPU_PPC64
1841- * NPY_CPU_SPARC
1842- * NPY_CPU_S390
1843- * NPY_CPU_IA64
1844- * NPY_CPU_HPPA
1845- * NPY_CPU_ALPHA
1846- * NPY_CPU_ARMEL
1847- * NPY_CPU_ARMEB
1848- * NPY_CPU_SH_LE
1849- * NPY_CPU_SH_BE
1850- */
1851-#ifndef _NPY_CPUARCH_H_
1852-#define _NPY_CPUARCH_H_
1853-
1854-#include "numpyconfig.h"
1855-
1856-#if defined( __i386__ ) || defined(i386) || defined(_M_IX86)
1857- /*
1858- * __i386__ is defined by gcc and Intel compiler on Linux,
1859- * _M_IX86 by VS compiler,
1860- * i386 by Sun compilers on opensolaris at least
1861- */
1862- #define NPY_CPU_X86
1863-#elif defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(_M_AMD64)
1864- /*
1865- * both __x86_64__ and __amd64__ are defined by gcc
1866- * __x86_64 defined by sun compiler on opensolaris at least
1867- * _M_AMD64 defined by MS compiler
1868- */
1869- #define NPY_CPU_AMD64
1870-#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC)
1871- /*
1872- * __ppc__ is defined by gcc, I remember having seen __powerpc__ once,
1873- * but can't find it ATM
1874- * _ARCH_PPC is used by at least gcc on AIX
1875- */
1876- #define NPY_CPU_PPC
1877-#elif defined(__ppc64__)
1878- #define NPY_CPU_PPC64
1879-#elif defined(__sparc__) || defined(__sparc)
1880- /* __sparc__ is defined by gcc and Forte (e.g. Sun) compilers */
1881- #define NPY_CPU_SPARC
1882-#elif defined(__s390__)
1883- #define NPY_CPU_S390
1884-#elif defined(__ia64)
1885- #define NPY_CPU_IA64
1886-#elif defined(__hppa)
1887- #define NPY_CPU_HPPA
1888-#elif defined(__alpha__)
1889- #define NPY_CPU_ALPHA
1890-#elif defined(__arm__) && defined(__ARMEL__)
1891- #define NPY_CPU_ARMEL
1892-#elif defined(__arm__) && defined(__ARMEB__)
1893- #define NPY_CPU_ARMEB
1894-#elif defined(__sh__) && defined(__LITTLE_ENDIAN__)
1895- #define NPY_CPU_SH_LE
1896-#elif defined(__sh__) && defined(__BIG_ENDIAN__)
1897- #define NPY_CPU_SH_BE
1898-#elif defined(__MIPSEL__)
1899- #define NPY_CPU_MIPSEL
1900-#elif defined(__MIPSEB__)
1901- #define NPY_CPU_MIPSEB
1902-#elif defined(__aarch64__)
1903- #define NPY_CPU_AARCH64
1904-#elif defined(__mc68000__)
1905- #define NPY_CPU_M68K
1906-#else
1907- #error Unknown CPU, please report this to numpy maintainers with \
1908- information about your platform (OS, CPU and compiler)
1909-#endif
1910-
1911-/*
1912- This "white-lists" the architectures that we know don't require
1913- pointer alignment. We white-list, since the memcpy version will
1914- work everywhere, whereas assignment will only work where pointer
1915- dereferencing doesn't require alignment.
1916-
1917- TODO: There may be more architectures we can white list.
1918-*/
1919-#if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64)
1920- #define NPY_COPY_PYOBJECT_PTR(dst, src) (*((PyObject **)(dst)) = *((PyObject **)(src)))
1921-#else
1922- #if NPY_SIZEOF_PY_INTPTR_T == 4
1923- #define NPY_COPY_PYOBJECT_PTR(dst, src) \
1924- ((char*)(dst))[0] = ((char*)(src))[0]; \
1925- ((char*)(dst))[1] = ((char*)(src))[1]; \
1926- ((char*)(dst))[2] = ((char*)(src))[2]; \
1927- ((char*)(dst))[3] = ((char*)(src))[3];
1928- #elif NPY_SIZEOF_PY_INTPTR_T == 8
1929- #define NPY_COPY_PYOBJECT_PTR(dst, src) \
1930- ((char*)(dst))[0] = ((char*)(src))[0]; \
1931- ((char*)(dst))[1] = ((char*)(src))[1]; \
1932- ((char*)(dst))[2] = ((char*)(src))[2]; \
1933- ((char*)(dst))[3] = ((char*)(src))[3]; \
1934- ((char*)(dst))[4] = ((char*)(src))[4]; \
1935- ((char*)(dst))[5] = ((char*)(src))[5]; \
1936- ((char*)(dst))[6] = ((char*)(src))[6]; \
1937- ((char*)(dst))[7] = ((char*)(src))[7];
1938- #else
1939- #error Unknown architecture, please report this to numpy maintainers with \
1940- information about your platform (OS, CPU and compiler)
1941- #endif
1942-#endif
1943-
1944-#endif
1945
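npy_cpu.h maps compiler-defined symbols to a single NPY_CPU_* macro and then whitelists x86 and amd64 for copying PyObject pointers by plain assignment; everywhere else the copy is done byte by byte so an unaligned dereference cannot trap. A rough runtime analogue follows, for illustration only; the machine-string table is an assumption covering a few common values of platform.machine(), not the header's complete list.

    # Rough runtime analogue of the compile-time CPU selection in npy_cpu.h.
    import platform

    MACHINE_TO_NPY_CPU = {
        'x86_64': 'NPY_CPU_AMD64',
        'AMD64': 'NPY_CPU_AMD64',
        'i386': 'NPY_CPU_X86',
        'i686': 'NPY_CPU_X86',
        'ppc64': 'NPY_CPU_PPC64',
        'aarch64': 'NPY_CPU_AARCH64',
    }

    # x86/amd64 tolerate unaligned pointer loads, so npy_cpu.h lets them copy
    # PyObject* slots by plain assignment instead of a byte-wise copy.
    UNALIGNED_PTR_OK = {'NPY_CPU_X86', 'NPY_CPU_AMD64'}

    cpu = MACHINE_TO_NPY_CPU.get(platform.machine(), 'unknown')
    print(cpu, cpu in UNALIGNED_PTR_OK)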
1946=== removed file '.pc/ppc64el_cpu_config.patch/numpy/core/include/numpy/npy_endian.h'
1947--- .pc/ppc64el_cpu_config.patch/numpy/core/include/numpy/npy_endian.h 2013-12-15 14:33:37 +0000
1948+++ .pc/ppc64el_cpu_config.patch/numpy/core/include/numpy/npy_endian.h 1970-01-01 00:00:00 +0000
1949@@ -1,47 +0,0 @@
1950-#ifndef _NPY_ENDIAN_H_
1951-#define _NPY_ENDIAN_H_
1952-
1953-/*
1954- * NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in
1955- * endian.h
1956- */
1957-
1958-#ifdef NPY_HAVE_ENDIAN_H
1959- /* Use endian.h if available */
1960- #include <endian.h>
1961-
1962- #define NPY_BYTE_ORDER __BYTE_ORDER
1963- #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN
1964- #define NPY_BIG_ENDIAN __BIG_ENDIAN
1965-#else
1966- /* Set endianness info using target CPU */
1967- #include "npy_cpu.h"
1968-
1969- #define NPY_LITTLE_ENDIAN 1234
1970- #define NPY_BIG_ENDIAN 4321
1971-
1972- #if defined(NPY_CPU_X86) \
1973- || defined(NPY_CPU_AMD64) \
1974- || defined(NPY_CPU_IA64) \
1975- || defined(NPY_CPU_ALPHA) \
1976- || defined(NPY_CPU_ARMEL) \
1977- || defined(NPY_CPU_AARCH64) \
1978- || defined(NPY_CPU_SH_LE) \
1979- || defined(NPY_CPU_MIPSEL)
1980- #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN
1981- #elif defined(NPY_CPU_PPC) \
1982- || defined(NPY_CPU_SPARC) \
1983- || defined(NPY_CPU_S390) \
1984- || defined(NPY_CPU_HPPA) \
1985- || defined(NPY_CPU_PPC64) \
1986- || defined(NPY_CPU_ARMEB) \
1987- || defined(NPY_CPU_SH_BE) \
1988- || defined(NPY_CPU_MIPSEB) \
1989- || defined(NPY_CPU_M68K)
1990- #define NPY_BYTE_ORDER NPY_BIG_ENDIAN
1991- #else
1992- #error Unknown CPU: can not set endianness
1993- #endif
1994-#endif
1995-
1996-#endif
1997
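npy_endian.h fixes NPY_BYTE_ORDER at compile time, either from glibc's endian.h or from the NPY_CPU_* macro chosen in npy_cpu.h. The same information is visible at run time; a short sketch, assuming the packaged numpy is importable:

    # Runtime view of the byte order that npy_endian.h fixes at compile time.
    import sys
    import numpy as np

    print(sys.byteorder)                # 'little' or 'big'
    native = np.dtype('f8')             # '=' means native order
    swapped = native.newbyteorder()     # explicit non-native order
    print(native.byteorder, swapped.byteorder)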
1998=== removed file '.pc/ppc64el_cpu_config.patch/numpy/core/setup.py'
1999--- .pc/ppc64el_cpu_config.patch/numpy/core/setup.py 2014-01-03 17:10:24 +0000
2000+++ .pc/ppc64el_cpu_config.patch/numpy/core/setup.py 1970-01-01 00:00:00 +0000
2001@@ -1,990 +0,0 @@
2002-from __future__ import division, print_function
2003-
2004-import imp
2005-import os
2006-import sys
2007-import shutil
2008-import pickle
2009-import copy
2010-import warnings
2011-import re
2012-from os.path import join
2013-from numpy.distutils import log
2014-from distutils.dep_util import newer
2015-from distutils.sysconfig import get_config_var
2016-
2017-from setup_common import *
2018-
2019-# Set to True to enable multiple file compilations (experimental)
2020-ENABLE_SEPARATE_COMPILATION = (os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0")
2021-# Set to True to enable relaxed strides checking. This (mostly) means
2022-# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.
2023-NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "0") != "0")
2024-
2025-# XXX: ugly, we use a class to avoid calling twice some expensive functions in
2026-# config.h/numpyconfig.h. I don't see a better way because distutils force
2027-# config.h generation inside an Extension class, and as such sharing
2028-# configuration information between extensions is not easy.
2029-# Using a pickled-based memoize does not work because config_cmd is an instance
2030-# method, which cPickle does not like.
2031-#
2032-# Use pickle in all cases, as cPickle is gone in python3 and the difference
2033-# in time is only in build. -- Charles Harris, 2013-03-30
2034-
2035-class CallOnceOnly(object):
2036- def __init__(self):
2037- self._check_types = None
2038- self._check_ieee_macros = None
2039- self._check_complex = None
2040-
2041- def check_types(self, *a, **kw):
2042- if self._check_types is None:
2043- out = check_types(*a, **kw)
2044- self._check_types = pickle.dumps(out)
2045- else:
2046- out = copy.deepcopy(pickle.loads(self._check_types))
2047- return out
2048-
2049- def check_ieee_macros(self, *a, **kw):
2050- if self._check_ieee_macros is None:
2051- out = check_ieee_macros(*a, **kw)
2052- self._check_ieee_macros = pickle.dumps(out)
2053- else:
2054- out = copy.deepcopy(pickle.loads(self._check_ieee_macros))
2055- return out
2056-
2057- def check_complex(self, *a, **kw):
2058- if self._check_complex is None:
2059- out = check_complex(*a, **kw)
2060- self._check_complex = pickle.dumps(out)
2061- else:
2062- out = copy.deepcopy(pickle.loads(self._check_complex))
2063- return out
2064-
2065-PYTHON_HAS_UNICODE_WIDE = True
2066-
2067-def pythonlib_dir():
2068- """return path where libpython* is."""
2069- if sys.platform == 'win32':
2070- return os.path.join(sys.prefix, "libs")
2071- else:
2072- return get_config_var('LIBDIR')
2073-
2074-def is_npy_no_signal():
2075- """Return True if the NPY_NO_SIGNAL symbol must be defined in configuration
2076- header."""
2077- return sys.platform == 'win32'
2078-
2079-def is_npy_no_smp():
2080- """Return True if the NPY_NO_SMP symbol must be defined in public
2081- header (when SMP support cannot be reliably enabled)."""
2082- # Python 2.3 causes a segfault when
2083- # trying to re-acquire the thread-state
2084- # which is done in error-handling
2085- # ufunc code. NPY_ALLOW_C_API and friends
2086- # cause the segfault. So, we disable threading
2087- # for now.
2088- if sys.version[:5] < '2.4.2':
2089- nosmp = 1
2090- else:
2091- # Perhaps a fancier check is in order here.
2092- # so that threads are only enabled if there
2093- # are actually multiple CPUS? -- but
2094- # threaded code can be nice even on a single
2095- # CPU so that long-calculating code doesn't
2096- # block.
2097- try:
2098- nosmp = os.environ['NPY_NOSMP']
2099- nosmp = 1
2100- except KeyError:
2101- nosmp = 0
2102- return nosmp == 1
2103-
2104-def win32_checks(deflist):
2105- from numpy.distutils.misc_util import get_build_architecture
2106- a = get_build_architecture()
2107-
2108- # Distutils hack on AMD64 on windows
2109- print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %
2110- (a, os.name, sys.platform))
2111- if a == 'AMD64':
2112- deflist.append('DISTUTILS_USE_SDK')
2113-
2114- # On win32, force long double format string to be 'g', not
2115- # 'Lg', since the MS runtime does not support long double whose
2116- # size is > sizeof(double)
2117- if a == "Intel" or a == "AMD64":
2118- deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')
2119-
2120-def check_math_capabilities(config, moredefs, mathlibs):
2121- def check_func(func_name):
2122- return config.check_func(func_name, libraries=mathlibs,
2123- decl=True, call=True)
2124-
2125- def check_funcs_once(funcs_name):
2126- decl = dict([(f, True) for f in funcs_name])
2127- st = config.check_funcs_once(funcs_name, libraries=mathlibs,
2128- decl=decl, call=decl)
2129- if st:
2130- moredefs.extend([(fname2def(f), 1) for f in funcs_name])
2131- return st
2132-
2133- def check_funcs(funcs_name):
2134- # Use check_funcs_once first, and if it does not work, test func per
2135- # func. Return success only if all the functions are available
2136- if not check_funcs_once(funcs_name):
2137- # Global check failed, check func per func
2138- for f in funcs_name:
2139- if check_func(f):
2140- moredefs.append((fname2def(f), 1))
2141- return 0
2142- else:
2143- return 1
2144-
2145- #use_msvc = config.check_decl("_MSC_VER")
2146-
2147- if not check_funcs_once(MANDATORY_FUNCS):
2148- raise SystemError("One of the required function to build numpy is not"
2149- " available (the list is %s)." % str(MANDATORY_FUNCS))
2150-
2151- # Standard functions which may not be available and for which we have a
2152- # replacement implementation. Note that some of these are C99 functions.
2153-
2154- # XXX: hack to circumvent cpp pollution from python: python put its
2155- # config.h in the public namespace, so we have a clash for the common
2156- # functions we test. We remove every function tested by python's
2157-    # autoconf, hoping their own tests are correct
2158- for f in OPTIONAL_STDFUNCS_MAYBE:
2159- if config.check_decl(fname2def(f),
2160- headers=["Python.h", "math.h"]):
2161- OPTIONAL_STDFUNCS.remove(f)
2162-
2163- check_funcs(OPTIONAL_STDFUNCS)
2164-
2165- for h in OPTIONAL_HEADERS:
2166- if config.check_func("", decl=False, call=False, headers=[h]):
2167- moredefs.append((fname2def(h).replace(".", "_"), 1))
2168-
2169- for tup in OPTIONAL_INTRINSICS:
2170- headers = None
2171- if len(tup) == 2:
2172- f, args = tup
2173- else:
2174- f, args, headers = tup[0], tup[1], [tup[2]]
2175- if config.check_func(f, decl=False, call=True, call_args=args,
2176- headers=headers):
2177- moredefs.append((fname2def(f), 1))
2178-
2179- for dec, fn in OPTIONAL_GCC_ATTRIBUTES:
2180- if config.check_funcs_once([fn],
2181- decl=dict((('%s %s' % (dec, fn), True),)),
2182- call=False):
2183- moredefs.append((fname2def(fn), 1))
2184-
2185- # C99 functions: float and long double versions
2186- check_funcs(C99_FUNCS_SINGLE)
2187- check_funcs(C99_FUNCS_EXTENDED)
2188-
2189-def check_complex(config, mathlibs):
2190- priv = []
2191- pub = []
2192-
2193- try:
2194- if os.uname()[0] == "Interix":
2195- warnings.warn("Disabling broken complex support. See #1365")
2196- return priv, pub
2197- except:
2198- # os.uname not available on all platforms. blanket except ugly but safe
2199- pass
2200-
2201- # Check for complex support
2202- st = config.check_header('complex.h')
2203- if st:
2204- priv.append(('HAVE_COMPLEX_H', 1))
2205- pub.append(('NPY_USE_C99_COMPLEX', 1))
2206-
2207- for t in C99_COMPLEX_TYPES:
2208- st = config.check_type(t, headers=["complex.h"])
2209- if st:
2210- pub.append(('NPY_HAVE_%s' % type2def(t), 1))
2211-
2212- def check_prec(prec):
2213- flist = [f + prec for f in C99_COMPLEX_FUNCS]
2214- decl = dict([(f, True) for f in flist])
2215- if not config.check_funcs_once(flist, call=decl, decl=decl,
2216- libraries=mathlibs):
2217- for f in flist:
2218- if config.check_func(f, call=True, decl=True,
2219- libraries=mathlibs):
2220- priv.append((fname2def(f), 1))
2221- else:
2222- priv.extend([(fname2def(f), 1) for f in flist])
2223-
2224- check_prec('')
2225- check_prec('f')
2226- check_prec('l')
2227-
2228- return priv, pub
2229-
2230-def check_ieee_macros(config):
2231- priv = []
2232- pub = []
2233-
2234- macros = []
2235-
2236- def _add_decl(f):
2237- priv.append(fname2def("decl_%s" % f))
2238- pub.append('NPY_%s' % fname2def("decl_%s" % f))
2239-
2240- # XXX: hack to circumvent cpp pollution from python: python put its
2241- # config.h in the public namespace, so we have a clash for the common
2242- # functions we test. We remove every function tested by python's
2243-    # autoconf, hoping their own tests are correct
2244- _macros = ["isnan", "isinf", "signbit", "isfinite"]
2245- for f in _macros:
2246- py_symbol = fname2def("decl_%s" % f)
2247- already_declared = config.check_decl(py_symbol,
2248- headers=["Python.h", "math.h"])
2249- if already_declared:
2250- if config.check_macro_true(py_symbol,
2251- headers=["Python.h", "math.h"]):
2252- pub.append('NPY_%s' % fname2def("decl_%s" % f))
2253- else:
2254- macros.append(f)
2255- # Normally, isnan and isinf are macro (C99), but some platforms only have
2256- # func, or both func and macro version. Check for macro only, and define
2257- # replacement ones if not found.
2258- # Note: including Python.h is necessary because it modifies some math.h
2259- # definitions
2260- for f in macros:
2261- st = config.check_decl(f, headers = ["Python.h", "math.h"])
2262- if st:
2263- _add_decl(f)
2264-
2265- return priv, pub
2266-
2267-def check_types(config_cmd, ext, build_dir):
2268- private_defines = []
2269- public_defines = []
2270-
2271- # Expected size (in number of bytes) for each type. This is an
2272- # optimization: those are only hints, and an exhaustive search for the size
2273- # is done if the hints are wrong.
2274- expected = {}
2275- expected['short'] = [2]
2276- expected['int'] = [4]
2277- expected['long'] = [8, 4]
2278- expected['float'] = [4]
2279- expected['double'] = [8]
2280- expected['long double'] = [8, 12, 16]
2281- expected['Py_intptr_t'] = [4, 8]
2282- expected['PY_LONG_LONG'] = [8]
2283- expected['long long'] = [8]
2284-
2285- # Check we have the python header (-dev* packages on Linux)
2286- result = config_cmd.check_header('Python.h')
2287- if not result:
2288- raise SystemError(
2289- "Cannot compile 'Python.h'. Perhaps you need to "\
2290- "install python-dev|python-devel.")
2291- res = config_cmd.check_header("endian.h")
2292- if res:
2293- private_defines.append(('HAVE_ENDIAN_H', 1))
2294- public_defines.append(('NPY_HAVE_ENDIAN_H', 1))
2295-
2296- # Check basic types sizes
2297- for type in ('short', 'int', 'long'):
2298- res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers = ["Python.h"])
2299- if res:
2300- public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type)))
2301- else:
2302- res = config_cmd.check_type_size(type, expected=expected[type])
2303- if res >= 0:
2304- public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
2305- else:
2306- raise SystemError("Checking sizeof (%s) failed !" % type)
2307-
2308- for type in ('float', 'double', 'long double'):
2309- already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type),
2310- headers = ["Python.h"])
2311- res = config_cmd.check_type_size(type, expected=expected[type])
2312- if res >= 0:
2313- public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
2314- if not already_declared and not type == 'long double':
2315- private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
2316- else:
2317- raise SystemError("Checking sizeof (%s) failed !" % type)
2318-
2319- # Compute size of corresponding complex type: used to check that our
2320- # definition is binary compatible with C99 complex type (check done at
2321- # build time in npy_common.h)
2322- complex_def = "struct {%s __x; %s __y;}" % (type, type)
2323- res = config_cmd.check_type_size(complex_def, expected=2*expected[type])
2324- if res >= 0:
2325- public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))
2326- else:
2327- raise SystemError("Checking sizeof (%s) failed !" % complex_def)
2328-
2329-
2330- for type in ('Py_intptr_t',):
2331- res = config_cmd.check_type_size(type, headers=["Python.h"],
2332- library_dirs=[pythonlib_dir()],
2333- expected=expected[type])
2334-
2335- if res >= 0:
2336- private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
2337- public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
2338- else:
2339- raise SystemError("Checking sizeof (%s) failed !" % type)
2340-
2341- # We check declaration AND type because that's how distutils does it.
2342- if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):
2343- res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],
2344- library_dirs=[pythonlib_dir()],
2345- expected=expected['PY_LONG_LONG'])
2346- if res >= 0:
2347- private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
2348- public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
2349- else:
2350- raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG')
2351-
2352- res = config_cmd.check_type_size('long long',
2353- expected=expected['long long'])
2354- if res >= 0:
2355- #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))
2356- public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))
2357- else:
2358- raise SystemError("Checking sizeof (%s) failed !" % 'long long')
2359-
2360- if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):
2361- raise RuntimeError(
2362- "Config wo CHAR_BIT is not supported"\
2363- ", please contact the maintainers")
2364-
2365- return private_defines, public_defines
2366-
2367-def check_mathlib(config_cmd):
2368- # Testing the C math library
2369- mathlibs = []
2370- mathlibs_choices = [[], ['m'], ['cpml']]
2371- mathlib = os.environ.get('MATHLIB')
2372- if mathlib:
2373- mathlibs_choices.insert(0, mathlib.split(','))
2374- for libs in mathlibs_choices:
2375- if config_cmd.check_func("exp", libraries=libs, decl=True, call=True):
2376- mathlibs = libs
2377- break
2378- else:
2379- raise EnvironmentError("math library missing; rerun "
2380- "setup.py after setting the "
2381- "MATHLIB env variable")
2382- return mathlibs
2383-
2384-def visibility_define(config):
2385- """Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
2386- string)."""
2387- if config.check_compiler_gcc4():
2388- return '__attribute__((visibility("hidden")))'
2389- else:
2390- return ''
2391-
2392-def configuration(parent_package='',top_path=None):
2393- from numpy.distutils.misc_util import Configuration, dot_join
2394- from numpy.distutils.system_info import get_info, default_lib_dirs
2395-
2396- config = Configuration('core', parent_package, top_path)
2397- local_dir = config.local_path
2398- codegen_dir = join(local_dir, 'code_generators')
2399-
2400- if is_released(config):
2401- warnings.simplefilter('error', MismatchCAPIWarning)
2402-
2403- # Check whether we have a mismatch between the set C API VERSION and the
2404- # actual C API VERSION
2405- check_api_version(C_API_VERSION, codegen_dir)
2406-
2407- generate_umath_py = join(codegen_dir, 'generate_umath.py')
2408- n = dot_join(config.name, 'generate_umath')
2409- generate_umath = imp.load_module('_'.join(n.split('.')),
2410- open(generate_umath_py, 'U'), generate_umath_py,
2411- ('.py', 'U', 1))
2412-
2413- header_dir = 'include/numpy' # this is relative to config.path_in_package
2414-
2415- cocache = CallOnceOnly()
2416-
2417- def generate_config_h(ext, build_dir):
2418- target = join(build_dir, header_dir, 'config.h')
2419- d = os.path.dirname(target)
2420- if not os.path.exists(d):
2421- os.makedirs(d)
2422-
2423- if newer(__file__, target):
2424- config_cmd = config.get_config_cmd()
2425- log.info('Generating %s', target)
2426-
2427- # Check sizeof
2428- moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)
2429-
2430- # Check math library and C99 math funcs availability
2431- mathlibs = check_mathlib(config_cmd)
2432- moredefs.append(('MATHLIB', ','.join(mathlibs)))
2433-
2434- check_math_capabilities(config_cmd, moredefs, mathlibs)
2435- moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
2436- moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])
2437-
2438- # Signal check
2439- if is_npy_no_signal():
2440- moredefs.append('__NPY_PRIVATE_NO_SIGNAL')
2441-
2442- # Windows checks
2443- if sys.platform=='win32' or os.name=='nt':
2444- win32_checks(moredefs)
2445-
2446- # Inline check
2447- inline = config_cmd.check_inline()
2448-
2449- # Check whether we need our own wide character support
2450- if not config_cmd.check_decl('Py_UNICODE_WIDE', headers=['Python.h']):
2451- PYTHON_HAS_UNICODE_WIDE = True
2452- else:
2453- PYTHON_HAS_UNICODE_WIDE = False
2454-
2455- if ENABLE_SEPARATE_COMPILATION:
2456- moredefs.append(('ENABLE_SEPARATE_COMPILATION', 1))
2457-
2458- if NPY_RELAXED_STRIDES_CHECKING:
2459- moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
2460-
2461- # Get long double representation
2462- if sys.platform != 'darwin':
2463- rep = check_long_double_representation(config_cmd)
2464- if rep in ['INTEL_EXTENDED_12_BYTES_LE',
2465- 'INTEL_EXTENDED_16_BYTES_LE',
2466- 'MOTOROLA_EXTENDED_12_BYTES_BE',
2467- 'IEEE_QUAD_LE', 'IEEE_QUAD_BE',
2468- 'IEEE_DOUBLE_LE', 'IEEE_DOUBLE_BE',
2469- 'DOUBLE_DOUBLE_BE']:
2470- moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
2471- else:
2472- raise ValueError("Unrecognized long double format: %s" % rep)
2473-
2474- # Py3K check
2475- if sys.version_info[0] == 3:
2476- moredefs.append(('NPY_PY3K', 1))
2477-
2478- # Generate the config.h file from moredefs
2479- target_f = open(target, 'w')
2480- for d in moredefs:
2481- if isinstance(d, str):
2482- target_f.write('#define %s\n' % (d))
2483- else:
2484- target_f.write('#define %s %s\n' % (d[0], d[1]))
2485-
2486- # define inline to our keyword, or nothing
2487- target_f.write('#ifndef __cplusplus\n')
2488- if inline == 'inline':
2489- target_f.write('/* #undef inline */\n')
2490- else:
2491- target_f.write('#define inline %s\n' % inline)
2492- target_f.write('#endif\n')
2493-
2494- # add the guard to make sure config.h is never included directly,
2495- # but always through npy_config.h
2496- target_f.write("""
2497-#ifndef _NPY_NPY_CONFIG_H_
2498-#error config.h should never be included directly, include npy_config.h instead
2499-#endif
2500-""")
2501-
2502- target_f.close()
2503- print('File:', target)
2504- target_f = open(target)
2505- print(target_f.read())
2506- target_f.close()
2507- print('EOF')
2508- else:
2509- mathlibs = []
2510- target_f = open(target)
2511- for line in target_f:
2512- s = '#define MATHLIB'
2513- if line.startswith(s):
2514- value = line[len(s):].strip()
2515- if value:
2516- mathlibs.extend(value.split(','))
2517- target_f.close()
2518-
2519- # Ugly: this can be called within a library and not an extension,
2520- # in which case there is no libraries attributes (and none is
2521- # needed).
2522- if hasattr(ext, 'libraries'):
2523- ext.libraries.extend(mathlibs)
2524-
2525- incl_dir = os.path.dirname(target)
2526- if incl_dir not in config.numpy_include_dirs:
2527- config.numpy_include_dirs.append(incl_dir)
2528-
2529- return target
2530-
2531- def generate_numpyconfig_h(ext, build_dir):
2532- """Depends on config.h: generate_config_h has to be called before !"""
2533- target = join(build_dir, header_dir, '_numpyconfig.h')
2534- d = os.path.dirname(target)
2535- if not os.path.exists(d):
2536- os.makedirs(d)
2537- if newer(__file__, target):
2538- config_cmd = config.get_config_cmd()
2539- log.info('Generating %s', target)
2540-
2541- # Check sizeof
2542- ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)
2543-
2544- if is_npy_no_signal():
2545- moredefs.append(('NPY_NO_SIGNAL', 1))
2546-
2547- if is_npy_no_smp():
2548- moredefs.append(('NPY_NO_SMP', 1))
2549- else:
2550- moredefs.append(('NPY_NO_SMP', 0))
2551-
2552- mathlibs = check_mathlib(config_cmd)
2553- moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
2554- moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])
2555-
2556- if ENABLE_SEPARATE_COMPILATION:
2557- moredefs.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1))
2558-
2559- if NPY_RELAXED_STRIDES_CHECKING:
2560- moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
2561-
2562-        # Check whether we can use inttypes (C99) formats
2563- if config_cmd.check_decl('PRIdPTR', headers = ['inttypes.h']):
2564- moredefs.append(('NPY_USE_C99_FORMATS', 1))
2565-
2566- # visibility check
2567- hidden_visibility = visibility_define(config_cmd)
2568- moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))
2569-
2570- # Add the C API/ABI versions
2571- moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
2572- moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))
2573-
2574- # Add moredefs to header
2575- target_f = open(target, 'w')
2576- for d in moredefs:
2577- if isinstance(d, str):
2578- target_f.write('#define %s\n' % (d))
2579- else:
2580- target_f.write('#define %s %s\n' % (d[0], d[1]))
2581-
2582- # Define __STDC_FORMAT_MACROS
2583- target_f.write("""
2584-#ifndef __STDC_FORMAT_MACROS
2585-#define __STDC_FORMAT_MACROS 1
2586-#endif
2587-""")
2588- target_f.close()
2589-
2590- # Dump the numpyconfig.h header to stdout
2591- print('File: %s' % target)
2592- target_f = open(target)
2593- print(target_f.read())
2594- target_f.close()
2595- print('EOF')
2596- config.add_data_files((header_dir, target))
2597- return target
2598-
2599- def generate_api_func(module_name):
2600- def generate_api(ext, build_dir):
2601- script = join(codegen_dir, module_name + '.py')
2602- sys.path.insert(0, codegen_dir)
2603- try:
2604- m = __import__(module_name)
2605- log.info('executing %s', script)
2606- h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))
2607- finally:
2608- del sys.path[0]
2609- config.add_data_files((header_dir, h_file),
2610- (header_dir, doc_file))
2611- return (h_file,)
2612- return generate_api
2613-
2614- generate_numpy_api = generate_api_func('generate_numpy_api')
2615- generate_ufunc_api = generate_api_func('generate_ufunc_api')
2616-
2617- config.add_include_dirs(join(local_dir, "src", "private"))
2618- config.add_include_dirs(join(local_dir, "src"))
2619- config.add_include_dirs(join(local_dir))
2620-
2621- config.add_data_files('include/numpy/*.h')
2622- config.add_include_dirs(join('src', 'npymath'))
2623- config.add_include_dirs(join('src', 'multiarray'))
2624- config.add_include_dirs(join('src', 'umath'))
2625- config.add_include_dirs(join('src', 'npysort'))
2626-
2627- config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
2628-
2629- config.numpy_include_dirs.extend(config.paths('include'))
2630-
2631- deps = [join('src', 'npymath', '_signbit.c'),
2632- join('include', 'numpy', '*object.h'),
2633- 'include/numpy/fenv/fenv.c',
2634- 'include/numpy/fenv/fenv.h',
2635- join(codegen_dir, 'genapi.py'),
2636- ]
2637-
2638- # Don't install fenv unless we need them.
2639- if sys.platform == 'cygwin':
2640- config.add_data_dir('include/numpy/fenv')
2641-
2642- #######################################################################
2643- # dummy module #
2644- #######################################################################
2645-
2646- # npymath needs the config.h and numpyconfig.h files to be generated, but
2647- # build_clib cannot handle generate_config_h and generate_numpyconfig_h
2648- # (don't ask). Because clib are generated before extensions, we have to
2649- # explicitly add an extension which has generate_config_h and
2650- # generate_numpyconfig_h as sources *before* adding npymath.
2651-
2652- config.add_extension('_dummy',
2653- sources = [join('src', 'dummymodule.c'),
2654- generate_config_h,
2655- generate_numpyconfig_h,
2656- generate_numpy_api]
2657- )
2658-
2659- #######################################################################
2660- # npymath library #
2661- #######################################################################
2662-
2663- subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])
2664- def get_mathlib_info(*args):
2665- # Another ugly hack: the mathlib info is known once build_src is run,
2666- # but we cannot use add_installed_pkg_config here either, so we only
2667-        # update the substitution dictionary during npymath build
2668- config_cmd = config.get_config_cmd()
2669-
2670- # Check that the toolchain works, to fail early if it doesn't
2671- # (avoid late errors with MATHLIB which are confusing if the
2672- # compiler does not work).
2673- st = config_cmd.try_link('int main(void) { return 0;}')
2674- if not st:
2675- raise RuntimeError("Broken toolchain: cannot link a simple C program")
2676- mlibs = check_mathlib(config_cmd)
2677-
2678- posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
2679- msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])
2680- subst_dict["posix_mathlib"] = posix_mlib
2681- subst_dict["msvc_mathlib"] = msvc_mlib
2682-
2683- config.add_installed_library('npymath',
2684- sources=[join('src', 'npymath', 'npy_math.c.src'),
2685- join('src', 'npymath', 'ieee754.c.src'),
2686- join('src', 'npymath', 'npy_math_complex.c.src'),
2687- join('src', 'npymath', 'halffloat.c'),
2688- get_mathlib_info],
2689- install_dir='lib')
2690- config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
2691- subst_dict)
2692- config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
2693- subst_dict)
2694-
2695- #######################################################################
2696- # npysort library #
2697- #######################################################################
2698-
2699- # This library is created for the build but it is not installed
2700- config.add_library('npysort',
2701- sources = [join('src', 'npysort', 'quicksort.c.src'),
2702- join('src', 'npysort', 'mergesort.c.src'),
2703- join('src', 'npysort', 'heapsort.c.src'),
2704- join('src', 'npysort', 'selection.c.src')])
2705-
2706-
2707- #######################################################################
2708- # multiarray module #
2709- #######################################################################
2710-
2711- # Multiarray version: this function is needed to build foo.c from foo.c.src
2712- # when foo.c is included in another file and as such not in the src
2713- # argument of build_ext command
2714- def generate_multiarray_templated_sources(ext, build_dir):
2715- from numpy.distutils.misc_util import get_cmd
2716-
2717- subpath = join('src', 'multiarray')
2718- sources = [join(local_dir, subpath, 'scalartypes.c.src'),
2719- join(local_dir, subpath, 'arraytypes.c.src'),
2720- join(local_dir, subpath, 'nditer_templ.c.src'),
2721- join(local_dir, subpath, 'lowlevel_strided_loops.c.src'),
2722- join(local_dir, subpath, 'einsum.c.src')]
2723-
2724- # numpy.distutils generate .c from .c.src in weird directories, we have
2725- # to add them there as they depend on the build_dir
2726- config.add_include_dirs(join(build_dir, subpath))
2727- cmd = get_cmd('build_src')
2728- cmd.ensure_finalized()
2729- cmd.template_sources(sources, ext)
2730-
2731- multiarray_deps = [
2732- join('src', 'multiarray', 'arrayobject.h'),
2733- join('src', 'multiarray', 'arraytypes.h'),
2734- join('src', 'multiarray', 'array_assign.h'),
2735- join('src', 'multiarray', 'buffer.h'),
2736- join('src', 'multiarray', 'calculation.h'),
2737- join('src', 'multiarray', 'common.h'),
2738- join('src', 'multiarray', 'convert_datatype.h'),
2739- join('src', 'multiarray', 'convert.h'),
2740- join('src', 'multiarray', 'conversion_utils.h'),
2741- join('src', 'multiarray', 'ctors.h'),
2742- join('src', 'multiarray', 'descriptor.h'),
2743- join('src', 'multiarray', 'getset.h'),
2744- join('src', 'multiarray', 'hashdescr.h'),
2745- join('src', 'multiarray', 'iterators.h'),
2746- join('src', 'multiarray', 'mapping.h'),
2747- join('src', 'multiarray', 'methods.h'),
2748- join('src', 'multiarray', 'multiarraymodule.h'),
2749- join('src', 'multiarray', 'nditer_impl.h'),
2750- join('src', 'multiarray', 'numpymemoryview.h'),
2751- join('src', 'multiarray', 'number.h'),
2752- join('src', 'multiarray', 'numpyos.h'),
2753- join('src', 'multiarray', 'refcount.h'),
2754- join('src', 'multiarray', 'scalartypes.h'),
2755- join('src', 'multiarray', 'sequence.h'),
2756- join('src', 'multiarray', 'shape.h'),
2757- join('src', 'multiarray', 'ucsnarrow.h'),
2758- join('src', 'multiarray', 'usertypes.h'),
2759- join('src', 'private', 'lowlevel_strided_loops.h'),
2760- join('include', 'numpy', 'arrayobject.h'),
2761- join('include', 'numpy', '_neighborhood_iterator_imp.h'),
2762- join('include', 'numpy', 'npy_endian.h'),
2763- join('include', 'numpy', 'arrayscalars.h'),
2764- join('include', 'numpy', 'noprefix.h'),
2765- join('include', 'numpy', 'npy_interrupt.h'),
2766- join('include', 'numpy', 'oldnumeric.h'),
2767- join('include', 'numpy', 'npy_3kcompat.h'),
2768- join('include', 'numpy', 'npy_math.h'),
2769- join('include', 'numpy', 'halffloat.h'),
2770- join('include', 'numpy', 'npy_common.h'),
2771- join('include', 'numpy', 'npy_os.h'),
2772- join('include', 'numpy', 'utils.h'),
2773- join('include', 'numpy', 'ndarrayobject.h'),
2774- join('include', 'numpy', 'npy_cpu.h'),
2775- join('include', 'numpy', 'numpyconfig.h'),
2776- join('include', 'numpy', 'ndarraytypes.h'),
2777- join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
2778- join('include', 'numpy', '_numpyconfig.h.in'),
2779- ]
2780-
2781- multiarray_src = [
2782- join('src', 'multiarray', 'arrayobject.c'),
2783- join('src', 'multiarray', 'arraytypes.c.src'),
2784- join('src', 'multiarray', 'array_assign.c'),
2785- join('src', 'multiarray', 'array_assign_scalar.c'),
2786- join('src', 'multiarray', 'array_assign_array.c'),
2787- join('src', 'multiarray', 'buffer.c'),
2788- join('src', 'multiarray', 'calculation.c'),
2789- join('src', 'multiarray', 'common.c'),
2790- join('src', 'multiarray', 'convert.c'),
2791- join('src', 'multiarray', 'convert_datatype.c'),
2792- join('src', 'multiarray', 'conversion_utils.c'),
2793- join('src', 'multiarray', 'ctors.c'),
2794- join('src', 'multiarray', 'datetime.c'),
2795- join('src', 'multiarray', 'datetime_strings.c'),
2796- join('src', 'multiarray', 'datetime_busday.c'),
2797- join('src', 'multiarray', 'datetime_busdaycal.c'),
2798- join('src', 'multiarray', 'descriptor.c'),
2799- join('src', 'multiarray', 'dtype_transfer.c'),
2800- join('src', 'multiarray', 'einsum.c.src'),
2801- join('src', 'multiarray', 'flagsobject.c'),
2802- join('src', 'multiarray', 'getset.c'),
2803- join('src', 'multiarray', 'hashdescr.c'),
2804- join('src', 'multiarray', 'item_selection.c'),
2805- join('src', 'multiarray', 'iterators.c'),
2806- join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
2807- join('src', 'multiarray', 'mapping.c'),
2808- join('src', 'multiarray', 'methods.c'),
2809- join('src', 'multiarray', 'multiarraymodule.c'),
2810- join('src', 'multiarray', 'nditer_templ.c.src'),
2811- join('src', 'multiarray', 'nditer_api.c'),
2812- join('src', 'multiarray', 'nditer_constr.c'),
2813- join('src', 'multiarray', 'nditer_pywrap.c'),
2814- join('src', 'multiarray', 'number.c'),
2815- join('src', 'multiarray', 'numpymemoryview.c'),
2816- join('src', 'multiarray', 'numpyos.c'),
2817- join('src', 'multiarray', 'refcount.c'),
2818- join('src', 'multiarray', 'sequence.c'),
2819- join('src', 'multiarray', 'shape.c'),
2820- join('src', 'multiarray', 'scalarapi.c'),
2821- join('src', 'multiarray', 'scalartypes.c.src'),
2822- join('src', 'multiarray', 'usertypes.c'),
2823- join('src', 'multiarray', 'ucsnarrow.c')]
2824-
2825-
2826- if not ENABLE_SEPARATE_COMPILATION:
2827- multiarray_deps.extend(multiarray_src)
2828- multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')]
2829- multiarray_src.append(generate_multiarray_templated_sources)
2830-
2831- config.add_extension('multiarray',
2832- sources = multiarray_src +
2833- [generate_config_h,
2834- generate_numpyconfig_h,
2835- generate_numpy_api,
2836- join(codegen_dir, 'generate_numpy_api.py'),
2837- join('*.py')],
2838- depends = deps + multiarray_deps,
2839- libraries = ['npymath', 'npysort'])
2840-
2841- #######################################################################
2842- # umath module #
2843- #######################################################################
2844-
2845- # umath version: this function is needed to build foo.c from foo.c.src
2846- # when foo.c is included in another file and as such not in the src
2847- # argument of build_ext command
2848- def generate_umath_templated_sources(ext, build_dir):
2849- from numpy.distutils.misc_util import get_cmd
2850-
2851- subpath = join('src', 'umath')
2852- # NOTE: For manual template conversion of loops.h.src, read the note
2853- # in that file.
2854- sources = [
2855- join(local_dir, subpath, 'loops.c.src'),
2856- join(local_dir, subpath, 'simd.inc.src')]
2857-
2858- # numpy.distutils generate .c from .c.src in weird directories, we have
2859- # to add them there as they depend on the build_dir
2860- config.add_include_dirs(join(build_dir, subpath))
2861- cmd = get_cmd('build_src')
2862- cmd.ensure_finalized()
2863- cmd.template_sources(sources, ext)
2864-
2865-
2866- def generate_umath_c(ext, build_dir):
2867- target = join(build_dir, header_dir, '__umath_generated.c')
2868- dir = os.path.dirname(target)
2869- if not os.path.exists(dir):
2870- os.makedirs(dir)
2871- script = generate_umath_py
2872- if newer(script, target):
2873- f = open(target, 'w')
2874- f.write(generate_umath.make_code(generate_umath.defdict,
2875- generate_umath.__file__))
2876- f.close()
2877- return []
2878-
2879- umath_src = [
2880- join('src', 'umath', 'umathmodule.c'),
2881- join('src', 'umath', 'reduction.c'),
2882- join('src', 'umath', 'funcs.inc.src'),
2883- join('src', 'umath', 'simd.inc.src'),
2884- join('src', 'umath', 'loops.c.src'),
2885- join('src', 'umath', 'ufunc_object.c'),
2886- join('src', 'umath', 'ufunc_type_resolution.c')]
2887-
2888- umath_deps = [
2889- generate_umath_py,
2890- join('src', 'umath', 'simd.inc.src'),
2891- join(codegen_dir, 'generate_ufunc_api.py')]
2892-
2893- if not ENABLE_SEPARATE_COMPILATION:
2894- umath_deps.extend(umath_src)
2895- umath_src = [join('src', 'umath', 'umathmodule_onefile.c')]
2896- umath_src.append(generate_umath_templated_sources)
2897- umath_src.append(join('src', 'umath', 'funcs.inc.src'))
2898- umath_src.append(join('src', 'umath', 'simd.inc.src'))
2899-
2900- config.add_extension('umath',
2901- sources = umath_src +
2902- [generate_config_h,
2903- generate_numpyconfig_h,
2904- generate_umath_c,
2905- generate_ufunc_api],
2906- depends = deps + umath_deps,
2907- libraries = ['npymath'],
2908- )
2909-
2910- #######################################################################
2911- # scalarmath module #
2912- #######################################################################
2913-
2914- config.add_extension('scalarmath',
2915- sources = [join('src', 'scalarmathmodule.c.src'),
2916- generate_config_h,
2917- generate_numpyconfig_h,
2918- generate_numpy_api,
2919- generate_ufunc_api],
2920- depends = deps,
2921- libraries = ['npymath'],
2922- )
2923-
2924- #######################################################################
2925- # _dotblas module #
2926- #######################################################################
2927-
2928- # Configure blasdot
2929- blas_info = get_info('blas_opt', 0)
2930- #blas_info = {}
2931- def get_dotblas_sources(ext, build_dir):
2932- if blas_info:
2933- #if ('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', []):
2934- # return None # dotblas needs ATLAS, Fortran compiled blas will not be sufficient.
2935- return ext.depends[:1]
2936- return None # no extension module will be built
2937-
2938- config.add_extension('_dotblas',
2939- sources = [get_dotblas_sources],
2940- depends = [join('blasdot', '_dotblas.c'),
2941- join('blasdot', 'cblas.h'),
2942- ],
2943- include_dirs = ['blasdot'],
2944- extra_info = blas_info
2945- )
2946-
2947- #######################################################################
2948- # umath_tests module #
2949- #######################################################################
2950-
2951- config.add_extension('umath_tests',
2952- sources = [join('src', 'umath', 'umath_tests.c.src')])
2953-
2954- #######################################################################
2955- # custom rational dtype module #
2956- #######################################################################
2957-
2958- config.add_extension('test_rational',
2959- sources = [join('src', 'umath', 'test_rational.c.src')])
2960-
2961- #######################################################################
2962- # struct_ufunc_test module #
2963- #######################################################################
2964-
2965- config.add_extension('struct_ufunc_test',
2966- sources = [join('src', 'umath', 'struct_ufunc_test.c.src')])
2967-
2968- #######################################################################
2969- # multiarray_tests module #
2970- #######################################################################
2971-
2972- config.add_extension('multiarray_tests',
2973- sources = [join('src', 'multiarray', 'multiarray_tests.c.src')])
2974-
2975- #######################################################################
2976- # operand_flag_tests module #
2977- #######################################################################
2978-
2979- config.add_extension('operand_flag_tests',
2980- sources = [join('src', 'umath', 'operand_flag_tests.c.src')])
2981-
2982- config.add_data_dir('tests')
2983- config.add_data_dir('tests/data')
2984-
2985- config.make_svn_version_py()
2986-
2987- return config
2988-
2989-if __name__=='__main__':
2990- from numpy.distutils.core import setup
2991- setup(configuration=configuration)
2992
2993=== removed file '.pc/ppc64el_cpu_config.patch/numpy/core/setup_common.py'
2994--- .pc/ppc64el_cpu_config.patch/numpy/core/setup_common.py 2014-01-03 17:10:24 +0000
2995+++ .pc/ppc64el_cpu_config.patch/numpy/core/setup_common.py 1970-01-01 00:00:00 +0000
2996@@ -1,308 +0,0 @@
2997-from __future__ import division, absolute_import, print_function
2998-
2999-# Code common to build tools
3000-import sys
3001-from os.path import join
3002-import warnings
3003-import copy
3004-import binascii
3005-
3006-from distutils.ccompiler import CompileError
3007-
3008-#-------------------
3009-# Versioning support
3010-#-------------------
3011-# How to change C_API_VERSION ?
3012-# - increase C_API_VERSION value
3013-# - record the hash for the new C API with the script cversions.py
3014-# and add the hash to cversions.txt
3015-# The hash values are used to remind developers when the C API number was not
3016-# updated - generates a MismatchCAPIWarning warning which is turned into an
3017-# exception for released version.
3018-
3019-# Binary compatibility version number. This number is increased whenever the
3020-# C-API is changed such that binary compatibility is broken, i.e. whenever a
3021-# recompile of extension modules is needed.
3022-C_ABI_VERSION = 0x01000009
3023-
3024-# Minor API version. This number is increased whenever a change is made to the
3025-# C-API -- whether it breaks binary compatibility or not. Some changes, such
3026-# as adding a function pointer to the end of the function table, can be made
3027-# without breaking binary compatibility. In this case, only the C_API_VERSION
3028-# (*not* C_ABI_VERSION) would be increased. Whenever binary compatibility is
3029-# broken, both C_API_VERSION and C_ABI_VERSION should be increased.
3030-#
3031-# 0x00000008 - 1.7.x
3032-# 0x00000009 - 1.8.x
3033-C_API_VERSION = 0x00000009
3034-
3035-class MismatchCAPIWarning(Warning):
3036- pass
3037-
3038-def is_released(config):
3039- """Return True if a released version of numpy is detected."""
3040- from distutils.version import LooseVersion
3041-
3042- v = config.get_version('../version.py')
3043- if v is None:
3044- raise ValueError("Could not get version")
3045- pv = LooseVersion(vstring=v).version
3046- if len(pv) > 3:
3047- return False
3048- return True
3049-
3050-def get_api_versions(apiversion, codegen_dir):
3051- """Return current C API checksum and the recorded checksum for the given
3052- version of the C API version."""
3053- api_files = [join(codegen_dir, 'numpy_api_order.txt'),
3054- join(codegen_dir, 'ufunc_api_order.txt')]
3055-
3056- # Compute the hash of the current API as defined in the .txt files in
3057- # code_generators
3058- sys.path.insert(0, codegen_dir)
3059- try:
3060- m = __import__('genapi')
3061- numpy_api = __import__('numpy_api')
3062- curapi_hash = m.fullapi_hash(numpy_api.full_api)
3063- apis_hash = m.get_versions_hash()
3064- finally:
3065- del sys.path[0]
3066-
3067- return curapi_hash, apis_hash[apiversion]
3068-
3069-def check_api_version(apiversion, codegen_dir):
3070- """Emits a MismacthCAPIWarning if the C API version needs updating."""
3071- curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir)
3072-
3073- # If different hash, it means that the api .txt files in
3074- # codegen_dir have been updated without the API version being
3075- # updated. Any modification in those .txt files should be reflected
3076- # in the api and eventually abi versions.
3077- # To compute the checksum of the current API, use
3078- # code_generators/cversions.py script
3079- if not curapi_hash == api_hash:
3080- msg = "API mismatch detected, the C API version " \
3081- "numbers have to be updated. Current C api version is %d, " \
3082- "with checksum %s, but recorded checksum for C API version %d in " \
3083- "codegen_dir/cversions.txt is %s. If functions were added in the " \
3084- "C API, you have to update C_API_VERSION in %s."
3085- warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash,
3086- __file__),
3087- MismatchCAPIWarning)
3088-# Mandatory functions: if not found, fail the build
3089-MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs",
3090- "floor", "ceil", "sqrt", "log10", "log", "exp", "asin",
3091- "acos", "atan", "fmod", 'modf', 'frexp', 'ldexp']
3092-
3093-# Standard functions which may not be available and for which we have a
3094-# replacement implementation. Note that some of these are C99 functions.
3095-OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh",
3096- "rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow",
3097- "copysign", "nextafter"]
3098-
3099-
3100-OPTIONAL_HEADERS = [
3101-# sse headers only enabled automatically on amd64/x32 builds
3102- "xmmintrin.h", # SSE
3103- "emmintrin.h", # SSE2
3104-]
3105-
3106-# optional gcc compiler builtins and their call arguments and optionally a
3107-# required header
3108-# call arguments are required as the compiler will do strict signature checking
3109-OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'),
3110- ("__builtin_isinf", '5.'),
3111- ("__builtin_isfinite", '5.'),
3112- ("__builtin_bswap32", '5u'),
3113- ("__builtin_bswap64", '5u'),
3114- ("__builtin_expect", '5, 0'),
3115- ("_mm_load_ps", '(float*)0', "xmmintrin.h"), # SSE
3116- ("_mm_load_pd", '(double*)0', "emmintrin.h"), # SSE2
3117- ]
3118-
3119-# gcc function attributes
3120-# (attribute as understood by gcc, function name),
3121-# function name will be converted to HAVE_<upper-case-name> preprocessor macro
3122-OPTIONAL_GCC_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))',
3123- 'attribute_optimize_unroll_loops'),
3124- ]
3125-
3126-# Subset of OPTIONAL_STDFUNCS which may already have HAVE_* defined by Python.h
3127-OPTIONAL_STDFUNCS_MAYBE = ["expm1", "log1p", "acosh", "atanh", "asinh", "hypot",
3128- "copysign"]
3129-
3130-# C99 functions: float and long double versions
3131-C99_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor",
3132- "ceil", "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp",
3133- "expm1", "asin", "acos", "atan", "asinh", "acosh", "atanh",
3134- "hypot", "atan2", "pow", "fmod", "modf", 'frexp', 'ldexp',
3135- "exp2", "log2", "copysign", "nextafter"]
3136-
3137-C99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS]
3138-C99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS]
3139-
3140-C99_COMPLEX_TYPES = ['complex double', 'complex float', 'complex long double']
3141-
3142-C99_COMPLEX_FUNCS = ['creal', 'cimag', 'cabs', 'carg', 'cexp', 'csqrt', 'clog',
3143- 'ccos', 'csin', 'cpow']
3144-
3145-def fname2def(name):
3146- return "HAVE_%s" % name.upper()
3147-
3148-def sym2def(symbol):
3149- define = symbol.replace(' ', '')
3150- return define.upper()
3151-
3152-def type2def(symbol):
3153- define = symbol.replace(' ', '_')
3154- return define.upper()
3155-
3156-# Code to detect long double representation taken from MPFR m4 macro
3157-def check_long_double_representation(cmd):
3158- cmd._check_compiler()
3159- body = LONG_DOUBLE_REPRESENTATION_SRC % {'type': 'long double'}
3160-
3161- # We need to use _compile because we need the object filename
3162- src, object = cmd._compile(body, None, None, 'c')
3163- try:
3164- type = long_double_representation(pyod(object))
3165- return type
3166- finally:
3167- cmd._clean()
3168-
3169-LONG_DOUBLE_REPRESENTATION_SRC = r"""
3170-/* "before" is 16 bytes to ensure there's no padding between it and "x".
3171- * We're not expecting any "long double" bigger than 16 bytes or with
3172- * alignment requirements stricter than 16 bytes. */
3173-typedef %(type)s test_type;
3174-
3175-struct {
3176- char before[16];
3177- test_type x;
3178- char after[8];
3179-} foo = {
3180- { '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
3181- '\001', '\043', '\105', '\147', '\211', '\253', '\315', '\357' },
3182- -123456789.0,
3183- { '\376', '\334', '\272', '\230', '\166', '\124', '\062', '\020' }
3184-};
3185-"""
3186-
3187-def pyod(filename):
3188- """Python implementation of the od UNIX utility (od -b, more exactly).
3189-
3190- Parameters
3191- ----------
3192- filename : str
3193- name of the file to get the dump from.
3194-
3195- Returns
3196- -------
3197- out : seq
3198- list of lines of od output
3199-
3200- Note
3201- ----
3202- We only implement enough to get the necessary information for long double
3203- representation, this is not intended as a compatible replacement for od.
3204- """
3205- def _pyod2():
3206- out = []
3207-
3208- fid = open(filename, 'rb')
3209- try:
3210- yo = [int(oct(int(binascii.b2a_hex(o), 16))) for o in fid.read()]
3211- for i in range(0, len(yo), 16):
3212- line = ['%07d' % int(oct(i))]
3213- line.extend(['%03d' % c for c in yo[i:i+16]])
3214- out.append(" ".join(line))
3215- return out
3216- finally:
3217- fid.close()
3218-
3219- def _pyod3():
3220- out = []
3221-
3222- fid = open(filename, 'rb')
3223- try:
3224- yo2 = [oct(o)[2:] for o in fid.read()]
3225- for i in range(0, len(yo2), 16):
3226- line = ['%07d' % int(oct(i)[2:])]
3227- line.extend(['%03d' % int(c) for c in yo2[i:i+16]])
3228- out.append(" ".join(line))
3229- return out
3230- finally:
3231- fid.close()
3232-
3233- if sys.version_info[0] < 3:
3234- return _pyod2()
3235- else:
3236- return _pyod3()
3237-
3238-_BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000',
3239- '001', '043', '105', '147', '211', '253', '315', '357']
3240-_AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020']
3241-
3242-_IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000']
3243-_IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1]
3244-_INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353',
3245- '031', '300', '000', '000']
3246-_INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353',
3247- '031', '300', '000', '000', '000', '000', '000', '000']
3248-_MOTOROLA_EXTENDED_12B = ['300', '031', '000', '000', '353', '171',
3249- '242', '240', '000', '000', '000', '000']
3250-_IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000',
3251- '000', '000', '000', '000', '000', '000', '000', '000']
3252-_IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1]
3253-_DOUBLE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000'] + \
3254- ['000'] * 8
3255-
3256-def long_double_representation(lines):
3257- """Given a binary dump as given by GNU od -b, look for long double
3258- representation."""
3259-
3260- # Read contains a list of 32 items, each item is a byte (in octal
3261- # representation, as a string). We 'slide' over the output until read is of
3262- # the form before_seq + content + after_sequence, where content is the long double
3263- # representation:
3264- # - content is 12 bytes: 80 bits Intel representation
3265- # - content is 16 bytes: 80 bits Intel representation (64 bits) or quad precision
3266- # - content is 8 bytes: same as double (not implemented yet)
3267- read = [''] * 32
3268- saw = None
3269- for line in lines:
3270- # we skip the first word, as od -b outputs an index at the beginning of
3271- # each line
3272- for w in line.split()[1:]:
3273- read.pop(0)
3274- read.append(w)
3275-
3276- # If the end of read is equal to the after_sequence, read contains
3277- # the long double
3278- if read[-8:] == _AFTER_SEQ:
3279- saw = copy.copy(read)
3280- if read[:12] == _BEFORE_SEQ[4:]:
3281- if read[12:-8] == _INTEL_EXTENDED_12B:
3282- return 'INTEL_EXTENDED_12_BYTES_LE'
3283- if read[12:-8] == _MOTOROLA_EXTENDED_12B:
3284- return 'MOTOROLA_EXTENDED_12_BYTES_BE'
3285- elif read[:8] == _BEFORE_SEQ[8:]:
3286- if read[8:-8] == _INTEL_EXTENDED_16B:
3287- return 'INTEL_EXTENDED_16_BYTES_LE'
3288- elif read[8:-8] == _IEEE_QUAD_PREC_BE:
3289- return 'IEEE_QUAD_BE'
3290- elif read[8:-8] == _IEEE_QUAD_PREC_LE:
3291- return 'IEEE_QUAD_LE'
3292- elif read[8:-8] == _DOUBLE_DOUBLE_BE:
3293- return 'DOUBLE_DOUBLE_BE'
3294- elif read[:16] == _BEFORE_SEQ:
3295- if read[16:-8] == _IEEE_DOUBLE_LE:
3296- return 'IEEE_DOUBLE_LE'
3297- elif read[16:-8] == _IEEE_DOUBLE_BE:
3298- return 'IEEE_DOUBLE_BE'
3299-
3300- if saw is not None:
3301- raise ValueError("Unrecognized format (%s)" % saw)
3302- else:
3303- # We never detected the after_sequence
3304- raise ValueError("Could not lock sequences (%s)" % saw)
3305
3306=== removed directory '.pc/ppc64el_cpu_config.patch/numpy/core/src'
3307=== removed directory '.pc/ppc64el_cpu_config.patch/numpy/core/src/npymath'
3308=== removed file '.pc/ppc64el_cpu_config.patch/numpy/core/src/npymath/ieee754.c.src'
3309--- .pc/ppc64el_cpu_config.patch/numpy/core/src/npymath/ieee754.c.src 2014-01-03 17:10:24 +0000
3310+++ .pc/ppc64el_cpu_config.patch/numpy/core/src/npymath/ieee754.c.src 1970-01-01 00:00:00 +0000
3311@@ -1,680 +0,0 @@
3312-/* -*- c -*- */
3313-/*
3314- * vim:syntax=c
3315- *
3316- * Low-level routines related to IEEE-754 format
3317- */
3318-#include "npy_math_common.h"
3319-#include "npy_math_private.h"
3320-
3321-#ifndef HAVE_COPYSIGN
3322-double npy_copysign(double x, double y)
3323-{
3324- npy_uint32 hx, hy;
3325- GET_HIGH_WORD(hx, x);
3326- GET_HIGH_WORD(hy, y);
3327- SET_HIGH_WORD(x, (hx & 0x7fffffff) | (hy & 0x80000000));
3328- return x;
3329-}
3330-#endif
3331-
3332-/*
3333- The below code is provided for compilers which do not yet provide C11 compatibility (gcc 4.5 and older)
3334- */
3335-#ifndef LDBL_TRUE_MIN
3336-#define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
3337-#endif
3338-
3339-#if !defined(HAVE_DECL_SIGNBIT)
3340-#include "_signbit.c"
3341-
3342-int _npy_signbit_f(float x)
3343-{
3344- return _npy_signbit_d((double) x);
3345-}
3346-
3347-int _npy_signbit_ld(long double x)
3348-{
3349- return _npy_signbit_d((double) x);
3350-}
3351-#endif
3352-
3353-/*
3354- * FIXME: There is a lot of redundancy between _next* and npy_nextafter*.
3355- * refactor this at some point
3356- *
3357- * p >= 0, return x + nulp
3358- * p < 0, return x - nulp
3359- */
3360-double _next(double x, int p)
3361-{
3362- volatile double t;
3363- npy_int32 hx, hy, ix;
3364- npy_uint32 lx;
3365-
3366- EXTRACT_WORDS(hx, lx, x);
3367- ix = hx & 0x7fffffff; /* |x| */
3368-
3369- if (((ix >= 0x7ff00000) && ((ix - 0x7ff00000) | lx) != 0)) /* x is nan */
3370- return x;
3371- if ((ix | lx) == 0) { /* x == 0 */
3372- if (p >= 0) {
3373- INSERT_WORDS(x, 0x0, 1); /* return +minsubnormal */
3374- } else {
3375- INSERT_WORDS(x, 0x80000000, 1); /* return -minsubnormal */
3376- }
3377- t = x * x;
3378- if (t == x)
3379- return t;
3380- else
3381- return x; /* raise underflow flag */
3382- }
3383- if (p < 0) { /* x -= ulp */
3384- if (lx == 0)
3385- hx -= 1;
3386- lx -= 1;
3387- } else { /* x += ulp */
3388- lx += 1;
3389- if (lx == 0)
3390- hx += 1;
3391- }
3392- hy = hx & 0x7ff00000;
3393- if (hy >= 0x7ff00000)
3394- return x + x; /* overflow */
3395- if (hy < 0x00100000) { /* underflow */
3396- t = x * x;
3397- if (t != x) { /* raise underflow flag */
3398- INSERT_WORDS(x, hx, lx);
3399- return x;
3400- }
3401- }
3402- INSERT_WORDS(x, hx, lx);
3403- return x;
3404-}
3405-
3406-float _nextf(float x, int p)
3407-{
3408- volatile float t;
3409- npy_int32 hx, hy, ix;
3410-
3411- GET_FLOAT_WORD(hx, x);
3412- ix = hx & 0x7fffffff; /* |x| */
3413-
3414- if ((ix > 0x7f800000)) /* x is nan */
3415- return x;
3416- if (ix == 0) { /* x == 0 */
3417- if (p >= 0) {
3418- SET_FLOAT_WORD(x, 0x0 | 1); /* return +minsubnormal */
3419- } else {
3420- SET_FLOAT_WORD(x, 0x80000000 | 1); /* return -minsubnormal */
3421- }
3422- t = x * x;
3423- if (t == x)
3424- return t;
3425- else
3426- return x; /* raise underflow flag */
3427- }
3428- if (p < 0) { /* x -= ulp */
3429- hx -= 1;
3430- } else { /* x += ulp */
3431- hx += 1;
3432- }
3433- hy = hx & 0x7f800000;
3434- if (hy >= 0x7f800000)
3435- return x + x; /* overflow */
3436- if (hy < 0x00800000) { /* underflow */
3437- t = x * x;
3438- if (t != x) { /* raise underflow flag */
3439- SET_FLOAT_WORD(x, hx);
3440- return x;
3441- }
3442- }
3443- SET_FLOAT_WORD(x, hx);
3444- return x;
3445-}
3446-
3447-#ifdef HAVE_LDOUBLE_DOUBLE_DOUBLE_BE
3448-
3449-/*
3450- * FIXME: this is ugly and untested. The asm part only works with gcc, and we
3451- * should consolidate the GET_LDOUBLE* / SET_LDOUBLE macros
3452- */
3453-#define math_opt_barrier(x) \
3454- ({ __typeof (x) __x = x; __asm ("" : "+m" (__x)); __x; })
3455-#define math_force_eval(x) __asm __volatile ("" : : "m" (x))
3456-
3457-/* only works for big endian */
3458-typedef union
3459-{
3460- npy_longdouble value;
3461- struct
3462- {
3463- npy_uint64 msw;
3464- npy_uint64 lsw;
3465- } parts64;
3466- struct
3467- {
3468- npy_uint32 w0, w1, w2, w3;
3469- } parts32;
3470-} ieee854_long_double_shape_type;
3471-
3472-/* Get two 64 bit ints from a long double. */
3473-
3474-#define GET_LDOUBLE_WORDS64(ix0,ix1,d) \
3475-do { \
3476- ieee854_long_double_shape_type qw_u; \
3477- qw_u.value = (d); \
3478- (ix0) = qw_u.parts64.msw; \
3479- (ix1) = qw_u.parts64.lsw; \
3480-} while (0)
3481-
3482-/* Set a long double from two 64 bit ints. */
3483-
3484-#define SET_LDOUBLE_WORDS64(d,ix0,ix1) \
3485-do { \
3486- ieee854_long_double_shape_type qw_u; \
3487- qw_u.parts64.msw = (ix0); \
3488- qw_u.parts64.lsw = (ix1); \
3489- (d) = qw_u.value; \
3490-} while (0)
3491-
3492-npy_longdouble _nextl(npy_longdouble x, int p)
3493-{
3494- npy_int64 hx,ihx,ilx;
3495- npy_uint64 lx;
3496-
3497- GET_LDOUBLE_WORDS64(hx, lx, x);
3498- ihx = hx & 0x7fffffffffffffffLL; /* |hx| */
3499- ilx = lx & 0x7fffffffffffffffLL; /* |lx| */
3500-
3501- if(((ihx & 0x7ff0000000000000LL)==0x7ff0000000000000LL)&&
3502- ((ihx & 0x000fffffffffffffLL)!=0)) {
3503- return x; /* signal the nan */
3504- }
3505- if(ihx == 0 && ilx == 0) { /* x == 0 */
3506- npy_longdouble u;
3507- SET_LDOUBLE_WORDS64(x, p, 0ULL);/* return +-minsubnormal */
3508- u = x * x;
3509- if (u == x) {
3510- return u;
3511- } else {
3512- return x; /* raise underflow flag */
3513- }
3514- }
3515-
3516- npy_longdouble u;
3517- if(p < 0) { /* p < 0, x -= ulp */
3518- if((hx==0xffefffffffffffffLL)&&(lx==0xfc8ffffffffffffeLL))
3519- return x+x; /* overflow, return -inf */
3520- if (hx >= 0x7ff0000000000000LL) {
3521- SET_LDOUBLE_WORDS64(u,0x7fefffffffffffffLL,0x7c8ffffffffffffeLL);
3522- return u;
3523- }
3524- if(ihx <= 0x0360000000000000LL) { /* x <= LDBL_MIN */
3525- u = math_opt_barrier (x);
3526- x -= LDBL_TRUE_MIN;
3527- if (ihx < 0x0360000000000000LL
3528- || (hx > 0 && (npy_int64) lx <= 0)
3529- || (hx < 0 && (npy_int64) lx > 1)) {
3530- u = u * u;
3531- math_force_eval (u); /* raise underflow flag */
3532- }
3533- return x;
3534- }
3535- if (ihx < 0x06a0000000000000LL) { /* ulp will denormal */
3536- SET_LDOUBLE_WORDS64(u,(hx&0x7ff0000000000000LL),0ULL);
3537- u *= 0x1.0000000000000p-105L;
3538- } else
3539- SET_LDOUBLE_WORDS64(u,(hx&0x7ff0000000000000LL)-0x0690000000000000LL,0ULL);
3540- return x - u;
3541- } else { /* p >= 0, x += ulp */
3542- if((hx==0x7fefffffffffffffLL)&&(lx==0x7c8ffffffffffffeLL))
3543- return x+x; /* overflow, return +inf */
3544- if ((npy_uint64) hx >= 0xfff0000000000000ULL) {
3545- SET_LDOUBLE_WORDS64(u,0xffefffffffffffffLL,0xfc8ffffffffffffeLL);
3546- return u;
3547- }
3548- if(ihx <= 0x0360000000000000LL) { /* x <= LDBL_MIN */
3549- u = math_opt_barrier (x);
3550- x += LDBL_TRUE_MIN;
3551- if (ihx < 0x0360000000000000LL
3552- || (hx > 0 && (npy_int64) lx < 0 && lx != 0x8000000000000001LL)
3553- || (hx < 0 && (npy_int64) lx >= 0)) {
3554- u = u * u;
3555- math_force_eval (u); /* raise underflow flag */
3556- }
3557- if (x == 0.0L) /* handle negative LDBL_TRUE_MIN case */
3558- x = -0.0L;
3559- return x;
3560- }
3561- if (ihx < 0x06a0000000000000LL) { /* ulp will denormal */
3562- SET_LDOUBLE_WORDS64(u,(hx&0x7ff0000000000000LL),0ULL);
3563- u *= 0x1.0000000000000p-105L;
3564- } else
3565- SET_LDOUBLE_WORDS64(u,(hx&0x7ff0000000000000LL)-0x0690000000000000LL,0ULL);
3566- return x + u;
3567- }
3568-}
3569-#else
3570-npy_longdouble _nextl(npy_longdouble x, int p)
3571-{
3572- volatile npy_longdouble t;
3573- union IEEEl2bitsrep ux;
3574-
3575- ux.e = x;
3576-
3577- if ((GET_LDOUBLE_EXP(ux) == 0x7fff &&
3578- ((GET_LDOUBLE_MANH(ux) & ~LDBL_NBIT) | GET_LDOUBLE_MANL(ux)) != 0)) {
3579- return ux.e; /* x is nan */
3580- }
3581- if (ux.e == 0.0) {
3582- SET_LDOUBLE_MANH(ux, 0); /* return +-minsubnormal */
3583- SET_LDOUBLE_MANL(ux, 1);
3584- if (p >= 0) {
3585- SET_LDOUBLE_SIGN(ux, 0);
3586- } else {
3587- SET_LDOUBLE_SIGN(ux, 1);
3588- }
3589- t = ux.e * ux.e;
3590- if (t == ux.e) {
3591- return t;
3592- } else {
3593- return ux.e; /* raise underflow flag */
3594- }
3595- }
3596- if (p < 0) { /* x -= ulp */
3597- if (GET_LDOUBLE_MANL(ux) == 0) {
3598- if ((GET_LDOUBLE_MANH(ux) & ~LDBL_NBIT) == 0) {
3599- SET_LDOUBLE_EXP(ux, GET_LDOUBLE_EXP(ux) - 1);
3600- }
3601- SET_LDOUBLE_MANH(ux,
3602- (GET_LDOUBLE_MANH(ux) - 1) |
3603- (GET_LDOUBLE_MANH(ux) & LDBL_NBIT));
3604- }
3605- SET_LDOUBLE_MANL(ux, GET_LDOUBLE_MANL(ux) - 1);
3606- } else { /* x += ulp */
3607- SET_LDOUBLE_MANL(ux, GET_LDOUBLE_MANL(ux) + 1);
3608- if (GET_LDOUBLE_MANL(ux) == 0) {
3609- SET_LDOUBLE_MANH(ux,
3610- (GET_LDOUBLE_MANH(ux) + 1) |
3611- (GET_LDOUBLE_MANH(ux) & LDBL_NBIT));
3612- if ((GET_LDOUBLE_MANH(ux) & ~LDBL_NBIT) == 0) {
3613- SET_LDOUBLE_EXP(ux, GET_LDOUBLE_EXP(ux) + 1);
3614- }
3615- }
3616- }
3617- if (GET_LDOUBLE_EXP(ux) == 0x7fff) {
3618- return ux.e + ux.e; /* overflow */
3619- }
3620- if (GET_LDOUBLE_EXP(ux) == 0) { /* underflow */
3621- if (LDBL_NBIT) {
3622- SET_LDOUBLE_MANH(ux, GET_LDOUBLE_MANH(ux) & ~LDBL_NBIT);
3623- }
3624- t = ux.e * ux.e;
3625- if (t != ux.e) { /* raise underflow flag */
3626- return ux.e;
3627- }
3628- }
3629-
3630- return ux.e;
3631-}
3632-#endif
3633-
3634-/*
3635- * nextafter code taken from BSD math lib, the code contains the following
3636- * notice:
3637- *
3638- * ====================================================
3639- * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
3640- *
3641- * Developed at SunPro, a Sun Microsystems, Inc. business.
3642- * Permission to use, copy, modify, and distribute this
3643- * software is freely granted, provided that this notice
3644- * is preserved.
3645- * ====================================================
3646- */
3647-
3648-#ifndef HAVE_NEXTAFTER
3649-double npy_nextafter(double x, double y)
3650-{
3651- volatile double t;
3652- npy_int32 hx, hy, ix, iy;
3653- npy_uint32 lx, ly;
3654-
3655- EXTRACT_WORDS(hx, lx, x);
3656- EXTRACT_WORDS(hy, ly, y);
3657- ix = hx & 0x7fffffff; /* |x| */
3658- iy = hy & 0x7fffffff; /* |y| */
3659-
3660- if (((ix >= 0x7ff00000) && ((ix - 0x7ff00000) | lx) != 0) || /* x is nan */
3661- ((iy >= 0x7ff00000) && ((iy - 0x7ff00000) | ly) != 0)) /* y is nan */
3662- return x + y;
3663- if (x == y)
3664- return y; /* x=y, return y */
3665- if ((ix | lx) == 0) { /* x == 0 */
3666- INSERT_WORDS(x, hy & 0x80000000, 1); /* return +-minsubnormal */
3667- t = x * x;
3668- if (t == x)
3669- return t;
3670- else
3671- return x; /* raise underflow flag */
3672- }
3673- if (hx >= 0) { /* x > 0 */
3674- if (hx > hy || ((hx == hy) && (lx > ly))) { /* x > y, x -= ulp */
3675- if (lx == 0)
3676- hx -= 1;
3677- lx -= 1;
3678- } else { /* x < y, x += ulp */
3679- lx += 1;
3680- if (lx == 0)
3681- hx += 1;
3682- }
3683- } else { /* x < 0 */
3684- if (hy >= 0 || hx > hy || ((hx == hy) && (lx > ly))) { /* x < y, x -= ulp */
3685- if (lx == 0)
3686- hx -= 1;
3687- lx -= 1;
3688- } else { /* x > y, x += ulp */
3689- lx += 1;
3690- if (lx == 0)
3691- hx += 1;
3692- }
3693- }
3694- hy = hx & 0x7ff00000;
3695- if (hy >= 0x7ff00000)
3696- return x + x; /* overflow */
3697- if (hy < 0x00100000) { /* underflow */
3698- t = x * x;
3699- if (t != x) { /* raise underflow flag */
3700- INSERT_WORDS(y, hx, lx);
3701- return y;
3702- }
3703- }
3704- INSERT_WORDS(x, hx, lx);
3705- return x;
3706-}
3707-#endif
3708-
3709-#ifndef HAVE_NEXTAFTERF
3710-float npy_nextafterf(float x, float y)
3711-{
3712- volatile float t;
3713- npy_int32 hx, hy, ix, iy;
3714-
3715- GET_FLOAT_WORD(hx, x);
3716- GET_FLOAT_WORD(hy, y);
3717- ix = hx & 0x7fffffff; /* |x| */
3718- iy = hy & 0x7fffffff; /* |y| */
3719-
3720- if ((ix > 0x7f800000) || /* x is nan */
3721- (iy > 0x7f800000)) /* y is nan */
3722- return x + y;
3723- if (x == y)
3724- return y; /* x=y, return y */
3725- if (ix == 0) { /* x == 0 */
3726- SET_FLOAT_WORD(x, (hy & 0x80000000) | 1); /* return +-minsubnormal */
3727- t = x * x;
3728- if (t == x)
3729- return t;
3730- else
3731- return x; /* raise underflow flag */
3732- }
3733- if (hx >= 0) { /* x > 0 */
3734- if (hx > hy) { /* x > y, x -= ulp */
3735- hx -= 1;
3736- } else { /* x < y, x += ulp */
3737- hx += 1;
3738- }
3739- } else { /* x < 0 */
3740- if (hy >= 0 || hx > hy) { /* x < y, x -= ulp */
3741- hx -= 1;
3742- } else { /* x > y, x += ulp */
3743- hx += 1;
3744- }
3745- }
3746- hy = hx & 0x7f800000;
3747- if (hy >= 0x7f800000)
3748- return x + x; /* overflow */
3749- if (hy < 0x00800000) { /* underflow */
3750- t = x * x;
3751- if (t != x) { /* raise underflow flag */
3752- SET_FLOAT_WORD(y, hx);
3753- return y;
3754- }
3755- }
3756- SET_FLOAT_WORD(x, hx);
3757- return x;
3758-}
3759-#endif
3760-
3761-#ifndef HAVE_NEXTAFTERL
3762-npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y)
3763-{
3764- volatile npy_longdouble t;
3765- union IEEEl2bitsrep ux;
3766- union IEEEl2bitsrep uy;
3767-
3768- ux.e = x;
3769- uy.e = y;
3770-
3771- if ((GET_LDOUBLE_EXP(ux) == 0x7fff &&
3772- ((GET_LDOUBLE_MANH(ux) & ~LDBL_NBIT) | GET_LDOUBLE_MANL(ux)) != 0) ||
3773- (GET_LDOUBLE_EXP(uy) == 0x7fff &&
3774- ((GET_LDOUBLE_MANH(uy) & ~LDBL_NBIT) | GET_LDOUBLE_MANL(uy)) != 0)) {
3775- return ux.e + uy.e; /* x or y is nan */
3776- }
3777- if (ux.e == uy.e) {
3778- return uy.e; /* x=y, return y */
3779- }
3780- if (ux.e == 0.0) {
3781- SET_LDOUBLE_MANH(ux, 0); /* return +-minsubnormal */
3782- SET_LDOUBLE_MANL(ux, 1);
3783- SET_LDOUBLE_SIGN(ux, GET_LDOUBLE_SIGN(uy));
3784- t = ux.e * ux.e;
3785- if (t == ux.e) {
3786- return t;
3787- } else {
3788- return ux.e; /* raise underflow flag */
3789- }
3790- }
3791- if ((ux.e > 0.0) ^ (ux.e < uy.e)) { /* x -= ulp */
3792- if (GET_LDOUBLE_MANL(ux) == 0) {
3793- if ((GET_LDOUBLE_MANH(ux) & ~LDBL_NBIT) == 0) {
3794- SET_LDOUBLE_EXP(ux, GET_LDOUBLE_EXP(ux) - 1);
3795- }
3796- SET_LDOUBLE_MANH(ux,
3797- (GET_LDOUBLE_MANH(ux) - 1) |
3798- (GET_LDOUBLE_MANH(ux) & LDBL_NBIT));
3799- }
3800- SET_LDOUBLE_MANL(ux, GET_LDOUBLE_MANL(ux) - 1);
3801- } else { /* x += ulp */
3802- SET_LDOUBLE_MANL(ux, GET_LDOUBLE_MANL(ux) + 1);
3803- if (GET_LDOUBLE_MANL(ux) == 0) {
3804- SET_LDOUBLE_MANH(ux,
3805- (GET_LDOUBLE_MANH(ux) + 1) |
3806- (GET_LDOUBLE_MANH(ux) & LDBL_NBIT));
3807- if ((GET_LDOUBLE_MANH(ux) & ~LDBL_NBIT) == 0) {
3808- SET_LDOUBLE_EXP(ux, GET_LDOUBLE_EXP(ux) + 1);
3809- }
3810- }
3811- }
3812- if (GET_LDOUBLE_EXP(ux) == 0x7fff) {
3813- return ux.e + ux.e; /* overflow */
3814- }
3815- if (GET_LDOUBLE_EXP(ux) == 0) { /* underflow */
3816- if (LDBL_NBIT) {
3817- SET_LDOUBLE_MANH(ux, GET_LDOUBLE_MANH(ux) & ~LDBL_NBIT);
3818- }
3819- t = ux.e * ux.e;
3820- if (t != ux.e) { /* raise underflow flag */
3821- return ux.e;
3822- }
3823- }
3824-
3825- return ux.e;
3826-}
3827-#endif
3828-
3829-/**begin repeat
3830- * #suff = f,,l#
3831- * #SUFF = F,,L#
3832- * #type = npy_float, npy_double, npy_longdouble#
3833- */
3834-@type@ npy_spacing@suff@(@type@ x)
3835-{
3836- /* XXX: npy isnan/isinf may be optimized by bit twiddling */
3837- if (npy_isinf(x)) {
3838- return NPY_NAN@SUFF@;
3839- }
3840-
3841- return _next@suff@(x, 1) - x;
3842-}
3843-/**end repeat**/
3844-
3845-/*
3846- * Decorate all the math functions which are available on the current platform
3847- */
3848-
3849-#ifdef HAVE_NEXTAFTERF
3850-float npy_nextafterf(float x, float y)
3851-{
3852- return nextafterf(x, y);
3853-}
3854-#endif
3855-
3856-#ifdef HAVE_NEXTAFTER
3857-double npy_nextafter(double x, double y)
3858-{
3859- return nextafter(x, y);
3860-}
3861-#endif
3862-
3863-#ifdef HAVE_NEXTAFTERL
3864-npy_longdouble npy_nextafterl(npy_longdouble x, npy_longdouble y)
3865-{
3866- return nextafterl(x, y);
3867-}
3868-#endif
3869-
3870-/*
3871- * Functions to set the floating point status word.
3872- */
3873-
3874-#if defined(sun) || defined(__BSD__) || defined(__OpenBSD__) || \
3875- (defined(__FreeBSD__) && (__FreeBSD_version < 502114)) || \
3876- defined(__NetBSD__)
3877-#include <ieeefp.h>
3878-
3879-void npy_set_floatstatus_divbyzero(void)
3880-{
3881- fpsetsticky(FP_X_DZ);
3882-}
3883-
3884-void npy_set_floatstatus_overflow(void)
3885-{
3886- fpsetsticky(FP_X_OFL);
3887-}
3888-
3889-void npy_set_floatstatus_underflow(void)
3890-{
3891- fpsetsticky(FP_X_UFL);
3892-}
3893-
3894-void npy_set_floatstatus_invalid(void)
3895-{
3896- fpsetsticky(FP_X_INV);
3897-}
3898-
3899-
3900-#elif defined(__GLIBC__) || defined(__APPLE__) || \
3901- defined(__CYGWIN__) || defined(__MINGW32__) || \
3902- (defined(__FreeBSD__) && (__FreeBSD_version >= 502114))
3903-
3904-# if defined(__GLIBC__) || defined(__APPLE__) || \
3905- defined(__MINGW32__) || defined(__FreeBSD__)
3906-# include <fenv.h>
3907-# elif defined(__CYGWIN__)
3908-# include "numpy/fenv/fenv.h"
3909-# endif
3910-
3911-void npy_set_floatstatus_divbyzero(void)
3912-{
3913- feraiseexcept(FE_DIVBYZERO);
3914-}
3915-
3916-void npy_set_floatstatus_overflow(void)
3917-{
3918- feraiseexcept(FE_OVERFLOW);
3919-}
3920-
3921-void npy_set_floatstatus_underflow(void)
3922-{
3923- feraiseexcept(FE_UNDERFLOW);
3924-}
3925-
3926-void npy_set_floatstatus_invalid(void)
3927-{
3928- feraiseexcept(FE_INVALID);
3929-}
3930-
3931-#elif defined(_AIX)
3932-#include <float.h>
3933-#include <fpxcp.h>
3934-
3935-void npy_set_floatstatus_divbyzero(void)
3936-{
3937- fp_raise_xcp(FP_DIV_BY_ZERO);
3938-}
3939-
3940-void npy_set_floatstatus_overflow(void)
3941-{
3942- fp_raise_xcp(FP_OVERFLOW);
3943-}
3944-
3945-void npy_set_floatstatus_underflow(void)
3946-{
3947- fp_raise_xcp(FP_UNDERFLOW);
3948-}
3949-
3950-void npy_set_floatstatus_invalid(void)
3951-{
3952- fp_raise_xcp(FP_INVALID);
3953-}
3954-
3955-#else
3956-
3957-/*
3958- * By using a volatile floating point value,
3959- * the compiler is forced to actually do the requested
3960- * operations because of potential concurrency.
3961- *
3962- * We shouldn't write multiple values to a single
3963- * global here, because that would cause
3964- * a race condition.
3965- */
3966-static volatile double _npy_floatstatus_x,
3967- _npy_floatstatus_zero = 0.0, _npy_floatstatus_big = 1e300,
3968- _npy_floatstatus_small = 1e-300, _npy_floatstatus_inf;
3969-
3970-void npy_set_floatstatus_divbyzero(void)
3971-{
3972- _npy_floatstatus_x = 1.0 / _npy_floatstatus_zero;
3973-}
3974-
3975-void npy_set_floatstatus_overflow(void)
3976-{
3977- _npy_floatstatus_x = _npy_floatstatus_big * 1e300;
3978-}
3979-
3980-void npy_set_floatstatus_underflow(void)
3981-{
3982- _npy_floatstatus_x = _npy_floatstatus_small * 1e-300;
3983-}
3984-
3985-void npy_set_floatstatus_invalid(void)
3986-{
3987- _npy_floatstatus_inf = NPY_INFINITY;
3988- _npy_floatstatus_x = _npy_floatstatus_inf - NPY_INFINITY;
3989-}
3990-
3991-#endif
3992
3993=== removed file '.pc/ppc64el_cpu_config.patch/numpy/core/src/npymath/npy_math_private.h'
3994--- .pc/ppc64el_cpu_config.patch/numpy/core/src/npymath/npy_math_private.h 2014-01-03 17:10:24 +0000
3995+++ .pc/ppc64el_cpu_config.patch/numpy/core/src/npymath/npy_math_private.h 1970-01-01 00:00:00 +0000
3996@@ -1,518 +0,0 @@
3997-/*
3998- *
3999- * ====================================================
4000- * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
4001- *
4002- * Developed at SunPro, a Sun Microsystems, Inc. business.
4003- * Permission to use, copy, modify, and distribute this
4004- * software is freely granted, provided that this notice
4005- * is preserved.
4006- * ====================================================
4007- */
4008-
4009-/*
4010- * from: @(#)fdlibm.h 5.1 93/09/24
4011- * $FreeBSD$
4012- */
4013-
4014-#ifndef _NPY_MATH_PRIVATE_H_
4015-#define _NPY_MATH_PRIVATE_H_
4016-
4017-#include <Python.h>
4018-#include <math.h>
4019-
4020-#include "npy_config.h"
4021-#include "npy_fpmath.h"
4022-
4023-#include "numpy/npy_math.h"
4024-#include "numpy/npy_cpu.h"
4025-#include "numpy/npy_endian.h"
4026-#include "numpy/npy_common.h"
4027-
4028-/*
4029- * The original fdlibm code used statements like:
4030- * n0 = ((*(int*)&one)>>29)^1; * index of high word *
4031- * ix0 = *(n0+(int*)&x); * high word of x *
4032- * ix1 = *((1-n0)+(int*)&x); * low word of x *
4033- * to dig two 32 bit words out of the 64 bit IEEE floating point
4034- * value. That is non-ANSI, and, moreover, the gcc instruction
4035- * scheduler gets it wrong. We instead use the following macros.
4036- * Unlike the original code, we determine the endianness at compile
4037- * time, not at run time; I don't see much benefit to selecting
4038- * endianness at run time.
4039- */
4040-
4041-/*
4042- * A union which permits us to convert between a double and two 32 bit
4043- * ints.
4044- */
4045-
4046-/* XXX: not really, but we already make this assumption elsewhere. Will have to
4047- * fix this at some point */
4048-#define IEEE_WORD_ORDER NPY_BYTE_ORDER
4049-
4050-#if IEEE_WORD_ORDER == NPY_BIG_ENDIAN
4051-
4052-typedef union
4053-{
4054- double value;
4055- struct
4056- {
4057- npy_uint32 msw;
4058- npy_uint32 lsw;
4059- } parts;
4060-} ieee_double_shape_type;
4061-
4062-#endif
4063-
4064-#if IEEE_WORD_ORDER == NPY_LITTLE_ENDIAN
4065-
4066-typedef union
4067-{
4068- double value;
4069- struct
4070- {
4071- npy_uint32 lsw;
4072- npy_uint32 msw;
4073- } parts;
4074-} ieee_double_shape_type;
4075-
4076-#endif
4077-
4078-/* Get two 32 bit ints from a double. */
4079-
4080-#define EXTRACT_WORDS(ix0,ix1,d) \
4081-do { \
4082- ieee_double_shape_type ew_u; \
4083- ew_u.value = (d); \
4084- (ix0) = ew_u.parts.msw; \
4085- (ix1) = ew_u.parts.lsw; \
4086-} while (0)
4087-
4088-/* Get the more significant 32 bit int from a double. */
4089-
4090-#define GET_HIGH_WORD(i,d) \
4091-do { \
4092- ieee_double_shape_type gh_u; \
4093- gh_u.value = (d); \
4094- (i) = gh_u.parts.msw; \
4095-} while (0)
4096-
4097-/* Get the less significant 32 bit int from a double. */
4098-
4099-#define GET_LOW_WORD(i,d) \
4100-do { \
4101- ieee_double_shape_type gl_u; \
4102- gl_u.value = (d); \
4103- (i) = gl_u.parts.lsw; \
4104-} while (0)
4105-
4106-/* Set the more significant 32 bits of a double from an int. */
4107-
4108-#define SET_HIGH_WORD(d,v) \
4109-do { \
4110- ieee_double_shape_type sh_u; \
4111- sh_u.value = (d); \
4112- sh_u.parts.msw = (v); \
4113- (d) = sh_u.value; \
4114-} while (0)
4115-
4116-/* Set the less significant 32 bits of a double from an int. */
4117-
4118-#define SET_LOW_WORD(d,v) \
4119-do { \
4120- ieee_double_shape_type sl_u; \
4121- sl_u.value = (d); \
4122- sl_u.parts.lsw = (v); \
4123- (d) = sl_u.value; \
4124-} while (0)
4125-
4126-/* Set a double from two 32 bit ints. */
4127-
4128-#define INSERT_WORDS(d,ix0,ix1) \
4129-do { \
4130- ieee_double_shape_type iw_u; \
4131- iw_u.parts.msw = (ix0); \
4132- iw_u.parts.lsw = (ix1); \
4133- (d) = iw_u.value; \
4134-} while (0)
4135-
4136-/*
4137- * A union which permits us to convert between a float and a 32 bit
4138- * int.
4139- */
4140-
4141-typedef union
4142-{
4143- float value;
4144- /* FIXME: Assumes 32 bit int. */
4145- npy_uint32 word;
4146-} ieee_float_shape_type;
4147-
4148-/* Get a 32 bit int from a float. */
4149-
4150-#define GET_FLOAT_WORD(i,d) \
4151-do { \
4152- ieee_float_shape_type gf_u; \
4153- gf_u.value = (d); \
4154- (i) = gf_u.word; \
4155-} while (0)
4156-
4157-/* Set a float from a 32 bit int. */
4158-
4159-#define SET_FLOAT_WORD(d,i) \
4160-do { \
4161- ieee_float_shape_type sf_u; \
4162- sf_u.word = (i); \
4163- (d) = sf_u.value; \
4164-} while (0)
4165-
4166-#ifdef NPY_USE_C99_COMPLEX
4167-#include <complex.h>
4168-#endif
4169-
4170-/*
4171- * Long double support
4172- */
4173-#if defined(HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE)
4174- /*
4175- * Intel extended 80 bits precision. Bit representation is
4176- * | junk | s |eeeeeeeeeeeeeee|mmmmmmmm................mmmmmmm|
4177- * | 16 bits| 1 bit | 15 bits | 64 bits |
4178- * | a[2] | a[1] | a[0] |
4179- *
4180- * 16 stronger bits of a[2] are junk
4181- */
4182- typedef npy_uint32 IEEEl2bitsrep_part;
4183-
4184-
4185- union IEEEl2bitsrep {
4186- npy_longdouble e;
4187- IEEEl2bitsrep_part a[3];
4188- };
4189-
4190- #define LDBL_MANL_INDEX 0
4191- #define LDBL_MANL_MASK 0xFFFFFFFF
4192- #define LDBL_MANL_SHIFT 0
4193-
4194- #define LDBL_MANH_INDEX 1
4195- #define LDBL_MANH_MASK 0xFFFFFFFF
4196- #define LDBL_MANH_SHIFT 0
4197-
4198- #define LDBL_EXP_INDEX 2
4199- #define LDBL_EXP_MASK 0x7FFF
4200- #define LDBL_EXP_SHIFT 0
4201-
4202- #define LDBL_SIGN_INDEX 2
4203- #define LDBL_SIGN_MASK 0x8000
4204- #define LDBL_SIGN_SHIFT 15
4205-
4206- #define LDBL_NBIT 0x80000000
4207-
4208- typedef npy_uint32 ldouble_man_t;
4209- typedef npy_uint32 ldouble_exp_t;
4210- typedef npy_uint32 ldouble_sign_t;
4211-#elif defined(HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE)
4212- /*
4213- * Intel extended 80 bits precision, 16 bytes alignment. Bit representation is
4214- * | junk | s |eeeeeeeeeeeeeee|mmmmmmmm................mmmmmmm|
4215- * | 16 bits| 1 bit | 15 bits | 64 bits |
4216- * | a[2] | a[1] | a[0] |
4217- *
4218- * a[3] and 16 stronger bits of a[2] are junk
4219- */
4220- typedef npy_uint32 IEEEl2bitsrep_part;
4221-
4222- union IEEEl2bitsrep {
4223- npy_longdouble e;
4224- IEEEl2bitsrep_part a[4];
4225- };
4226-
4227- #define LDBL_MANL_INDEX 0
4228- #define LDBL_MANL_MASK 0xFFFFFFFF
4229- #define LDBL_MANL_SHIFT 0
4230-
4231- #define LDBL_MANH_INDEX 1
4232- #define LDBL_MANH_MASK 0xFFFFFFFF
4233- #define LDBL_MANH_SHIFT 0
4234-
4235- #define LDBL_EXP_INDEX 2
4236- #define LDBL_EXP_MASK 0x7FFF
4237- #define LDBL_EXP_SHIFT 0
4238-
4239- #define LDBL_SIGN_INDEX 2
4240- #define LDBL_SIGN_MASK 0x8000
4241- #define LDBL_SIGN_SHIFT 15
4242-
4243- #define LDBL_NBIT 0x800000000
4244-
4245- typedef npy_uint32 ldouble_man_t;
4246- typedef npy_uint32 ldouble_exp_t;
4247- typedef npy_uint32 ldouble_sign_t;
4248-#elif defined(HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE)
4249- /*
4250- * Motorola extended 80 bits precision. Bit representation is
4251- * | s |eeeeeeeeeeeeeee| junk |mmmmmmmm................mmmmmmm|
4252- * | 1 bit | 15 bits | 16 bits| 64 bits |
4253- * | a[0] | a[1] | a[2] |
4254- *
4255- * 16 low bits of a[0] are junk
4256- */
4257- typedef npy_uint32 IEEEl2bitsrep_part;
4258-
4259-
4260- union IEEEl2bitsrep {
4261- npy_longdouble e;
4262- IEEEl2bitsrep_part a[3];
4263- };
4264-
4265- #define LDBL_MANL_INDEX 2
4266- #define LDBL_MANL_MASK 0xFFFFFFFF
4267- #define LDBL_MANL_SHIFT 0
4268-
4269- #define LDBL_MANH_INDEX 1
4270- #define LDBL_MANH_MASK 0xFFFFFFFF
4271- #define LDBL_MANH_SHIFT 0
4272-
4273- #define LDBL_EXP_INDEX 0
4274- #define LDBL_EXP_MASK 0x7FFF0000
4275- #define LDBL_EXP_SHIFT 16
4276-
4277- #define LDBL_SIGN_INDEX 0
4278- #define LDBL_SIGN_MASK 0x80000000
4279- #define LDBL_SIGN_SHIFT 31
4280-
4281- #define LDBL_NBIT 0x80000000
4282-
4283- typedef npy_uint32 ldouble_man_t;
4284- typedef npy_uint32 ldouble_exp_t;
4285- typedef npy_uint32 ldouble_sign_t;
4286-#elif defined(HAVE_LDOUBLE_IEEE_DOUBLE_16_BYTES_BE) || \
4287- defined(HAVE_LDOUBLE_IEEE_DOUBLE_BE)
4288- /* 64 bits IEEE double precision aligned on 16 bytes: used by ppc arch on
4289- * Mac OS X */
4290-
4291- /*
4292- * IEEE double precision. Bit representation is
4293- * | s |eeeeeeeeeee|mmmmmmmm................mmmmmmm|
4294- * |1 bit| 11 bits | 52 bits |
4295- * | a[0] | a[1] |
4296- */
4297- typedef npy_uint32 IEEEl2bitsrep_part;
4298-
4299- union IEEEl2bitsrep {
4300- npy_longdouble e;
4301- IEEEl2bitsrep_part a[2];
4302- };
4303-
4304- #define LDBL_MANL_INDEX 1
4305- #define LDBL_MANL_MASK 0xFFFFFFFF
4306- #define LDBL_MANL_SHIFT 0
4307-
4308- #define LDBL_MANH_INDEX 0
4309- #define LDBL_MANH_MASK 0x000FFFFF
4310- #define LDBL_MANH_SHIFT 0
4311-
4312- #define LDBL_EXP_INDEX 0
4313- #define LDBL_EXP_MASK 0x7FF00000
4314- #define LDBL_EXP_SHIFT 20
4315-
4316- #define LDBL_SIGN_INDEX 0
4317- #define LDBL_SIGN_MASK 0x80000000
4318- #define LDBL_SIGN_SHIFT 31
4319-
4320- #define LDBL_NBIT 0
4321-
4322- typedef npy_uint32 ldouble_man_t;
4323- typedef npy_uint32 ldouble_exp_t;
4324- typedef npy_uint32 ldouble_sign_t;
4325-#elif defined(HAVE_LDOUBLE_IEEE_DOUBLE_LE)
4326- /* 64 bits IEEE double precision, Little Endian. */
4327-
4328- /*
4329- * IEEE double precision. Bit representation is
4330- * | s |eeeeeeeeeee|mmmmmmmm................mmmmmmm|
4331- * |1 bit| 11 bits | 52 bits |
4332- * | a[1] | a[0] |
4333- */
4334- typedef npy_uint32 IEEEl2bitsrep_part;
4335-
4336- union IEEEl2bitsrep {
4337- npy_longdouble e;
4338- IEEEl2bitsrep_part a[2];
4339- };
4340-
4341- #define LDBL_MANL_INDEX 0
4342- #define LDBL_MANL_MASK 0xFFFFFFFF
4343- #define LDBL_MANL_SHIFT 0
4344-
4345- #define LDBL_MANH_INDEX 1
4346- #define LDBL_MANH_MASK 0x000FFFFF
4347- #define LDBL_MANH_SHIFT 0
4348-
4349- #define LDBL_EXP_INDEX 1
4350- #define LDBL_EXP_MASK 0x7FF00000
4351- #define LDBL_EXP_SHIFT 20
4352-
4353- #define LDBL_SIGN_INDEX 1
4354- #define LDBL_SIGN_MASK 0x80000000
4355- #define LDBL_SIGN_SHIFT 31
4356-
4357- #define LDBL_NBIT 0x00000080
4358-
4359- typedef npy_uint32 ldouble_man_t;
4360- typedef npy_uint32 ldouble_exp_t;
4361- typedef npy_uint32 ldouble_sign_t;
4362-#elif defined(HAVE_LDOUBLE_IEEE_QUAD_BE)
4363- /*
4364- * IEEE quad precision, Big Endian. Bit representation is
4365- * | s |eeeeeeeeeee|mmmmmmmm................mmmmmmm|
4366- * |1 bit| 15 bits | 112 bits |
4367- * | a[0] | a[1] |
4368- */
4369- typedef npy_uint64 IEEEl2bitsrep_part;
4370-
4371- union IEEEl2bitsrep {
4372- npy_longdouble e;
4373- IEEEl2bitsrep_part a[2];
4374- };
4375-
4376- #define LDBL_MANL_INDEX 1
4377- #define LDBL_MANL_MASK 0xFFFFFFFFFFFFFFFF
4378- #define LDBL_MANL_SHIFT 0
4379-
4380- #define LDBL_MANH_INDEX 0
4381- #define LDBL_MANH_MASK 0x0000FFFFFFFFFFFF
4382- #define LDBL_MANH_SHIFT 0
4383-
4384- #define LDBL_EXP_INDEX 0
4385- #define LDBL_EXP_MASK 0x7FFF000000000000
4386- #define LDBL_EXP_SHIFT 48
4387-
4388- #define LDBL_SIGN_INDEX 0
4389- #define LDBL_SIGN_MASK 0x8000000000000000
4390- #define LDBL_SIGN_SHIFT 63
4391-
4392- #define LDBL_NBIT 0
4393-
4394- typedef npy_uint64 ldouble_man_t;
4395- typedef npy_uint64 ldouble_exp_t;
4396- typedef npy_uint32 ldouble_sign_t;
4397-#elif defined(HAVE_LDOUBLE_IEEE_QUAD_LE)
4398- /*
4399- * IEEE quad precision, Little Endian. Bit representation is
4400- * | s |eeeeeeeeeee|mmmmmmmm................mmmmmmm|
4401- * |1 bit| 15 bits | 112 bits |
4402- * | a[1] | a[0] |
4403- */
4404- typedef npy_uint64 IEEEl2bitsrep_part;
4405-
4406- union IEEEl2bitsrep {
4407- npy_longdouble e;
4408- IEEEl2bitsrep_part a[2];
4409- };
4410-
4411- #define LDBL_MANL_INDEX 0
4412- #define LDBL_MANL_MASK 0xFFFFFFFFFFFFFFFF
4413- #define LDBL_MANL_SHIFT 0
4414-
4415- #define LDBL_MANH_INDEX 1
4416- #define LDBL_MANH_MASK 0x0000FFFFFFFFFFFF
4417- #define LDBL_MANH_SHIFT 0
4418-
4419- #define LDBL_EXP_INDEX 1
4420- #define LDBL_EXP_MASK 0x7FFF000000000000
4421- #define LDBL_EXP_SHIFT 48
4422-
4423- #define LDBL_SIGN_INDEX 1
4424- #define LDBL_SIGN_MASK 0x8000000000000000
4425- #define LDBL_SIGN_SHIFT 63
4426-
4427- #define LDBL_NBIT 0
4428-
4429- typedef npy_uint64 ldouble_man_t;
4430- typedef npy_uint64 ldouble_exp_t;
4431- typedef npy_uint32 ldouble_sign_t;
4432-#endif
4433-
4434-#ifndef HAVE_LDOUBLE_DOUBLE_DOUBLE_BE
4435-/* Get the sign bit of x. x should be of type IEEEl2bitsrep */
4436-#define GET_LDOUBLE_SIGN(x) \
4437- (((x).a[LDBL_SIGN_INDEX] & LDBL_SIGN_MASK) >> LDBL_SIGN_SHIFT)
4438-
4439-/* Set the sign bit of x to v. x should be of type IEEEl2bitsrep */
4440-#define SET_LDOUBLE_SIGN(x, v) \
4441- ((x).a[LDBL_SIGN_INDEX] = \
4442- ((x).a[LDBL_SIGN_INDEX] & ~LDBL_SIGN_MASK) | \
4443- (((IEEEl2bitsrep_part)(v) << LDBL_SIGN_SHIFT) & LDBL_SIGN_MASK))
4444-
4445-/* Get the exp bits of x. x should be of type IEEEl2bitsrep */
4446-#define GET_LDOUBLE_EXP(x) \
4447- (((x).a[LDBL_EXP_INDEX] & LDBL_EXP_MASK) >> LDBL_EXP_SHIFT)
4448-
4449-/* Set the exp bit of x to v. x should be of type IEEEl2bitsrep */
4450-#define SET_LDOUBLE_EXP(x, v) \
4451- ((x).a[LDBL_EXP_INDEX] = \
4452- ((x).a[LDBL_EXP_INDEX] & ~LDBL_EXP_MASK) | \
4453- (((IEEEl2bitsrep_part)(v) << LDBL_EXP_SHIFT) & LDBL_EXP_MASK))
4454-
4455-/* Get the manl bits of x. x should be of type IEEEl2bitsrep */
4456-#define GET_LDOUBLE_MANL(x) \
4457- (((x).a[LDBL_MANL_INDEX] & LDBL_MANL_MASK) >> LDBL_MANL_SHIFT)
4458-
4459-/* Set the manl bit of x to v. x should be of type IEEEl2bitsrep */
4460-#define SET_LDOUBLE_MANL(x, v) \
4461- ((x).a[LDBL_MANL_INDEX] = \
4462- ((x).a[LDBL_MANL_INDEX] & ~LDBL_MANL_MASK) | \
4463- (((IEEEl2bitsrep_part)(v) << LDBL_MANL_SHIFT) & LDBL_MANL_MASK))
4464-
4465-/* Get the manh bits of x. x should be of type IEEEl2bitsrep */
4466-#define GET_LDOUBLE_MANH(x) \
4467- (((x).a[LDBL_MANH_INDEX] & LDBL_MANH_MASK) >> LDBL_MANH_SHIFT)
4468-
4469-/* Set the manh bit of x to v. x should be of type IEEEl2bitsrep */
4470-#define SET_LDOUBLE_MANH(x, v) \
4471- ((x).a[LDBL_MANH_INDEX] = \
4472- ((x).a[LDBL_MANH_INDEX] & ~LDBL_MANH_MASK) | \
4473- (((IEEEl2bitsrep_part)(v) << LDBL_MANH_SHIFT) & LDBL_MANH_MASK))
4474-
4475-#endif /* #ifndef HAVE_LDOUBLE_DOUBLE_DOUBLE_BE */
4476-
4477-/*
4478- * Those unions are used to convert a pointer of npy_cdouble to native C99
4479- * complex or our own complex type independently on whether C99 complex
4480- * support is available
4481- */
4482-#ifdef NPY_USE_C99_COMPLEX
4483-typedef union {
4484- npy_cdouble npy_z;
4485- complex double c99_z;
4486-} __npy_cdouble_to_c99_cast;
4487-
4488-typedef union {
4489- npy_cfloat npy_z;
4490- complex float c99_z;
4491-} __npy_cfloat_to_c99_cast;
4492-
4493-typedef union {
4494- npy_clongdouble npy_z;
4495- complex long double c99_z;
4496-} __npy_clongdouble_to_c99_cast;
4497-#else
4498-typedef union {
4499- npy_cdouble npy_z;
4500- npy_cdouble c99_z;
4501-} __npy_cdouble_to_c99_cast;
4502-
4503-typedef union {
4504- npy_cfloat npy_z;
4505- npy_cfloat c99_z;
4506-} __npy_cfloat_to_c99_cast;
4507-
4508-typedef union {
4509- npy_clongdouble npy_z;
4510- npy_clongdouble c99_z;
4511-} __npy_clongdouble_to_c99_cast;
4512-#endif
4513-
4514-#endif /* !_NPY_MATH_PRIVATE_H_ */
4515
4516=== removed directory '.pc/ppc64el_cpu_config.patch/numpy/core/src/private'
4517=== removed file '.pc/ppc64el_cpu_config.patch/numpy/core/src/private/npy_fpmath.h'
4518--- .pc/ppc64el_cpu_config.patch/numpy/core/src/private/npy_fpmath.h 2013-12-15 14:33:37 +0000
4519+++ .pc/ppc64el_cpu_config.patch/numpy/core/src/private/npy_fpmath.h 1970-01-01 00:00:00 +0000
4520@@ -1,48 +0,0 @@
4521-#ifndef _NPY_NPY_FPMATH_H_
4522-#define _NPY_NPY_FPMATH_H_
4523-
4524-#include "npy_config.h"
4525-
4526-#include "numpy/npy_os.h"
4527-#include "numpy/npy_cpu.h"
4528-#include "numpy/npy_common.h"
4529-
4530-#ifdef NPY_OS_DARWIN
4531- /* This hardcoded logic is fragile, but universal builds makes it
4532- * difficult to detect arch-specific features */
4533-
4534- /* MAC OS X < 10.4 and gcc < 4 does not support proper long double, and
4535- * is the same as double on those platforms */
4536- #if NPY_BITSOF_LONGDOUBLE == NPY_BITSOF_DOUBLE
4537- /* This assumes that FPU and ALU have the same endianness */
4538- #if NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
4539- #define HAVE_LDOUBLE_IEEE_DOUBLE_LE
4540- #elif NPY_BYTE_ORDER == NPY_BIG_ENDIAN
4541- #define HAVE_LDOUBLE_IEEE_DOUBLE_BE
4542- #else
4543- #error Endianness undefined ?
4544- #endif
4545- #else
4546- #if defined(NPY_CPU_X86)
4547- #define HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE
4548- #elif defined(NPY_CPU_AMD64)
4549- #define HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE
4550- #elif defined(NPY_CPU_PPC) || defined(NPY_CPU_PPC64)
4551- #define HAVE_LDOUBLE_IEEE_DOUBLE_16_BYTES_BE
4552- #endif
4553- #endif
4554-#endif
4555-
4556-#if !(defined(HAVE_LDOUBLE_IEEE_QUAD_BE) || \
4557- defined(HAVE_LDOUBLE_IEEE_QUAD_LE) || \
4558- defined(HAVE_LDOUBLE_IEEE_DOUBLE_LE) || \
4559- defined(HAVE_LDOUBLE_IEEE_DOUBLE_BE) || \
4560- defined(HAVE_LDOUBLE_IEEE_DOUBLE_16_BYTES_BE) || \
4561- defined(HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE) || \
4562- defined(HAVE_LDOUBLE_INTEL_EXTENDED_12_BYTES_LE) || \
4563- defined(HAVE_LDOUBLE_MOTOROLA_EXTENDED_12_BYTES_BE) || \
4564- defined(HAVE_LDOUBLE_DOUBLE_DOUBLE_BE))
4565- #error No long double representation defined
4566-#endif
4567-
4568-#endif
4569
4570=== removed directory '.pc/python3-soabi.patch'
4571=== removed directory '.pc/python3-soabi.patch/numpy'
4572=== removed file '.pc/python3-soabi.patch/numpy/ctypeslib.py'
4573--- .pc/python3-soabi.patch/numpy/ctypeslib.py 2014-01-03 17:10:24 +0000
4574+++ .pc/python3-soabi.patch/numpy/ctypeslib.py 1970-01-01 00:00:00 +0000
4575@@ -1,426 +0,0 @@
4576-"""
4577-============================
4578-``ctypes`` Utility Functions
4579-============================
4580-
4581-See Also
4582----------
4583-load_library : Load a C library.
4584-ndpointer : Array restype/argtype with verification.
4585-as_ctypes : Create a ctypes array from an ndarray.
4586-as_array : Create an ndarray from a ctypes array.
4587-
4588-References
4589-----------
4590-.. [1] "SciPy Cookbook: ctypes", http://www.scipy.org/Cookbook/Ctypes
4591-
4592-Examples
4593---------
4594-Load the C library:
4595-
4596->>> _lib = np.ctypeslib.load_library('libmystuff', '.') #doctest: +SKIP
4597-
4598-Our result type, an ndarray that must be of type double, be 1-dimensional
4599-and is C-contiguous in memory:
4600-
4601->>> array_1d_double = np.ctypeslib.ndpointer(
4602-... dtype=np.double,
4603-... ndim=1, flags='CONTIGUOUS') #doctest: +SKIP
4604-
4605-Our C-function typically takes an array and updates its values
4606-in-place. For example::
4607-
4608- void foo_func(double* x, int length)
4609- {
4610- int i;
4611- for (i = 0; i < length; i++) {
4612- x[i] = i*i;
4613- }
4614- }
4615-
4616-We wrap it using:
4617-
4618->>> _lib.foo_func.restype = None #doctest: +SKIP
4619->>> _lib.foo_func.argtypes = [array_1d_double, c_int] #doctest: +SKIP
4620-
4621-Then, we're ready to call ``foo_func``:
4622-
4623->>> out = np.empty(15, dtype=np.double)
4624->>> _lib.foo_func(out, len(out)) #doctest: +SKIP
4625-
4626-"""
4627-from __future__ import division, absolute_import, print_function
4628-
4629-__all__ = ['load_library', 'ndpointer', 'test', 'ctypes_load_library',
4630- 'c_intp', 'as_ctypes', 'as_array']
4631-
4632-import sys, os
4633-from numpy import integer, ndarray, dtype as _dtype, deprecate, array
4634-from numpy.core.multiarray import _flagdict, flagsobj
4635-
4636-try:
4637- import ctypes
4638-except ImportError:
4639- ctypes = None
4640-
4641-if ctypes is None:
4642- def _dummy(*args, **kwds):
4643- """
4644- Dummy object that raises an ImportError if ctypes is not available.
4645-
4646- Raises
4647- ------
4648- ImportError
4649- If ctypes is not available.
4650-
4651- """
4652- raise ImportError("ctypes is not available.")
4653- ctypes_load_library = _dummy
4654- load_library = _dummy
4655- as_ctypes = _dummy
4656- as_array = _dummy
4657- from numpy import intp as c_intp
4658- _ndptr_base = object
4659-else:
4660- import numpy.core._internal as nic
4661- c_intp = nic._getintp_ctype()
4662- del nic
4663- _ndptr_base = ctypes.c_void_p
4664-
4665- # Adapted from Albert Strasheim
4666- def load_library(libname, loader_path):
4667- if ctypes.__version__ < '1.0.1':
4668- import warnings
4669- warnings.warn("All features of ctypes interface may not work " \
4670- "with ctypes < 1.0.1")
4671-
4672- ext = os.path.splitext(libname)[1]
4673- if not ext:
4674- # Try to load library with platform-specific name, otherwise
4675- # default to libname.[so|pyd]. Sometimes, these files are built
4676- # erroneously on non-linux platforms.
4677- from numpy.distutils.misc_util import get_shared_lib_extension
4678- so_ext = get_shared_lib_extension()
4679- libname_ext = [libname + so_ext]
4680- # mac, windows and linux >= py3.2 shared library and loadable
4681- # module have different extensions so try both
4682- so_ext2 = get_shared_lib_extension(is_python_ext=True)
4683- if not so_ext2 == so_ext:
4684- libname_ext.insert(0, libname + so_ext2)
4685- else:
4686- libname_ext = [libname]
4687-
4688- loader_path = os.path.abspath(loader_path)
4689- if not os.path.isdir(loader_path):
4690- libdir = os.path.dirname(loader_path)
4691- else:
4692- libdir = loader_path
4693-
4694- for ln in libname_ext:
4695- libpath = os.path.join(libdir, ln)
4696- if os.path.exists(libpath):
4697- try:
4698- return ctypes.cdll[libpath]
4699- except OSError:
4700- ## defective lib file
4701- raise
4702- ## if no successful return in the libname_ext loop:
4703- raise OSError("no file with expected extension")
4704-
4705- ctypes_load_library = deprecate(load_library, 'ctypes_load_library',
4706- 'load_library')
4707-
4708-def _num_fromflags(flaglist):
4709- num = 0
4710- for val in flaglist:
4711- num += _flagdict[val]
4712- return num
4713-
4714-_flagnames = ['C_CONTIGUOUS', 'F_CONTIGUOUS', 'ALIGNED', 'WRITEABLE',
4715- 'OWNDATA', 'UPDATEIFCOPY']
4716-def _flags_fromnum(num):
4717- res = []
4718- for key in _flagnames:
4719- value = _flagdict[key]
4720- if (num & value):
4721- res.append(key)
4722- return res
4723-
4724-
4725-class _ndptr(_ndptr_base):
4726-
4727- def _check_retval_(self):
4728- """This method is called when this class is used as the .restype
4729- asttribute for a shared-library function. It constructs a numpy
4730- array from a void pointer."""
4731- return array(self)
4732-
4733- @property
4734- def __array_interface__(self):
4735- return {'descr': self._dtype_.descr,
4736- '__ref': self,
4737- 'strides': None,
4738- 'shape': self._shape_,
4739- 'version': 3,
4740- 'typestr': self._dtype_.descr[0][1],
4741- 'data': (self.value, False),
4742- }
4743-
4744- @classmethod
4745- def from_param(cls, obj):
4746- if not isinstance(obj, ndarray):
4747- raise TypeError("argument must be an ndarray")
4748- if cls._dtype_ is not None \
4749- and obj.dtype != cls._dtype_:
4750- raise TypeError("array must have data type %s" % cls._dtype_)
4751- if cls._ndim_ is not None \
4752- and obj.ndim != cls._ndim_:
4753- raise TypeError("array must have %d dimension(s)" % cls._ndim_)
4754- if cls._shape_ is not None \
4755- and obj.shape != cls._shape_:
4756- raise TypeError("array must have shape %s" % str(cls._shape_))
4757- if cls._flags_ is not None \
4758- and ((obj.flags.num & cls._flags_) != cls._flags_):
4759- raise TypeError("array must have flags %s" %
4760- _flags_fromnum(cls._flags_))
4761- return obj.ctypes
4762-
4763-
4764-# Factory for an array-checking class with from_param defined for
4765-# use with ctypes argtypes mechanism
4766-_pointer_type_cache = {}
4767-def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
4768- """
4769- Array-checking restype/argtypes.
4770-
4771- An ndpointer instance is used to describe an ndarray in restypes
4772- and argtypes specifications. This approach is more flexible than
4773- using, for example, ``POINTER(c_double)``, since several restrictions
4774- can be specified, which are verified upon calling the ctypes function.
4775- These include data type, number of dimensions, shape and flags. If a
4776- given array does not satisfy the specified restrictions,
4777- a ``TypeError`` is raised.
4778-
4779- Parameters
4780- ----------
4781- dtype : data-type, optional
4782- Array data-type.
4783- ndim : int, optional
4784- Number of array dimensions.
4785- shape : tuple of ints, optional
4786- Array shape.
4787- flags : str or tuple of str
4788- Array flags; may be one or more of:
4789-
4790- - C_CONTIGUOUS / C / CONTIGUOUS
4791- - F_CONTIGUOUS / F / FORTRAN
4792- - OWNDATA / O
4793- - WRITEABLE / W
4794- - ALIGNED / A
4795- - UPDATEIFCOPY / U
4796-
4797- Returns
4798- -------
4799- klass : ndpointer type object
4800- A type object, which is an ``_ndptr`` subclass containing
4801- dtype, ndim, shape and flags information.
4802-
4803- Raises
4804- ------
4805- TypeError
4806- If a given array does not satisfy the specified restrictions.
4807-
4808- Examples
4809- --------
4810- >>> clib.somefunc.argtypes = [np.ctypeslib.ndpointer(dtype=np.float64,
4811- ... ndim=1,
4812- ... flags='C_CONTIGUOUS')]
4813- ... #doctest: +SKIP
4814- >>> clib.somefunc(np.array([1, 2, 3], dtype=np.float64))
4815- ... #doctest: +SKIP
4816-
4817- """
4818-
4819- if dtype is not None:
4820- dtype = _dtype(dtype)
4821- num = None
4822- if flags is not None:
4823- if isinstance(flags, str):
4824- flags = flags.split(',')
4825- elif isinstance(flags, (int, integer)):
4826- num = flags
4827- flags = _flags_fromnum(num)
4828- elif isinstance(flags, flagsobj):
4829- num = flags.num
4830- flags = _flags_fromnum(num)
4831- if num is None:
4832- try:
4833- flags = [x.strip().upper() for x in flags]
4834- except:
4835- raise TypeError("invalid flags specification")
4836- num = _num_fromflags(flags)
4837- try:
4838- return _pointer_type_cache[(dtype, ndim, shape, num)]
4839- except KeyError:
4840- pass
4841- if dtype is None:
4842- name = 'any'
4843- elif dtype.names:
4844- name = str(id(dtype))
4845- else:
4846- name = dtype.str
4847- if ndim is not None:
4848- name += "_%dd" % ndim
4849- if shape is not None:
4850- try:
4851- strshape = [str(x) for x in shape]
4852- except TypeError:
4853- strshape = [str(shape)]
4854- shape = (shape,)
4855- shape = tuple(shape)
4856- name += "_"+"x".join(strshape)
4857- if flags is not None:
4858- name += "_"+"_".join(flags)
4859- else:
4860- flags = []
4861- klass = type("ndpointer_%s"%name, (_ndptr,),
4862- {"_dtype_": dtype,
4863- "_shape_" : shape,
4864- "_ndim_" : ndim,
4865- "_flags_" : num})
4866- _pointer_type_cache[(dtype, ndim, shape, num)] = klass
4867- return klass
4868-
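As a hedged illustration of what the ndpointer factory above enforces, from_param can be exercised directly without any C library; the array values and names here are arbitrary and not taken from the diff:

    import numpy as np
    from numpy.ctypeslib import ndpointer

    # type object that accepts only 1-d, C-contiguous float64 arrays
    arg_t = ndpointer(dtype=np.float64, ndim=1, flags='C_CONTIGUOUS')

    ok = np.zeros(4)
    print(arg_t.from_param(ok))              # passes, returns ok.ctypes

    try:
        arg_t.from_param(np.zeros((2, 2)))   # wrong number of dimensions
    except TypeError as e:
        print(e)                             # "array must have 1 dimension(s)"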
4869-if ctypes is not None:
4870- ct = ctypes
4871- ################################################################
4872- # simple types
4873-
4874- # maps the numpy typecodes like '<f8' to simple ctypes types like
4875- # c_double. Filled in by prep_simple.
4876- _typecodes = {}
4877-
4878- def prep_simple(simple_type, dtype):
4879- """Given a ctypes simple type, construct and attach an
4880- __array_interface__ property to it if it does not yet have one.
4881- """
4882- try: simple_type.__array_interface__
4883- except AttributeError: pass
4884- else: return
4885-
4886- typestr = _dtype(dtype).str
4887- _typecodes[typestr] = simple_type
4888-
4889- def __array_interface__(self):
4890- return {'descr': [('', typestr)],
4891- '__ref': self,
4892- 'strides': None,
4893- 'shape': (),
4894- 'version': 3,
4895- 'typestr': typestr,
4896- 'data': (ct.addressof(self), False),
4897- }
4898-
4899- simple_type.__array_interface__ = property(__array_interface__)
4900-
4901- simple_types = [
4902- ((ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong), "i"),
4903- ((ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong), "u"),
4904- ((ct.c_float, ct.c_double), "f"),
4905- ]
4906-
4907- # Prep the numerical ctypes types:
4908- for types, code in simple_types:
4909- for tp in types:
4910- prep_simple(tp, "%c%d" % (code, ct.sizeof(tp)))
4911-
4912- ################################################################
4913- # array types
4914-
4915- _ARRAY_TYPE = type(ct.c_int * 1)
4916-
4917- def prep_array(array_type):
4918- """Given a ctypes array type, construct and attach an
4919- __array_interface__ property to it if it does not yet have one.
4920- """
4921- try: array_type.__array_interface__
4922- except AttributeError: pass
4923- else: return
4924-
4925- shape = []
4926- ob = array_type
4927- while type(ob) is _ARRAY_TYPE:
4928- shape.append(ob._length_)
4929- ob = ob._type_
4930- shape = tuple(shape)
4931- ai = ob().__array_interface__
4932- descr = ai['descr']
4933- typestr = ai['typestr']
4934-
4935- def __array_interface__(self):
4936- return {'descr': descr,
4937- '__ref': self,
4938- 'strides': None,
4939- 'shape': shape,
4940- 'version': 3,
4941- 'typestr': typestr,
4942- 'data': (ct.addressof(self), False),
4943- }
4944-
4945- array_type.__array_interface__ = property(__array_interface__)
4946-
4947- def prep_pointer(pointer_obj, shape):
4948- """Given a ctypes pointer object, construct and
4949- attach an __array_interface__ property to it if it does not
4950- yet have one.
4951- """
4952- try: pointer_obj.__array_interface__
4953- except AttributeError: pass
4954- else: return
4955-
4956- contents = pointer_obj.contents
4957- dtype = _dtype(type(contents))
4958-
4959- inter = {'version': 3,
4960- 'typestr': dtype.str,
4961- 'data': (ct.addressof(contents), False),
4962- 'shape': shape}
4963-
4964- pointer_obj.__array_interface__ = inter
4965-
4966- ################################################################
4967- # public functions
4968-
4969- def as_array(obj, shape=None):
4970- """Create a numpy array from a ctypes array or a ctypes POINTER.
4971- The numpy array shares the memory with the ctypes object.
4972-
4973- The shape parameter must be given when converting from a ctypes POINTER;
4974- it is ignored when converting from a ctypes array.
4975- """
4976- tp = type(obj)
4977- try: tp.__array_interface__
4978- except AttributeError:
4979- if hasattr(obj, 'contents'):
4980- prep_pointer(obj, shape)
4981- else:
4982- prep_array(tp)
4983- return array(obj, copy=False)
4984-
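A small sketch of as_array sharing memory with a ctypes buffer; as documented above, the POINTER case needs an explicit shape (values are arbitrary, not from the diff):

    import ctypes
    import numpy as np
    from numpy.ctypeslib import as_array

    buf = (ctypes.c_double * 3)(1.0, 2.0, 3.0)
    arr = as_array(buf)                # wraps the ctypes array, no copy
    arr[0] = 9.0
    assert buf[0] == 9.0               # the memory is shared

    ptr = ctypes.cast(buf, ctypes.POINTER(ctypes.c_double))
    arr2 = as_array(ptr, shape=(3,))   # a POINTER needs an explicit shape
    print(arr2)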
4985- def as_ctypes(obj):
4986- """Create and return a ctypes object from a numpy array. Actually
4987- anything that exposes the __array_interface__ is accepted."""
4988- ai = obj.__array_interface__
4989- if ai["strides"]:
4990- raise TypeError("strided arrays not supported")
4991- if ai["version"] != 3:
4992- raise TypeError("only __array_interface__ version 3 supported")
4993- addr, readonly = ai["data"]
4994- if readonly:
4995- raise TypeError("readonly arrays unsupported")
4996- tp = _typecodes[ai["typestr"]]
4997- for dim in ai["shape"][::-1]:
4998- tp = tp * dim
4999- result = tp.from_address(addr)
5000- result.__keep = ai
The diff has been truncated for viewing.
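The remainder of as_ctypes is cut off by the diff viewer; for completeness, a hedged round-trip sketch of the public API (again an editor illustration, not taken from the diff):

    import ctypes
    import numpy as np
    from numpy.ctypeslib import as_ctypes, as_array

    a = np.arange(6, dtype=np.int32).reshape(2, 3)   # contiguous, writeable
    c = as_ctypes(a)               # nested ctypes array over a's memory
    c[1][2] = 42
    assert a[1, 2] == 42           # writes are visible on both sides

    b = as_array(c)                # and back to an ndarray view
    assert b.shape == (2, 3)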
