Merge lp:~wecacuee/mrol/mrol-dev into lp:~julian-ryde/mrol/main

Proposed by vikas
Status: Approved
Approved by: Julian Ryde
Approved revision: 51
Proposed branch: lp:~wecacuee/mrol/mrol-dev
Merge into: lp:~julian-ryde/mrol/main
Diff against target: 929657 lines (+3047/-462417)
120 files modified
.bzrignore (+11/-0)
README (+0/-52)
compile.sh (+0/-5)
experimental/cup_mapper.py (+0/-99)
experimental/cup_segmenter.py (+0/-142)
full_test.sh (+0/-9)
lgpl.txt (+0/-166)
mrol_mapping/__init__.py (+0/-2)
mrol_mapping/bresenhamline.py (+112/-0)
mrol_mapping/cython/fast.pyx (+0/-112)
mrol_mapping/depth_image.py (+141/-0)
mrol_mapping/log.py (+11/-0)
mrol_mapping/mapper.py (+0/-438)
mrol_mapping/mrol.py (+0/-352)
mrol_mapping/occupiedlist.py (+0/-549)
mrol_mapping/optimization.py (+0/-173)
mrol_mapping/pointcloud.py (+0/-227)
mrol_mapping/poseutil.py (+0/-352)
mrol_mapping/quantizer.py (+0/-144)
mrol_mapping/trajectory.py (+0/-150)
mrol_mapping/util.py (+0/-134)
mrol_mapping/visualiser/dispxyz.py (+0/-191)
mrol_mapping/visualiser/mayaviclient.py (+167/-0)
mrol_mapping/visualiser/mayaviserver.py (+6/-0)
mrol_mapping/visualiser/mayaviviz.py (+83/-0)
mrol_mapping/visualiser/robotvisualiser.py (+0/-216)
mrol_mapping/visualiser/rviz.py (+127/-0)
mrol_ros/CMakeLists.txt (+37/-0)
mrol_ros/Makefile (+1/-0)
mrol_ros/etc/Kinect_demo_config.vcg (+83/-0)
mrol_ros/etc/Kinect_demo_config2.vcg (+209/-0)
mrol_ros/launch/kinect_demo.launch (+24/-0)
mrol_ros/launch/mrol_test_with_bag.launch (+23/-0)
mrol_ros/launch/turtlebot_demo.launch (+40/-0)
mrol_ros/launch/turtlebot_demo_dixie.launch (+25/-0)
mrol_ros/launch/turtlebot_demo_turtlebot.launch (+48/-0)
mrol_ros/launch/turtlebot_essentials_workstation.launch (+13/-0)
mrol_ros/mainpage.dox (+26/-0)
mrol_ros/manifest.xml (+29/-0)
mrol_ros/nodes/Kinect_mapper_demo.py (+451/-0)
mrol_ros/src/icp_mrol/__init__.py (+2/-0)
mrol_ros/src/icp_mrol/icp_derivation.py (+40/-0)
mrol_ros/src/icp_mrol/icp_derivation_p2plane.py (+43/-0)
mrol_ros/src/icp_mrol/icp_mrol.py (+481/-0)
mrol_ros/src/icp_mrol/icp_test.py (+147/-0)
mrol_ros/src/rosbag_to_mrolPC.py (+48/-0)
mrol_ros/test/Kinect_republish.py (+40/-0)
mrol_ros/test/make_cup.py (+92/-0)
play_demo (+0/-6)
rosutils_mrol/__init__.py (+2/-0)
rosutils_mrol/point_cloud.py (+100/-0)
rosutils_mrol/rosutils_mrol.py (+165/-0)
run_tests (+0/-8)
scripts/demo.py (+0/-28)
scripts/dependencies.sh (+0/-11)
scripts/profile (+0/-4)
scripts/showpts.sh (+0/-3)
setup.py (+19/-0)
test/align_segment_test.py (+0/-235)
test/benchmarktest.py (+96/-0)
test/coverage.sh (+0/-5)
test/data/mrol.in.di.3/poses2.txt (+18/-0)
test/data/qcat/1220400779.xyz (+0/-32897)
test/data/qcat/1220400821.xyz (+0/-37140)
test/data/return00.xyz (+0/-14564)
test/data/return01.xyz (+0/-14577)
test/data/simulated/poses.txt (+0/-16)
test/data/simulated/poses2.txt (+0/-18)
test/data/stillscans/1271124950-new.txt (+0/-547)
test/data/stillscans/1271124950-pose.txt (+0/-4)
test/data/stillscans/1271124950-scan.txt (+0/-13556)
test/data/stillscans_100708/1278525843-pose.txt (+0/-4)
test/data/stillscans_100708/1278525843-scan.txt (+0/-20903)
test/data/stillscans_100708/1278525863-pose.txt (+0/-4)
test/data/stillscans_100708/1278525863-scan.txt (+0/-36632)
test/data/stillscans_100708/1278525905-pose.txt (+0/-4)
test/data/stillscans_100708/1278525905-scan.txt (+0/-12448)
test/data/stillscans_100708/1278525931-pose.txt (+0/-4)
test/data/stillscans_100708/1278525931-scan.txt (+0/-16816)
test/data/stillscans_100708/1278525958-pose.txt (+0/-4)
test/data/stillscans_100708/1278525958-scan.txt (+0/-18937)
test/data/stillscans_100708/1278525984-pose.txt (+0/-4)
test/data/stillscans_100708/1278525984-scan.txt (+0/-21682)
test/data/stillscans_100708/1278526005-pose.txt (+0/-4)
test/data/stillscans_100708/1278526005-scan.txt (+0/-19328)
test/data/stillscans_100708/1278526025-pose.txt (+0/-4)
test/data/stillscans_100708/1278526025-scan.txt (+0/-16395)
test/data/stillscans_100708/1278526042-pose.txt (+0/-4)
test/data/stillscans_100708/1278526042-scan.txt (+0/-13428)
test/data/stillscans_100708/1278526057-pose.txt (+0/-4)
test/data/stillscans_100708/1278526057-scan.txt (+0/-19049)
test/data/stillscans_100708/1278526071-pose.txt (+0/-4)
test/data/stillscans_100708/1278526071-scan.txt (+0/-16171)
test/data/stillscans_100708/1278526111-pose.txt (+0/-4)
test/data/stillscans_100708/1278526111-scan.txt (+0/-26988)
test/data/stillscans_100708/1278526129-pose.txt (+0/-4)
test/data/stillscans_100708/1278526129-scan.txt (+0/-22330)
test/data/stillscans_100708/1278526141-pose.txt (+0/-4)
test/data/stillscans_100708/1278526141-scan.txt (+0/-19225)
test/data/stillscans_100708/1278526179-pose.txt (+0/-4)
test/data/stillscans_100708/1278526179-scan.txt (+0/-14547)
test/data/stillscans_100708/1278526217-pose.txt (+0/-4)
test/data/stillscans_100708/1278526217-scan.txt (+0/-29901)
test/data/stillscans_100708/1278526341-pose.txt (+0/-4)
test/data/stillscans_100708/1278526341-scan.txt (+0/-19167)
test/data/stillscans_100708/empty-map.txt (+0/-1)
test/depth_image.py (+0/-136)
test/iros_mapper.py (+0/-80)
test/iros_segmenter.py (+0/-95)
test/mapper_test.py (+0/-259)
test/pointcloud_test.py (+0/-67)
test/pose3d_test.py (+0/-88)
test/quantizer_test.py (+0/-73)
test/rviz/visualize.vcg (+87/-0)
test/simulation/map_import.py (+0/-45)
test/simulation/true_pose.py (+0/-18)
test/speed_test.py (+0/-205)
test/tests.py (+0/-26)
test/util_test.py (+0/-41)
todo.otl (+0/-136)
To merge this branch: bzr merge lp:~wecacuee/mrol/mrol-dev
Reviewer: Julian Ryde
Review status: Approve
Review via email: mp+135263@code.launchpad.net

Description of the change

1) Use backward ray tracing to remove noisy voxels up to a distance of 10 voxels (see the sketch after this list).
2) Vectorized userdata support for colored voxels.
3) Added rviz as a visualizer.
4) Merged mrol_ros, the ROS wrappers for using MROL.
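
For illustration, the following is a minimal sketch of the backward ray-trace clean-up from item 1, built on the bresenhamline() helper added in this branch (mrol_mapping/bresenhamline.py). The function name clear_noisy_voxels, the plain dict keyed by voxel tuples, and the pop-based clearing policy are assumptions made for this sketch only; they are not the branch's actual mapper implementation.

    import numpy as np
    from mrol_mapping.bresenhamline import bresenhamline

    def clear_noisy_voxels(mapvoxels, hit_voxels, sensor_voxel, max_range=10):
        # Hypothetical helper: trace from each observed hit voxel backward
        # toward the sensor for at most max_range cells; every cell the ray
        # crosses was seen through, so any occupancy stored there is treated
        # as noise and dropped. Assumes hits lie more than max_range cells
        # from the sensor, since bresenhamline() always emits max_iter steps
        # and would otherwise overshoot past the sensor.
        start = np.asarray(hit_voxels, dtype=int)                  # (npts x dim) hit voxel indices
        end = np.asarray(sensor_voxel, dtype=int).reshape(1, -1)   # sensor position in voxel units
        traversed = bresenhamline(start, end, max_iter=max_range)  # cells from (hit, sensor], 10 per ray
        for vox in map(tuple, traversed):
            mapvoxels.pop(vox, None)  # assumes a dict keyed by voxel tuples, as in fast.pyx increment()

The sketch only covers how bresenhamline() enumerates the traversed cells; how those cells are actually decremented or removed is handled by the branch's mapping code.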

Julian Ryde (julian-ryde) :
review: Approve

Unmerged revisions

51. By Vikas Dhiman <email address hidden>

Merging lp:~wecacuee/mrol/mrol-dev

50. By Vikas Dhiman <email address hidden>

1) play_demo should also include the existing PYTHONPATH. 2) Fixed the volumetric_sample test case. The sampled points are guaranteed to be within the resolution of the original point, but not at its center.

49. By Vikas Dhiman <email address hidden>

Manually added Julian's changes after the merge failed: "Added function to save point cloud in the .ply format"; "Got speed_test working again after API changes."

48. By Vikas Dhiman <email address hidden>

Overrode Julian's repo with my version after the merge.

47. By Vikas Dhiman <email address hidden>

Removed an unnecessary term from compile.sh.

46. By vikas

A few more fixes. Do not overwrite estimated_trajectory.txt.

45. By vikas

Fixed some fine-grained test cases.

44. By vikas

Added support for userdata in the form of colored voxels. Added a benchmark test and corresponding data.

43. By vikas

Fixed a bug in generating an image with Gaussian noise.

42. By vikas

Added log.py.

Preview Diff

1=== added file '.bzrignore'
2--- .bzrignore 1970-01-01 00:00:00 +0000
3+++ .bzrignore 2012-11-20 22:16:18 +0000
4@@ -0,0 +1,11 @@
5+build/
6+dist/
7+*.egg-info/
8+.bzrignore
9+data
10+.syncme
11+mapper_test.profile
12+rviz.back.py
13+TAGS
14+tags
15+*.glc
16
17=== added file 'README'
18--- README 1970-01-01 00:00:00 +0000
19+++ README 2012-11-20 22:16:18 +0000
20@@ -0,0 +1,52 @@
21+DESCRIPTION
22+
23+A python library with two elements:
24+
25+The first is an efficient multi-resolution voxel representation for fast
26+proximity determination.
27+
28+The second is a global and local alignment algorithms for merging
29+multiple 3D point clouds into one map. Initial application is 3D mapping
30+
31+The algorithms are described in the following papers. Please cite
32+these in your work if you find this code useful.
33+
34+J. Ryde and H. Hu. 3D mapping with multi-resolution occupied voxel lists. Autonomous Robots, 2009.
35+J. Ryde and N. Hillier. Alignment and 3D scene change detection for segmentation in autonomous earth moving. ICRA 2011.
36+
37+INSTALLATION
38+
39+Required packages (ubuntu/debian naming)
40+
41+python-nose python-matplotlib python-scipy python-numexpr python-visual python-numpy
42+
43+Optional packages
44+python-coverage
45+
46+Make sure that the directory containing this README is on the PYTHONPATH
47+
48+To run tests:
49+first compile with
50+> ./compile.sh
51+then execute tests with
52+> ./run_tests
53+
54+COPYRIGHT
55+
56+Copyright 2010-2011, Julian Ryde and Nicholas Hillier.
57+
58+WARRANTY
59+
60+This program is distributed in the hope that it will be useful,
61+but WITHOUT ANY WARRANTY; without even the implied warranty of
62+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
63+GNU Lesser General Public License for more details.
64+
65+No obligations or promises are committed to for its maintenance.
66+
67+LICENSING
68+
69+You should have received a copy of the GNU Lesser General Public License
70+along with this program. If not, see <http://www.gnu.org/licenses/>.
71+
72+
73
74=== removed file 'README'
75--- README 2011-11-29 22:17:03 +0000
76+++ README 1970-01-01 00:00:00 +0000
77@@ -1,52 +0,0 @@
78-DESCRIPTION
79-
80-A python library with two elements:
81-
82-The first is an efficient multi-resolution voxel representation for fast
83-proximity determination.
84-
85-The second is a global and local alignment algorithms for merging
86-multiple 3D point clouds into one map. Initial application is 3D mapping
87-
88-The algorithms are described in the following papers. Please cite
89-these in your work if you find this code useful.
90-
91-J. Ryde and H. Hu. 3D mapping with multi-resolution occupied voxel lists. Autonomous Robots, 2009.
92-J. Ryde and N. Hillier. Alignment and 3D scene change detection for segmentation in autonomous earth moving. ICRA 2011.
93-
94-INSTALLATION
95-
96-Required packages (ubuntu/debian naming)
97-
98-python-nose python-matplotlib python-scipy python-numexpr python-visual python-numpy
99-
100-Optional packages
101-python-coverage
102-
103-Make sure that the directory containing this README is on the PYTHONPATH
104-
105-To run tests:
106-first compile with
107-> ./compile.sh
108-then execute tests with
109-> ./run_tests
110-
111-COPYRIGHT
112-
113-Copyright 2010-2011, Julian Ryde and Nicholas Hillier.
114-
115-WARRANTY
116-
117-This program is distributed in the hope that it will be useful,
118-but WITHOUT ANY WARRANTY; without even the implied warranty of
119-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
120-GNU Lesser General Public License for more details.
121-
122-No obligations or promises are committed to for its maintenance.
123-
124-LICENSING
125-
126-You should have received a copy of the GNU Lesser General Public License
127-along with this program. If not, see <http://www.gnu.org/licenses/>.
128-
129-
130
131=== added file 'compile.sh'
132--- compile.sh 1970-01-01 00:00:00 +0000
133+++ compile.sh 2012-11-20 22:16:18 +0000
134@@ -0,0 +1,5 @@
135+#! /bin/bash
136+fname=`dirname $0`/mrol_mapping/cython/fast
137+cython $fname.pyx
138+#gcc -shared -pthread -fPIC -fwrapv -O3 -ffast-math -Wall -fno-strict-aliasing -I/usr/include/python2.6 -o $fname.so $fname.c
139+gcc -shared -pthread -fPIC -fwrapv -O3 -Wall -fno-strict-aliasing -I/usr/include/python2.6 -o $fname.so $fname.c
140
141=== removed file 'compile.sh'
142--- compile.sh 2011-06-17 03:07:58 +0000
143+++ compile.sh 1970-01-01 00:00:00 +0000
144@@ -1,5 +0,0 @@
145-#! /bin/bash
146-fname=`dirname $0`/mrol_mapping/cython/fast
147-cython $fname.pyx
148-#gcc -shared -pthread -fPIC -fwrapv -O3 -ffast-math -Wall -fno-strict-aliasing -I/usr/include/python2.6 -o $fname.so $fname.c
149-gcc -shared -pthread -fPIC -fwrapv -O3 -Wall -fno-strict-aliasing -I/usr/include/python2.6 -o $fname.so $fname.c
150
151=== added directory 'experimental'
152=== removed directory 'experimental'
153=== added file 'experimental/cup_mapper.py'
154--- experimental/cup_mapper.py 1970-01-01 00:00:00 +0000
155+++ experimental/cup_mapper.py 2012-11-20 22:16:18 +0000
156@@ -0,0 +1,99 @@
157+'''File to build map from a directory of depth images and save to file'''
158+from __future__ import division
159+import mrol_mapping.poseutil as poseutil
160+import mrol_mapping.mapper as mapper
161+import mrol_mapping.pointcloud as pointcloud
162+import mrol_mapping.util as util
163+
164+
165+import mrol_mapping.occupiedlist as occupiedlist
166+import mrol_mapping.quantizer as quantizer
167+
168+import os
169+import numpy as np
170+import sys
171+
172+sys.path.append('../test')
173+import depth_image
174+
175+res = 0.01
176+visualise = True
177+make_free = False
178+load_pc = True
179+
180+def get_pointcloud(fname):
181+ xyzs = depth_image.image_to_points(fname)
182+ pc = pointcloud.PointCloud(xyzs)
183+ # remove ceiling and floor
184+ pc.boxfilter(zmin=-15, zmax=0.5)
185+ return pc
186+
187+def get_freespace(xyzs, pose, voxelmap):
188+ # TODO consider using mapper._build_sensor_model() and mapper.generate_freespace() instead.
189+ # resampling to align with grid. bad hack, but makes ray_cast easier.
190+ res = voxelmap.get_resolution()
191+ xyzs = poseutil.transformpoints(xyzs,pose)
192+ voxs = occupiedlist.pointstovoxels(xyzs, res)
193+ voxs = quantizer.uniquerows(voxs, 0)
194+ X = occupiedlist.voxelstopoints(voxs, res)
195+ free_pts = voxelmap.free_space_ray_trace(X,pose[:3])
196+ return free_pts
197+
198+
199+
200+if __name__ == '__main__':
201+ try:
202+ datadir = sys.argv[1]
203+ outfname = sys.argv[2]+".map"
204+ fnames = os.listdir(datadir)
205+ fnames.sort()
206+ if load_pc:
207+ fnames = [datadir + '/' + fname for fname in fnames if fname.startswith('mrolpc')]
208+ else:
209+ fnames = [datadir + '/' + fname for fname in fnames if fname.endswith('.png')]
210+ except:
211+ print 'Need to specify a valid directory for input and file name for output'
212+ sys.exit()
213+
214+ # variable initialisation
215+ iros_map = mapper.VoxelMap(res,levels=3)
216+ iros_free_map = mapper.VoxelMap(res,levels=1)
217+ bestpose = poseutil.Pose3D()
218+
219+ if load_pc:
220+ pc_xform = poseutil.Pose3D(X=(0,0,0,0,0,0))
221+ pc = pointcloud.load(fnames.pop(0))
222+ pc = pointcloud.PointCloud(pc_xform.transformPoints(pc))
223+ pc_xform = poseutil.Pose3D(X=(0,0,0,-np.pi/2.,0,-np.pi/2.))
224+ else:
225+ pc = get_pointcloud(fnames.pop(0))
226+ iros_map.add_points(pc, bestpose)
227+ if visualise:
228+ iros_map.display(changes=False)
229+ if make_free:
230+ freepts = get_freespace(pc.points, bestpose, iros_map)
231+ pcfree = pointcloud.PointCloud(freepts)
232+ iros_free_map.add_points(pcfree,None)
233+ if visualise:
234+ iros_free_map.display(npts=1000000)
235+
236+ for fname in fnames:
237+ print fname,
238+ if load_pc:
239+ pc = pointcloud.load(fname)
240+ pc = pointcloud.PointCloud(pc_xform.transformPoints(pc))
241+ else:
242+ pc = get_pointcloud(fname)
243+ bestpose = iros_map.align_points(pc, bestpose)[0]
244+ iros_map.add_points(pc, bestpose)
245+ if make_free:
246+ freepts = get_freespace(pc.points, bestpose, iros_map)
247+ pcfree = pointcloud.PointCloud(freepts)
248+ iros_free_map.add_points(pcfree,None)
249+ print bestpose
250+
251+ iros_map.save(outfname)
252+ if make_free:
253+ iros_free_map.save(outfname+"free")
254+ print 'Map size:', len(iros_map), 'voxels.'
255+ print 'Map saved to', outfname
256
257=== removed file 'experimental/cup_mapper.py'
258--- experimental/cup_mapper.py 2011-06-03 16:52:15 +0000
259+++ experimental/cup_mapper.py 1970-01-01 00:00:00 +0000
260@@ -1,99 +0,0 @@
261-'''File to build map from a directory of depth images and save to file'''
262-from __future__ import division
263-import mrol_mapping.poseutil as poseutil
264-import mrol_mapping.mapper as mapper
265-import mrol_mapping.pointcloud as pointcloud
266-import mrol_mapping.util as util
267-
268-
269-import mrol_mapping.occupiedlist as occupiedlist
270-import mrol_mapping.quantizer as quantizer
271-
272-import os
273-import numpy as np
274-import sys
275-
276-sys.path.append('../test')
277-import depth_image
278-
279-res = 0.01
280-visualise = True
281-make_free = False
282-load_pc = True
283-
284-def get_pointcloud(fname):
285- xyzs = depth_image.image_to_points(fname)
286- pc = pointcloud.PointCloud(xyzs)
287- # remove ceiling and floor
288- pc.boxfilter(zmin=-15, zmax=0.5)
289- return pc
290-
291-def get_freespace(xyzs, pose, voxelmap):
292- # TODO consider using mapper._build_sensor_model() and mapper.generate_freespace() instead.
293- # resampling to align with grid. bad hack, but makes ray_cast easier.
294- res = voxelmap.get_resolution()
295- xyzs = poseutil.transformpoints(xyzs,pose)
296- voxs = occupiedlist.pointstovoxels(xyzs, res)
297- voxs = quantizer.uniquerows(voxs, 0)
298- X = occupiedlist.voxelstopoints(voxs, res)
299- free_pts = voxelmap.free_space_ray_trace(X,pose[:3])
300- return free_pts
301-
302-
303-
304-if __name__ == '__main__':
305- try:
306- datadir = sys.argv[1]
307- outfname = sys.argv[2]+".map"
308- fnames = os.listdir(datadir)
309- fnames.sort()
310- if load_pc:
311- fnames = [datadir + '/' + fname for fname in fnames if fname.startswith('mrolpc')]
312- else:
313- fnames = [datadir + '/' + fname for fname in fnames if fname.endswith('.png')]
314- except:
315- print 'Need to specify a valid directory for input and file name for output'
316- sys.exit()
317-
318- # variable initialisation
319- iros_map = mapper.VoxelMap(res,levels=3)
320- iros_free_map = mapper.VoxelMap(res,levels=1)
321- bestpose = poseutil.Pose3D()
322-
323- if load_pc:
324- pc_xform = poseutil.Pose3D(X=(0,0,0,0,0,0))
325- pc = pointcloud.load(fnames.pop(0))
326- pc = pointcloud.PointCloud(pc_xform.transformPoints(pc))
327- pc_xform = poseutil.Pose3D(X=(0,0,0,-np.pi/2.,0,-np.pi/2.))
328- else:
329- pc = get_pointcloud(fnames.pop(0))
330- iros_map.add_points(pc, bestpose)
331- if visualise:
332- iros_map.display(changes=False)
333- if make_free:
334- freepts = get_freespace(pc.points, bestpose, iros_map)
335- pcfree = pointcloud.PointCloud(freepts)
336- iros_free_map.add_points(pcfree,None)
337- if visualise:
338- iros_free_map.display(npts=1000000)
339-
340- for fname in fnames:
341- print fname,
342- if load_pc:
343- pc = pointcloud.load(fname)
344- pc = pointcloud.PointCloud(pc_xform.transformPoints(pc))
345- else:
346- pc = get_pointcloud(fname)
347- bestpose = iros_map.align_points(pc, bestpose)[0]
348- iros_map.add_points(pc, bestpose)
349- if make_free:
350- freepts = get_freespace(pc.points, bestpose, iros_map)
351- pcfree = pointcloud.PointCloud(freepts)
352- iros_free_map.add_points(pcfree,None)
353- print bestpose
354-
355- iros_map.save(outfname)
356- if make_free:
357- iros_free_map.save(outfname+"free")
358- print 'Map size:', len(iros_map), 'voxels.'
359- print 'Map saved to', outfname
360
361=== added file 'experimental/cup_segmenter.py'
362--- experimental/cup_segmenter.py 1970-01-01 00:00:00 +0000
363+++ experimental/cup_segmenter.py 2012-11-20 22:16:18 +0000
364@@ -0,0 +1,142 @@
365+import sys
366+import numpy as np
367+
368+import mrol_mapping.mapper as mapper
369+import mrol_mapping.poseutil as poseutil
370+import mrol_mapping.occupiedlist as occupiedlist
371+import mrol_mapping.pointcloud as pointcloud
372+import mrol_mapping.visualiser.robotvisualiser as robotvisualiser
373+import mrol_mapping.quantizer as quantizer
374+import os
375+
376+sys.path.append('../test')
377+import iros_mapper
378+
379+
380+import signal
381+def signal_handler(signal, frame):
382+ import pdb; pdb.set_trace()
383+signal.signal(signal.SIGINT, signal_handler)
384+
385+#fname = '/jcr/data3d/lamp/present/0002.png'
386+load_pc = True# for loading point clouds from the Kinect from npy files
387+try:
388+ datadir = sys.argv[1]
389+ fnames = os.listdir(datadir)
390+ fnames.sort()
391+ if load_pc:
392+ fnames = [datadir + '/' + fname for fname in fnames if fname.startswith('mrolpc')]
393+ else:
394+ fnames = [datadir + '/' + fname for fname in fnames if fname.endswith('.png')]
395+except:
396+ print 'Need to specify a valid directory for input'
397+ sys.exit()
398+visualise = True
399+long_term = False
400+
401+
402+bestpose = poseutil.Pose3D(X=(0,0,0,0,0,0))
403+pc_xform = poseutil.Pose3D(X=(0,0,0,-np.pi/2.,0,-np.pi/2.))
404+res = 0.01
405+segment_map = mapper.VoxelMap(res,levels=3)
406+pc = pointcloud.load(fnames.pop(0))
407+#pc = pointcloud.PointCloud(pc)
408+pc = pointcloud.PointCloud(pc_xform.transformPoints(pc))
409+segment_map.add_points(pc, bestpose)
410+object_map = mapper.VoxelMap(res/4.,levels=1)
411+
412+
413+#freespace_ol = segment_map.generate_freespace2(resolution = res*2, minR=0.4, maxR=3)
414+#freespace_ol = segment_map.generate_freespace(res, minR=0.25, maxR=0.75) # *4 for speed during testing, should just be res
415+
416+free_space_res = res
417+
418+pc_vox = occupiedlist.pointstovoxels(pc.points, free_space_res)
419+pc_vox = quantizer.uniquerows(pc_vox, 0)
420+pc_pts = occupiedlist.voxelstopoints(pc_vox, free_space_res)
421+pc_regular = pointcloud.PointCloud(pc_pts)
422+freespace = segment_map.free_space_ray_trace(pc_regular,(0,0,0),free_space_res,voxel_offset=2.5)
423+freespace_ol = occupiedlist.OccupiedList(free_space_res)
424+freespace_ol.add_points(freespace.points)
425+
426+#vis = robotvisualiser.Visualiser(npts=2e6)
427+#vis.addmappts(segment_map.get_points().points)
428+#vis.addmappts(freespace_ol.getpoints())
429+#free_pc = pointcloud.PointCloud(freespace_ol.getpoints())
430+#free_pc.display()
431+
432+
433+
434+if visualise:
435+ vis = robotvisualiser.Visualiser(npts=2e6)
436+ vis.set_orthographic(Bool=True)
437+ vis.setminmax(-1.5,1.5)
438+ #objvis = robotvisualiser.Visualiser()
439+ #objvis.set_orthographic(Bool=True)
440+ #objvis.setminmax(-1.5,1.5)
441+
442+pc_xform = poseutil.Pose3D(X=(0,0,0,-np.pi/2.,0,-np.pi/2.))
443+count = 0
444+for fname in fnames:
445+ if load_pc:
446+ pc = pointcloud.load(fname)
447+ pc = pointcloud.PointCloud(pc_xform.transformPoints(pc))
448+ else:
449+ pc = get_pointcloud(fname)
450+ print fname,
451+ bestpose = segment_map.align_points(pc, bestpose)[0]
452+ print bestpose
453+ hits, new_points = segment_map.mrol.getfinest().segment_ovl(bestpose,pc.points)
454+ if long_term:
455+ # if we wanted a long-term map without dynamic object coruption
456+ inliers, outliers = freespace_ol.segment_ovl(None, new_points)
457+ object_map.add_points(pointcloud.PointCloud(inliers), None)
458+ segment_map.add_points(pointcloud.PointCloud(outliers), None)
459+ #if freespace2:
460+ # if len(inliers) > 0:
461+ # tmp_freespace_ol = segment_map.generate_freespace2(inliers,pose=bestpose)
462+ # else:
463+ # # nothing to segment?
464+ # tmp_freespace_ol = occupiedlist.OccupiedList(1)
465+ #else:
466+ # tmp_freespace_ol = segment_map.generate_freespace(res,pose=bestpose)
467+ #
468+ #freespace_ol.add_points(tmp_freespace_ol.getpoints(), None)
469+ else:
470+ # if we are just trying to segment out the cup to make a model, and it's not moving.
471+ inliers, outliers = freespace_ol.segment_ovl(bestpose,pc.points)
472+ #TODO only add points if overlap is good?
473+ if count == 0:
474+ object_map.add_points(pointcloud.PointCloud(inliers), None)
475+ segment_map.add_points(pointcloud.PointCloud(new_points), None)
476+ else:
477+ if len(inliers)/len(object_map) > 0.5:
478+ object_map.add_points(pointcloud.PointCloud(inliers), None)
479+ segment_map.add_points(pointcloud.PointCloud(new_points), None)
480+ else:
481+ print "Not adding to map"
482+
483+ if visualise:
484+ print "Visualising"
485+ vis.clear()
486+ vis.addmappts(segment_map.get_points().points)
487+ vis.setrobotpose(bestpose.getMatrix())
488+ vis.addtrajectorypoint(bestpose.getTuple()[0:3])
489+ if long_term:
490+ if len(outliers) > 0:
491+ vis.setrightpts(outliers) # new map points
492+ else:
493+ vis.setrightpts(new_points)
494+ if len(inliers) > 0:
495+ vis.setleftpts(inliers) # segmented points
496+ #pointcloud.save(fname+"_seg",inliers)
497+ #np.save(fname+"_pose",np.asarray(bestpose.getTuple()))
498+
499+ #objvis.clear()
500+ #objvis.addmappts(object_map.get_points().points)
501+ #free_pc = pointcloud.PointCloud(freespace_ol.getpoints())
502+ #P = free_pc.display()
503+ count +=1
504+import pdb;pdb.set_trace()
505+segment_map.save("segment_map.map")
506+object_map.save("object_map.map")
507
508=== removed file 'experimental/cup_segmenter.py'
509--- experimental/cup_segmenter.py 2012-02-26 19:51:50 +0000
510+++ experimental/cup_segmenter.py 1970-01-01 00:00:00 +0000
511@@ -1,142 +0,0 @@
512-import sys
513-import numpy as np
514-
515-import mrol_mapping.mapper as mapper
516-import mrol_mapping.poseutil as poseutil
517-import mrol_mapping.occupiedlist as occupiedlist
518-import mrol_mapping.pointcloud as pointcloud
519-import mrol_mapping.visualiser.robotvisualiser as robotvisualiser
520-import mrol_mapping.quantizer as quantizer
521-import os
522-
523-sys.path.append('../test')
524-import iros_mapper
525-
526-
527-import signal
528-def signal_handler(signal, frame):
529- import pdb; pdb.set_trace()
530-signal.signal(signal.SIGINT, signal_handler)
531-
532-#fname = '/jcr/data3d/lamp/present/0002.png'
533-load_pc = True# for loading point clouds from the Kinect from npy files
534-try:
535- datadir = sys.argv[1]
536- fnames = os.listdir(datadir)
537- fnames.sort()
538- if load_pc:
539- fnames = [datadir + '/' + fname for fname in fnames if fname.startswith('mrolpc')]
540- else:
541- fnames = [datadir + '/' + fname for fname in fnames if fname.endswith('.png')]
542-except:
543- print 'Need to specify a valid directory for input'
544- sys.exit()
545-visualise = True
546-long_term = False
547-
548-
549-bestpose = poseutil.Pose3D(X=(0,0,0,0,0,0))
550-pc_xform = poseutil.Pose3D(X=(0,0,0,-np.pi/2.,0,-np.pi/2.))
551-res = 0.01
552-segment_map = mapper.VoxelMap(res,levels=3)
553-pc = pointcloud.load(fnames.pop(0))
554-#pc = pointcloud.PointCloud(pc)
555-pc = pointcloud.PointCloud(pc_xform.transformPoints(pc))
556-segment_map.add_points(pc, bestpose)
557-object_map = mapper.VoxelMap(res/4.,levels=1)
558-
559-
560-#freespace_ol = segment_map.generate_freespace2(resolution = res*2, minR=0.4, maxR=3)
561-#freespace_ol = segment_map.generate_freespace(res, minR=0.25, maxR=0.75) # *4 for speed during testing, should just be res
562-
563-free_space_res = res
564-
565-pc_vox = occupiedlist.pointstovoxels(pc.points, free_space_res)
566-pc_vox = quantizer.uniquerows(pc_vox, 0)
567-pc_pts = occupiedlist.voxelstopoints(pc_vox, free_space_res)
568-pc_regular = pointcloud.PointCloud(pc_pts)
569-freespace = segment_map.free_space_ray_trace(pc_regular,(0,0,0),free_space_res,voxel_offset=2.5)
570-freespace_ol = occupiedlist.OccupiedList(free_space_res)
571-freespace_ol.add_points(freespace.points)
572-
573-#vis = robotvisualiser.Visualiser(npts=2e6)
574-#vis.addmappts(segment_map.get_points().points)
575-#vis.addmappts(freespace_ol.getpoints())
576-#free_pc = pointcloud.PointCloud(freespace_ol.getpoints())
577-#free_pc.display()
578-
579-
580-
581-if visualise:
582- vis = robotvisualiser.Visualiser(npts=2e6)
583- vis.set_orthographic(Bool=True)
584- vis.setminmax(-1.5,1.5)
585- #objvis = robotvisualiser.Visualiser()
586- #objvis.set_orthographic(Bool=True)
587- #objvis.setminmax(-1.5,1.5)
588-
589-pc_xform = poseutil.Pose3D(X=(0,0,0,-np.pi/2.,0,-np.pi/2.))
590-count = 0
591-for fname in fnames:
592- if load_pc:
593- pc = pointcloud.load(fname)
594- pc = pointcloud.PointCloud(pc_xform.transformPoints(pc))
595- else:
596- pc = get_pointcloud(fname)
597- print fname,
598- bestpose = segment_map.align_points(pc, bestpose)[0]
599- print bestpose
600- hits, new_points = segment_map.mrol.getfinest().segment_ovl(bestpose,pc.points)
601- if long_term:
602- # if we wanted a long-term map without dynamic object coruption
603- inliers, outliers = freespace_ol.segment_ovl(None, new_points)
604- object_map.add_points(pointcloud.PointCloud(inliers), None)
605- segment_map.add_points(pointcloud.PointCloud(outliers), None)
606- #if freespace2:
607- # if len(inliers) > 0:
608- # tmp_freespace_ol = segment_map.generate_freespace2(inliers,pose=bestpose)
609- # else:
610- # # nothing to segment?
611- # tmp_freespace_ol = occupiedlist.OccupiedList(1)
612- #else:
613- # tmp_freespace_ol = segment_map.generate_freespace(res,pose=bestpose)
614- #
615- #freespace_ol.add_points(tmp_freespace_ol.getpoints(), None)
616- else:
617- # if we are just trying to segment out the cup to make a model, and it's not moving.
618- inliers, outliers = freespace_ol.segment_ovl(bestpose,pc.points)
619- #TODO only add points if overlap is good?
620- if count == 0:
621- object_map.add_points(pointcloud.PointCloud(inliers), None)
622- segment_map.add_points(pointcloud.PointCloud(new_points), None)
623- else:
624- if len(inliers)/len(object_map) > 0.5:
625- object_map.add_points(pointcloud.PointCloud(inliers), None)
626- segment_map.add_points(pointcloud.PointCloud(new_points), None)
627- else:
628- print "Not adding to map"
629-
630- if visualise:
631- print "Visualising"
632- vis.clear()
633- vis.addmappts(segment_map.get_points().points)
634- vis.setrobotpose(bestpose.getMatrix())
635- vis.addtrajectorypoint(bestpose.getTuple()[0:3])
636- if long_term:
637- if len(outliers) > 0:
638- vis.setrightpts(outliers) # new map points
639- else:
640- vis.setrightpts(new_points)
641- if len(inliers) > 0:
642- vis.setleftpts(inliers) # segmented points
643- #pointcloud.save(fname+"_seg",inliers)
644- #np.save(fname+"_pose",np.asarray(bestpose.getTuple()))
645-
646- #objvis.clear()
647- #objvis.addmappts(object_map.get_points().points)
648- #free_pc = pointcloud.PointCloud(freespace_ol.getpoints())
649- #P = free_pc.display()
650- count +=1
651-import pdb;pdb.set_trace()
652-segment_map.save("segment_map.map")
653-object_map.save("object_map.map")
654
655=== added file 'full_test.sh'
656--- full_test.sh 1970-01-01 00:00:00 +0000
657+++ full_test.sh 2012-11-20 22:16:18 +0000
658@@ -0,0 +1,9 @@
659+#! /bin/sh
660+# does a fresh branch of the local repo in tmp, compiles and tests
661+# commit locally and run this before pushing to the main repository
662+rm -rf /tmp/mrol_repo
663+bzr branch . /tmp/mrol_repo
664+cd /tmp/mrol_repo
665+#./compile.sh
666+./run_tests
667+rm -rf /tmp/mrol_repo
668
669=== removed file 'full_test.sh'
670--- full_test.sh 2011-11-29 22:19:46 +0000
671+++ full_test.sh 1970-01-01 00:00:00 +0000
672@@ -1,9 +0,0 @@
673-#! /bin/sh
674-# does a fresh branch of the local repo in tmp, compiles and tests
675-# commit locally and run this before pushing to the main repository
676-rm -rf /tmp/mrol_repo
677-bzr branch . /tmp/mrol_repo
678-cd /tmp/mrol_repo
679-#./compile.sh
680-./run_tests
681-rm -rf /tmp/mrol_repo
682
683=== added file 'lgpl.txt'
684--- lgpl.txt 1970-01-01 00:00:00 +0000
685+++ lgpl.txt 2012-11-20 22:16:18 +0000
686@@ -0,0 +1,166 @@
687+ GNU LESSER GENERAL PUBLIC LICENSE
688+ Version 3, 29 June 2007
689+
690+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
691+ Everyone is permitted to copy and distribute verbatim copies
692+ of this license document, but changing it is not allowed.
693+
694+
695+ This version of the GNU Lesser General Public License incorporates
696+the terms and conditions of version 3 of the GNU General Public
697+License, supplemented by the additional permissions listed below.
698+
699+ 0. Additional Definitions.
700+
701+ As used herein, "this License" refers to version 3 of the GNU Lesser
702+General Public License, and the "GNU GPL" refers to version 3 of the GNU
703+General Public License.
704+
705+ "The Library" refers to a covered work governed by this License,
706+other than an Application or a Combined Work as defined below.
707+
708+ An "Application" is any work that makes use of an interface provided
709+by the Library, but which is not otherwise based on the Library.
710+Defining a subclass of a class defined by the Library is deemed a mode
711+of using an interface provided by the Library.
712+
713+ A "Combined Work" is a work produced by combining or linking an
714+Application with the Library. The particular version of the Library
715+with which the Combined Work was made is also called the "Linked
716+Version".
717+
718+ The "Minimal Corresponding Source" for a Combined Work means the
719+Corresponding Source for the Combined Work, excluding any source code
720+for portions of the Combined Work that, considered in isolation, are
721+based on the Application, and not on the Linked Version.
722+
723+ The "Corresponding Application Code" for a Combined Work means the
724+object code and/or source code for the Application, including any data
725+and utility programs needed for reproducing the Combined Work from the
726+Application, but excluding the System Libraries of the Combined Work.
727+
728+ 1. Exception to Section 3 of the GNU GPL.
729+
730+ You may convey a covered work under sections 3 and 4 of this License
731+without being bound by section 3 of the GNU GPL.
732+
733+ 2. Conveying Modified Versions.
734+
735+ If you modify a copy of the Library, and, in your modifications, a
736+facility refers to a function or data to be supplied by an Application
737+that uses the facility (other than as an argument passed when the
738+facility is invoked), then you may convey a copy of the modified
739+version:
740+
741+ a) under this License, provided that you make a good faith effort to
742+ ensure that, in the event an Application does not supply the
743+ function or data, the facility still operates, and performs
744+ whatever part of its purpose remains meaningful, or
745+
746+ b) under the GNU GPL, with none of the additional permissions of
747+ this License applicable to that copy.
748+
749+ 3. Object Code Incorporating Material from Library Header Files.
750+
751+ The object code form of an Application may incorporate material from
752+a header file that is part of the Library. You may convey such object
753+code under terms of your choice, provided that, if the incorporated
754+material is not limited to numerical parameters, data structure
755+layouts and accessors, or small macros, inline functions and templates
756+(ten or fewer lines in length), you do both of the following:
757+
758+ a) Give prominent notice with each copy of the object code that the
759+ Library is used in it and that the Library and its use are
760+ covered by this License.
761+
762+ b) Accompany the object code with a copy of the GNU GPL and this license
763+ document.
764+
765+ 4. Combined Works.
766+
767+ You may convey a Combined Work under terms of your choice that,
768+taken together, effectively do not restrict modification of the
769+portions of the Library contained in the Combined Work and reverse
770+engineering for debugging such modifications, if you also do each of
771+the following:
772+
773+ a) Give prominent notice with each copy of the Combined Work that
774+ the Library is used in it and that the Library and its use are
775+ covered by this License.
776+
777+ b) Accompany the Combined Work with a copy of the GNU GPL and this license
778+ document.
779+
780+ c) For a Combined Work that displays copyright notices during
781+ execution, include the copyright notice for the Library among
782+ these notices, as well as a reference directing the user to the
783+ copies of the GNU GPL and this license document.
784+
785+ d) Do one of the following:
786+
787+ 0) Convey the Minimal Corresponding Source under the terms of this
788+ License, and the Corresponding Application Code in a form
789+ suitable for, and under terms that permit, the user to
790+ recombine or relink the Application with a modified version of
791+ the Linked Version to produce a modified Combined Work, in the
792+ manner specified by section 6 of the GNU GPL for conveying
793+ Corresponding Source.
794+
795+ 1) Use a suitable shared library mechanism for linking with the
796+ Library. A suitable mechanism is one that (a) uses at run time
797+ a copy of the Library already present on the user's computer
798+ system, and (b) will operate properly with a modified version
799+ of the Library that is interface-compatible with the Linked
800+ Version.
801+
802+ e) Provide Installation Information, but only if you would otherwise
803+ be required to provide such information under section 6 of the
804+ GNU GPL, and only to the extent that such information is
805+ necessary to install and execute a modified version of the
806+ Combined Work produced by recombining or relinking the
807+ Application with a modified version of the Linked Version. (If
808+ you use option 4d0, the Installation Information must accompany
809+ the Minimal Corresponding Source and Corresponding Application
810+ Code. If you use option 4d1, you must provide the Installation
811+ Information in the manner specified by section 6 of the GNU GPL
812+ for conveying Corresponding Source.)
813+
814+ 5. Combined Libraries.
815+
816+ You may place library facilities that are a work based on the
817+Library side by side in a single library together with other library
818+facilities that are not Applications and are not covered by this
819+License, and convey such a combined library under terms of your
820+choice, if you do both of the following:
821+
822+ a) Accompany the combined library with a copy of the same work based
823+ on the Library, uncombined with any other library facilities,
824+ conveyed under the terms of this License.
825+
826+ b) Give prominent notice with the combined library that part of it
827+ is a work based on the Library, and explaining where to find the
828+ accompanying uncombined form of the same work.
829+
830+ 6. Revised Versions of the GNU Lesser General Public License.
831+
832+ The Free Software Foundation may publish revised and/or new versions
833+of the GNU Lesser General Public License from time to time. Such new
834+versions will be similar in spirit to the present version, but may
835+differ in detail to address new problems or concerns.
836+
837+ Each version is given a distinguishing version number. If the
838+Library as you received it specifies that a certain numbered version
839+of the GNU Lesser General Public License "or any later version"
840+applies to it, you have the option of following the terms and
841+conditions either of that published version or of any later version
842+published by the Free Software Foundation. If the Library as you
843+received it does not specify a version number of the GNU Lesser
844+General Public License, you may choose any version of the GNU Lesser
845+General Public License ever published by the Free Software Foundation.
846+
847+ If the Library as you received it specifies that a proxy can decide
848+whether future versions of the GNU Lesser General Public License shall
849+apply, that proxy's public statement of acceptance of any version is
850+permanent authorization for you to choose that version for the
851+Library.
852+
853
854=== removed file 'lgpl.txt'
855--- lgpl.txt 2011-07-05 06:26:34 +0000
856+++ lgpl.txt 1970-01-01 00:00:00 +0000
857@@ -1,166 +0,0 @@
858- GNU LESSER GENERAL PUBLIC LICENSE
859- Version 3, 29 June 2007
860-
861- Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
862- Everyone is permitted to copy and distribute verbatim copies
863- of this license document, but changing it is not allowed.
864-
865-
866- This version of the GNU Lesser General Public License incorporates
867-the terms and conditions of version 3 of the GNU General Public
868-License, supplemented by the additional permissions listed below.
869-
870- 0. Additional Definitions.
871-
872- As used herein, "this License" refers to version 3 of the GNU Lesser
873-General Public License, and the "GNU GPL" refers to version 3 of the GNU
874-General Public License.
875-
876- "The Library" refers to a covered work governed by this License,
877-other than an Application or a Combined Work as defined below.
878-
879- An "Application" is any work that makes use of an interface provided
880-by the Library, but which is not otherwise based on the Library.
881-Defining a subclass of a class defined by the Library is deemed a mode
882-of using an interface provided by the Library.
883-
884- A "Combined Work" is a work produced by combining or linking an
885-Application with the Library. The particular version of the Library
886-with which the Combined Work was made is also called the "Linked
887-Version".
888-
889- The "Minimal Corresponding Source" for a Combined Work means the
890-Corresponding Source for the Combined Work, excluding any source code
891-for portions of the Combined Work that, considered in isolation, are
892-based on the Application, and not on the Linked Version.
893-
894- The "Corresponding Application Code" for a Combined Work means the
895-object code and/or source code for the Application, including any data
896-and utility programs needed for reproducing the Combined Work from the
897-Application, but excluding the System Libraries of the Combined Work.
898-
899- 1. Exception to Section 3 of the GNU GPL.
900-
901- You may convey a covered work under sections 3 and 4 of this License
902-without being bound by section 3 of the GNU GPL.
903-
904- 2. Conveying Modified Versions.
905-
906- If you modify a copy of the Library, and, in your modifications, a
907-facility refers to a function or data to be supplied by an Application
908-that uses the facility (other than as an argument passed when the
909-facility is invoked), then you may convey a copy of the modified
910-version:
911-
912- a) under this License, provided that you make a good faith effort to
913- ensure that, in the event an Application does not supply the
914- function or data, the facility still operates, and performs
915- whatever part of its purpose remains meaningful, or
916-
917- b) under the GNU GPL, with none of the additional permissions of
918- this License applicable to that copy.
919-
920- 3. Object Code Incorporating Material from Library Header Files.
921-
922- The object code form of an Application may incorporate material from
923-a header file that is part of the Library. You may convey such object
924-code under terms of your choice, provided that, if the incorporated
925-material is not limited to numerical parameters, data structure
926-layouts and accessors, or small macros, inline functions and templates
927-(ten or fewer lines in length), you do both of the following:
928-
929- a) Give prominent notice with each copy of the object code that the
930- Library is used in it and that the Library and its use are
931- covered by this License.
932-
933- b) Accompany the object code with a copy of the GNU GPL and this license
934- document.
935-
936- 4. Combined Works.
937-
938- You may convey a Combined Work under terms of your choice that,
939-taken together, effectively do not restrict modification of the
940-portions of the Library contained in the Combined Work and reverse
941-engineering for debugging such modifications, if you also do each of
942-the following:
943-
944- a) Give prominent notice with each copy of the Combined Work that
945- the Library is used in it and that the Library and its use are
946- covered by this License.
947-
948- b) Accompany the Combined Work with a copy of the GNU GPL and this license
949- document.
950-
951- c) For a Combined Work that displays copyright notices during
952- execution, include the copyright notice for the Library among
953- these notices, as well as a reference directing the user to the
954- copies of the GNU GPL and this license document.
955-
956- d) Do one of the following:
957-
958- 0) Convey the Minimal Corresponding Source under the terms of this
959- License, and the Corresponding Application Code in a form
960- suitable for, and under terms that permit, the user to
961- recombine or relink the Application with a modified version of
962- the Linked Version to produce a modified Combined Work, in the
963- manner specified by section 6 of the GNU GPL for conveying
964- Corresponding Source.
965-
966- 1) Use a suitable shared library mechanism for linking with the
967- Library. A suitable mechanism is one that (a) uses at run time
968- a copy of the Library already present on the user's computer
969- system, and (b) will operate properly with a modified version
970- of the Library that is interface-compatible with the Linked
971- Version.
972-
973- e) Provide Installation Information, but only if you would otherwise
974- be required to provide such information under section 6 of the
975- GNU GPL, and only to the extent that such information is
976- necessary to install and execute a modified version of the
977- Combined Work produced by recombining or relinking the
978- Application with a modified version of the Linked Version. (If
979- you use option 4d0, the Installation Information must accompany
980- the Minimal Corresponding Source and Corresponding Application
981- Code. If you use option 4d1, you must provide the Installation
982- Information in the manner specified by section 6 of the GNU GPL
983- for conveying Corresponding Source.)
984-
985- 5. Combined Libraries.
986-
987- You may place library facilities that are a work based on the
988-Library side by side in a single library together with other library
989-facilities that are not Applications and are not covered by this
990-License, and convey such a combined library under terms of your
991-choice, if you do both of the following:
992-
993- a) Accompany the combined library with a copy of the same work based
994- on the Library, uncombined with any other library facilities,
995- conveyed under the terms of this License.
996-
997- b) Give prominent notice with the combined library that part of it
998- is a work based on the Library, and explaining where to find the
999- accompanying uncombined form of the same work.
1000-
1001- 6. Revised Versions of the GNU Lesser General Public License.
1002-
1003- The Free Software Foundation may publish revised and/or new versions
1004-of the GNU Lesser General Public License from time to time. Such new
1005-versions will be similar in spirit to the present version, but may
1006-differ in detail to address new problems or concerns.
1007-
1008- Each version is given a distinguishing version number. If the
1009-Library as you received it specifies that a certain numbered version
1010-of the GNU Lesser General Public License "or any later version"
1011-applies to it, you have the option of following the terms and
1012-conditions either of that published version or of any later version
1013-published by the Free Software Foundation. If the Library as you
1014-received it does not specify a version number of the GNU Lesser
1015-General Public License, you may choose any version of the GNU Lesser
1016-General Public License ever published by the Free Software Foundation.
1017-
1018- If the Library as you received it specifies that a proxy can decide
1019-whether future versions of the GNU Lesser General Public License shall
1020-apply, that proxy's public statement of acceptance of any version is
1021-permanent authorization for you to choose that version for the
1022-Library.
1023-
1024
1025=== added directory 'mrol_mapping'
1026=== removed directory 'mrol_mapping'
1027=== added file 'mrol_mapping/__init__.py'
1028--- mrol_mapping/__init__.py 1970-01-01 00:00:00 +0000
1029+++ mrol_mapping/__init__.py 2012-11-20 22:16:18 +0000
1030@@ -0,0 +1,2 @@
1031+import os
1032+root_dir = os.path.abspath(os.path.dirname(__file__))
1033
1034=== removed file 'mrol_mapping/__init__.py'
1035--- mrol_mapping/__init__.py 2011-06-03 16:52:15 +0000
1036+++ mrol_mapping/__init__.py 1970-01-01 00:00:00 +0000
1037@@ -1,2 +0,0 @@
1038-import os
1039-root_dir = os.path.abspath(os.path.dirname(__file__))
1040
1041=== added file 'mrol_mapping/bresenhamline.py'
1042--- mrol_mapping/bresenhamline.py 1970-01-01 00:00:00 +0000
1043+++ mrol_mapping/bresenhamline.py 2012-11-20 22:16:18 +0000
1044@@ -0,0 +1,112 @@
1045+"""
1046+N-D Bresenham line algo
1047+"""
1048+import numpy as np
1049+def _bresenhamline_nslope(slope):
1050+ """
1051+ Normalize slope for Bresenham's line algorithm.
1052+
1053+ >>> s = np.array([[-2, -2, -2, 0]])
1054+ >>> _bresenhamline_nslope(s)
1055+ array([[-1., -1., -1., 0.]])
1056+
1057+ >>> s = np.array([[0, 0, 0, 0]])
1058+ >>> _bresenhamline_nslope(s)
1059+ array([[ 0., 0., 0., 0.]])
1060+
1061+ >>> s = np.array([[0, 0, 9, 0]])
1062+ >>> _bresenhamline_nslope(s)
1063+ array([[ 0., 0., 1., 0.]])
1064+ """
1065+ scale = np.amax(np.abs(slope), axis=1).reshape(-1, 1)
1066+ zeroslope = (scale == 0).all(1)
1067+ scale[zeroslope] = np.ones(1)
1068+ normalizedslope = slope.astype(np.double) / scale
1069+ normalizedslope[zeroslope] = np.zeros(slope[0].shape)
1070+ return normalizedslope
1071+
1072+def _bresenhamlines(start, end, max_iter):
1073+ """
1074+ Returns npts lines of length max_iter each. (npts x max_iter x dimension)
1075+
1076+ >>> s = np.array([[3, 1, 9, 0],[0, 0, 3, 0]])
1077+ >>> _bresenhamlines(s, np.zeros(s.shape[1]), max_iter=-1)
1078+ array([[[ 3, 1, 8, 0],
1079+ [ 2, 1, 7, 0],
1080+ [ 2, 1, 6, 0],
1081+ [ 2, 1, 5, 0],
1082+ [ 1, 0, 4, 0],
1083+ [ 1, 0, 3, 0],
1084+ [ 1, 0, 2, 0],
1085+ [ 0, 0, 1, 0],
1086+ [ 0, 0, 0, 0]],
1087+ <BLANKLINE>
1088+ [[ 0, 0, 2, 0],
1089+ [ 0, 0, 1, 0],
1090+ [ 0, 0, 0, 0],
1091+ [ 0, 0, -1, 0],
1092+ [ 0, 0, -2, 0],
1093+ [ 0, 0, -3, 0],
1094+ [ 0, 0, -4, 0],
1095+ [ 0, 0, -5, 0],
1096+ [ 0, 0, -6, 0]]])
1097+ """
1098+ if max_iter == -1:
1099+ max_iter = np.amax(np.amax(np.abs(end - start), axis=1))
1100+ npts, dim = start.shape
1101+ nslope = _bresenhamline_nslope(end - start)
1102+
1103+ # steps to iterate on
1104+ stepseq = np.arange(1, max_iter + 1)
1105+ stepmat = np.tile(stepseq, (dim, 1)).T
1106+
1107+ # some hacks for broadcasting properly
1108+ bline = start[:, np.newaxis, :] + nslope[:, np.newaxis, :] * stepmat
1109+
1110+ # Approximate to nearest int
1111+ return np.rint(bline).astype(start.dtype)
1112+
1113+def bresenhamline(start, end, max_iter=5):
1114+ """
1115+ Returns a list of points from (start, end] by ray tracing a line b/w the
1116+ points.
1117+ Parameters:
1118+ start: An array of start points (number of points x dimension)
1119+ end: An end points (1 x dimension)
1120+ or An array of end point corresponding to each start point
1121+ (number of points x dimension)
1122+ max_iter: Max points to traverse. if -1, maximum number of required
1123+ points are traversed
1124+
1125+ Returns:
1126+ linevox (n x dimension) A cumulative array of all points traversed by
1127+ all the lines so far.
1128+
1129+ >>> s = np.array([[3, 1, 9, 0],[0, 0, 3, 0]])
1130+ >>> bresenhamline(s, np.zeros(s.shape[1]), max_iter=-1)
1131+ array([[ 3, 1, 8, 0],
1132+ [ 2, 1, 7, 0],
1133+ [ 2, 1, 6, 0],
1134+ [ 2, 1, 5, 0],
1135+ [ 1, 0, 4, 0],
1136+ [ 1, 0, 3, 0],
1137+ [ 1, 0, 2, 0],
1138+ [ 0, 0, 1, 0],
1139+ [ 0, 0, 0, 0],
1140+ [ 0, 0, 2, 0],
1141+ [ 0, 0, 1, 0],
1142+ [ 0, 0, 0, 0],
1143+ [ 0, 0, -1, 0],
1144+ [ 0, 0, -2, 0],
1145+ [ 0, 0, -3, 0],
1146+ [ 0, 0, -4, 0],
1147+ [ 0, 0, -5, 0],
1148+ [ 0, 0, -6, 0]])
1149+ """
1150+ # Return the points as a single array
1151+ return _bresenhamlines(start, end, max_iter).reshape(-1, start.shape[-1])
1152+
1153+
1154+if __name__ == "__main__":
1155+ import doctest
1156+ doctest.testmod()
1157
1158=== added directory 'mrol_mapping/cython'
1159=== removed directory 'mrol_mapping/cython'
1160=== added file 'mrol_mapping/cython/__init__.py'
1161=== removed file 'mrol_mapping/cython/__init__.py'
1162=== added file 'mrol_mapping/cython/fast.pyx'
1163--- mrol_mapping/cython/fast.pyx 1970-01-01 00:00:00 +0000
1164+++ mrol_mapping/cython/fast.pyx 2012-11-20 22:16:18 +0000
1165@@ -0,0 +1,113 @@
1166+#Copyright 2010-2011, Julian Ryde and Nicholas Hillier.
1167+#
1168+#This program is distributed in the hope that it will be useful,
1169+#but WITHOUT ANY WARRANTY; without even the implied warranty of
1170+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1171+#GNU Lesser General Public License for more details.
1172+#
1173+#You should have received a copy of the GNU Lesser General Public License
1174+#along with this program. If not, see <http://www.gnu.org/licenses/>.
1175+
1176+
1177+cimport cython
1178+import numpy as np
1179+cimport numpy as np
1180+
1181+#ctypedef np.float64_t dtype_t
1182+# difference between 32 and 64 bit
1183+ctypedef np.int64_t dtype_i
1184+#ctypedef np.int32_t dtype_i
1185+
1186+#cdef dict a
1187+#a = {}
1188+#a['a'] = 1
1189+
1190+@cython.boundscheck(False)
1191+def intersection(voxelmap, np.ndarray[dtype_i, ndim=1] voxels):
1192+ cdef Py_ssize_t i, overlap = 0
1193+ for i in range(voxels.shape[0]):
1194+ if voxels[i] in voxelmap:
1195+ overlap += 1
1196+ return overlap
1197+
1198+@cython.boundscheck(False)
1199+#@cython.locals(a=cython.double, b=cython.double,
1200+ #N=cython.Py_ssize_t, dx=cython.double,
1201+ #s=cython.double, i=cython.Py_ssize_t)
1202+#@cython.locals(a=cython.double, b=cython.double,
1203+def nested_intersection(dict D, np.ndarray[dtype_i, ndim=2] V):
1204+ cdef Py_ssize_t i, a, b, c, overlap = 0
1205+ for a, b, c in V:
1206+ if a in D and b in D[a] and c in D[a][b]:
1207+ overlap += 1
1208+ return overlap
1209+
1210+#@cython.boundscheck(False)
1211+#def intersection(voxelmap, np.ndarray[dtype_i, ndim=2] voxels):
1212+# cdef Py_ssize_t i, overlap = 0
1213+# for i in range(voxels.shape[0]):
1214+# if (voxels[i, 0], voxels[i, 1], voxels[i, 2]) in voxelmap:
1215+# overlap += 1
1216+# return overlap
1217+
1218+@cython.boundscheck(False)
1219+def fillray(dtype_i numcells, np.ndarray[np.float64_t, ndim=1] dir_unit, np.float64_t res, np.ndarray[np.float64_t, ndim=2] ray):
1220+ cdef int i
1221+ for i in range(numcells):
1222+ ray[i] = dir_unit*i*res
1223+ return ray
1224+
1225+@cython.boundscheck(False)
1226+def increment(dict mapvoxels, np.ndarray[dtype_i, ndim=2] voxels, dtype_i amount=1, userdata=None):
1227+ # TODO ensure this remains consistend with the decrement function
1228+ # TODO fails when there is only one voxel
1229+ # TODO add ability to increment according to a vector of occupancies same
1230+ # length as the array of voxels ie amount should also be allowed to be a
1231+ # vector of occupancies
1232+ if amount < 0:
1233+ print "Warning: negative increment amount. Consider using decrement() instead."
1234+ cdef np.ndarray[dtype_i, ndim=1] newvoxels = np.zeros(voxels.shape[0], dtype=np.int64)
1235+ cdef dtype_i v0, v1, v2, occupancy
1236+ cdef tuple v
1237+ for i in range(voxels.shape[0]):
1238+ occupancy = 0
1239+ v0 = voxels[i,0]
1240+ v1 = voxels[i,1]
1241+ v2 = voxels[i,2]
1242+ v = (v0, v1, v2) # ensure v is a tuple
1243+ if mapvoxels.has_key(v):
1244+ newvoxels[i] = 0
1245+ occupancy = mapvoxels[v] + amount
1246+ else:
1247+ newvoxels[i] = 1
1248+ occupancy = amount
1249+ if occupancy > 0:
1250+ if userdata == None:
1251+ mapvoxels[v] = occupancy
1252+ else:
1253+ mapvoxels[v] = (occupancy, userdata)
1254+ else:
1255+ del mapvoxels[v]
1256+ return newvoxels.astype(np.bool)
1257+
1258+
1259+@cython.boundscheck(False)
1260+def user_increment(mapvoxels, np.ndarray[np.int_t, ndim=1] ids, np.ndarray[np.float64_t, ndim=1] increments, userdata, useraddfn):
1261+ cdef np.ndarray[dtype_i, ndim=1] new_ids = np.zeros(ids.shape[0], dtype=np.int64)
1262+ cdef np.ndarray[dtype_i, ndim=1] rem_ids = np.zeros(ids.shape[0], dtype=np.int64)
1263+ for counter,ID in enumerate(ids):
1264+ if ID not in mapvoxels:
1265+ new_ids[counter] = ID
1266+ mapvoxels[ID] = np.hstack([mapvoxels[ID][0] + increments[counter], useraddfn(mapvoxels[ID][1:], userdata[counter])])
1267+ if mapvoxels[ID][0] < 0:
1268+ del mapvoxels[ID]
1269+ rem_ids[counter] = ID
1270+ return mapvoxels, new_ids.astype(np.bool), rem_ids.astype(np.bool)
1271+
1272+
1273+#@cython.boundscheck(False)
1274+#def shmcopy(manager_list):
1275+# elements = []
1276+# for el in manager_list:
1277+# elements.append(el)
1278+# return elements
1279
1280=== removed file 'mrol_mapping/cython/fast.pyx'
1281--- mrol_mapping/cython/fast.pyx 2011-07-11 06:52:51 +0000
1282+++ mrol_mapping/cython/fast.pyx 1970-01-01 00:00:00 +0000
1283@@ -1,112 +0,0 @@
1284-#Copyright 2010-2011, Julian Ryde and Nicholas Hillier.
1285-#
1286-#This program is distributed in the hope that it will be useful,
1287-#but WITHOUT ANY WARRANTY; without even the implied warranty of
1288-#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1289-#GNU Lesser General Public License for more details.
1290-#
1291-#You should have received a copy of the GNU Lesser General Public License
1292-#along with this program. If not, see <http://www.gnu.org/licenses/>.
1293-
1294-
1295-cimport cython
1296-import numpy as np
1297-cimport numpy as np
1298-
1299-#ctypedef np.float64_t dtype_t
1300-# difference between 32 and 64 bit
1301-ctypedef np.int64_t dtype_i
1302-#ctypedef np.int32_t dtype_i
1303-
1304-#cdef dict a
1305-#a = {}
1306-#a['a'] = 1
1307-
1308-@cython.boundscheck(False)
1309-def intersection(voxelmap, np.ndarray[dtype_i, ndim=1] voxels):
1310- cdef Py_ssize_t i, overlap = 0
1311- for i in range(voxels.shape[0]):
1312- if voxels[i] in voxelmap:
1313- overlap += 1
1314- return overlap
1315-
1316-@cython.boundscheck(False)
1317-#@cython.locals(a=cython.double, b=cython.double,
1318- #N=cython.Py_ssize_t, dx=cython.double,
1319- #s=cython.double, i=cython.Py_ssize_t)
1320-#@cython.locals(a=cython.double, b=cython.double,
1321-def nested_intersection(dict D, np.ndarray[dtype_i, ndim=2] V):
1322- cdef Py_ssize_t i, a, b, c, overlap = 0
1323- for a, b, c in V:
1324- if a in D and b in D[a] and c in D[a][b]:
1325- overlap += 1
1326- return overlap
1327-
1328-#@cython.boundscheck(False)
1329-#def intersection(voxelmap, np.ndarray[dtype_i, ndim=2] voxels):
1330-# cdef Py_ssize_t i, overlap = 0
1331-# for i in range(voxels.shape[0]):
1332-# if (voxels[i, 0], voxels[i, 1], voxels[i, 2]) in voxelmap:
1333-# overlap += 1
1334-# return overlap
1335-
1336-@cython.boundscheck(False)
1337-def fillray(dtype_i numcells, np.ndarray[np.float64_t, ndim=1] dir_unit, np.float64_t res, np.ndarray[np.float64_t, ndim=2] ray):
1338- for i in range(numcells):
1339- ray[i] = dir_unit*i*res
1340- return ray
1341-
1342-@cython.boundscheck(False)
1343-def increment(dict mapvoxels, np.ndarray[dtype_i, ndim=2] voxels, dtype_i amount=1, userdata=None):
1344- # TODO ensure this remains consistend with the decrement function
1345- # TODO fails when there is only one voxel
1346- # TODO add ability to increment according to a vector of occupancies same
1347- # length as the array of voxels ie amount should also be allowed to be a
1348- # vector of occupancies
1349- if amount < 0:
1350- print "Warning: negative increment amount. Consider using decrement() instead."
1351- cdef np.ndarray[dtype_i, ndim=1] newvoxels = np.zeros(voxels.shape[0], dtype=np.int64)
1352- cdef dtype_i v0, v1, v2, occupancy
1353- cdef tuple v
1354- for i in range(voxels.shape[0]):
1355- occupancy = 0
1356- v0 = voxels[i,0]
1357- v1 = voxels[i,1]
1358- v2 = voxels[i,2]
1359- v = (v0, v1, v2) # ensure v is a tuple
1360- if mapvoxels.has_key(v):
1361- newvoxels[i] = 0
1362- occupancy = mapvoxels[v] + amount
1363- else:
1364- newvoxels[i] = 1
1365- occupancy = amount
1366- if occupancy > 0:
1367- if userdata == None:
1368- mapvoxels[v] = occupancy
1369- else:
1370- mapvoxels[v] = (occupancy, userdata)
1371- else:
1372- del mapvoxels[v]
1373- return newvoxels.astype(np.bool)
1374-
1375-
1376-@cython.boundscheck(False)
1377-def user_increment(mapvoxels, np.ndarray[np.int_t, ndim=1] ids, np.ndarray[np.float64_t, ndim=1] increments, userdata, useraddfn):
1378- cdef np.ndarray[dtype_i, ndim=1] new_ids = np.zeros(ids.shape[0], dtype=np.int64)
1379- cdef np.ndarray[dtype_i, ndim=1] rem_ids = np.zeros(ids.shape[0], dtype=np.int64)
1380- for counter,ID in enumerate(ids):
1381- if ID not in mapvoxels:
1382- new_ids[counter] = ID
1383- mapvoxels[ID] = np.hstack([mapvoxels[ID][0] + increments[counter], useraddfn(mapvoxels[ID][1:], userdata[counter])])
1384- if mapvoxels[ID][0] < 0:
1385- del mapvoxels[ID]
1386- rem_ids[counter] = ID
1387- return mapvoxels, new_ids.astype(np.bool), rem_ids.astype(np.bool)
1388-
1389-
1390-#@cython.boundscheck(False)
1391-#def shmcopy(manager_list):
1392-# elements = []
1393-# for el in manager_list:
1394-# elements.append(el)
1395-# return elements
1396
1397=== added file 'mrol_mapping/depth_image.py'
1398--- mrol_mapping/depth_image.py 1970-01-01 00:00:00 +0000
1399+++ mrol_mapping/depth_image.py 2012-11-20 22:16:18 +0000
1400@@ -0,0 +1,141 @@
1401+'''Author: Julian Ryde'''
1402+from __future__ import division
1403+import numpy as np
1404+import pylab
1405+import sys
1406+import mrol_mapping.pointcloud as pointcloud
1407+
1408+# TODO generalise this so we can put it into MROL_mapping lib
1409+def load_image(fname):
1410+ ''' Load a depth image, generated from Blender '''
1411+ di = pylab.imread(fname)
1412+ # convert to ranges
1413+ # blender map scale factor = 0.2
1414+ blender_map = 0.2
1415+ di /= blender_map
1416+ return di
1417+
1418+def image_to_points(di, rgb=None, max_range=5., fov=np.radians(75),
1419+ timestamp=0):
1420+ ''' Convert a depth image to a point cloud'''
1421+ # at the image edge tan(fov/2) = (width/2) / a, where a is the adjacent (focal) length in pixels
1422+ a = 0.5 * di.shape[1] / np.tan(fov/2)
1423+ ci = np.arange(0, di.shape[1])
1424+ ri = np.arange(0, di.shape[0])
1425+ cols = ci - di.shape[1]/2 + 0.5
1426+ rows = ri - di.shape[0]/2 + 0.5
1427+ rows.shape = -1, 1
1428+
1429+ f = di / a
1430+ # scale factor between the image-plane position and the real-space position of each
1431+ # point; by similar triangles, one is just a scaled enlargement of the other.
1432+ # The Blender z buffer returns not the range to the point but the perpendicular distance from the camera plane, namely the z distance.
1433+
1434+ # TODO could use a masked array for performance
1435+
1436+ # camera looking along the x-axis
1437+ if (len(di.shape) == 3):
1438+ # adjust for broadcasting
1439+ cols = np.reshape(cols, (-1, 1))
1440+ rows = np.reshape(rows, (-1, 1, 1))
1441+
1442+ Y = -f * cols
1443+ Z = -f * rows
1444+ X = f * a
1445+
1446+ #inds = np.logical_and(di < di.max(), di > 0)
1447+ inds = np.logical_and(di < max_range, di > 0)
1448+
1449+ xyzs = np.vstack((X[inds], Y[inds], Z[inds])).T
1450+ assert(len(xyzs))
1451+ pc = pointcloud.PointCloud(xyzs, timestamp=timestamp)
1452+ if rgb is not None:
1453+ rgbfiltered = rgb[inds]
1454+ pc.userdata = rgbfiltered
1455+ return pc
1456+
1457+def depth_discont_simple(di, tol=0.2):
1458+ dx = np.diff(di, axis=0)
1459+ #dx = np.vstack([np.zeros(di.shape[1]),dx])
1460+ #dx = np.vstack([np.zeros(di.shape[1]),dx[:-1,:],np.zeros(di.shape[1])])
1461+ dx = np.vstack([dx,np.zeros(di.shape[1])])
1462+ dx = np.abs(dx)
1463+ dx[dx < tol] = 0
1464+ dy = np.diff(di, axis=1)
1465+ #dy = np.hstack([np.atleast_2d(np.zeros(di.shape[0])).T,dy])
1466+ #dy = np.hstack([np.atleast_2d(np.zeros(di.shape[0])).T,dy[:,:-1],np.atleast_2d(np.zeros(di.shape[0])).T])
1467+ dy = np.hstack([dy,np.atleast_2d(np.zeros(di.shape[0])).T])
1468+ dy = np.abs(dy)
1469+ dy[dy < tol] = 0
1470+ diff_im = np.sqrt(dx**2 + dy**2)
1471+ di_diff = np.zeros(di.shape)
1472+ di_diff[diff_im > 0] = di[diff_im > 0]
1473+ import pylab
1474+ pylab.ion()
1475+ pylab.imshow(di_diff)
1476+ import scipy.ndimage as ndimage
1477+ #di_diff = ndimage.minimum_filter(di_diff, size=(1,1))
1478+ #for i in range(di_diff.shape[0]):
1479+ # for j in range(di_diff.shape[1]-1):
1480+ # p1 = di_diff[i,j]
1481+ # p2 = di_diff[i,j+1]
1482+ # if p1 == 0 or p2 == 0:
1483+ # continue
1484+ # if p1 > p2:
1485+ # p1 = 0
1486+ # elif p2 > p1:
1487+ # p2 = 0
1488+ # di_diff[i,j] = p1
1489+ # di_diff[i,j+1] = p2
1490+ return di_diff
1491+
1492+def depth_discont_sobel(di, tol=0.5):
1493+ import scipy.ndimage as ndimage
1494+ dx = ndimage.convolve(di,[[-1,0,1],[-2,0,2],[-1,0,1]])
1495+ dy = ndimage.convolve(di,[[-1,-2,-1],[0,0,0],[1,2,1]])
1496+ diff_im = np.sqrt(dx**2 + dy**2)
1497+ diff_im[diff_im < tol] = 0
1498+ di_diff = np.zeros(di.shape)
1499+ di_diff[diff_im > 0] = di[diff_im > 0]
1500+ #import pylab
1501+ #pylab.ion()
1502+ di_diff = ndimage.minimum_filter(di_diff, size=(2,2))
1503+ #for i in range(di_diff.shape[0]):
1504+ # for j in range(di_diff.shape[1]-1):
1505+ # p1 = di_diff[i,j]
1506+ # p2 = di_diff[i,j+1]
1507+ # if p1 == 0 or p2 == 0:
1508+ # continue
1509+ # if p1 > p2:
1510+ # p1 = 0
1511+ # elif p2 > p1:
1512+ # p2 = 0
1513+ # di_diff[i,j] = p1
1514+ # di_diff[i,j+1] = p2
1515+ #pylab.imshow(di_diff)
1516+ return di_diff
1517+
1518+def depth_filt_min_non_zero(p1,p2):
1519+ if p1 == 0 or p2 == 0:
1520+ return
1521+ if p1 > p2:
1522+ p1 = 0
1523+ return
1524+ if p2 > p1:
1525+ p2 = 0
1526+ return
1527+ return
1528+
1529+def depth_disconts2(di, tol=0.2):
1530+ import scipy.ndimage as ndimage
1531+ ndimage.gaussian_filter(di,sigma=(5,5))
1532+ ndimage.gaussian_derivative()
1533+ ndimage.median_filter()
1534+ return di_diff
1535+
1536+if __name__ == '__main__':
1537+ fname = sys.argv[1]
1538+ xyzs = image_to_points(load_image(fname))
1539+ import mrol_mapping.visualiser.dispxyz as dispxyz
1540+ dispxyz.showpts(xyzs)
1541+ print 'Escape to close window'
1542
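
A short usage sketch of the back-projection implemented by image_to_points() in the file added above; the 4x4 constant-depth image is a made-up illustration and assumes the mrol_mapping package (including its pylab dependency) is importable:

    import numpy as np
    import mrol_mapping.depth_image as depth_image

    # synthetic 4x4 "depth image": every pixel 2 m in front of the camera plane
    di = np.full((4, 4), 2.0)
    pc = depth_image.image_to_points(di, max_range=5.0, fov=np.radians(75))
    print(pc.points.shape)  # (16, 3): one 3D point per pixel within max_range
    print(pc.points[:, 0])  # X (the camera axis) equals the z-buffer depth, 2.0 here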
1543=== added file 'mrol_mapping/log.py'
1544--- mrol_mapping/log.py 1970-01-01 00:00:00 +0000
1545+++ mrol_mapping/log.py 2012-11-20 22:16:18 +0000
1546@@ -0,0 +1,11 @@
1547+import logging
1548+def getLogger(name):
1549+ logger = logging.getLogger(name)
1550+ # create a console handler that emits everything from DEBUG up
1551+ ch = logging.StreamHandler()
1552+ ch.setLevel(logging.DEBUG)
1553+ # create formatter and add it to the handlers
1554+ formatter = logging.Formatter('%(name)s - %(message)s')
1555+ ch.setFormatter(formatter)
1556+ logger.addHandler(ch)
1557+ return logger
1558
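
A small usage sketch of the logging helper added above; the logger name 'demo' and the message are arbitrary examples:

    import logging
    import mrol_mapping.log as log

    logger = log.getLogger('demo')      # console handler at DEBUG with '%(name)s - %(message)s'
    logger.setLevel(logging.INFO)       # the caller still picks the logger level, as mapper.py does
    logger.info('map update complete')  # prints: demo - map update complete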
1559=== added file 'mrol_mapping/mapper.py'
1560--- mrol_mapping/mapper.py 1970-01-01 00:00:00 +0000
1561+++ mrol_mapping/mapper.py 2012-11-20 22:16:18 +0000
1562@@ -0,0 +1,438 @@
1563+#Copyright 2010-2011, Julian Ryde and Nicholas Hillier.
1564+#
1565+#This program is distributed in the hope that it will be useful,
1566+#but WITHOUT ANY WARRANTY; without even the implied warranty of
1567+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
1568+#GNU Lesser General Public License for more details.
1569+#
1570+#You should have received a copy of the GNU Lesser General Public License
1571+#along with this program. If not, see <http://www.gnu.org/licenses/>.
1572+
1573+from __future__ import division
1574+import numpy as np
1575+
1576+import pointcloud
1577+import occupiedlist
1578+import mrol
1579+import poseutil
1580+import util
1581+import cPickle as pickle
1582+import quantizer
1583+import time
1584+import log
1585+import logging
1586+
1587+logger = log.getLogger('mapper')
1588+logger.setLevel(logging.INFO)
1589+USE_BACKTRACING = True
1590+
1591+_map_resolution_levels = 4 # number of different levels of resolution
1592+
1593+def loadmap(fname):
1594+ return pickle.load(open(fname))
1595+
1596+class MapBuilder(object):
1597+ def __init__(self, resolution,
1598+ visualise=False,
1599+ levels=_map_resolution_levels,
1600+ odometry_map=False,
1601+ rotate=False):
1602+ self.vmap = VoxelMap(resolution, levels=levels,
1603+ rotate=rotate)
1604+ self.odometry_map = odometry_map
1605+
1606+ self.bestpose = poseutil.Pose3D()
1607+ self.scancount = 0
1608+ if visualise:
1609+ self.vmap.display()
1610+
1611+ def add_pointcloud(self, pc):
1612+ if self.scancount == 0:
1613+ self.scancount = 1
1614+ self.vmap.add_scan(pc, pc.pose)
1615+ pc.pose = (poseutil.Pose3D() if pc.pose is None else pc.pose)
1616+ return self.vmap.get_voxels(level=-1), pc.pose
1617+
1618+ # reduce number of scan points to one per voxel
1619+ # sample = pc.volumetricsample(self.vmap.resolution)
1620+
1621+ logger.info('--------- Scan %d ---------' % self.scancount)
1622+ self.scancount += 1
1623+
1624+ if pc.pose is None:
1625+ guessPose = self.bestpose
1626+ else:
1627+ guessPose = pc.pose
1628+
1629+ start = time.time()
1630+ if not self.odometry_map:
1631+ self.bestpose, maxcost = self.vmap.align_points(pc, guessPose)
1632+
1633+ alignment_time = time.time() - start
1634+
1635+ start = time.time()
1636+ if self.odometry_map: # create map without aligning
1637+ self.vmap.add_scan(pc, guessPose)
1638+ else:
1639+ self.vmap.add_scan(pc, self.bestpose)
1640+
1641+ mapupdate_time = time.time() - start
1642+ logger.info('Map voxel count:{0}'.format( len(self.vmap)))
1643+
1644+ logger.info('Aligned pose:{0}'.format(self.bestpose))
1645+ logger.info('Cost value: {0}'.format(maxcost))
1646+
1647+ logger.info('Alignment time(s): %.2f' % alignment_time)
1648+ logger.info('Map update time(s): %.2f' % mapupdate_time)
1649+ return self.vmap.get_voxels(level=-1), self.bestpose
1650+
1651+def buildmap(pointclouds, resolution, visualise=True, odometry_map=False,
1652+ rotate=False,
1653+ bestpose_queue=None,
1654+ voxel_queue=None):
1655+ '''Takes an iterable of point clouds, which may carry guess poses, and produces
1656+ a map. If a cloud has no guess pose, the previous best pose is used as the estimate.
1657+
1658+ bestpose_queue: A util.Queue object through which a mapper thread can
1659+ optionally communicate back the best pose found so far. This can be used to
1660+ refine pc.pose, i.e. the initial guess pose.
1661+ '''
1662+ vmap = VoxelMap(resolution, levels=_map_resolution_levels, rotate=rotate)
1663+ pc = pointclouds.next()
1664+ vmap.add_scan(pc, pc.pose)
1665+
1666+ logger.info('--------- Scan 0 ---------')
1667+ logger.info('Map voxel count:{0}'.format(len(vmap)))
1668+
1669+ if visualise:
1670+ vmap.display()
1671+
1672+ bestpose = poseutil.Pose3D()
1673+ # in case this needs to be used as the first guess pose
1674+
1675+ i = 0
1676+ maxcost = None
1677+ while True:
1678+ try:
1679+ pc = pointclouds.next()
1680+ except StopIteration:
1681+ break
1682+ i += 1
1683+
1684+ # reduce number of scan points to one per voxel
1685+ sample = pc.volumetricsample(resolution)
1686+
1687+ logger.info('--------- Scan %d ---------' % i)
1688+
1689+ if pc.pose is None:
1690+ guessPose = bestpose
1691+ else:
1692+ guessPose = pc.pose
1693+
1694+ start = time.time()
1695+ if not odometry_map:
1696+ bestpose, maxcost = vmap.align_points(sample, guessPose)
1697+ if bestpose_queue:
1698+ bestpose_queue.put_force(bestpose)
1699+
1700+ alignment_time = time.time() - start
1701+
1702+ start = time.time()
1703+ if odometry_map: # create map without aligning
1704+ vmap.add_scan(pc, guessPose)
1705+ else:
1706+ vmap.add_scan(pc, bestpose)
1707+
1708+ if voxel_queue:
1709+ (vdata, vres) = vmap.get_voxels(level=-1)
1710+ voxel_queue.put_force((vdata, vres))
1711+
1712+ mapupdate_time = time.time() - start
1713+ logger.info('Map voxel count:{0}'.format( len(vmap)))
1714+
1715+ logger.info('Aligned pose:{0}'.format(bestpose))
1716+ logger.info('Cost value: {0}'.format(maxcost))
1717+
1718+ #logger.info('Execution times (s): %.2f\t%.2f' % (alignment_time, mapupdate_time))
1719+ logger.info('Alignment time(s): %.2f' % alignment_time)
1720+ logger.info('Map update time(s): %.2f' % mapupdate_time)
1721+ return vmap
1722+
1723+def _build_Kinect_sensor_model(resolution, minR=0.5, maxR=5):
1724+ '''builds an occupiedlist sensor model aligned with the x-axis at the
1725+ origin for free space determination'''
1726+ fov = np.radians(75)
1727+ #hres = 640
1728+ #vres = 480
1729+ hres = 320
1730+ vres = 240
1731+ dtheta = fov / hres
1732+ A = 0.5 * hres / np.tan(0.5 * fov)
1733+ # perpendicular distance of camera image plane in pixel space
1734+ cols = np.arange(hres) - hres * 0.5
1735+ rows = np.arange(vres) - vres * 0.5
1736+ rows.shape = -1, 1
1737+ # normalise by A so you can step in x-axis
1738+ rows /= A
1739+ cols /= A
1740+ Y = np.tile(cols, (vres, 1)).ravel()
1741+ Z = np.tile(rows, (1, hres)).ravel()
1742+
1743+ sensor_model_ol = occupiedlist.OccupiedList(resolution, maxvoxels=2e6)
1744+ # this way voxels that have multiple rays passing through them are not
1745+ # tested multiple times.
1746+
1747+ xyzs = np.empty((vres * hres, 3))
1748+ for X in np.arange(minR, maxR, resolution):
1749+ # TODO this is an approximation calculate the angles
1750+ xyzs[:, 0] = X
1751+ xyzs[:, 1] = Y * X
1752+ xyzs[:, 2] = Z * X
1753+ sensor_model_ol.add_points(xyzs)
1754+ print X
1755+ return sensor_model_ol
1756+
1757+class VoxelMap:
1758+ '''Mobile robot mapper capable of 2D and 3D maps. Needs to be initialised with
1759+ an initial pose and scan so that future scans can be aligned; poses are all
1760+ poseutil objects. The recommended approach is to take a 3D scan
1761+ whilst the robot is stationary to seed the map and then go from there,
1762+ matching 2D or 3D scans to the map for localisation within the map,
1763+ or incrementally growing the map.'''
1764+
1765+ # Special functions #############################################
1766+ # optional also include an estimate of the pose error
1767+ def __init__(self, resolution, levels=1, rotate=False):
1768+ # lattice_type='cubic', #alignment_method='OVL','Pt2Pt_ICP','Pt2Pl_ICP'
1769+ self.mrol = mrol.MROL(resolution, levels)
1770+ self.verbosity = 0
1771+ self.trajectory = []
1772+ # List of poses at which observations have been taken
1773+
1774+ # The verbosity level. Higher verbosity gives more print statements.
1775+ #self.set_feature_range(100)
1776+ self.lattice = 'cubic' # not used anywhere yet because cubic is default
1777+ # for assistance in discretisation of angular increments in the
1778+ # gauss_siedel optimisation
1779+ self._sensor_pts = None # the sensor model as voxel points
1780+ self._rv = None # the robot visualiser
1781+ self._rotate = rotate # if visualising, autorotate
1782+
1783+ def __len__(self):
1784+ ''' return the number of voxels at the finest resolution '''
1785+ return len(self.mrol.mapvoxels[-1])
1786+
1787+ #def __str__(self)
1788+ # ''' return key statistics of the map as a string: e.g. mapped volume,
1789+ # extents, number of voxels, resolution levels, resolutions.
1790+
1791+ # Getter functions #############################################
1792+ def get_resolution(self):
1793+ return self.mrol.getfinest().resolution
1794+
1795+ def get_voxels(self, level=-1):
1796+ '''Returns the (almost) raw voxel information together
1797+ with the resolution of the voxelisation.'''
1798+ voxels = self.mrol.getvoxels(level=level)
1799+ resolution = self.mrol.getresolution(level=level)
1800+ return voxels, resolution
1801+
1802+ def get_points(self, level=-1):
1803+ '''Returns the map as a point cloud of the voxel centers. If the
1804+ map has been setup with multiple resolutions, return the voxels at the
1805+ desired resolution. Defaults to the finest resolution.'''
1806+ voxel_centers = self.mrol.getpoints(level=level)
1807+ pc = pointcloud.PointCloud(voxel_centers)
1808+ return pc
1809+
1810+
1811+ def get_overlap(self, pose, xyzs):
1812+ '''Return the overlap between a pointcloud at a given pose and the map.'''
1813+ ol = self.mrol.getfinest()
1814+ overlap = ol.calccollisions(pose, xyzs)
1815+ return overlap
1816+
1817+ # other functions #############################################
1818+
1819+ def save(self, fname):
1820+ outfile = open(fname, 'w')
1821+ self._rv = None # remove visualiser
1822+ pickle.dump(self, outfile, protocol=2)
1823+
1824+ def display(self,npts=1e6,changes=False):
1825+ '''Shows a representation of the map in a gui'''
1826+ from visualiser.robotvisualiser import Visualiser
1827+ if self._rv is None:
1828+ self._rv_changes = changes
1829+ self._rv = Visualiser(npts=npts, Rotate=self._rotate)
1830+ self._rv.set_orthographic(Bool=True)
1831+
1832+ # TODO visualiser should take a point cloud object
1833+ self._rv.addmappts(self.get_points().points)
1834+
1835+ # add the trajectory points
1836+ for traj in self.trajectory:
1837+ self._rv.addtrajectorypoint(traj.getPosition())
1838+ #else:
1839+ #self._disp.se
1840+
1841+ def add_scan(self, pc, pose, timestamp=None):
1842+ '''Adds a new scan to the existing map.
1843+
1844+ Adds the occupied voxels and removes the freespace.
1845+ '''
1846+ userdata = pc.userdata
1847+ xyzs = pc.points
1848+ if USE_BACKTRACING:
1849+ self.clear_freespace(xyzs, pose)
1850+ self.add_points(xyzs, pose, timestamp, userdata)
1851+
1852+ def clear_freespace(self, xyzs, pose):
1853+ '''Removes the free space voxels from the occupancy list.
1854+
1855+ Parameters:
1856+ xyzs: The occupied points, expressed with the sensor at the origin.
1857+ pose: The pose of the `xyzs` point cloud with respect to the global map.
1858+ '''
1859+ xyzs, return_pose = poseutil.transformPoints(xyzs, pose)
1860+ sensor_loc = poseutil.transformPoints(np.array([[0, 0, 0]]), pose)
1861+ self.mrol.clear_freespace(xyzs, sensor_loc[0])
1862+
1863+ def add_points(self, xyzs, pose, timestamp=None, userdata=None):
1864+ '''Adds the pointcloud xyzs to the map, transforming them by the given
1865+ pose if required. Returns the overlap of the new points against the
1866+ existing map.'''
1867+ if isinstance(xyzs, pointcloud.PointCloud):
1868+ xyzs = xyzs.points
1869+ assert xyzs.shape[1] == 3, '3 dimensional points'
1870+ xyzs, return_pose = poseutil.transformPoints(xyzs, pose)
1871+ self.trajectory.append(return_pose)
1872+ if self._rv is not None: # visualisation
1873+ finest = self.mrol.getfinest()
1874+ # determine unique new voxels convert to points and add to the map
1875+ #ol = occupiedlist.OccupiedList(finest.resolution)
1876+ ol = finest
1877+ #ol.add_points(xyzs)
1878+ #ol = ol - finest
1879+ vox = ol.getvoxeldata()
1880+ self._rv.addmappts(vox)
1881+ #self._rv.setminmax(np.min(self.get_points().points[:,2]),np.max(self.get_points().points[:,2]))
1882+ self._rv.setrobotpose(return_pose.getMatrix())
1883+ self._rv.addtrajectorypoint(return_pose.getTuple()[0:3])
1884+ if self._rv_changes:
1885+ inliers, outliers = self.mrol.getfinest().segment_ovl(None,xyzs)
1886+ self._rv.setrightpts(inliers)
1887+ self._rv.setleftpts(outliers)
1888+ self.mrol.add_points(xyzs, userdata=userdata, timestamp=timestamp)
1889+ return self.get_overlap(return_pose.getTuple(), xyzs)
1890+
1891+ # TODO Consider a remove_points based on timestamps?
1892+ def remove_points(self, xyzs, pose):
1893+ '''Remove points from the map. In practice, the idea is
1894+ to decrement the probability of the voxel in the map having been
1895+ observed.
1896+
1897+ Returns the voxels at the finest resolution that are now empty.
1898+ '''
1899+ xyzs = poseutil.transformPoints(xyzs,pose)[0]
1900+ return self.mrol.removepoints(xyzs)
1901+
1902+ #def set_feature_range(self, feature_range):
1903+ # ''' Sets the 'feature_range' variable. Used for assistance in
1904+ # the discretisation of angular increments in the gauss_siedel
1905+ # optimisation '''
1906+ # self.feature_range = float(feature_range)
1907+ # self.mrol.feature_range = self.feature_range
1908+
1909+ # def set_options? instead of a long list of init options
1910+ # def get_options? so the user knows the current setup
1911+
1912+
1913+ #def some_sort_of_map_maintenance_interface
1914+ # - raytrace cleanup, time decay, or callable method based on number
1915+ # of observations.
1916+ # - perhaps as a parameter to the init for auto-maintenance?
1917+
1918+ # If we provide an interface to directly add voxels (e.g. from
1919+ # pre-made maps), it would be nice to be able to merge voxels of
1920+ # different resolutions and lattice types. This is a low priority
1921+ # feature
1922+ #def add_voxels(self, voxels, res, lattice_type=self.lattice_type) # perhaps rename add_map?
1923+
1924+ #def get_voxels(self, res/level, lattice_type=self.lattice_type) # perhaps rename get_map?
1925+ # return center coordinate in same format as points are added so it looks like a point cloud
1926+
1927+ #def get_voxel_data
1928+ # for things like number of observations. Can also be retrieved from
1929+ # get_voxels with the raw option?
1930+
1931+ #def get_voxel_primitive(self, res, lattice_type=self.lattice_type)
1932+ # return some representation of the voxel shape for visualisation purposes
1933+ # (tri/quad-mesh, corner points?)
1934+
1935+ #def sample_points(self, method='volumetric')
1936+ # to do some point cloud sampling (volumetric, random etc)
1937+
1938+ def align_points(self, xyzs, guess_pose, two_D=False, full_data=False):
1939+ '''Align the point cloud xyzs with the data already contained in the
1940+ map, using the guess_pose as a seed to the optimisation. Returns the
1941+ pose that best fits the point cloud data to the map and the value of
1942+ the objective function at that pose.
1943+
1944+ If the map is a multi-resolution structure, then the alignment will use
1945+ the data at the various resolutions to perform the alignment, which is
1946+ more expensive, but more robust. To force alignment at just a single
1947+ resolution, specify the level parameter.
1948+
1949+ By default, the alignment is a 6DOF optimization. This can be
1950+ constrained to the XY plane by enabling the two_D parameter.'''
1951+
1952+ xyzs = xyzs.points[:,:3]
1953+ bestpose, cost = self.mrol.optimise_alignment(xyzs, guess_pose, full_data=full_data)
1954+ return bestpose, cost
1955+
1956+ #def align_points_to_map(self, xyzs, guess_pose):
1957+ # bestpose, cost = optimization.gauss_siedel(self._performance_objectiveFast, guess_pose, args=(xyzs, 1.0), quiet=True)
1958+ # xyzs_xformed = poseutil.transformPoints(xyzs, bestpose)
1959+ # return bestpose, cost, xyzs_xformed
1960+
1961+ def generate_freespace(self, resolution, pose=None, minR=0.5, maxR=5):
1962+ '''Generates an occupied list consisting of the observed free space at
1963+ the specified resolution. This is useful for detecting object removal
1964+ from a map.'''
1965+ assert resolution >= self.get_resolution()
1966+ if self._sensor_pts is None:
1967+ sensor_model_ol = _build_Kinect_sensor_model(resolution, minR=minR, maxR=maxR) # for simulated data
1968+ self._sensor_pts = sensor_model_ol.getpoints()
1969+ logger.info("Sensor model points: {0}".format(len(self._sensor_pts)))
1970+ #vis = visualiser.robotvisualiser.Visualiser(npts=1e7)
1971+ #vis.addmappts(self._sensor_pts)
1972+
1973+ base_ol = self.mrol.getfinest()
1974+ freespace_ol = occupiedlist.OccupiedList(resolution, maxvoxels=5e6)
1975+ # loop through the trajectory of poses
1976+ # ray trace from each pose and add voxels to the free space occupied voxel list
1977+ if pose == None:
1978+ for pose in self.trajectory:
1979+ logger.info("Pose {0}".format(pose))
1980+ inliers, outliers = base_ol.segment_ovl(pose, self._sensor_pts)
1981+ # TODO some means of not adding points that are further away than the intersection (doesn't work very well without this!)
1982+ # Julian suggests a masking operation whilst stepping through the layers of the model in X
1983+ freespace_ol.add_points(outliers)
1984+ freespace_ol.removepoints(inliers)
1985+ else:
1986+ inliers, outliers = base_ol.segment_ovl(pose, self._sensor_pts)
1987+ # TODO some means of not adding points that are further away than the intersection
1988+ freespace_ol.add_points(outliers)
1989+ freespace_ol.removepoints(inliers)
1990+ return freespace_ol
1991+
1992+ def _trajectory_objective(self, guess_pose, pts, foo):
1993+ return -occupiedlist.calccollisions(guess_pose, self._traj_alignment_voxel_set, self.res, pts)
1994+
1995+ # TODO add support for not specifying centrepose or spreadpose, looks
1996+ # through entire map
1997+ def globally_localise(self, points, centrepose, spreadpose):
1998+ '''Localises the point cloud in this map without requiring a good
1999+ initial guess pose. Returns the bestpose along with a measure of overlap.'''
2000+ return self.mrol.match(points.points, centrepose, spreadpose)
2001
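
A minimal sketch of how the new incremental MapBuilder interface above might be driven; the resolution, the random test clouds and the explicit identity seed pose are assumptions for illustration, not values taken from this branch:

    import numpy as np
    import mrol_mapping.pointcloud as pointcloud
    import mrol_mapping.poseutil as poseutil
    import mrol_mapping.mapper as mapper

    builder = mapper.MapBuilder(resolution=0.05, visualise=False)

    seed = pointcloud.PointCloud(np.random.rand(2000, 3))
    seed.pose = poseutil.Pose3D()                    # seed scan taken at a known (identity) pose
    voxel_data, pose = builder.add_pointcloud(seed)  # first scan just populates the map

    scan = pointcloud.PointCloud(np.random.rand(2000, 3))
    scan.pose = poseutil.Pose3D()                    # guess pose; align_points refines it against the map
    voxel_data, pose = builder.add_pointcloud(scan)
    # voxel_data is the (voxels, resolution) pair from get_voxels(level=-1),
    # pose is the aligned pose of the scan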
2002=== removed file 'mrol_mapping/mapper.py'
2003--- mrol_mapping/mapper.py 2012-03-21 18:57:49 +0000
2004+++ mrol_mapping/mapper.py 1970-01-01 00:00:00 +0000
2005@@ -1,438 +0,0 @@
2006-#Copyright 2010-2011, Julian Ryde and Nicholas Hillier.
2007-#
2008-#This program is distributed in the hope that it will be useful,
2009-#but WITHOUT ANY WARRANTY; without even the implied warranty of
2010-#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2011-#GNU Lesser General Public License for more details.
2012-#
2013-#You should have received a copy of the GNU Lesser General Public License
2014-#along with this program. If not, see <http://www.gnu.org/licenses/>.
2015-
2016-from __future__ import division
2017-import numpy as np
2018-
2019-import pointcloud
2020-import occupiedlist
2021-import mrol
2022-import poseutil
2023-import util
2024-import cPickle as pickle
2025-import quantizer
2026-import time
2027-
2028-debug = True
2029-_map_resolution_levels = 4 # number of different levels of resolution
2030-
2031-def loadmap(fname):
2032- return pickle.load(open(fname))
2033-
2034-def buildmap(pointclouds, resolution, visualise=True, odometry_map=False, rotate=False):
2035- '''Takes an iterable of point clouds which can have guess poses and produce
2036- a map. If no guess poses uses previous pose as an estimate'''
2037-
2038- vmap = VoxelMap(resolution, levels=_map_resolution_levels, rotate=rotate)
2039- pc = pointclouds.next()
2040- vmap.add_points(pc, pc.pose)
2041- if debug:
2042- print '--------- Scan 0 ---------'
2043- print 'Map voxel count:', len(vmap)
2044-
2045- if visualise:
2046- vmap.display()
2047-
2048- bestpose = poseutil.Pose3D()
2049- # in case this needs to be used as the first guess pose
2050-
2051- i = 0
2052- maxcost = None
2053- while True:
2054- try:
2055- pc = pointclouds.next()
2056- except StopIteration:
2057- break
2058- i += 1
2059-
2060- # reduce number of scan points to one per voxel
2061- sample = pc.volumetricsample(resolution)
2062-
2063- if debug:
2064- print '--------- Scan %d ---------' % i
2065-
2066- if pc.pose is None:
2067- guessPose = bestpose
2068- else:
2069- guessPose = pc.pose
2070-
2071- start = time.time()
2072- if not odometry_map:
2073- bestpose, maxcost = vmap.align_points(sample, guessPose)
2074- alignment_time = time.time() - start
2075-
2076- start = time.time()
2077- if odometry_map: # create map without aligning
2078- vmap.add_points(pc, guessPose)
2079- else:
2080- vmap.add_points(pc, bestpose)
2081- mapupdate_time = time.time() - start
2082- print 'Map voxel count:', len(vmap)
2083-
2084- if debug:
2085- print 'Aligned pose:', bestpose
2086- print 'Cost value: ', maxcost
2087-
2088- if debug:
2089- #print 'Execution times (s): %.2f\t%.2f' % (alignment_time, mapupdate_time)
2090- print 'Alignment time(s): %.2f' % alignment_time
2091- print 'Map update time(s): %.2f' % mapupdate_time
2092- return vmap
2093-
2094-def _build_Kinect_sensor_model(resolution, minR=0.5, maxR=5):
2095- '''builds an occupiedlist sensor model aligned with the x-axis at the
2096- origin for free space determination'''
2097- fov = np.radians(75)
2098- #hres = 640
2099- #vres = 480
2100- hres = 320
2101- vres = 240
2102- dtheta = fov / hres
2103- A = 0.5 * hres / np.tan(0.5 * fov)
2104- # perpendicular distance of camera image plane in pixel space
2105- cols = np.arange(hres) - hres * 0.5
2106- rows = np.arange(vres) - vres * 0.5
2107- rows.shape = -1, 1
2108- # normalise by A so you can step in x-axis
2109- rows /= A
2110- cols /= A
2111- Y = np.tile(cols, (vres, 1)).ravel()
2112- Z = np.tile(rows, (1, hres)).ravel()
2113-
2114- sensor_model_ol = occupiedlist.OccupiedList(resolution, maxvoxels=2e6)
2115- # this way voxels that have multiple rays passing through them are not
2116- # tested multiple times.
2117-
2118- xyzs = np.empty((vres * hres, 3))
2119- for X in np.arange(minR, maxR, resolution):
2120- # TODO this is an approximation calculate the angles
2121- xyzs[:, 0] = X
2122- xyzs[:, 1] = Y * X
2123- xyzs[:, 2] = Z * X
2124- sensor_model_ol.add_points(xyzs)
2125- print X
2126- return sensor_model_ol
2127-
2128-class VoxelMap:
2129- '''Mobile robot mapper capable of 2D and 3D maps. Need to initialise with
2130- a initial pose and scan so that future scans can be aligned, poses are all
2131- poseutil objects. Recommended way is to take a 3D scan
2132- whilst the robot is stationary to seed the map and then go from there,
2133- matching 2D or 3D scans to the map for localisation within the map,
2134- or incrementally growing the map.'''
2135-
2136- # Special functions #############################################
2137- # optional also include an estimate of the pose error
2138- def __init__(self, resolution, levels=1, rotate=False):
2139- # lattice_type='cubic', #alignment_method='OVL','Pt2Pt_ICP','Pt2Pl_ICP'
2140- self.mrol = mrol.MROL(resolution, levels)
2141- self.verbosity = 0
2142- self.trajectory = []
2143- # List of poses at which observations have been taken
2144-
2145- # The verbosity level. Higher verbosity gives more print statements.
2146- #self.set_feature_range(100)
2147- self.lattice = 'cubic' # not used anywhere yet because cubic is default
2148- # for assistance in discretisation of angular increments in the
2149- # gauss_siedel optimisation
2150- self._sensor_pts = None # the sensor model as voxel points
2151- self._rv = None # the robot visualiser
2152- self._rotate = rotate # if visualising, autorotate
2153-
2154- def __len__(self):
2155- ''' return the number of voxels at the finest resolution '''
2156- return len(self.mrol.mapvoxels[-1])
2157-
2158- #def __str__(self)
2159- # ''' return key statistics of the map as a string: e.g. mapped volume,
2160- # extents, number of voxels, resolution levels, resolutions.
2161-
2162- # Getter functions #############################################
2163- def get_resolution(self):
2164- return self.mrol.getfinest().resolution
2165-
2166- def get_voxels(self, level=-1):
2167- '''Returns the (almost) raw voxel information together
2168- with the resolution of the voxelisation.'''
2169- voxels = self.mrol.getvoxels(level=level)
2170- resolution = self.mrol.getresolution(level=level)
2171- return voxels[0], voxels[1], resolution
2172-
2173- def get_points(self, level=-1):
2174- '''Returns the map as a point cloud of the voxel centers. If the
2175- map has been setup with multiple resolutions, return the voxels at the
2176- desired resolution. Defaults to the finest resolution.'''
2177- voxel_centers = self.mrol.getpoints(level=level)
2178- pc = pointcloud.PointCloud(voxel_centers)
2179- return pc
2180-
2181-
2182- def get_overlap(self, pose, xyzs):
2183- '''Return the overlap between a pointcloud at a given pose and the map.'''
2184- ol = self.mrol.getfinest()
2185- overlap = ol.calccollisions(pose, xyzs)
2186- return overlap
2187-
2188- # other functions #############################################
2189-
2190- def save(self, fname):
2191- outfile = open(fname, 'w')
2192- self._rv = None # remove visualiser
2193- pickle.dump(self, outfile, protocol=2)
2194-
2195- def display(self,npts=1e6,changes=False):
2196- '''Shows a representation of the map in a gui'''
2197- import visualiser.robotvisualiser
2198- if self._rv is None:
2199- self._rv_changes = changes
2200- self._rv = visualiser.robotvisualiser.Visualiser(npts=npts, Rotate=self._rotate)
2201- self._rv.set_orthographic(Bool=True)
2202- #self._rv.setminmax(np.min(self.get_points().points[:,2]),np.max(self.get_points().points[:,2]))
2203- #self._rv.setautominmax()
2204-
2205- #if len(inliers) > 0:
2206- # vis.setrightpts(inliers)
2207- # vis.setleftpts(outliers)
2208- #vis.setspinpts(FreeMapper.get_points())
2209-
2210- # TODO visualiser should take a point cloud object
2211- self._rv.addmappts(self.get_points().points)
2212-
2213- # add the trajectory points
2214- for traj in self.trajectory:
2215- self._rv.addtrajectorypoint(traj.getPosition())
2216- #else:
2217- #self._disp.se
2218-
2219- def add_points(self, xyzs, pose, timestamp=None, userdata=None):
2220- '''Adds the pointcloud xyzs to the map, transforming them by the given
2221- pose if required. Returns the overlap of the new points against the
2222- existing map.'''
2223- xyzs = xyzs.points
2224- if xyzs.shape[1] > 3: # handling for additional information that is part of the pointcloud object (e.g. colour)
2225- if userdata == None:
2226- userdata = xyzs[:,3:]
2227- else:
2228- userdata = np.hstack([xyzs[:,3:],userdata])
2229- xyzs = xyzs[:,:3]
2230- xyzs, return_pose = poseutil.transformPoints(xyzs, pose)
2231- self.trajectory.append(return_pose)
2232- if self._rv is not None: # visualisation
2233- finest = self.mrol.getfinest()
2234- # determine unique new voxels convert to points and add to the map
2235- ol = occupiedlist.OccupiedList(finest.resolution)
2236- ol.add_points(xyzs)
2237- ol = ol - finest
2238- self._rv.addmappts(ol.getpoints())
2239- #self._rv.setminmax(np.min(self.get_points().points[:,2]),np.max(self.get_points().points[:,2]))
2240- self._rv.setrobotpose(return_pose.getMatrix())
2241- self._rv.addtrajectorypoint(return_pose.getTuple()[0:3])
2242- if self._rv_changes:
2243- inliers, outliers = self.mrol.getfinest().segment_ovl(None,xyzs)
2244- self._rv.setrightpts(inliers)
2245- self._rv.setleftpts(outliers)
2246- self.mrol.add_points(xyzs, userdata=userdata, timestamp=timestamp)
2247- return self.get_overlap(return_pose.getTuple(), xyzs)
2248-
2249- # TODO Consider a remove_points based on timestamps?
2250- def remove_points(self, xyzs, pose):
2251- '''Remove points from the map. In practice, the idea is
2252- to decrement the probability of the voxel in the map having been
2253- observed.
2254-
2255- Returns the voxels at the finest resolution that are now empty.
2256- '''
2257- xyzs = poseutil.transformPoints(xyzs,pose)[0]
2258- return self.mrol.removepoints(xyzs)
2259-
2260- #def set_feature_range(self, feature_range):
2261- # ''' Sets the 'feature_range' variable. Used for assistance in
2262- # the discretisation of angular increments in the gauss_siedel
2263- # optimisation '''
2264- # self.feature_range = float(feature_range)
2265- # self.mrol.feature_range = self.feature_range
2266-
2267- # def set_options? instead of a long list of init options
2268- # def get_options? so the user knows the current setup
2269-
2270-
2271- #def some_sort_of_map_maintenance_interface
2272- # - raytrace cleanup, time decay, or callable method based on number
2273- # of observations.
2274- # - perhaps as a parameter to the init for auto-maintenance?
2275-
2276- # If we provide an interface to directly add voxels (e.g. from
2277- # pre-made maps), it would be nice to be able to merge voxels of
2278- # different resolutions and lattice types. This is a low priority
2279- # feature
2280- #def add_voxels(self, voxels, res, lattice_type=self.lattice_type) # perhaps rename add_map?
2281-
2282- #def get_voxels(self, res/level, lattice_type=self.lattice_type) # perhaps rename get_map?
2283- # return center coordinate in same format as points are added so it looks like a point cloud
2284-
2285- #def get_voxel_data
2286- # for things like number of observations. Can also be retrieved from
2287- # get_voxels with the raw option?
2288-
2289- #def get_voxel_primitive(self, res, lattice_type=self.lattice_type)
2290- # return some representation of the voxel shape for visualisation purposes
2291- # (tri/quad-mesh, corner points?)
2292-
2293- #def sample_points(self, method='volumetric')
2294- # to do some point cloud sampling (volumetric, random etc)
2295-
2296- def align_points(self, xyzs, guess_pose, two_D=False, full_data=False):
2297- '''Align the point cloud xyzs with the data already contained in the
2298- map, using the guess_pose as a seed to the optimisation. Returns the
2299- pose that best fits the point cloud data to the map and the value of
2300- the objective function at that pose.
2301-
2302- If the map is a multi-resolution structure, then the alignment will use
2303- the data at the various resolutions to perform the alignment, which is
2304- more expensive, but more robust. To force alignment at just a single
2305- resolution, specify the level parameter.
2306-
2307- By default, the alignment is a 6DOF optimization. This can be
2308- constrained to the XY plane by enabling the two_D parameter.'''
2309-
2310- xyzs = xyzs.points[:,:3]
2311- bestpose, cost = self.mrol.optimise_alignment(xyzs, guess_pose, full_data=full_data)
2312- return bestpose, cost
2313-
2314- #def align_points_to_map(self, xyzs, guess_pose):
2315- # bestpose, cost = optimization.gauss_siedel(self._performance_objectiveFast, guess_pose, args=(xyzs, 1.0), quiet=True)
2316- # xyzs_xformed = poseutil.transformPoints(xyzs, bestpose)
2317- # return bestpose, cost, xyzs_xformed
2318-
2319- def generate_freespace(self, resolution, pose=None, minR=0.5, maxR=5):
2320- '''Generates an occuplied list consisting of the observed free space at
2321- the specified resolution. This is useful for detecting object removal
2322- from a map.'''
2323- assert resolution >= self.get_resolution()
2324- if self._sensor_pts is None:
2325- sensor_model_ol = _build_Kinect_sensor_model(resolution, minR=minR, maxR=maxR) # for simulated data
2326- self._sensor_pts = sensor_model_ol.getpoints()
2327- print "Sensor model points:", len(self._sensor_pts)
2328- #vis = visualiser.robotvisualiser.Visualiser(npts=1e7)
2329- #vis.addmappts(self._sensor_pts)
2330-
2331- base_ol = self.mrol.getfinest()
2332- freespace_ol = occupiedlist.OccupiedList(resolution, maxvoxels=5e6)
2333- # loop through the trajectory of poses
2334- # ray trace from each pose and add voxels to the free space occupied voxel list
2335- if pose == None:
2336- for pose in self.trajectory:
2337- print pose
2338- inliers, outliers = base_ol.segment_ovl(pose, self._sensor_pts)
2339- # TODO some means of not adding points that are further away than the intersection (doesn't work very well without this!)
2340- # Julian suggests a masking operation whilst stepping through the layers of the model in X
2341- freespace_ol.add_points(outliers)
2342- freespace_ol.removepoints(inliers)
2343- else:
2344- inliers, outliers = base_ol.segment_ovl(pose, self._sensor_pts)
2345- # TODO some means of not adding points that are further away than the intersection
2346- freespace_ol.add_points(outliers)
2347- freespace_ol.removepoints(inliers)
2348- return freespace_ol
2349-
2350- def generate_freespace2(self,pc=None, pose=None, resolution=None, minR=0.5, maxR=5):
2351- '''
2352- Uses existing ray-trace method to get
2353- around the current limitations of above implementation.
2354-
2355- Generates an occuplied list consisting of the observed free space at
2356- the specified resolution. This is useful for detecting object removal
2357- from a map.
2358- '''
2359- if resolution == None:
2360- res = self.get_resolution()
2361- else:
2362- res = resolution
2363- assert res >= self.get_resolution()
2364- free_space_ol = occupiedlist.OccupiedList(res, maxvoxels=2e6)
2365- if pc==None and pose==None:
2366- # generate from map using pose list
2367- if self._sensor_pts is None:
2368- sensor_model_ol = _build_Kinect_sensor_model(res, minR=minR, maxR=maxR)
2369- self._sensor_pts = pointcloud.PointCloud(sensor_model_ol.getpoints())
2370- print "Sensor model points:", len(self._sensor_pts)
2371- base_ol = self.mrol.getfinest()
2372-
2373- for pose in self.trajectory:
2374- print pose
2375- inliers, outliers = base_ol.segment_ovl(pose, self._sensor_pts.points)
2376- inlier_pc = pointcloud.PointCloud(inliers)
2377- free_pts = self.free_space_ray_trace(inliers,pose.getTuple()[:3],self.get_resolution())
2378- free_space_ol.add_points(free_pts.points)
2379- else:
2380- assert xyzs != None and pose != None, "Both xyzs and pose must be specified"
2381- # resampling to align with grid. bad hack, but makes ray_cast easier.
2382- voxs = occupiedlist.pointstovoxels(xyzs, res)
2383- voxs = quantizer.uniquerows(voxs, 0)
2384- X = occupiedlist.voxelstopoints(voxs, res)
2385- X_pc = pointcloud.PointCloud(X)
2386- free_pts = self.free_space_ray_trace(X_pc,pose.getTuple()[:3],self.get_resolution())
2387- free_space_ol.add_points(free_pts.points)
2388- return free_space_ol
2389-
2390- def free_space_ray_trace(self,pc,origin,resolution,voxel_offset=0):
2391- # This is essentially independant of the VoxelMap; put elsewhere?
2392- free_pts = []
2393- res = resolution
2394- v_off = np.ceil(voxel_offset*(res/self.get_resolution()))
2395- free_pts_OL = occupiedlist.OccupiedList(res,maxvoxels=3e6)
2396- count = 0
2397- for point in pc.points:
2398- ray_vox_hits, range2, dir_unit, all_ray_pts = self.ray_trace_pts(origin,point,return_extras=True)
2399- all_ray_pts = all_ray_pts.points[:-(v_off+1)]
2400- all_ray_vox = occupiedlist.pointstovoxels(all_ray_pts, res)
2401-
2402- all_ray_vox_ol = occupiedlist.OccupiedList(1)
2403- all_ray_vox_ol.add_points(all_ray_vox)
2404-
2405- ray_vox_hits_ol = occupiedlist.OccupiedList(1)
2406- ray_vox_hits_ol.add_points(ray_vox_hits.points)
2407-
2408- free_ray_vox_ol = all_ray_vox_ol - ray_vox_hits_ol
2409- free_pts_OL.add_points(free_ray_vox_ol.getpoints()*res)
2410- count +=1
2411- #if np.remainder(count / len(pc) * 100,1) == 0:
2412- # print count / len(pc) * 100 , '%'
2413- return pointcloud.PointCloud(free_pts_OL.getpoints())
2414-
2415-
2416-
2417- def ray_trace(self, pose, length, axis=0):
2418- '''perform a ray-trace from the pose and return all map voxels at the
2419- base resolution along the ray as a pointcloud object.
2420- '''
2421- return pointcloud.PointCloud(self.mrol.raytrace(pose, length, axis=axis))
2422-
2423- def ray_trace_pts(self, point1, point2, return_extras=False):
2424- '''
2425- perform a ray-trace from point1 to point2 and return all map
2426- voxels at the base resolution along the ray as a pointcloud object.
2427- '''
2428- if not return_extras:
2429- ray_vox_hits = self.mrol.raytracepts(point1, point2, return_extras=False)
2430- return pointcloud.PointCloud(ray_vox_hits)
2431- else:
2432- ray_vox_hits, range2, dir_unit, all_ray_pts = self.mrol.raytracepts(point1, point2, return_extras=True)
2433- return pointcloud.PointCloud(ray_vox_hits), range2, dir_unit, pointcloud.PointCloud(all_ray_pts)
2434-
2435- def _trajectory_objective(self, guess_pose, pts, foo):
2436- return -occupiedlist.calccollisions(guess_pose, self._traj_alignment_voxel_set, self.res, pts)
2437-
2438- # TODO add support for not specifying centrepose or spreadpose, looks
2439- # through entire map
2440- def globally_localise(self, points, centrepose, spreadpose):
2441- '''Localises the point cloud in this map without requiring a good
2442- initial guess pose. Returns the bestpose along with measure of overlap.'''
2443- return self.mrol.match(points.points, centrepose, spreadpose)
2444
2445=== added file 'mrol_mapping/mrol.py'
2446--- mrol_mapping/mrol.py 1970-01-01 00:00:00 +0000
2447+++ mrol_mapping/mrol.py 2012-11-20 22:16:18 +0000
2448@@ -0,0 +1,354 @@
2449+'''
2450+Implementation of multi-resolution occupied voxel lists (MROL)
2451+Author: Julian Ryde and Nick Hillier
2452+'''
2453+#Copyright 2010-2011, Julian Ryde and Nicholas Hillier.
2454+#
2455+#This program is distributed in the hope that it will be useful,
2456+#but WITHOUT ANY WARRANTY; without even the implied warranty of
2457+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2458+#GNU Lesser General Public License for more details.
2459+#
2460+#You should have received a copy of the GNU Lesser General Public License
2461+#along with this program. If not, see <http://www.gnu.org/licenses/>.
2462+
2463+from __future__ import division
2464+
2465+import time
2466+import os
2467+import numpy as np
2468+
2469+import occupiedlist
2470+import optimization
2471+import util
2472+import poseutil
2473+import quantizer
2474+import logging
2475+
2476+logger = logging.getLogger('mrol')
2477+logger.setLevel(logging.INFO)
2478+#modalfractions = np.repeat(0.3, 5)
2479+modalfractions = [0.7, 0.8, 0.9, 0.9, 0.9]
2480+topn = 100
2481+
2482+# approximate number of sample points selected from the scan to be aligned
2483+sample_size = 2000
2484+
2485+# TODO consider pulling in table class from icra2011 code, table.py
2486+def printtable(table, width):
2487+ for row in table:
2488+ for field in row:
2489+ print '| ' + str(field).ljust(width)[:width],
2490+ print
2491+
2492+class MROL:
2493+ '''
2494+ Stores voxel indices in occupied voxel lists at multiple resolutions
2495+ and provides interfaces to add, remove and manipulate the data.
2496+ '''
2497+ def __init__(self, resolution, levels=3):
2498+ # coarse to fine resolutions
2499+ self.factor = 2 # multiplier between the resolutions might have to be an integer?
2500+ resolutions = (pow(self.factor, np.arange(levels))*resolution)[::-1]
2501+ self.mapvoxels = [] # list of occupied voxel lists
2502+ self.maptimes = []
2503+ # TODO: save memory by setting the max voxels at each level
2504+ for i, res in enumerate(resolutions):
2505+ ol = occupiedlist.OccupiedList(res)
2506+ self.mapvoxels.append(ol)
2507+ #self.maptimes.append([]) # TODO
2508+
2509+ self.keyframe = set()
2510+ self.feature_range = 8.0
2511+ # range to a good feature density for angular alignment purposes
2512+
2513+ def __repr__(self):
2514+ return self.__str__()
2515+
2516+ def __str__(self):
2517+ width = 6
2518+ delim = '\t'
2519+ resolutions = (str(m.resolution).rjust(width) for m in self.mapvoxels)
2520+ retstr = 'resolutions: ' + delim.join(resolutions) + '\nvoxel counts: '
2521+ voxel_counts = (str(len(m)).rjust(width) for m in self.mapvoxels)
2522+ retstr += delim.join(voxel_counts)
2523+ return retstr
2524+
2525+ def __len__(self):
2526+ return len(self.mapvoxels)
2527+
2528+ def __eq__(self, mrol_map):
2529+ return self.__dict__ == mrol_map.__dict__
2530+
2531+ def getresolution(self, level=None):
2532+ if level == None: return self.getfinest().resolution
2533+ return self.mapvoxels[level].resolution
2534+
2535+ def getpoints(self, level=-1):
2536+ return self.mapvoxels[level].getpoints()
2537+
2538+ def getvoxels(self, level=-1):
2539+ return self.mapvoxels[level].getvoxeldata()
2540+
2541+ def getfinest(self):
2542+ '''
2543+ Returns the occupied list with the finest/best resolution it has.
2544+ '''
2545+ return self.mapvoxels[-1]
2546+
2547+ def clear_freespace(self, xyzs, sensor_loc):
2548+ for ol in self.mapvoxels:
2549+ ol.clear_freespace(xyzs, sensor_loc)
2550+
2551+ def add_points(self, xyzs, timestamp=None, userdata=None, increment=1):
2552+ # TODO do something with timestamps for re-journalling
2553+ # TODO allow userdata to be added as an information field to the voxels
2554+ # (e.g. RGB)
2555+ # TODO work out how to get the userdata out again (perhaps in
2556+ # getpoints/getvoxels?)
2557+
2558+ for ol in self.mapvoxels:
2559+ ol.add_points(xyzs,userdata=userdata)
2560+
2561+ ## add to the finest occupiedlist
2562+ #modified_inds, increments = self.getfinest().add_points(xyzs, return_modified=True, increment=increment)
2563+ ##modified_inds_finest = modified_inds.copy()
2564+ ## now propagate these modifications to coarser resolutions
2565+ #for ol in self.mapvoxels[-2::-1]:
2566+ # modified_inds /= self.factor
2567+ # # TODO slow but hopefully more likely to be correct way
2568+ # ol._update_voxels(modified_inds, increments)
2569+ ##return modified_inds_finest
2570+
2571+ def removepoints(self, xyzs, return_removed=False):
2572+ return self.add_points(xyzs, increment=-1)
2573+
2574+ # TODO Reconsider keyframe usage after trajectory class is implemented
2575+ #def setkeyframe(self, xyzs, pose):
2576+ # xyzs = poseutil.transformPoints(xyzs,pose)
2577+ # voxel_array = occupiedlist.pointstovoxels(xyzs, self.getresolution(), unique=True)
2578+ # self.keyframe = set(occupiedlist.hashrows(voxel_array))
2579+ #
2580+ #def getkeyframeset(self):
2581+ # return self.keyframe
2582+
2583+ def _sample_points(self, xyzs, sample_size):
2584+ start_resolution = self.getfinest().resolution
2585+ sample = util.volumetricsample(xyzs, start_resolution)
2586+ for i in range(1, 10):
2587+ sample_res = start_resolution * 2**i
2588+ sample = util.volumetricsample(sample, sample_res)
2589+ if len(sample) < sample_size:
2590+ break
2591+
2592+ logger.info('Volumetric sample resolution: {0}'.format(sample_res))
2593+ return sample
2594+
2595+ # Ray trace methods
2596+ def calcray(self, ray):
2597+ ol = self.getfinest()
2598+ hits = ol.segment_ovl(None, ray)[0]
2599+ if len(hits) > 0:
2600+ vox_hits = occupiedlist.pointstovoxels(hits,ol.resolution,ol.lattice)
2601+ return quantizer.uniquerows(vox_hits, 0)
2602+ else:
2603+ return []
2604+
2605+ def raytrace(self, pose, length, axis=0):
2606+ '''
2607+ perform a ray-trace from the pose and return all map voxels at the
2608+ base resolution along the ray.
2609+ It traces from any point in the map frame (e.g. the sensor's
2610+ current position) along a ray oriented as per the pose's angular
2611+ components.
2612+ '''
2613+ res = self.getresolution()/2.
2614+ numcells = np.ceil(length/res)+2 # + 1 for 0th point + 1 for the end point with poorly aligned voxels
2615+ ray = np.zeros([numcells,3])
2616+ ray[:,axis] = np.hstack([0,np.cumsum(np.ones(numcells-1))*res])
2617+ ray = poseutil.transformPoints(ray, pose)[0]
2618+ return self.calcray(ray)
2619+
2620+ def raytracepts(self, point1, point2, return_extras=False):
2621+ '''
2622+ perform a ray-trace from point1 to point2 and return all map
2623+ voxels at the base resolution along the ray.
2624+ '''
2625+ res = self.getresolution()#/3.
2626+ point2a = point2 - point1
2627+ range2 = np.sum(point2a**2)
2628+ length = np.sqrt(range2)
2629+ dir_unit = point2a/length
2630+ numcells = int(np.ceil(length/res)+2) # + 1 for 0th point + 1 for last point
2631+ ray = np.zeros([numcells,3])
2632+ ## TODO make this a list comprehension as this loop slows the cast down lots
2633+ #for i in range(numcells):
2634+ # ray[i] = dir_unit*i*res
2635+ ray_res = length/(numcells-1)
2636+ ray = fast.fillray(numcells, dir_unit, ray_res, ray)
2637+ ray = ray + point1
2638+ if return_extras:
2639+ return self.calcray(ray), length, dir_unit, ray
2640+ else:
2641+ return self.calcray(ray)
2642+
2643+ # Alignment methods
2644+ def objectivefuncMROL(self, pose, xyzs): # TODO not really used anymore?
2645+ '''
2646+ Objective function to be minimised.
2647+ For a list of xyzs, transforms them by pose and calculates the cost
2648+ at the various resolutions. If a level is specified, the cost is only
2649+ calculated at that level.
2650+ '''
2651+ levels = len(self.mapvoxels)
2652+ hits = np.empty(levels)
2653+ resolutions = np.empty(levels)
2654+ for i, ol in enumerate(self.mapvoxels):
2655+ hits[i] = ol.calccollisions(pose, xyzs, query=False)
2656+ resolutions[i] = ol.resolution
2657+ # sort hits from coarse to fine
2658+ new = np.hstack((-np.diff(hits), hits[-1]))
2659+ #weights = np.ones(levels)
2660+ weights = resolutions[-1]/resolutions # same as ..., 1/8, 1/4, 1/2, 1
2661+ #cost = -np.dot(weights, hits)
2662+ cost = -np.dot(weights, new)
2663+ return cost
2664+
2665+ def optimise_alignment(self, xyzs, initialpose, full_data=False):
2666+ dtheta_base = np.radians(0.5)
2667+ # TODO worried that this is not checking for adjacent voxel occupancy
2668+
2669+ # sample scan points appropriately
2670+ # no point doing multiple look-ups on the same voxel; that would in fact be an
2671+ # unfair weighting
2672+ # we want a volumetric sample with enough sample points
2673+
2674+ # cast points to float32 for speed
2675+ xyzs = np.float32(xyzs)
2676+ xyzs_sample = self._sample_points(xyzs, sample_size)
2677+ #xyzs_sample = util.volumetricsample(xyzs, 0.4)
2678+
2679+ logger.info('initialpose:{0}'.format(initialpose))
2680+ logger.info('Number of sample points: %d' % len(xyzs_sample))
2681+
2682+ bestpose = initialpose.copy()
2683+ for level, ol in enumerate(self.mapvoxels):
2684+ objective_func = ol.cost_func
2685+ dtheta = dtheta_base*pow(self.factor,(len(self.mapvoxels)-level-1))
2686+ dx = ol.resolution
2687+ # This helps avoid local minima, but limits accuracy to voxel size
2688+ # at the finest resolution
2689+ # objective_func is -occupiedlist.calccollisions(pose, xyzs)
2690+ bestpose, cost = optimization.cost_min(objective_func, bestpose, (xyzs_sample,), dx, dtheta, max_iterations=20, verbosity=2)
2691+ logger.info('resolution: %f' % (ol.resolution))
2692+ logger.info('bestpose: {0}'.format(bestpose))
2693+ #bestpose, cost = optimization._cost_min_scipy(objective_func, bestpose, (xyzs_sample,))
2694+ # run it again at the finest resolution with tighter steps to get
2695+ # sub-voxel accuracy.
2696+ dx = dx*pow(self.factor,-2)
2697+ dtheta = dtheta_base*pow(self.factor,-2)
2698+ if not full_data:
2699+ # Sometimes get better results with the below resampling, but much faster.
2700+ xyzs = util.offsetvolsample(xyzs, self.getfinest().resolution)
2701+ bestpose, cost = optimization.cost_min(objective_func, bestpose, (xyzs,), dx, dtheta, max_iterations=100, verbosity=2)
2702+ return bestpose, cost
2703+
2704+ def match(self, xyzs, centrepose, spreadpose):
2705+ """Does a global search of the specified pose space.
2706+ Finds the possible locations of the occupied list ol in the current
2707+ occupied list. Takes an occupied list to match to the current one and
2708+ two poses."""
2709+
2710+ # determines the orientation step size as it relates to occupancy list
2711+ # resolution
2712+ poses = None
2713+ bestpose = poseutil.Pose3D()
2714+
2715+ totaltime = time.time()
2716+ lasttime = time.time()
2717+ # TODO think about this number
2718+ #xyzs = util.getsample(xyzs, 1000)
2719+
2720+ levels = len(self.mapvoxels)
2721+ table = np.zeros((levels+2, 9), dtype=np.dtype('S8'))
2722+ # header for the table of values of interest
2723+ table = []
2724+ table.append('Res modal map scan pose pose Top Time Best'.split())
2725+ table.append('(m) Frac voxels points count count overlap Taken pose'.split())
2726+ table.append('--- ----- ------ ------ ----- ----- ------- ----- ----'.split())
2727+ for i, mapol in enumerate(self.mapvoxels):
2728+ tablerow = []
2729+ res = mapol.resolution
2730+ deltaposition = res
2731+ deltaorientation = res/self.feature_range # 8
2732+            if poses is None:
2733+ poses = poseutil.uniformposes(centrepose, spreadpose, deltaposition, deltaorientation)
2734+ else:
2735+ poses = poseutil.refineposes(poses, deltaposition, deltaorientation)
2736+            logger.info('Number of poses to test: %d' % len(poses))
2737+
2738+ # TODO remove poses outside search space this needs tidying up/thinking about
2739+ #cparr = np.array(centrepose.get())
2740+ #sparr = np.array(spreadpose.get())
2741+ #D = np.array((deltaposition, deltaposition, deltaposition, deltaorientation, deltaorientation, deltaorientation))
2742+ #maxpose = cparr + sparr + D
2743+ #minpose = cparr - sparr - D
2744+ #select = np.logical_and(np.all(poses <= maxpose, 1), np.all(poses >= minpose, 1))
2745+ ##print 'Culling poses: ', len(poses), sum(select)
2746+ #poses = poses[select]
2747+
2748+ xyzs_sample = util.volumetricsample(xyzs, res)
2749+ tablerow.extend((res, modalfractions[i], len(mapol), len(xyzs_sample), len(poses)))
2750+
2751+ overlaps = []
2752+ for pose in poses: # TIME consuming loop
2753+ #overlap = np.sum(mapol.calccollisions(pose, xyzs, query=True))
2754+ overlap = mapol.calccollisions(pose, xyzs_sample, query=False)
2755+ overlaps.append(overlap)
2756+ #overlaps.append(-self.objectivefuncMROL(pose, xyzs_sample))
2757+ overlaps = np.asarray(overlaps)
2758+
2759+ ind_max = np.argmax(overlaps)
2760+ bestpose.set(poses[ind_max])
2761+ bestoverlap = overlaps[ind_max]
2762+ candidates = overlaps >= modalfractions[i] * bestoverlap
2763+ # maybe just take the top n?
2764+ #if len(overlaps) > topn:
2765+ # candidates = overlaps.argsort()[-topn:]
2766+ #else:
2767+ # candidates = range(len(overlaps))
2768+
2769+ timetaken = time.time() - lasttime
2770+ lasttime = time.time()
2771+ tablerow.extend((len(candidates), bestoverlap, timetaken, bestpose))
2772+ table.append(tablerow)
2773+ print
2774+ printtable(table, width=9)
2775+ print 'Bestpose: ', bestpose
2776+ #scansize = ol.voxels[-1].shape[0]
2777+ poses = poses[candidates]
2778+
2779+ print "Total time: ", time.time() - totaltime
2780+ overlaps = overlaps[candidates]
2781+ return bestpose, bestoverlap
2782+
2783+ def matchbest(self, xyzs, centrepose, spreadpose):
2784+ poses, overlaps = self.match(xyzs, centrepose, spreadpose)
2785+ bestind = np.argmax(overlaps)
2786+ bestpose = poseutil.Pose3D(poses[bestind, :])
2787+ cost = overlaps[bestind]
2788+ return bestpose, cost
2789+
2790+ def matchandadd(self, ol, centrepose, spreadpose):
2791+ """ matches the given occupiedlist ol by searching around the
2792+ centrepose a distance of spreadpose. If the match is successful the
2793+        occupied list is transformed and combined with the current occupied
2794+ list. Returns the pose at which the occupied list ol was added."""
2795+ # TODO add a threshold for deciding whether to merge
2796+ # TODO add unit test for this
2797+ # TODO add a ray trace clear?
2798+ poses, overlaps = self.match(ol, centrepose, spreadpose)
2799+ bestpose = poseutil.Pose3D(poses[np.argmax(overlaps), :])
2800+ self.add_points(bestpose.transformPoints(ol.getpoints()))
2801+ return bestpose
2802+
2803
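
For readers unfamiliar with the multi-resolution cost used in objectivefuncMROL above, the weighting can be illustrated in isolation. The sketch below is not part of this branch; it uses plain numpy with made-up hit counts and resolutions, ordered coarse to fine as in the method.

    import numpy as np

    # Hit counts per level, ordered coarse to fine: a point that collides at the
    # finest level also collides at every coarser level, so the counts decrease.
    # The numbers here are illustrative only.
    hits = np.array([900.0, 700.0, 400.0])
    resolutions = np.array([0.4, 0.2, 0.1])   # voxel size per level, coarse to fine

    # Exclusive counts: points whose finest collision is at each level.
    exclusive = np.hstack((-np.diff(hits), hits[-1]))   # [200, 300, 400]

    # Finer levels are worth more: weights relative to the finest resolution,
    # i.e. ..., 1/4, 1/2, 1 as in the comment in the diff.
    weights = resolutions[-1] / resolutions             # [0.25, 0.5, 1.0]

    cost = -np.dot(weights, exclusive)   # more fine-level hits => lower cost
    print(cost)                          # -600.0
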
2804=== removed file 'mrol_mapping/mrol.py'
2805--- mrol_mapping/mrol.py 2012-03-04 17:10:07 +0000
2806+++ mrol_mapping/mrol.py 1970-01-01 00:00:00 +0000
2807@@ -1,352 +0,0 @@
2808-'''
2809-Implementation of multi-resolution occupied voxel lists (MROL)
2810-Author: Julian Ryde and Nick Hillier
2811-'''
2812-#Copyright 2010-2011, Julian Ryde and Nicholas Hillier.
2813-#
2814-#This program is distributed in the hope that it will be useful,
2815-#but WITHOUT ANY WARRANTY; without even the implied warranty of
2816-#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
2817-#GNU Lesser General Public License for more details.
2818-#
2819-#You should have received a copy of the GNU Lesser General Public License
2820-#along with this program. If not, see <http://www.gnu.org/licenses/>.
2821-
2822-from __future__ import division
2823-
2824-import time
2825-import os
2826-import numpy as np
2827-
2828-import occupiedlist
2829-import optimization
2830-import util
2831-import poseutil
2832-import quantizer
2833-
2834-#import cython.fast as fast
2835-
2836-verbose = True
2837-#modalfractions = np.repeat(0.3, 5)
2838-modalfractions = [0.7, 0.8, 0.9, 0.9, 0.9]
2839-topn = 100
2840-
2841-# approximate number of sample points selected from the scan to be aligned
2842-sample_size = 2000
2843-
2844-# TODO consider pulling in table class from icra2011 code, table.py
2845-def printtable(table, width):
2846- for row in table:
2847- for field in row:
2848- print '| ' + str(field).ljust(width)[:width],
2849- print
2850-
2851-class MROL:
2852- '''
2853- Stores voxel indices at multiple resolution occupied voxel lists
2854- and provides interfaces to add, remove and manipulate the data.
2855- '''
2856- def __init__(self, resolution, levels=3):
2857- # coarse to fine resolutions
2858- self.factor = 2 # multiplier between the resolutions might have to be an integer?
2859- resolutions = (pow(self.factor, np.arange(levels))*resolution)[::-1]
2860- self.mapvoxels = [] # list of occupied voxel lists
2861- self.maptimes = []
2862- # TODO: save memory by setting the max voxels at each level
2863- for i, res in enumerate(resolutions):
2864- ol = occupiedlist.OccupiedList(res)
2865- self.mapvoxels.append(ol)
2866- #self.maptimes.append([]) # TODO
2867-
2868- self.keyframe = set()
2869- self.feature_range = 8.0
2870- # range to a good feature density for angular alignment purposes
2871-
2872- def __repr__(self):
2873- return self.__str__()
2874-
2875- def __str__(self):
2876- width = 6
2877- delim = '\t'
2878- resolutions = (str(m.resolution).rjust(width) for m in self.mapvoxels)
2879- retstr = 'resolutions: ' + delim.join(resolutions) + '\nvoxel counts: '
2880- voxel_counts = (str(len(m)).rjust(width) for m in self.mapvoxels)
2881- retstr += delim.join(voxel_counts)
2882- return retstr
2883-
2884- def __len__(self):
2885- return len(self.mapvoxels)
2886-
2887- def __eq__(self, mrol_map):
2888- return self.__dict__ == mrol_map.__dict__
2889-
2890- def getresolution(self, level=None):
2891- if level == None: return self.getfinest().resolution
2892- return self.mapvoxels[level].resolution
2893-
2894- def getpoints(self, level=-1):
2895- return self.mapvoxels[level].getpoints()
2896-
2897- def getvoxels(self, level=-1):
2898- return self.mapvoxels[level].getvoxeldata()
2899-
2900- def getfinest(self):
2901- '''
2902- Returns the occupied list with the finest/best resolution it has.
2903- '''
2904- return self.mapvoxels[-1]
2905-
2906- def add_points(self, xyzs, timestamp=None, userdata=None, increment=1):
2907- # TODO do something with timestamps for re-journalling
2908- # TODO allow userdata to be added as an information field to the voxels
2909- # (e.g. RGB)
2910- # TODO work out how to get the userdata out again (perhaps in
2911- # getpoints/getvoxels?)
2912-
2913- for ol in self.mapvoxels:
2914- ol.add_points(xyzs,userdata=userdata)
2915-
2916- ## add to the finest occupiedlist
2917- #modified_inds, increments = self.getfinest().add_points(xyzs, return_modified=True, increment=increment)
2918- ##modified_inds_finest = modified_inds.copy()
2919- ## now propagate these modifications to coarser resolutions
2920- #for ol in self.mapvoxels[-2::-1]:
2921- # modified_inds /= self.factor
2922- # # TODO slow but hopefully more likely to be correct way
2923- # ol._update_voxels(modified_inds, increments)
2924- ##return modified_inds_finest
2925-
2926- def removepoints(self, xyzs, return_removed=False):
2927- return self.add_points(xyzs, increment=-1)
2928-
2929- # TODO Reconsider keyframe usage after trajectory class is implemented
2930- #def setkeyframe(self, xyzs, pose):
2931- # xyzs = poseutil.transformPoints(xyzs,pose)
2932- # voxel_array = occupiedlist.pointstovoxels(xyzs, self.getresolution(), unique=True)
2933- # self.keyframe = set(occupiedlist.hashrows(voxel_array))
2934- #
2935- #def getkeyframeset(self):
2936- # return self.keyframe
2937-
2938- def _sample_points(self, xyzs, sample_size):
2939- start_resolution = self.getfinest().resolution
2940- sample = util.volumetricsample(xyzs, start_resolution)
2941- for i in range(1, 10):
2942- sample_res = start_resolution * 2**i
2943- sample = util.volumetricsample(sample, sample_res)
2944- if len(sample) < sample_size:
2945- break
2946-
2947- if verbose:
2948- print 'Volumetric sample resolution:', sample_res
2949- return sample
2950-
2951- # Ray trace methods
2952- def calcray(self, ray):
2953- ol = self.getfinest()
2954- hits = ol.segment_ovl(None, ray)[0]
2955- if len(hits) > 0:
2956- vox_hits = occupiedlist.pointstovoxels(hits,ol.resolution,ol.lattice)
2957- return quantizer.uniquerows(vox_hits, 0)
2958- else:
2959- return []
2960-
2961- def raytrace(self, pose, length, axis=0):
2962- '''
2963- perform a ray-trace from the pose and return all map voxels at the
2964- base resolution along the ray.
2965- It traces from any point in the map frame (e.g. the sensor's
2966- current position) along a ray oriented as per the pose's angular
2967- components.
2968- '''
2969- res = self.getresolution()/2.
2970- numcells = np.ceil(length/res)+2 # + 1 for 0th point + 1 for end point with poor aligned voxels
2971- ray = np.zeros([numcells,3])
2972- ray[:,axis] = np.hstack([0,np.cumsum(np.ones(numcells-1))*res])
2973- ray = poseutil.transformPoints(ray, pose)[0]
2974- return self.calcray(ray)
2975-
2976- def raytracepts(self, point1, point2, return_extras=False):
2977- '''
2978- perform a ray-trace from point1 to point2 and return all map
2979- voxels at the base resolution along the ray.
2980- '''
2981- res = self.getresolution()#/3.
2982- point2a = point2 - point1
2983- range2 = np.sum(point2a**2)
2984- length = np.sqrt(range2)
2985- dir_unit = point2a/length
2986- numcells = int(np.ceil(length/res)+2) # + 1 for 0th point + 1 for last point
2987- ray = np.zeros([numcells,3])
2988- ## TODO make this a list comprehension as this loop slows the cast down lots
2989- #for i in range(numcells):
2990- # ray[i] = dir_unit*i*res
2991- ray_res = length/(numcells-1)
2992- ray = fast.fillray(numcells, dir_unit, ray_res, ray)
2993- ray = ray + point1
2994- if return_extras:
2995- return self.calcray(ray), length, dir_unit, ray
2996- else:
2997- return self.calcray(ray)
2998-
2999- # Alignment methods
3000- def objectivefuncMROL(self, pose, xyzs): # TODO not really used anymore?
3001- '''
3002- Objective function to be minimised.
3003- For a list of xyzs, transforms by pose and calculations the cost
3004- at the various resolutions. If level is specified, only
3005- calc at that level.
3006- '''
3007- levels = len(self.mapvoxels)
3008- hits = np.empty(levels)
3009- resolutions = np.empty(levels)
3010- for i, ol in enumerate(self.mapvoxels):
3011- hits[i] = ol.calccollisions(pose, xyzs, query=False)
3012- resolutions[i] = ol.resolution
3013- # sort hits from coarse to fine
3014- new = np.hstack((-np.diff(hits), hits[-1]))
3015- #weights = np.ones(levels)
3016- weights = resolutions[-1]/resolutions # same as ..., 1/8, 1/4, 1/2, 1
3017- #cost = -np.dot(weights, hits)
3018- cost = -np.dot(weights, new)
3019- return cost
3020-
3021- def optimise_alignment(self, xyzs, initialpose, full_data=False):
3022- dtheta_base = np.radians(0.5)
3023- # TODO worried that this is not checking for adjacent voxel occupancy
3024-
3025- # sample scan points appropriately
3026- # no point doing multiple look ups on the same voxel and in fact an
3027- # unfair weighting
3028- # you want a volumetric sample such that the number of sample points is enough
3029-
3030- # cast points to float32 for speed
3031- xyzs = np.float32(xyzs)
3032- xyzs_sample = self._sample_points(xyzs, sample_size)
3033- #xyzs_sample = util.volumetricsample(xyzs, 0.4)
3034-
3035- if verbose:
3036- print 'Number of sample points: %d' % len(xyzs_sample)
3037-
3038- bestpose = initialpose.copy()
3039- for level, ol in enumerate(self.mapvoxels):
3040- objective_func = ol.cost_func
3041- dtheta = dtheta_base*pow(self.factor,(len(self.mapvoxels)-level-1))
3042- dx = ol.resolution
3043- # This helps avoid local minima, but limits accuracy to voxel size
3044- # at the finest resolution
3045- if verbose:
3046- print 'resolution: %f' % (ol.resolution)
3047- # objective_func is -occupiedlist.calccollisions(pose, xyzs)
3048- bestpose, cost = optimization.cost_min(objective_func, bestpose, (xyzs_sample,), dx, dtheta, max_iterations=20, verbosity=2)
3049- #bestpose, cost = optimization._cost_min_scipy(objective_func, bestpose, (xyzs_sample,))
3050- # run it again at the finest resolution with tighter steps to get
3051- # sub-voxel accuracy.
3052- dx = dx*pow(self.factor,-2)
3053- dtheta = dtheta_base*pow(self.factor,-2)
3054- if not full_data:
3055- # Sometimes get better results with the below resampling, but much faster.
3056- xyzs = util.offsetvolsample(xyzs, self.getfinest().resolution)
3057- bestpose, cost = optimization.cost_min(objective_func, bestpose, (xyzs,), dx, dtheta, max_iterations=100, verbosity=2)
3058- return bestpose, cost
3059-
3060- def match(self, xyzs, centrepose, spreadpose):
3061- """Does a global search of the specified pose space.
3062- Find the possible locations of the occupied list ol in the current
3063- occupied list. Takes an occupied list to match to the current one and
3064- two poses."""
3065-
3066- # determines the orientation step size as it relates to occupancy list
3067- # resolution
3068- poses = None
3069- bestpose = poseutil.Pose3D()
3070-
3071- totaltime = time.time()
3072- lasttime = time.time()
3073- # TODO think about this number
3074- #xyzs = util.getsample(xyzs, 1000)
3075-
3076- levels = len(self.mapvoxels)
3077- table = np.zeros((levels+2, 9), dtype=np.dtype('S8'))
3078- # header for the table of values of interest
3079- table = []
3080- table.append('Res modal map scan pose pose Top Time Best'.split())
3081- table.append('(m) Frac voxels points count count overlap Taken pose'.split())
3082- table.append('--- ----- ------ ------ ----- ----- ------- ----- ----'.split())
3083- for i, mapol in enumerate(self.mapvoxels):
3084- tablerow = []
3085- res = mapol.resolution
3086- deltaposition = res
3087- deltaorientation = res/self.feature_range # 8
3088- if poses == None:
3089- poses = poseutil.uniformposes(centrepose, spreadpose, deltaposition, deltaorientation)
3090- else:
3091- poses = poseutil.refineposes(poses, deltaposition, deltaorientation)
3092- if verbose:
3093- print 'Number of poses to test:', len(poses)
3094-
3095- # TODO remove poses outside search space this needs tidying up/thinking about
3096- #cparr = np.array(centrepose.get())
3097- #sparr = np.array(spreadpose.get())
3098- #D = np.array((deltaposition, deltaposition, deltaposition, deltaorientation, deltaorientation, deltaorientation))
3099- #maxpose = cparr + sparr + D
3100- #minpose = cparr - sparr - D
3101- #select = np.logical_and(np.all(poses <= maxpose, 1), np.all(poses >= minpose, 1))
3102- ##print 'Culling poses: ', len(poses), sum(select)
3103- #poses = poses[select]
3104-
3105- xyzs_sample = util.volumetricsample(xyzs, res)
3106- tablerow.extend((res, modalfractions[i], len(mapol), len(xyzs_sample), len(poses)))
3107-
3108- overlaps = []
3109- for pose in poses: # TIME consuming loop
3110- #overlap = np.sum(mapol.calccollisions(pose, xyzs, query=True))
3111- overlap = mapol.calccollisions(pose, xyzs_sample, query=False)
3112- overlaps.append(overlap)
3113- #overlaps.append(-self.objectivefuncMROL(pose, xyzs_sample))
3114- overlaps = np.asarray(overlaps)
3115-
3116- ind_max = np.argmax(overlaps)
3117- bestpose.set(poses[ind_max])
3118- bestoverlap = overlaps[ind_max]
3119- candidates = overlaps >= modalfractions[i] * bestoverlap
3120- # maybe just take the top n?
3121- #if len(overlaps) > topn:
3122- # candidates = overlaps.argsort()[-topn:]
3123- #else:
3124- # candidates = range(len(overlaps))
3125-
3126- timetaken = time.time() - lasttime
3127- lasttime = time.time()
3128- tablerow.extend((len(candidates), bestoverlap, timetaken, bestpose))
3129- table.append(tablerow)
3130- print
3131- printtable(table, width=9)
3132- print 'Bestpose: ', bestpose
3133- #scansize = ol.voxels[-1].shape[0]
3134- poses = poses[candidates]
3135-
3136- print "Total time: ", time.time() - totaltime
3137- overlaps = overlaps[candidates]
3138- return bestpose, bestoverlap
3139-
3140- def matchbest(self, xyzs, centrepose, spreadpose):
3141- poses, overlaps = self.match(xyzs, centrepose, spreadpose)
3142- bestind = np.argmax(overlaps)
3143- bestpose = poseutil.Pose3D(poses[bestind, :])
3144- cost = overlaps[bestind]
3145- return bestpose, cost
3146-
3147- def matchandadd(self, ol, centrepose, spreadpose):
3148- """ matches the given occupiedlist ol by searching around the
3149- centrepose a distance of spreadpose. If the match is successful the
3150- occupied list is transfomred and combined with the current occupied
3151- list. Returns the pose at which the occupied list ol was added."""
3152- # TODO add a threshold for deciding whether to merge
3153- # TODO add unit test for this
3154- # TODO add a ray trace clear?
3155- poses, overlaps = self.match(ol, centrepose, spreadpose)
3156- bestpose = poseutil.Pose3D(poses[np.argmax(overlaps), :])
3157- self.add_points(bestpose.transformPoints(ol.getpoints()))
3158- return bestpose
3159-
3160
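
The coarse-to-fine pruning performed by match() (score a grid of candidate poses, keep every pose within a modal fraction of the best overlap, refine only around the survivors) can be seen on a toy one-dimensional problem. The sketch below is not part of this branch, and the score function is a made-up stand-in for the voxel overlap count.

    import numpy as np

    def score(x):
        # Stand-in for the overlap count; the real code counts voxel collisions.
        return np.exp(-(x - 1.3) ** 2)

    poses = np.linspace(-5.0, 5.0, 21)   # coarse 1-D grid, step 0.5
    modalfraction = 0.7                  # plays the role of modalfractions[i]
    for step in (0.5, 0.25, 0.125):      # progressively finer "resolutions"
        overlaps = score(poses)
        best = overlaps.max()
        # keep every candidate within the modal fraction of the best overlap
        keep = poses[overlaps >= modalfraction * best]
        # refine: resample either side of each surviving pose at half the step
        poses = np.unique(np.concatenate([keep - step / 2, keep, keep + step / 2]))

    print(poses[np.argmax(score(poses))])   # close to the true optimum at 1.3
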
3161=== added file 'mrol_mapping/occupiedlist.py'
3162--- mrol_mapping/occupiedlist.py 1970-01-01 00:00:00 +0000
3163+++ mrol_mapping/occupiedlist.py 2012-11-20 22:16:18 +0000
3164@@ -0,0 +1,650 @@
3165+#Copyright 2010-2011, Julian Ryde and Nicholas Hillier.
3166+#
3167+#This program is distributed in the hope that it will be useful,
3168+#but WITHOUT ANY WARRANTY; without even the implied warranty of
3169+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3170+#GNU Lesser General Public License for more details.
3171+#
3172+#You should have received a copy of the GNU Lesser General Public License
3173+#along with this program. If not, see <http://www.gnu.org/licenses/>.
3174+
3175+from __future__ import division
3176+import numpy as np
3177+import scipy.stats as stats
3178+
3179+import itertools
3180+import poseutil
3181+import quantizer
3182+import pointcloud
3183+from bresenhamline import bresenhamline
3184+import log
3185+import logging
3186+
3187+#import numexpr as ne
3188+#ne.set_num_threads(4)
3189+
3190+logger = log.getLogger('occupiedlist')
3191+logger.setLevel(logging.INFO)
3192+NBACKTRACE_VOXELS = 10
3193+
3194+inds = np.indices((3,3,3)) - 1
3195+inds.shape = (3, 27)
3196+inds = inds.T
3197+offset_inds = np.zeros((27, 4), dtype=np.int16)
3198+offset_inds[:,:3] = inds
3199+
3200+latticefuncs = {'cubic':quantizer.quantize_cubic, 'bcc':quantizer.quantize_bcc, 'fcc':quantizer.quantize_fcc}
3201+""" Dictionary of the currently implemented lattice quantizers """
3202+
3203+cdf_res = 0.01
3204+cdf_halfwidth = 2
3205+def _x_to_ind(X):
3206+ '''Converts x to an index in the lookup table'''
3207+ return np.int32((X + cdf_halfwidth)/cdf_res + 0.5)
3208+
3209+normcdf = stats.norm.cdf(np.arange(-cdf_halfwidth, cdf_halfwidth, cdf_res))
3210+# lookup table for the cdf
3211+def cdf_approx(xs, sigma):
3212+ return normcdf[_x_to_ind(xs)]
3213+
3214+def loadxyz(fname, resolution):
3215+ '''loads a list of points in x, y, z format from a file and makes an
3216+ occupied voxel list'''
3217+ xyzs = np.loadtxt(fname)
3218+ ol = OccupiedList(resolution)
3219+ ol.add_points(xyzs[:, 0:3])
3220+ return ol
3221+
3222+def pointstovoxels(X, resolution, pose=None, latticetype='cubic', out=None):
3223+ '''Converts a list of points to integer voxel indices. Optionally
3224+ transforming by pose as well for speed.'''
3225+ # Possible lattice types currently include cubic, bcc and fcc.
3226+ # TODO out of bound check required.
3227+ #return latticefuncs[latticetype](X, resolution)
3228+ if out is None:
3229+ out = np.empty((X.shape[0], 4), dtype=np.int16) # uninitialised for speed
3230+ out[:,3] = 0 # set last column to zero
3231+ else: # make sure it is unpacked
3232+ _unpack(out)
3233+
3234+ # combines transformation and scaling into one operation for speed
3235+ X, pose = poseutil.transformPoints(X, pose, scale=1.0/resolution)
3236+ #out[:, :3] = np.round(X)
3237+ # out[:, :3] = X + 0.5 # this does not work quite right
3238+ # out[:, :3] = np.floor(X) # without multi-resolution offsets
3239+ out[:, :3] = np.floor(X + 0.5) # with multi resolution offsets but they do not seem to help that much
3240+ return out
3241+
3242+def voxelstopoints(A, resolution):
3243+    """ Converts an array of integer voxel indices to an array of points """
3244+ # TODO make this work for alternative lattices
3245+ return A*resolution
3246+ #return (A + 0.5)*resolution # without multi-resolution offsets
3247+
3248+def dilate(voxels):
3249+ '''Add adjacent voxels, effectively blurring or smearing occupied voxel
3250+ list'''
3251+ # blur/smear map instead of line above to include checking of adjacent voxels
3252+ mg = np.mgrid[-1:2, -1:2, -1:2]
3253+ mg.shape = (3, 27, 1)
3254+ voxels = np.array(voxels).T
3255+ dilated = mg + voxels[:, np.newaxis, :]
3256+ dilated = dilated.transpose((0, 2, 1)).reshape(3, -1).T
3257+ return np.array(tuple(set(tuplelist(dilated))))
3258+
3259+def tuplelist(X):
3260+    '''Converts an nx3 array to a list of tuples quickly; this is useful for
3261+ bulk insertion into dictionaries and sets'''
3262+ return zip(X[:, 0], X[:, 1], X[:, 2])
3263+
3264+def _4_column_int16(inds):
3265+ '''converts N x 3 array of voxel ints to N x 4 array of int16 with last
3266+ column 0'''
3267+ X_int16 = np.empty((inds.shape[0], 4), dtype=np.int16)
3268+ X_int16[:, :3] = inds
3269+ # 4th column needs to be zeros
3270+ X_int16[:, 3] = 0
3271+ #X_int16.dtype = np.int64
3272+ #X_int16.shape = -1
3273+ return X_int16
3274+
3275+def _unpack(ids):
3276+ '''converts an int64 array of voxel ids to an nx4 array of np.int16.
3277+ Near instantaneous in place operation'''
3278+ ids.dtype = np.int16
3279+ ids.shape = -1, 4
3280+
3281+def _pack(inds):
3282+ '''converts in place an integer array of Nx4 int16 voxels to an array of
3283+    ids. The last column must be zeros.'''
3284+ assert inds.dtype == np.int16, 'inds needs to be np.int16'
3285+ assert inds.shape[1] == 4
3286+ assert not np.isfortran(inds), 'Incorrect array memory layout'
3287+
3288+ inds.dtype = np.int64
3289+ inds.shape = -1
3290+
3291+def _fast_count(ints, return_index=False):
3292+ #counted_ids = collections.Counter(ids) # collections.Counter turns out to be slow
3293+
3294+ # Fast counting of integers although might not scale to large N well
3295+ # because of the sort done by unique?
3296+ if return_index:
3297+ uniqs, uniqinds, inverseinds = np.unique(ints, return_index=True,
3298+ return_inverse=True)
3299+ counts = np.bincount(inverseinds)
3300+ return uniqs, counts, uniqinds
3301+ else:
3302+ uniqs, inds = np.unique(ints, return_inverse=True)
3303+ counts = np.bincount(inds)
3304+ return uniqs, counts
3305+
3306+class BloomFilter:
3307+ '''Special case counting bloom filter optimized for voxel indices'''
3308+
3309+ def __init__(self, size):
3310+ # TODO add probability of false positive as parameter
3311+ self.size = int(6*size) # should this be prime or what?
3312+ self.K = 2 # number of hash functions
3313+ self.bloomtable = np.zeros(self.size, dtype=np.uint16)
3314+ # signed here although not needed makes update errors to the bloomtable
3315+ # easier to spot
3316+
3317+ def __len__(self):
3318+ return self.size
3319+
3320+ def __eq__(self, bf):
3321+ return np.all(self.bloomtable == bf.bloomtable)
3322+
3323+ def djbhash(self, i):
3324+ '''Hash function from D J Bernstein'''
3325+ h = 5381L
3326+ t = (h * 33) & 0xffffffffL
3327+ h = t ^ i
3328+ return h
3329+
3330+ def fnvhash(self, i):
3331+ '''Fowler, Noll, Vo Hash function'''
3332+ h = 2166136261
3333+ t = (h * 16777619) & 0xffffffffL
3334+ h = t ^ i
3335+ return h
3336+
3337+ #mask1 = eval('0b' + 32 * '01')
3338+ #mask2 = eval('0b' + 32 * '10')
3339+ def _hash_voxel_ids(self, ids):
3340+        # Need a hash function that can generate multiple hashes for a given
3341+ # input and over a specified range
3342+ inds = np.empty((2, ids.shape[0]), np.int64)
3343+        # using the id values themselves as the hash is quicker but risky; when benchmarked it seemed to make little difference
3344+ # inds[0] = ids
3345+ inds[0] = self.fnvhash(ids)
3346+ inds[1] = self.djbhash(ids)
3347+ S = self.size
3348+ #return ne.evaluate('inds % S')
3349+ return inds % self.size
3350+
3351+ def add_voxel_ids(self, ids):
3352+ inds = self._hash_voxel_ids(ids)
3353+ for i in range(self.K):
3354+ self.bloomtable[inds[i]] +=1
3355+
3356+ def remove_voxel_ids(self, ids):
3357+ '''Decrement the counts for this counting bloom table but clamp to
3358+ zero'''
3359+ inds = self._hash_voxel_ids(ids)
3360+ for i in range(self.K):
3361+ remove_inds = inds[i][self.bloomtable[inds[i]] > 0]
3362+ for ind in remove_inds:
3363+ # select only those inds with counts > 0
3364+ self.bloomtable[ind] -= 1
3365+ #if np.any(to_change < 1):
3366+ # only decrement those entries which are > 0
3367+
3368+ def contains(self, ids):
3369+ '''Note that this is an approximation with tunable error. False
3370+ positives are possible but false negatives are not.'''
3371+
3372+ # TODO currently only works for 2 hash functions
3373+
3374+ # create K indices into bloom table from K hash functions
3375+ inds = self._hash_voxel_ids(ids)
3376+
3377+ # TODO surely this can be done in a more efficient manner?
3378+ present = np.zeros((self.K, len(ids)), dtype=bool)
3379+ for i in range(self.K):
3380+ present[i] = self.bloomtable[inds[i]] > 0
3381+
3382+ #allpresent = present.sum(axis=0) == self.K
3383+ #allpresent = np.all(present, axis=0)
3384+
3385+ #a = present[0]
3386+ #b = present[1]
3387+ #allpresent = ne.evaluate('a & b')
3388+ #return np.all(present, axis=0)
3389+ return np.logical_and(present[0], present[1])
3390+
3391+def user_add(cur_data,data):
3392+ ''' An example of a user add function for incrementing user data in
3393+ a map voxel given a new measurement'''
3394+ # First we must handle the case where this is the first time we've
3395+ # added data to the voxel, easily identified because the current
3396+ # voxel data only contains the number of prior observations (zero).
3397+ if len(cur_data) == 0:
3398+ return data
3399+ else:
3400+ # This should be customised to the user's application. Typically
3401+ # it should check for consistency in data length also
3402+ return np.int64((np.array(cur_data) + np.array(data)) / 2.) # some merging algorithm
3403+
3404+class default_vox_content:
3405+ # TODO consider replacing this with a np.recarray or some numpy.dtype trickery?
3406+ def __init__(self):
3407+ self.data = np.array([0],dtype=int)
3408+
3409+ def __getitem__(self, index):
3410+ if index == 0:
3411+ return self.data
3412+ else:
3413+ return np.array([])
3414+
3415+class OccupiedList:
3416+    """ Stores a set of voxel indices at a single resolution """
3417+
3418+ def __init__(self, resolution, maxvoxels=1e6, use_bloom=True):
3419+ self.resolution = resolution
3420+ self._transformedvoxels = None
3421+ self.voxdtype = None
3422+
3423+ # voxels is a dictionary with keys of np.int64 IDs that are the result
3424+ # of packing 4 np.int16
3425+ # the content of the dictionary is either an np.int16 holding
3426+ # increments, or TODO a userdefined structure
3427+ self._voxels = dict()
3428+ self.lattice = 'cubic' # should be one of the latticefuncs
3429+ self._use_bloom = use_bloom
3430+ self.use_KDE = False
3431+ if self.use_KDE:
3432+ # Worst case scenario for a cubic lattice is a point right on a
3433+ # diagonal corner, i.e. the probability is spread evenly between 8
3434+ # cells (0.125). It seems reasonable to curtail the occupancy if it
3435+ # is half of this probability (1/4 of a cell diagonal if the cdf
3436+ # was linear).
3437+ self.occupancy_threshold = 0.5 * 1/8
3438+ else:
3439+ self.occupancy_threshold = 1
3440+            # voxels whose occupancy falls below this threshold are removed,
3441+            # and changes/increments to them are ignored unless they are re-added
3442+
3443+ self.maxvoxels = maxvoxels
3444+ if self._use_bloom:
3445+ self.bloomfilter = BloomFilter(self.maxvoxels)
3446+
3447+ def __eq__(self, o):
3448+ if self.resolution != o.resolution or len(self) != len(o):
3449+ return False
3450+ for K, V in self._voxels.items():
3451+ if o._voxels[K] != V:
3452+ return False
3453+ return True
3454+
3455+ def __len__(self):
3456+ return len(self._voxels)
3457+
3458+ def __repr__(self):
3459+ # TODO: rewrite considering userdata
3460+ occupancies = np.asarray(self._voxels.values())
3461+ occupancies.shape = -1, 1
3462+ X = np.hstack((self.getvoxels(), occupancies))
3463+ return str(X)
3464+
3465+ def __sub__(self, ol):
3466+ '''Set like subtraction'''
3467+ # TODO in python 2.7 you can dict1.viewkeys() & dict2.viewkeys() to get
3468+ # the intersection of the keys on dictionary
3469+ # TODO maybe quicker would be to find the intersection and then delete
3470+ # those voxels
3471+ self_set = set(self._voxels.keys())
3472+ ol_set = set(ol._voxels.keys())
3473+ D = np.fromiter(self_set - ol_set, dtype=np.int64)
3474+ _unpack(D)
3475+ Dol = OccupiedList(self.resolution)
3476+ increments = np.ones(len(D))
3477+ Dol._update_voxels(D, increments)
3478+ return Dol
3479+
3480+ # TODO These methods of get and set should be moved to a more generic sparse nD structure
3481+ def set_occupancies(self, voxel_inds, occupancies):
3482+ '''
3483+ Sets the specified voxel indices to the occupancies given.
3484+
3485+ >>> ol = OccupiedList(0.1)
3486+ >>> inds = np.array(((1,2,3), ))
3487+ >>> ol.set_occupancies(inds, (1,2))
3488+ >>> ol
3489+ [[1 2 3 1]]
3490+
3491+ '''
3492+ ids = _4_column_int16(voxel_inds)
3493+ _pack(ids)
3494+ self._check_voxdtype(None)
3495+ for ID, occ in zip(ids, occupancies):
3496+ self._voxels[ID] = occ
3497+
3498+ def get_occupancies(self, voxel_inds):
3499+ '''
3500+ Gets the occupancies for the given voxel indices
3501+
3502+ >>> ol = OccupiedList(0.1)
3503+ >>> n = 100
3504+ >>> inds = np.random.randint(-9999, 9999, (n,3))
3505+ >>> occs = np.random.randint(0, 1000, n)
3506+ >>> ol.set_occupancies(inds, occs)
3507+ >>> np.all(ol.get_occupancies(inds) == occs)
3508+ True
3509+
3510+ Getting empty voxels should not change ol
3511+
3512+ >>> ol = OccupiedList(0.1)
3513+ >>> ol.get_occupancies( np.array(((1,2,3),)) )
3514+ array([0])
3515+ >>> len(ol)
3516+ 0
3517+ '''
3518+ ids = _4_column_int16(voxel_inds)
3519+ _pack(ids)
3520+ return np.array([self._voxels.get(ID, 0) for ID in ids])
3521+
3522+ def getvoxels(self):
3523+ ids = np.asarray(self._voxels.keys())
3524+ _unpack(ids)
3525+ return ids[:,:3]
3526+
3527+ def getvoxeldata(self):
3528+        '''Returns both voxel indices and associated data'''
3529+ voxels = self.getvoxels() * self.resolution
3530+ if not len(voxels):
3531+ return voxels
3532+ voxdtype = self._voxels.itervalues().next().dtype
3533+ if ('userdata' in voxdtype.names):
3534+ vox_val = np.array(self._voxels.values(), dtype=voxdtype)
3535+ voxels = np.hstack([voxels, vox_val['userdata']])
3536+ return voxels
3537+
3538+ def getpoints(self):
3539+ return voxelstopoints(self.getvoxels(), self.resolution)
3540+
3541+ def display(self):
3542+ pc = pointcloud.PointCloud(self.getpoints())
3543+ pc.display()
3544+
3545+ def KDE(self, x0, a0, sigma):
3546+        '''Does a kernel density estimate for the point; x0 should be in lattice
3547+        units and a0 gives the coordinates of the nearest lattice point, also in
3548+        lattice units'''
3549+ # calculate the kernel integrals between -inf, x0, x0 + 1 and inf
3550+ integrals = np.zeros((3,3))
3551+ for i, x in enumerate(x0):
3552+ a = a0[i]
3553+ #cumulative_integrals = stats.norm.cdf((a - 0.5, a + 0.5), loc=x, scale=sigma)
3554+ cumulative_integrals = cdf_approx(np.asarray((a - x - 0.5, a - x + 0.5)), sigma=sigma)
3555+ integrals[i] = np.diff(np.hstack((0, cumulative_integrals, 1)))
3556+ # outer product returning a 3 * 3 * 3 array
3557+ X = integrals[0].reshape(3,1,1)
3558+ Y = integrals[1].reshape(1,3,1)
3559+ Z = integrals[2].reshape(1,1,3)
3560+ # return the kernel density integral over the volume of the lattice
3561+ # cell
3562+ return X * Y * Z
3563+
3564+ # this function is not used anymore but is kept for reference when adding
3565+ # capability to store other data such as RGB.
3566+ def _update_voxel_func_user(self, ID, increment, userdata):
3567+ vox = self._voxels[ID]
3568+ self._voxels[ID] = np.hstack([vox[0] + increment, self.useraddfn(vox[1:], userdata)])
3569+ if self._voxels[ID][0] < self.occupancy_threshold:
3570+ del self._voxels[ID]
3571+ self.removed_ids.append(ID)
3572+
3573+ def _extract_voxdtype(self, userdata):
3574+ if userdata is not None:
3575+ # remove duplicates from userdata
3576+ voxdtype = np.dtype([('count', int),
3577+ ('userdata',
3578+ userdata.dtype, userdata.shape[1])
3579+ ])
3580+ else:
3581+ if len(self._voxels):
3582+ # make zeros consistent with existing self._voxels
3583+ firstval = self._voxels.itervalues().next()
3584+ voxdtype = firstval.dtype
3585+ else:
3586+ voxdtype = np.dtype([('count', int)])
3587+ return voxdtype
3588+
3589+ def _check_voxdtype(self, userdata):
3590+ voxdtype = self._extract_voxdtype(userdata)
3591+ if self.voxdtype is None:
3592+ self.voxdtype = voxdtype
3593+ else:
3594+ assert voxdtype == self.voxdtype
3595+
3596+ def _zeros(self):
3597+ return np.zeros(1, dtype=self.voxdtype)[0]
3598+
3599+ def _update_voxels(self, increment_inds, increment, userdata=None):
3600+ #ids = _4_column_int16(increment_inds)
3601+ _pack(increment_inds)
3602+
3603+ # could just do the line below however need the added ones for bloom
3604+ # filter and so that voxels with zero occupancy are removed
3605+ # self._voxels.update(increment_inds)
3606+
3607+ # Generate a list of newly added voxels and removed voxels for the
3608+ # bloom table update, and do any voxel merging operations
3609+ logger.info('Number of points to add: %d' % len(increment_inds))
3610+ # merging multiple increments of the same voxel
3611+ # count number of added points for each voxel
3612+
3613+ uniqs, counts, uniqinds = _fast_count(increment_inds,
3614+ return_index=True)
3615+ if userdata is not None:
3616+ userdata = userdata[uniqinds]
3617+ # uniqs, counts are the unique values and their corresponding counts
3618+ self._check_voxdtype(userdata)
3619+
3620+ # perhaps a hack to get zero value
3621+ zeros = self._zeros()#np.zeros(1, dtype=voxdtype)[0]
3622+ newcounts = counts * increment
3623+ value_uniqs = np.array([self._voxels.get(ID, zeros) for ID in uniqs],
3624+ dtype=self.voxdtype)
3625+ occupancies = value_uniqs['count']
3626+
3627+ # counter values are initialized with zero. Non-zero values indicate
3628+        # counter values are initialized to zero; non-zero values indicate
3629+        # pre-existence
3630+
3631+ # #############
3632+ # actual update
3633+ # #############
3634+ # add map occupancies with collected voxel counts of points to be added
3635+ if userdata is not None:
3636+ value_uniqs['userdata'] = userdata
3637+ value_uniqs['count'] = value_uniqs['count'] + newcounts
3638+ occupancies = value_uniqs['count']
3639+
3640+ # split into two groups, those that were changed and those that require
3641+ # removal
3642+
3643+ # delete any less than 1
3644+ inds = (occupancies < self.occupancy_threshold)
3645+ removed_ids = uniqs[inds]
3646+ removed_existing = uniqs[inds & existing_ids]
3647+
3648+ changed_ids = uniqs[np.logical_not(inds)]
3649+ changed_values = value_uniqs[np.logical_not(inds)]
3650+
3651+ logger.info("Removing:%d" % len(removed_existing))
3652+ for ID in removed_existing:
3653+ del self._voxels[ID]
3654+
3655+
3656+ # set those with occupancy 1 or more
3657+ # set occupancies of voxels with their new values
3658+ self._voxels.update(itertools.izip(changed_ids, changed_values))
3659+
3660+ #for ID in increment_inds:
3661+ # if ID not in self._voxels:
3662+ # new_ids.append(ID)
3663+ # update_func(ID, increment)
3664+ # #self._update_voxel_func_user(ID, increments[i], userdata[i])
3665+
3666+ #self.removed_ids = np.asarray(self.removed_ids)
3667+ #new_ids = np.asarray(new_ids)
3668+ if self._use_bloom:
3669+ self.bloomfilter.add_voxel_ids(changed_ids)
3670+ self.bloomfilter.remove_voxel_ids(removed_existing)
3671+
3672+ def clear_freespace(self, xyzs, sensor_location):
3673+ '''
3674+        Clear the voxels in the global map that we are quite certain are not
3675+        occupied.
3676+
3677+        Parameters:
3678+        xyzs : end points of the sensor rays, already in global map
3679+            coordinates; no pose transform is applied here.
3680+        sensor_location : position of the sensor in the same frame; free
3681+            space is cleared along the ray from each point back towards it.
3682+ '''
3683+ if not len(self._voxels):
3684+ return
3685+ # TODO should really take a point cloud object rather than xyzs
3686+ if len(xyzs) == 0:
3687+ return
3688+ xyzs = np.atleast_2d(xyzs) # for single points
3689+        assert xyzs.shape[1] == 3, 'Points should be in an n by 3 array format'
3690+
3691+ inds = pointstovoxels(xyzs, resolution=self.resolution, latticetype=self.lattice)
3692+ sensor_voxel = pointstovoxels(sensor_location,
3693+ resolution=self.resolution, latticetype=self.lattice)
3694+
3695+ free_voxels = bresenhamline(inds, sensor_voxel,
3696+ max_iter=NBACKTRACE_VOXELS)
3697+
3698+ # equivalent of remove points
3699+ self._update_voxels(free_voxels, -1)
3700+ assert len(self._voxels) < self.maxvoxels, 'Max voxels exceeded for bloom table'
3701+
3702+
3703+ # TODO do add_points and removepoints properly by apportioning the point
3704+ # probability amongst surrounding voxels, like kernel density estimation.
3705+ # This update should also be more sophisticated in terms of the sensor model.
3706+ # TODO specify sigma as a covariance matrix for each point to be added
3707+ # (this takes into account the pose of the sensor)
3708+ def add_points(self, xyzs, pose=None, userdata=None, increment=1, return_modified=False, sigma=0.5):
3709+ """ convert points to voxel indices and append them to this occupied
3710+ voxel list """
3711+ # TODO should really take a point cloud object rather than xyzs
3712+ if len(xyzs) == 0:
3713+ return
3714+ xyzs = np.atleast_2d(xyzs) # for single points
3715+        assert xyzs.shape[1] == 3, 'Points should be in an n by 3 array format'
3716+ if pose is not None:
3717+ xyzs = pose.transformPoints(xyzs)
3718+
3719+ inds = pointstovoxels(xyzs, resolution=self.resolution, latticetype=self.lattice)
3720+ # TODO check that voxel indices are not out of bounds
3721+
3722+ if self.use_KDE:
3723+ # TODO is similar to mgrid used in dilate, refactor these together or
3724+ # remove dilate as it is not needed now we are doing kernel density
3725+ # estimation
3726+ surrounding_inds = np.int16((np.indices((3,3,3)) - 1).reshape(3, -1).T)
3727+
3728+ # Add voxels to be incremented according to kernel density estimation
3729+ increment_inds = []
3730+ increments = []
3731+ # TODO rather than use a list preallocate the array at 27 * len(xyzs)?
3732+ # TODO make this work for single points
3733+ for xyz, ind in zip(xyzs, inds):
3734+ #increments.extend(np.int8(increment*self.KDE(xyz/self.resolution, ind)).ravel())
3735+ kde = self.KDE(xyz/self.resolution, ind, sigma=sigma)
3736+ #increments.extend(np.int8(increment*kde).ravel())
3737+ increments.extend((increment*kde).ravel())
3738+ # Now add these increments to the appropriate indices
3739+ increment_inds.extend(ind + surrounding_inds)
3740+
3741+ increment_inds = np.asarray(increment_inds)
3742+ increments = np.asarray(increments)
3743+
3744+ # remove increments that are too small to consider
3745+ bigenough_inds = np.abs(increments) >= self.occupancy_threshold
3746+ increments = increments[bigenough_inds]
3747+ increment_inds = increment_inds[bigenough_inds]
3748+ else:
3749+ increment_inds = inds
3750+
3751+ self._update_voxels(increment_inds, increment, userdata)
3752+
3753+ assert len(self._voxels) < self.maxvoxels, 'Max voxels exceeded for bloom table'
3754+ return increment_inds
3755+
3756+ def removepoints(self, xyzs, decrement=1, return_removed=False, **args):
3757+ '''Decrement the observations in the voxels that the xyzs fall
3758+ into.'''
3759+ return self.add_points(xyzs, pose=None, increment=-decrement, return_modified=return_removed, **args)
3760+
3761+ #If the delete flag is set, then remove the voxels that
3762+ #xyzs fall into. Return both the voxels that xyzs fall into and the
3763+ #total decrement amount for that voxel.
3764+
3765+ def savexyz(self, fname):
3766+ """ saves the finest occupied list to fname in x y z format """
3767+ np.savetxt(fname, self.getpoints())
3768+
3769+ def calccollisions(self, pose, xyzs, returntotal=True, query=False):
3770+ '''The intersection of points with this occupied voxel list.
3771+ If query then return the density for each point after transformation
3772+ by the given pose, rather than just a count of hits.
3773+ '''
3774+        # reuse transformed voxels to save initialising the fourth column to
3775+        # zero each time. This is some nasty code; I hope the speed increase is
3776+        # worth it.
3777+ if self._transformedvoxels is None or len(self._transformedvoxels) != len(xyzs):
3778+ self._transformedvoxels = pointstovoxels(xyzs, resolution=self.resolution, pose=pose, out=None)
3779+ else:
3780+ self._transformedvoxels = pointstovoxels(xyzs, resolution=self.resolution, pose=pose, out=self._transformedvoxels)
3781+
3782+ _pack(self._transformedvoxels)
3783+
3784+ if query:
3785+ return [self._voxels[ID] for ID in self._transformedvoxels]
3786+ #return sum(self._voxels[ID] for ID in self._transformedvoxels)
3787+
3788+ if self._use_bloom:
3789+ present = self.bloomfilter.contains(self._transformedvoxels)
3790+ if returntotal:
3791+ return np.sum(present)
3792+ else:
3793+ return present
3794+ else:
3795+ if returntotal:
3796+ return sum(ID in self._voxels for ID in self._transformedvoxels)
3797+ else:
3798+ return [ID in self._voxels for ID in self._transformedvoxels]
3799+
3800+ def cost_func(self, pose, xyzs):
3801+ return -self.calccollisions(pose, xyzs)
3802+
3803+ def segment_ovl(self, pose, xyzs):
3804+ ''' returns the inliers and outliers of the intersection of a point
3805+ cloud with this occupied voxel list.'''
3806+ xyzs = poseutil.transformPoints(xyzs, pose)[0]
3807+ overlap_inds = np.array(self.calccollisions(None, xyzs, returntotal=False))
3808+ inliers = xyzs[overlap_inds]
3809+ outliers = xyzs[np.logical_not(overlap_inds)]
3810+ return inliers, outliers
3811+
3812+if __name__ == "__main__":
3813+ import doctest
3814+ doctest.testmod()
3815
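
For reference, the in-place int16/int64 reinterpretation that _pack and _unpack rely on in the file above can be demonstrated on its own. This is a standalone sketch, not part of the branch, with arbitrary voxel indices.

    import numpy as np

    # An N x 4 array of int16 voxel indices (last column zero) is reinterpreted
    # in place as N int64 ids, so each voxel becomes a single hashable key.
    voxels = np.array([[1, 2, 3, 0],
                       [-4, 5, 6, 0]], dtype=np.int16)

    ids = voxels.copy()
    ids.dtype = np.int64   # view the four int16 fields of each row as one int64
    ids.shape = -1         # one id per voxel

    back = ids.copy()
    back.dtype = np.int16  # reverse: split each int64 back into four int16
    back.shape = -1, 4

    print(np.array_equal(back, voxels))   # True
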
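Similarly, the counting Bloom filter used by calccollisions for fast membership tests can be sketched with plain numpy. The table size and hash mixing below are simplified placeholders, not the fnvhash/djbhash pair from the branch; the behaviour shown (false positives possible, false negatives not) is the same.

    import numpy as np

    size = 97                                 # small counter table for illustration
    table = np.zeros(size, dtype=np.uint16)

    def hashes(ids):
        ids = np.asarray(ids, dtype=np.int64)
        h1 = (ids ^ np.int64(2166136261)) % size   # placeholder mixing, FNV-flavoured
        h2 = (ids ^ np.int64(177573)) % size       # placeholder mixing, DJB-flavoured
        return h1, h2

    def add(ids):
        for h in hashes(ids):
            np.add.at(table, h, 1)                 # counting, so ids can also be removed

    def contains(ids):
        h1, h2 = hashes(ids)
        # present only if every hashed counter is non-zero
        return (table[h1] > 0) & (table[h2] > 0)

    add(np.array([1001, 1002, 1003]))
    print(contains(np.array([1001, 9999])))        # [ True False], barring false positives
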
3816=== removed file 'mrol_mapping/occupiedlist.py'
3817--- mrol_mapping/occupiedlist.py 2012-03-09 15:29:52 +0000
3818+++ mrol_mapping/occupiedlist.py 1970-01-01 00:00:00 +0000
3819@@ -1,549 +0,0 @@
3820-#Copyright 2010-2011, Julian Ryde and Nicholas Hillier.
3821-#
3822-#This program is distributed in the hope that it will be useful,
3823-#but WITHOUT ANY WARRANTY; without even the implied warranty of
3824-#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
3825-#GNU Lesser General Public License for more details.
3826-#
3827-#You should have received a copy of the GNU Lesser General Public License
3828-#along with this program. If not, see <http://www.gnu.org/licenses/>.
3829-
3830-from __future__ import division
3831-import collections
3832-import numpy as np
3833-import scipy.stats as stats
3834-
3835-import poseutil
3836-import quantizer
3837-import pointcloud
3838-
3839-#import numexpr as ne
3840-#ne.set_num_threads(4)
3841-
3842-verbose = False
3843-
3844-inds = np.indices((3,3,3)) - 1
3845-inds.shape = (3, 27)
3846-inds = inds.T
3847-offset_inds = np.zeros((27, 4), dtype=np.int16)
3848-offset_inds[:,:3] = inds
3849-
3850-""" Dictionary of the currently implemented lattice quantizers """
3851-latticefuncs = {'cubic':quantizer.quantize_cubic, 'bcc':quantizer.quantize_bcc, 'fcc':quantizer.quantize_fcc}
3852-
3853-cdf_res = 0.01
3854-cdf_halfwidth = 2
3855-def _x_to_ind(X):
3856- '''Converts x to an index in the lookup table'''
3857- return np.int32((X + cdf_halfwidth)/cdf_res + 0.5)
3858-
3859-normcdf = stats.norm.cdf(np.arange(-cdf_halfwidth, cdf_halfwidth, cdf_res))
3860-# lookup table for the cdf
3861-def cdf_approx(xs, sigma):
3862- return normcdf[_x_to_ind(xs)]
3863-
3864-def loadxyz(fname, resolution):
3865- '''loads a list of points in x, y, z format from a file and makes an
3866- occupied voxel list'''
3867- xyzs = np.loadtxt(fname)
3868- ol = OccupiedList(resolution)
3869- ol.add_points(xyzs[:, 0:3])
3870- return ol
3871-
3872-def pointstovoxels(X, resolution=None, pose=None, latticetype='cubic', out=None):
3873- '''Converts a list of points to integer voxel indices. Optionally
3874- transforming by pose as well for speed.'''
3875- # Possible lattice types currently include cubic, bcc and fcc.
3876- # TODO out of bound check required.
3877- #return latticefuncs[latticetype](X, resolution)
3878- if out is None:
3879- out = np.empty((X.shape[0], 4), dtype=np.int16) # uninitialised for speed
3880- out[:,3] = 0 # set last column to zero
3881- else: # make sure it is unpacked
3882- _unpack(out)
3883-
3884- # combines transformation and scaling into one operation for speed
3885- X, pose = poseutil.transformPoints(X, pose, scale=1.0/resolution)
3886- #out[:, :3] = np.round(X)
3887- out[:, :3] = X + 0.5
3888- #out[:, :3] = np.floor(X+0.5)
3889-
3890- return out
3891-
3892-def voxelstopoints(A, resolution):
3893- """ Converts an array of integers voxel indices to an array of points """
3894- # TODO make this work for alternative lattices
3895- return A*resolution
3896-
3897-def dilate(voxels):
3898- '''Add adjacent voxels, effectively blurring or smearing occupied voxel
3899- list'''
3900- # blur/smear map instead of line above to include checking of adjacent voxels
3901- mg = np.mgrid[-1:2, -1:2, -1:2]
3902- mg.shape = (3, 27, 1)
3903- voxels = np.array(voxels).T
3904- dilated = mg + voxels[:, np.newaxis, :]
3905- dilated = dilated.transpose((0, 2, 1)).reshape(3, -1).T
3906- return np.array(tuple(set(tuplelist(dilated))))
3907-
3908-def tuplelist(X):
3909- '''Converts an nx3 array to a list of tuples quickly this is useful for
3910- bulk insertion into dictionaries and sets'''
3911- return zip(X[:, 0], X[:, 1], X[:, 2])
3912-
3913-def _4_column_int16(inds):
3914- '''converts N x 3 array of voxel ints to N x 4 array of int16 with last
3915- column 0'''
3916- X_int16 = np.empty((inds.shape[0], 4), dtype=np.int16)
3917- X_int16[:, :3] = inds
3918- # 4th column needs to be zeros
3919- X_int16[:, 3] = 0
3920- #X_int16.dtype = np.int64
3921- #X_int16.shape = -1
3922- return X_int16
3923-
3924-def _unpack(ids):
3925- '''converts an int64 array of voxel ids to an nx4 array of np.int16.
3926- Near instantaneous in place operation'''
3927- ids.dtype = np.int16
3928- ids.shape = -1, 4
3929-
3930-def _pack(inds):
3931- '''converts in place an integer array of Nx4 int16 voxels to an array of
3932- ids. the last column are zeros.'''
3933- assert inds.dtype == np.int16, 'inds needs to be np.int16'
3934- assert inds.shape[1] == 4
3935-
3936- inds.dtype = np.int64
3937- inds.shape = -1
3938-
3939-def _fast_count(ints):
3940- #counted_ids = collections.Counter(ids) # collections.Counter turns out to be slow
3941-
3942- # Fast counting of integers although might not scale to large N well
3943- # because of the sort done by unique?
3944- uniqs, inds = np.unique(ints, return_inverse=True)
3945- counts = np.bincount(inds)
3946- return uniqs, counts
3947-
3948-class BloomFilter:
3949- '''Special case counting bloom filter optimized for voxel indices'''
3950-
3951- def __init__(self, size):
3952- # TODO add probability of false positive as parameter
3953- self.size = int(6*size) # should this be prime or what?
3954- self.K = 2 # number of hash functions
3955- self.bloomtable = np.zeros(self.size, dtype=np.uint16)
3956- # signed here although not needed makes update errors to the bloomtable
3957- # easier to spot
3958-
3959- def __len__(self):
3960- return self.size
3961-
3962- def __eq__(self, bf):
3963- return np.all(self.bloomtable == bf.bloomtable)
3964-
3965- def djbhash(self, i):
3966- """Hash function from D J Bernstein"""
3967- h = 5381L
3968- t = (h * 33) & 0xffffffffL
3969- h = t ^ i
3970- return h
3971-
3972- def fnvhash(self, i):
3973- """Fowler, Noll, Vo Hash function"""
3974- h = 2166136261
3975- t = (h * 16777619) & 0xffffffffL
3976- h = t ^ i
3977- return h
3978-
3979- #mask1 = eval('0b' + 32 * '01')
3980- #mask2 = eval('0b' + 32 * '10')
3981- def _hash_voxel_ids(self, ids):
3982- # Need a hash function that that can generate multiple hashes for given
3983- # input and over a specified range
3984- inds = np.empty((2, ids.shape[0]), np.int64)
3985- inds[0] = self.fnvhash(ids)
3986- inds[1] = self.djbhash(ids)
3987- S = self.size
3988- #return ne.evaluate('inds % S')
3989- return inds % self.size
3990-
3991- def add_voxel_ids(self, ids):
3992- inds = self._hash_voxel_ids(ids)
3993- for i in range(self.K):
3994- self.bloomtable[inds[i]] +=1
3995-
3996- def remove_voxel_ids(self, ids):
3997- '''Decrement the counts for this counting bloom table but clamp to
3998- zero'''
3999- inds = self._hash_voxel_ids(ids)
4000- for i in range(self.K):
4001- remove_inds = inds[i][self.bloomtable[inds[i]] > 0]
4002- for ind in remove_inds:
4003- # select only those inds with counts > 0
4004- self.bloomtable[ind] -= 1
4005- #if np.any(to_change < 1):
4006- # only decrement those entries which are > 0
4007-
4008- def contains(self, ids):
4009- '''Note that this is an approximation with tunable error. False
4010- positives are possible but false negatives are not.'''
4011-
4012- # TODO currently only works for 2 hash functions
4013-
4014- # create K indices into bloom table from K hash functions
4015- inds = self._hash_voxel_ids(ids)
4016-
4017- # TODO surely this can be done in a more efficient manner?
4018- present = np.zeros((self.K, len(ids)), dtype=bool)
4019- for i in range(self.K):
4020- present[i] = self.bloomtable[inds[i]] > 0
4021-
4022- #allpresent = present.sum(axis=0) == self.K
4023- #allpresent = np.all(present, axis=0)
4024-
4025- #a = present[0]
4026- #b = present[1]
4027- #allpresent = ne.evaluate('a & b')
4028- #return np.all(present, axis=0)
4029- return np.logical_and(present[0], present[1])
4030-
4031-def user_add(cur_data,data):
4032- ''' An example of a user add function for incrementing user data in
4033- a map voxel given a new measurement'''
4034- # First we must handle the case where this is the first time we've
4035- # added data to the voxel, easily identified because the current
4036- # voxel data only contains the number of prior observations (zero).
4037- if len(cur_data) == 0:
4038- return data
4039- else:
4040- # This should be customised to the user's application. Typically
4041- # it should check for consistency in data length also
4042- return np.int64((np.array(cur_data) + np.array(data)) / 2.) # some merging algorithm
4043-
4044-class default_vox_content:
4045- # TODO consider replacing this with a np.recarray or some numpy.dtype trickery?
4046- def __init__(self):
4047- self.data = np.array([0],dtype=int)
4048-
4049- def __getitem__(self, index):
4050- if index == 0:
4051- return self.data
4052- else:
4053- return np.array([])
4054-
4055-class OccupiedList:
4056- """ stores a set of voxel indices list at a single resolution """
4057-
4058- def __init__(self, resolution, maxvoxels=1e6, use_bloom=True):
4059- self.resolution = resolution
4060- self._transformedvoxels = None
4061-
4062- # voxels is a dictionary with keys of np.int64 IDs that are the result
4063- # of packing 4 np.int16
4064- # the content of the dictionary is either an np.int16 holding
4065- # increments, or TODO a userdefined structure
4066- self._voxels = collections.Counter()
4067- self.lattice = 'cubic' # should be one of the latticefuncs
4068- self._use_bloom = use_bloom
4069- self.use_KDE = False
4070- if self.use_KDE:
4071- # Worst case scenario for a cubic lattice is a point right on a
4072- # diagonal corner, i.e. the probability is spread evenly between 8
4073- # cells (0.125). It seems reasonable to curtail the occupancy if it
4074- # is half of this probability (1/4 of a cell diagonal if the cdf
4075- # was linear).
4076- self.occupancy_threshold = 0.5 * 1/8
4077- else:
4078- self.occupancy_threshold = 1
4079- # below which voxels are removed and changes/increments of these
4080- # are ignored unless re-added occupancy scale factor
4081-
4082- self.maxvoxels = maxvoxels
4083- if self._use_bloom:
4084- self.bloomfilter = BloomFilter(self.maxvoxels)
4085-
4086- def __eq__(self, o):
4087- if self.resolution != o.resolution or len(self) != len(o):
4088- return False
4089- for K, V in self._voxels.items():
4090- if o._voxels[K] != V:
4091- return False
4092- return True
4093-
4094- def __len__(self):
4095- return len(self._voxels)
4096-
4097- def __repr__(self):
4098- # TODO: rewrite considering userdata
4099- occupancies = np.asarray(self._voxels.values())
4100- occupancies.shape = -1, 1
4101- X = np.hstack((self.getvoxels(), occupancies))
4102- return str(X)
4103-
4104- def __sub__(self, ol):
4105- '''Set like subtraction'''
4106- # TODO in python 2.7 you can dict1.viewkeys() & dict2.viewkeys() to get
4107- # the intersection of the keys on dictionary
4108- # TODO maybe quicker would be to find the intersection and then delete
4109- # those voxels
4110- self_set = set(self._voxels.keys())
4111- ol_set = set(ol._voxels.keys())
4112- D = np.fromiter(self_set - ol_set, dtype=np.int64)
4113- _unpack(D)
4114- Dol = OccupiedList(self.resolution)
4115- increments = np.ones(len(D))
4116- Dol._update_voxels(D, increments)
4117- return Dol
4118-
4119- # TODO These methods of get and set should be moved to a more generic sparse nD structure
4120- def set_occupancies(self, voxel_inds, occupancies):
4121- '''
4122- Sets the specified voxel indices to the occupancies given.
4123-
4124- >>> ol = OccupiedList(0.1)
4125- >>> inds = ((1,2,3), )
4126- >>> ol.set_occupancies(inds, (1,2))
4127- >>> ol
4128- [[1 2 3 1]]
4129-
4130- '''
4131- ids = _4_column_int16(voxel_inds)
4132- _pack(ids)
4133- for ID, occ in zip(ids, occupancies):
4134- self._voxels[ID] = occ
4135-
4136- def get_occupancies(self, voxel_inds):
4137- '''
4138- Gets the occupancies for the given voxel indices
4139-
4140- >>> ol = OccupiedList(0.1)
4141- >>> n = 100
4142- >>> inds = np.random.randint(-9999, 9999, (n,3))
4143- >>> occs = np.random.randint(0, 1000, n)
4144- >>> ol.set_occupancies(inds, occs)
4145- >>> np.all(ol.get_occupancies(inds) == occs)
4146- True
4147-
4148- Getting empty voxels should not change ol
4149-
4150- >>> ol = OccupiedList(0.1)
4151- >>> ol.get_occupancies( ((1,2,3),) )
4152- array([0])
4153- >>> len(ol)
4154- 0
4155- '''
4156- ids = _4_column_int16(voxel_inds)
4157- _pack(ids)
4158- return np.array([self._voxels[ID] for ID in ids])
4159-
4160- def getvoxels(self):
4161- ids = np.asarray(self._voxels.keys())
4162- _unpack(ids)
4163- return ids[:,:3]
4164-
4165- def getvoxeldata(self):
4166- '''Returns both voxel indices and assocated data'''
4167- return self.getvoxels(), np.array(self._voxels.values())
4168-
4169- def getpoints(self):
4170- return voxelstopoints(self.getvoxels(), self.resolution)
4171-
4172- def display(self):
4173- pc = pointcloud.PointCloud(self.getpoints())
4174- pc.display()
4175-
4176- def KDE(self, x0, a0, sigma):
4177- '''Does a kernel density estimate for the point, x0 should be lattice
4178- units and a0 is the coordinates of the nearest lattice point in lattice
4179- units'''
4180- # calculate the kernel integrals between -inf, x0, x0 + 1 and inf
4181- integrals = np.zeros((3,3))
4182- for i, x in enumerate(x0):
4183- a = a0[i]
4184- #cumulative_integrals = stats.norm.cdf((a - 0.5, a + 0.5), loc=x, scale=sigma)
4185- cumulative_integrals = cdf_approx(np.asarray((a - x - 0.5, a - x + 0.5)), sigma=sigma)
4186- integrals[i] = np.diff(np.hstack((0, cumulative_integrals, 1)))
4187- # outer product returning a 3 * 3 * 3 array
4188- X = integrals[0].reshape(3,1,1)
4189- Y = integrals[1].reshape(1,3,1)
4190- Z = integrals[2].reshape(1,1,3)
4191- # return the kernel density integral over the volume of the lattice
4192- # cell
4193- return X * Y * Z
4194-
4195- # this function is not used anymore but is kept for reference when adding
4196- # capability to store other data such as RGB.
4197- def _update_voxel_func_user(self, ID, increment, userdata):
4198- vox = self._voxels[ID]
4199- self._voxels[ID] = np.hstack([vox[0] + increment, self.useraddfn(vox[1:], userdata)])
4200- if self._voxels[ID][0] < self.occupancy_threshold:
4201- del self._voxels[ID]
4202- self.removed_ids.append(ID)
4203-
4204- def _update_voxels(self, increment_inds, increment, userdata=None):
4205- #ids = _4_column_int16(increment_inds)
4206- _pack(increment_inds)
4207-
4208-        # could just do the line below, however the added ids are needed for the
4209-        # bloom filter and so that voxels with zero occupancy are removed
4210- # self._voxels.update(increment_inds)
4211-
4212- # Generate a list of newly added voxels and removed voxels for the
4213- # bloom table update, and do any voxel merging operations
4214- if verbose:
4215- print 'Number of points to add: ', len(increment_inds)
4216- # merging multiple increments of the same voxel
4217- # count number of added points for each voxel
4218-
4219- uniqs, counts = _fast_count(increment_inds)
4220- # uniqs, counts are the unique values and their corresponding counts
4221-
4222- # TODO there is probably a bug if increment is 0 so remove increments of zero
4223-
4224- # query the map for occupancies of these voxels
4225- occupancies = np.array([self._voxels[ID] for ID in uniqs])
4226- # add map occupancies with collected voxel counts of points to be added
4227- occupancies += counts
4228-
4229- # split into two groups, those that were changed and those that require
4230- # removal
4231-
4232- # delete any less than 1
4233- inds = occupancies < self.occupancy_threshold
4234- removed_ids = uniqs[inds]
4235- changed_ids = uniqs[np.logical_not(inds)]
4236- changed_occupancies = occupancies[np.logical_not(inds)]
4237- for ID in removed_ids:
4238- del self._voxels[ID]
4239-
4240- # set those with occupancy 1 or more
4241- # set occupancies of voxels with their new values
4242- for i, ID in enumerate(changed_ids):
4243- self._voxels[ID] = changed_occupancies[i]
4244-
4245- #for ID in increment_inds:
4246- # if ID not in self._voxels:
4247- # new_ids.append(ID)
4248- # update_func(ID, increment)
4249- # #self._update_voxel_func_user(ID, increments[i], userdata[i])
4250-
4251- #self.removed_ids = np.asarray(self.removed_ids)
4252- #new_ids = np.asarray(new_ids)
4253- if self._use_bloom:
4254- self.bloomfilter.add_voxel_ids(changed_ids)
4255- self.bloomfilter.remove_voxel_ids(removed_ids)
4256-
4257- # TODO do add_points and removepoints properly by apportioning the point
4258- # probability amongst surrounding voxels, like kernel density estimation.
4259- # This update should also be more sophisticated in terms of the sensor model.
4260- # TODO specify sigma as a covariance matrix for each point to be added
4261- # (this takes into account the pose of the sensor)
4262- def add_points(self, xyzs, pose=None, userdata=None, increment=1, return_modified=False, sigma=0.5):
4263- """ convert points to voxel indices and append them to this occupied
4264- voxel list """
4265- # TODO should really take a point cloud object rather than xyzs
4266- if len(xyzs) == 0:
4267- return
4268- xyzs = np.atleast_2d(xyzs) # for single points
4269-        assert xyzs.shape[1] == 3, 'Points should be in an n by 3 array format'
4270- if pose is not None:
4271- xyzs = pose.transformPoints(xyzs)
4272-
4273- inds = pointstovoxels(xyzs, resolution=self.resolution, latticetype=self.lattice)
4274- # TODO check that voxel indices are not out of bounds
4275-
4276- if self.use_KDE:
4277-            # TODO this is similar to the mgrid used in dilate; refactor these together
4278-            # or remove dilate, as it is no longer needed now that we are doing
4279-            # kernel density estimation
4280- surrounding_inds = np.int16((np.indices((3,3,3)) - 1).reshape(3, -1).T)
4281-
4282- # Add voxels to be incremented according to kernel density estimation
4283- increment_inds = []
4284- increments = []
4285- # TODO rather than use a list preallocate the array at 27 * len(xyzs)?
4286- # TODO make this work for single points
4287- for xyz, ind in zip(xyzs, inds):
4288- #increments.extend(np.int8(increment*self.KDE(xyz/self.resolution, ind)).ravel())
4289- kde = self.KDE(xyz/self.resolution, ind, sigma=sigma)
4290- #increments.extend(np.int8(increment*kde).ravel())
4291- increments.extend((increment*kde).ravel())
4292- # Now add these increments to the appropriate indices
4293- increment_inds.extend(ind + surrounding_inds)
4294-
4295- increment_inds = np.asarray(increment_inds)
4296- increments = np.asarray(increments)
4297-
4298- # remove increments that are too small to consider
4299- bigenough_inds = np.abs(increments) >= self.occupancy_threshold
4300- increments = increments[bigenough_inds]
4301- increment_inds = increment_inds[bigenough_inds]
4302- else:
4303- increment_inds = inds
4304-
4305- self._update_voxels(increment_inds, increment, userdata)
4306-
4307- assert len(self._voxels) < self.maxvoxels, 'Max voxels exceeded for bloom table'
4308- return increment_inds
4309-
4310- def removepoints(self, xyzs, decrement=1, return_removed=False, **args):
4311- '''Decrement the observations in the voxels that the xyzs fall
4312- into.'''
4313- return self.add_points(xyzs, pose=None, increment=-decrement, return_modified=return_removed, **args)
4314-
4315- #If the delete flag is set, then remove the voxels that
4316- #xyzs fall into. Return both the voxels that xyzs fall into and the
4317- #total decrement amount for that voxel.
4318-
4319- def savexyz(self, fname):
4320- """ saves the finest occupied list to fname in x y z format """
4321- np.savetxt(fname, self.getpoints())
4322-
4323- def calccollisions(self, pose, xyzs, returntotal=True, query=False):
4324- '''The intersection of points with this occupied voxel list.
4325- If query then return the density for each point after transformation
4326- by the given pose, rather than just a count of hits.
4327- '''
4328-        # reuse transformed voxels to save initialising the fourth column to
4329-        # zero each time. This is some nasty code; hopefully the speed increase
4330-        # is worth it.
4331- if self._transformedvoxels is None or len(self._transformedvoxels) != len(xyzs):
4332- self._transformedvoxels = pointstovoxels(xyzs, resolution=self.resolution, pose=pose, out=None)
4333- else:
4334- self._transformedvoxels = pointstovoxels(xyzs, resolution=self.resolution, pose=pose, out=self._transformedvoxels)
4335-
4336- _pack(self._transformedvoxels)
4337-
4338- if query:
4339- return [self._voxels[ID] for ID in self._transformedvoxels]
4340- #return sum(self._voxels[ID] for ID in self._transformedvoxels)
4341-
4342- if self._use_bloom:
4343- present = self.bloomfilter.contains(self._transformedvoxels)
4344- if returntotal:
4345- return np.sum(present)
4346- else:
4347- return present
4348- else:
4349- if returntotal:
4350- return sum(ID in self._voxels for ID in self._transformedvoxels)
4351- else:
4352- return [ID in self._voxels for ID in self._transformedvoxels]
4353-
4354- def cost_func(self, pose, xyzs):
4355- return -self.calccollisions(pose, xyzs)
4356-
4357- def segment_ovl(self, pose, xyzs):
4358- ''' returns the inliers and outliers of the intersection of a point
4359- cloud with this occupied voxel list.'''
4360- xyzs = poseutil.transformPoints(xyzs, pose)[0]
4361- overlap_inds = np.array(self.calccollisions(None, xyzs, returntotal=False))
4362- inliers = xyzs[overlap_inds]
4363- outliers = xyzs[np.logical_not(overlap_inds)]
4364- return inliers, outliers
4365-
4366-if __name__ == "__main__":
4367- import doctest
4368- doctest.testmod()
4369
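
For reference, a minimal standalone sketch of the separable kernel integral that OccupiedList.KDE computes above. Each axis of the Gaussian kernel centred on the point is integrated over (-inf, a - 0.5), (a - 0.5, a + 0.5) and (a + 0.5, inf) in lattice units, and the per-axis vectors are combined by an outer product into a 3x3x3 block of cell weights that sums to one. scipy.stats.norm.cdf stands in for the module's cdf_approx helper here; that substitution is an assumption, the real code avoids scipy for speed.

import numpy as np
from scipy import stats

def kde_weights(x0, a0, sigma=0.5):
    # per-axis integrals of the kernel over the three neighbouring lattice cells
    integrals = np.zeros((3, 3))
    for i in range(3):
        edges = np.array([a0[i] - x0[i] - 0.5, a0[i] - x0[i] + 0.5])
        cdf = stats.norm.cdf(edges, loc=0.0, scale=sigma)
        integrals[i] = np.diff(np.hstack((0.0, cdf, 1.0)))
    # outer product gives the 3 x 3 x 3 array of weights
    X = integrals[0].reshape(3, 1, 1)
    Y = integrals[1].reshape(1, 3, 1)
    Z = integrals[2].reshape(1, 1, 3)
    return X * Y * Z

weights = kde_weights(x0=(10.2, 3.7, -1.1), a0=(10, 4, -1))
assert abs(weights.sum() - 1.0) < 1e-6  # the point's unit mass is fully apportioned
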
4370=== added file 'mrol_mapping/optimization.py'
4371--- mrol_mapping/optimization.py 1970-01-01 00:00:00 +0000
4372+++ mrol_mapping/optimization.py 2012-11-20 22:16:18 +0000
4373@@ -0,0 +1,173 @@
4374+#Copyright 2010-2011, Julian Ryde and Nicholas Hillier.
4375+#
4376+#This program is distributed in the hope that it will be useful,
4377+#but WITHOUT ANY WARRANTY; without even the implied warranty of
4378+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4379+#GNU Lesser General Public License for more details.
4380+#
4381+#You should have received a copy of the GNU Lesser General Public License
4382+#along with this program. If not, see <http://www.gnu.org/licenses/>.
4383+
4384+from __future__ import division
4385+import time
4386+import numpy as np
4387+import poseutil
4388+
4389+# TODO this function seems to be unused
4390+def _cost_min_scipy(objective_func, initialpose, args):
4391+ import scipy.optimize as optimize
4392+ initialpose = initialpose.getTuple()
4393+ returns = optimize.fmin(objective_func, initialpose, args=args, disp=True, full_output=True)
4394+ #returns = optimize.fmin_powell(objective_func, initialpose, args=args, disp=True, full_output=True)
4395+ #returns = optimize.fmin_slsqp(objective_func, initialpose, args=args, full_output=True) # doesn't work?
4396+ #initpose = np.asarray(initialpose)
4397+ #delta = np.asarray((0.5, 0.5, 0.5, 0.2, 0.2, 0.2))
4398+ #lower = initpose - delta
4399+ #upper = initpose + delta
4400+ #returns = optimize.anneal(objective_func, initialpose, args=args, lower=lower, upper=upper)
4401+ bestpose = poseutil.Pose3D(X=returns[0])
4402+ cost = returns[1]
4403+ return bestpose, cost
4404+
4405+
4406+def _get_offsets():
4407+ # TODO Try parallel testing of these poses.
4408+ I3 = np.identity(3)
4409+ X = np.array([1,0,0])
4410+ Y = np.array([0,1,0])
4411+ Z = np.array([0,0,1])
4412+ offsets = np.vstack((np.identity(6), -np.identity(6)))
4413+ x_ = np.hstack([np.vstack([X,X,X]),I3])
4414+ x__ = np.hstack([np.vstack([X,X,X]),-I3])
4415+ y_ = np.hstack([np.vstack([Y,Y,Y]),I3])
4416+ y__ = np.hstack([np.vstack([Y,Y,Y]),-I3])
4417+ z_ = np.hstack([np.vstack([Z,Z,Z]),I3])
4418+ z__ = np.hstack([np.vstack([Z,Z,Z]),-I3])
4419+ offsets = np.vstack([offsets,x_,-x_,x__,-x__,y_,-y_,y__,-y__,z_,-z_,z__,-z__])
4420+ offsets = np.int32(offsets)
4421+
4422+ #offsets = np.vstack((np.identity(6), -np.identity(6)))
4423+ return offsets
4424+
4425+def cost_min(cost_func, initialpose, args, dx, dq, max_iterations=100, verbosity=1):
4426+ ''' The format of this function allows standard optimizers from the
4427+ scipy.optimize library and arbitrary cost functions.'''
4428+ dpose = np.array((dx,dx,dx,dq,dq,dq))
4429+ already_checked = {}
4430+
4431+ # TODO this step approach is only correct for small dq
4432+
4433+ offsets = _get_offsets()
4434+ initialoverlap = -cost_func(initialpose, args[0])
4435+ previousmax = initialoverlap
4436+ maxo = initialoverlap
4437+ beststep = (0,0,0,0,0,0)
4438+ already_checked[beststep] = initialoverlap
4439+ called_count = 1
4440+ cost_func_time = 0
4441+ if verbosity > 1: print 'dx, dq: ', dx, np.degrees(dq), 'degrees'
4442+ for iter_count in range(max_iterations):
4443+ steps = beststep + offsets
4444+ overlaps = []
4445+ # check poses surrounding beststep
4446+ for step in steps:
4447+ step_tup = tuple(step)
4448+ # optimization to save calculating the cost for poses which we have
4449+ # previously calculated the cost for
4450+ # TODO this is dodgy because the poses consist of floats
4451+ if step_tup in already_checked:
4452+ overlaps.append(already_checked[step_tup])
4453+ continue
4454+ else:
4455+ start = time.time()
4456+ pose = initialpose.getTuple() + step * dpose
4457+ pose = poseutil.Pose3D(pose)
4458+ cost = -cost_func(pose, args[0]) # negative because maximiser
4459+ cost_func_time += time.time() - start
4460+ already_checked[step_tup] = cost
4461+ called_count += 1
4462+ overlaps.append(cost)
4463+ if verbosity > 3:
4464+ print pose, cost
4465+ overlaps = np.array(overlaps)
4466+ max_overlap_ind = np.argmax(overlaps)
4467+ maxo = overlaps[max_overlap_ind]
4468+ if sum(overlaps == maxo) > 1:
4469+ print 'WARNING: multiple maxima'
4470+ if verbosity > 2:
4471+ print iter_count, ':', overlaps, maxo
4472+        # break if the current pose is the maximum; when an alternative pose has
4473+        # equal overlap, keep the previous pose
4474+ if maxo <= previousmax:
4475+ maxo = previousmax
4476+ break
4477+ else:
4478+ # re-assign for next loop
4479+ beststep = steps[max_overlap_ind]
4480+ previousmax = maxo
4481+ if verbosity > 1:
4482+ print 'Best step:', beststep, maxo
4483+    else:  # the for/else fires only if the loop exhausted max_iterations without breaking
4484+        print "WARNING: Maximum number of iterations reached. Solution did not reach convergence."
4485+
4486+ if verbosity > 0:
4487+ print 'cost function evaluated:', called_count, 'times over', iter_count, 'iterations'
4488+ print 'Average cost function evaluation time (ms): %.2f' % (1e3 * cost_func_time/called_count)
4489+ print len(args[0]), 'cost increase:', initialoverlap, '->', maxo
4490+
4491+ bestpose = initialpose.getTuple() + beststep * dpose
4492+ bestpose = poseutil.Pose3D(bestpose)
4493+ # -ve again because it is standard for the calling function to assume a
4494+ # minimisation.
4495+ return bestpose, maxo
4496+
4497+def plotobjective(cost_func, initialpose, xyzs, plot_range=(-0.2, 0.2, np.radians(-5), np.radians(5)), dx=None, dq=None):
4498+ '''
4499+ Plots the cost function for various poses, centered about the initialpose.
4500+ '''
4501+ dofs = {0:'x', 1:'y', 2:'z', 3:'rotx', 4:'roty', 5:'rotz'}
4502+ xmin, xmax = plot_range[:2]
4503+ qmin, qmax = plot_range[2:]
4504+    if dx is None:
4505+        dx = (xmax-xmin)/100.0
4506+    if dq is None:
4507+        dq = (qmax-qmin)/100.0
4508+ ranges = np.array([
4509+ [xmin, xmin, xmin, qmin, qmin, qmin],
4510+ [xmax, xmax, xmax, qmax, qmax, qmax],
4511+ [dx, dx, dx, dq, dq, dq]])
4512+ ranges = ranges.T
4513+ import pylab
4514+ pylab.ion()
4515+ pylab.figure()
4516+ pylab.subplot(2, 1, 1)
4517+ for i in range(3):
4518+ X = np.arange(ranges[i, 0], ranges[i, 1], ranges[i, 2]) + initialpose[i]
4519+ pose = np.array(initialpose, dtype=float)
4520+ Y = []
4521+ for x in X:
4522+ pose[i] = x
4523+ Y.append(cost_func(pose, xyzs))
4524+ pylab.plot(X - initialpose[i], Y, 'x-', label=str(dofs[i]))
4525+ print "+"
4526+ pylab.legend()
4527+ pylab.xlabel('m')
4528+ pylab.ylabel('objective function value')
4529+ pylab.subplot(2, 1, 2)
4530+ for i in range(3, 6):
4531+ X = np.arange(ranges[i, 0], ranges[i, 1], ranges[i, 2]) + initialpose[i]
4532+ pose = np.array(initialpose, dtype=float)
4533+ Y = []
4534+ for x in X:
4535+ pose[i] = x
4536+ Y.append(cost_func(pose, xyzs))
4537+ pylab.plot(X - initialpose[i], Y, 'x-', label=str(dofs[i]))
4538+ print "*"
4539+ pylab.legend()
4540+ pylab.xlabel('rad')
4541+ pylab.ylabel('objective function value')
4542+ #pylab.show()
4543+ #pylab.draw()
4544+
4545+
4546+
4547
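
A hypothetical, self-contained sketch (not part of this branch) of driving cost_min above with a synthetic cost function: the cost is the squared distance of a pose from a target pose, so the internal maximiser on -cost should walk the initial guess towards the target in dx and dq sized steps. It assumes poseutil and optimization are importable the same bare way this module imports its neighbours, and that Pose3D accepts a 6-element array exactly as cost_min itself constructs it.

import numpy as np
import poseutil
import optimization

target = np.array((0.5, -0.2, 0.0, 0.0, 0.0, np.radians(10)))

def distance_cost(pose, unused_xyzs):
    # cost_min negates this internally, so a smaller distance scores as a better overlap
    return np.sum((np.asarray(pose.getTuple()) - target) ** 2)

start = poseutil.Pose3D(np.zeros(6))
bestpose, best_overlap = optimization.cost_min(
    distance_cost, start, args=(np.zeros((1, 3)),),
    dx=0.1, dq=np.radians(1), max_iterations=100, verbosity=0)
# bestpose should end up within about one step of the target pose
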
4548=== removed file 'mrol_mapping/optimization.py'
4549--- mrol_mapping/optimization.py 2012-03-04 17:10:07 +0000
4550+++ mrol_mapping/optimization.py 1970-01-01 00:00:00 +0000
4551@@ -1,173 +0,0 @@
4552-#Copyright 2010-2011, Julian Ryde and Nicholas Hillier.
4553-#
4554-#This program is distributed in the hope that it will be useful,
4555-#but WITHOUT ANY WARRANTY; without even the implied warranty of
4556-#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4557-#GNU Lesser General Public License for more details.
4558-#
4559-#You should have received a copy of the GNU Lesser General Public License
4560-#along with this program. If not, see <http://www.gnu.org/licenses/>.
4561-
4562-from __future__ import division
4563-import time
4564-import numpy as np
4565-import poseutil
4566-
4567-# TODO this function seems to be unused
4568-def _cost_min_scipy(objective_func, initialpose, args):
4569- import scipy.optimize as optimize
4570- initialpose = initialpose.getTuple()
4571- returns = optimize.fmin(objective_func, initialpose, args=args, disp=True, full_output=True)
4572- #returns = optimize.fmin_powell(objective_func, initialpose, args=args, disp=True, full_output=True)
4573- #returns = optimize.fmin_slsqp(objective_func, initialpose, args=args, full_output=True) # doesn't work?
4574- #initpose = np.asarray(initialpose)
4575- #delta = np.asarray((0.5, 0.5, 0.5, 0.2, 0.2, 0.2))
4576- #lower = initpose - delta
4577- #upper = initpose + delta
4578- #returns = optimize.anneal(objective_func, initialpose, args=args, lower=lower, upper=upper)
4579- bestpose = poseutil.Pose3D(X=returns[0])
4580- cost = returns[1]
4581- return bestpose, cost
4582-
4583-
4584-def _get_offsets():
4585- # TODO Try parallel testing of these poses.
4586- I3 = np.identity(3)
4587- X = np.array([1,0,0])
4588- Y = np.array([0,1,0])
4589- Z = np.array([0,0,1])
4590- offsets = np.vstack((np.identity(6), -np.identity(6)))
4591- x_ = np.hstack([np.vstack([X,X,X]),I3])
4592- x__ = np.hstack([np.vstack([X,X,X]),-I3])
4593- y_ = np.hstack([np.vstack([Y,Y,Y]),I3])
4594- y__ = np.hstack([np.vstack([Y,Y,Y]),-I3])
4595- z_ = np.hstack([np.vstack([Z,Z,Z]),I3])
4596- z__ = np.hstack([np.vstack([Z,Z,Z]),-I3])
4597- offsets = np.vstack([offsets,x_,-x_,x__,-x__,y_,-y_,y__,-y__,z_,-z_,z__,-z__])
4598- offsets = np.int32(offsets)
4599-
4600- #offsets = np.vstack((np.identity(6), -np.identity(6)))
4601- return offsets
4602-
4603-def cost_min(cost_func, initialpose, args, dx, dq, max_iterations=100, verbosity=1):
4604- ''' The format of this function allows standard optimizers from the
4605- scipy.optimize library and arbitrary cost functions.'''
4606- dpose = np.array((dx,dx,dx,dq,dq,dq))
4607- already_checked = {}
4608-
4609- # TODO this step approach is only correct for small dq
4610-
4611- offsets = _get_offsets()
4612- initialoverlap = -cost_func(initialpose, args[0])
4613- previousmax = initialoverlap
4614- maxo = initialoverlap
4615- beststep = (0,0,0,0,0,0)
4616- already_checked[beststep] = initialoverlap
4617- called_count = 1
4618- cost_func_time = 0
4619- if verbosity > 1: print 'dx, dq: ', dx, np.degrees(dq), 'degrees'
4620- for iter_count in range(max_iterations):
4621- steps = beststep + offsets
4622- overlaps = []
4623- # check poses surrounding beststep
4624- for step in steps:
4625- step_tup = tuple(step)
4626- # optimization to save calculating the cost for poses which we have
4627- # previously calculated the cost for
4628- # TODO this is dodgy because the poses consist of floats
4629- if step_tup in already_checked:
4630- overlaps.append(already_checked[step_tup])
4631- continue
4632- else:
4633- start = time.time()
4634- pose = initialpose.getTuple() + step * dpose
4635- pose = poseutil.Pose3D(pose)
4636- cost = -cost_func(pose, args[0]) # negative because maximiser
4637- cost_func_time += time.time() - start
4638- already_checked[step_tup] = cost
4639- called_count += 1
4640- overlaps.append(cost)
4641- if verbosity > 3:
4642- print pose, cost
4643- overlaps = np.array(overlaps)
4644- max_overlap_ind = np.argmax(overlaps)
4645- maxo = overlaps[max_overlap_ind]
4646- if sum(overlaps == maxo) > 1:
4647- print 'WARNING: multiple maxima'
4648- if verbosity > 2:
4649- print iter_count, ':', overlaps, maxo
4650- # break if current pose is maximum in the case when alternative pose is
4651- # of equal overlap pick the previous pose
4652- if maxo <= previousmax:
4653- maxo = previousmax
4654- break
4655- else:
4656- # re-assign for next loop
4657- beststep = steps[max_overlap_ind]
4658- previousmax = maxo
4659- if verbosity > 1:
4660- print 'Best step:', beststep, maxo
4661- if iter_count >= max_iterations:
4662- print "WARNING: Maximum number of iterations reached. Solution did not reach convergence."
4663-
4664- if verbosity > 0:
4665- print 'cost function evaluated:', called_count, 'times over', iter_count, 'iterations'
4666- print 'Average cost function evaluation time (ms): %.2f' % (1e3 * cost_func_time/called_count)
4667- print len(args[0]), 'cost increase:', initialoverlap, '->', maxo
4668-
4669- bestpose = initialpose.getTuple() + beststep * dpose
4670- bestpose = poseutil.Pose3D(bestpose)
4671- # -ve again because it is standard for the calling function to assume a
4672- # minimisation.
4673- return bestpose, maxo
4674-
4675-def plotobjective(cost_func, initialpose, xyzs, plot_range=(-0.2, 0.2, np.radians(-5), np.radians(5)), dx=None, dq=None):
4676- '''
4677- Plots the cost function for various poses, centered about the initialpose.
4678- '''
4679- dofs = {0:'x', 1:'y', 2:'z', 3:'rotx', 4:'roty', 5:'rotz'}
4680- xmin, xmax = plot_range[:2]
4681- qmin, qmax = plot_range[2:]
4682- if dx == None:
4683- dx = (xmax-xmin)/100.0
4684- if dq == None:
4685- dq = (qmax-qmin)/100.0
4686- ranges = np.array([
4687- [xmin, xmin, xmin, qmin, qmin, qmin],
4688- [xmax, xmax, xmax, qmax, qmax, qmax],
4689- [dx, dx, dx, dq, dq, dq]])
4690- ranges = ranges.T
4691- import pylab
4692- pylab.ion()
4693- pylab.figure()
4694- pylab.subplot(2, 1, 1)
4695- for i in range(3):
4696- X = np.arange(ranges[i, 0], ranges[i, 1], ranges[i, 2]) + initialpose[i]
4697- pose = np.array(initialpose, dtype=float)
4698- Y = []
4699- for x in X:
4700- pose[i] = x
4701- Y.append(cost_func(pose, xyzs))
4702- pylab.plot(X - initialpose[i], Y, 'x-', label=str(dofs[i]))
4703- print "+"
4704- pylab.legend()
4705- pylab.xlabel('m')
4706- pylab.ylabel('objective function value')
4707- pylab.subplot(2, 1, 2)
4708- for i in range(3, 6):
4709- X = np.arange(ranges[i, 0], ranges[i, 1], ranges[i, 2]) + initialpose[i]
4710- pose = np.array(initialpose, dtype=float)
4711- Y = []
4712- for x in X:
4713- pose[i] = x
4714- Y.append(cost_func(pose, xyzs))
4715- pylab.plot(X - initialpose[i], Y, 'x-', label=str(dofs[i]))
4716- print "*"
4717- pylab.legend()
4718- pylab.xlabel('rad')
4719- pylab.ylabel('objective function value')
4720- #pylab.show()
4721- #pylab.draw()
4722-
4723-
4724-
4725
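
A quick sanity check (hypothetical snippet, not part of the branch) on the search neighbourhood that _get_offsets in optimization.py builds: 12 single-axis unit steps from the stacked +/- identity(6) plus 36 combined translation/rotation steps, 48 offsets in total, each row a 6-vector of -1/0/+1 multipliers for (x, y, z, rotx, roty, rotz).

import numpy as np
import optimization

offsets = optimization._get_offsets()
print offsets.shape         # expected: (48, 6)
print np.unique(offsets)    # expected: [-1  0  1]
assert len(offsets) == 12 + 36
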
4726=== added file 'mrol_mapping/pointcloud.py'
4727--- mrol_mapping/pointcloud.py 1970-01-01 00:00:00 +0000
4728+++ mrol_mapping/pointcloud.py 2012-11-20 22:16:18 +0000
4729@@ -0,0 +1,229 @@
4730+'''
4731+Point cloud module
4732+Author: Julian Ryde and Nick Hillier
4733+'''
4734+#Copyright 2010-2011, Julian Ryde and Nicholas Hillier.
4735+#
4736+#This program is distributed in the hope that it will be useful,
4737+#but WITHOUT ANY WARRANTY; without even the implied warranty of
4738+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4739+#GNU Lesser General Public License for more details.
4740+#
4741+#You should have received a copy of the GNU Lesser General Public License
4742+#along with this program. If not, see <http://www.gnu.org/licenses/>.
4743+
4744+import numpy as np
4745+import os
4746+
4747+import util
4748+
4749+
4750+def nearest_neighbour(A, B):
4751+    '''Determine the distances to the nearest neighbours in B for each point in A.
4752+    Uses a slow but accurate and hopefully bug-free method.
4753+
4754+ Note that order is important.'''
4755+ # TODO quicker version of this
4756+ NN_dists = []
4757+ NN_inds = []
4758+ for i, a in enumerate(A):
4759+ dists = np.sum((B - a) ** 2, axis=1)
4760+ ind = np.argmin(dists)
4761+ NN_dists.append(dists[ind])
4762+ NN_inds.append(ind)
4763+ return np.asarray(np.sqrt(NN_dists)), np.asarray(NN_inds)
4764+
4765+# TODO unit test for this?
4766+def equal(A, B):
4767+    '''Returns True if the arrays A and B are equal, ignoring their row order'''
4768+ return PointCloud(A) == PointCloud(B)
4769+
4770+def load(fname, delimiter=None, cols=(0, 1, 2), headerlines=0):
4771+    '''Loads a point cloud from disk, whether it is a text array or a numpy
4772+    binary array. Assumes that each row is a point and the first three
4773+    columns are the coordinates; if given as a 6 column dataset, the last 3
4774+    columns are RGB colour data'''
4775+ if len(cols) != 3:
4776+ if len(cols) != 6:
4777+ print "Data must be 3 or 6 columns"
4778+ return PointCloud(np.array([]))
4779+ try:
4780+ X = np.loadtxt(fname, usecols=cols, delimiter=delimiter, skiprows=headerlines)
4781+ except:
4782+ X = np.load(fname)
4783+ if X.shape[1] != 3:
4784+ if X.shape[1] != 6:
4785+ print "Data must be 3 or 6 columns"
4786+ return PointCloud(np.array([]))
4787+ return PointCloud(X[:,:X.shape[1]])
4788+
4789+def save(fname, xyzs, text=False):
4790+    '''Save points in an Nx3 array to disk. By default it is saved as a numpy
4791+ binary array, but it can be saved as plain text by setting the
4792+ 'text' flag'''
4793+ if text:
4794+ np.savetxt(fname,xyzs)
4795+ else:
4796+ np.save(fname,xyzs)
4797+
4798+def save_ply(fname, xyzs):
4799+ ''' Save points in Nx3 arrays as .ply Polygon File Format. This format is
4800+ popular and can be loaded by programs such as blender, meshlab and the
4801+ point cloud library. '''
4802+
4803+ header = '''ply
4804+format ascii 1.0
4805+element vertex %d
4806+property float x
4807+property float y
4808+property float z
4809+end_header
4810+''' % len(xyzs)
4811+
4812+ # TODO new version of numpy.savetxt will support header option
4813+ F = open(fname, 'w')
4814+ F.write(header)
4815+ for x, y, z in xyzs:
4816+ F.write('%.03f %.03f %.03f\n' % (x, y, z))
4817+    F.close()
4818+
4819+class PointCloud:
4820+ # TODO point clouds should be stored as ints in millimetres?
4821+
4822+ def __init__(self, arr, timestamp=0):
4823+ '''Initialise the point cloud with an array of points'''
4824+ # TODO handle both n x 3 and 3 by n arrays
4825+ # TODO throw error if conversion to an arrays not successful
4826+ self.RGB = False # set if the point cloud has RGB data associated
4827+ self.add_points(arr, timestamp)
4828+ self.pose = None
4829+ self.userdata = None
4830+
4831+ def __eq__(self, pc, tol=0.00001):
4832+ # TODO pick a proper value for this tol, like np.allclose
4833+ if len(self) != len(pc):
4834+ return False
4835+ xyzs = self.points[:,:3]
4836+ nn_dists = nearest_neighbour(xyzs, pc.points[:,:3])[0]
4837+ return np.all(nn_dists < tol)
4838+
4839+ def __sub__(self, pc):
4840+ if type(pc) == np.ndarray:
4841+ assert pc.shape[0] == 3, "subtraction operator must be on 3xN arrays, 3x1 lists or pointcloud objects"
4842+ if type(pc) == list:
4843+ assert len(pc) == 3, "subtraction operator must be on 3xN arrays, 3x1 lists or pointcloud objects"
4844+ new_xyzs = self.points[:,:3] - pc
4845+ return PointCloud(np.hstack((new_xyzs, self.points[:,3:])))
4846+ #TODO allow subtraction of two pointclouds
4847+
4848+ def __add__(self, pc):
4849+ if type(pc) == np.ndarray:
4850+ assert pc.shape[0] == 3, "addition operator must be on 3xN arrays, 3x1 lists or pointcloud objects"
4851+ new_xyzs = self.points[:,:3] + pc
4852+ return PointCloud(np.hstack((new_xyzs, self.points[:,3:])))
4853+ elif type(pc) == list:
4854+ assert len(pc) == 3, "addition operator must be on 3xN arrays, 3x1 lists or pointcloud objects"
4855+ new_xyzs = self.points[:,:3] + pc
4856+ return PointCloud(np.hstack((new_xyzs, self.points[:,3:])))
4857+ else:
4858+ # TODO: this requires more thought
4859+ return self.add_points(pc)
4860+
4861+ def __len__(self):
4862+ return self.points.shape[0]
4863+
4864+ def save(self, filename):
4865+ save(filename, self.points)
4866+
4867+ def save_ply(self, filename):
4868+ save_ply(filename, self.points)
4869+
4870+ # TODO: pose transform operator, may make addition/subtraction of 3x1 array redundant?
4871+
4872+ def add_points(self, arr, timestamp=0):
4873+ # TODO enable growing of the pointcloud points
4874+ arr = np.asarray(arr)
4875+ if len(arr) > 0:
4876+ self.points = np.atleast_2d(arr)
4877+ else:
4878+ self.points = arr # empty pc object
4879+ if self.points.shape[1] == 6:
4880+ self.RGB = True
4881+ self.timestamp = timestamp
4882+
4883+ def display(self, color=None, title='', scene=None):
4884+ ''' Displays the point cloud in a visual window. To display in an
4885+ existing window supply visual scene object like the one returned by
4886+ this function. '''
4887+ import visual # import here only if needed
4888+ # import here to avoid circular dependency
4889+ import mrol_mapping.visualiser.dispxyz as dispxyz
4890+
4891+        if self.RGB:
4892+            xyzs = self.points[:,:3]
4893+            if color is None:
4894+                color = self.points[:,3:6]/np.max(self.points[:,3:6])
4895+ else:
4896+ xyzs = self.points
4897+
4898+ if scene is None:
4899+ scene = visual.display(title=title + 'dispxyz')
4900+
4901+ #scene.exit = False
4902+ MT = dispxyz.EnableMouseThread(scene)
4903+ MT.start()
4904+ dispxyz.showpts(xyzs, pose=self.pose, color=color)
4905+ scene.forward = (0,1,0)
4906+ scene.up = (0,0,1)
4907+ scene.forward = (0,0,-1)
4908+ return scene, MT
4909+
4910+ # TODO merge other implementation of boxfilter into this
4911+ def boxfilter(self, xmin=None, ymin=None, zmin=None, xmax=None, ymax=None, zmax=None):
4912+ '''Remove points outside of specified limits'''
4913+ for i, limit in enumerate([xmin, ymin, zmin]):
4914+ if limit is not None:
4915+ self.points = self.points[self.points[:,i] > limit]
4916+ for i, limit in enumerate([xmax, ymax, zmax]):
4917+ if limit is not None:
4918+ self.points = self.points[self.points[:,i] < limit]
4919+
4920+ # TODO move the util.volumetricsample code to here
4921+ def volumetricsample(self, res):
4922+ '''Returns a downsampled point cloud volumetrically according to the
4923+ resolution'''
4924+ xyzs = self.points[:,:3]
4925+ xyzs, inds = util.volumetricsample(xyzs, res, return_inds=True)
4926+ #self.points = np.hstack([xyzs, self.points[inds, 3:]])
4927+ return PointCloud(np.hstack([xyzs, self.points[inds, 3:]]))
4928+
4929+class ScansDir:
4930+ # TODO deprecated, just use a list of file names and pointcloud load instead
4931+ ''' Reads in a group of pointcloud files from a directory '''
4932+ # adapted from icra2011/make_template
4933+ def __init__(self, basedir, skip_to=None):
4934+ self.basedir = basedir
4935+ fnames = os.listdir(basedir)
4936+ fnames.sort()
4937+ self.basenames = []
4938+ skip_flag = False
4939+ for fname in fnames:
4940+ # TODO pass in startswith as an argument or make all files saved with this interface start with mrolpc
4941+ if not fname.startswith('mrolpc'): continue
4942+ if skip_to:
4943+ if (not fname.startswith(skip_to)) and (not skip_flag):
4944+ continue
4945+ else:
4946+ skip_flag = True
4947+ self.basenames.append(fname)
4948+
4949+ def __iter__(self):
4950+ return self
4951+
4952+ def next(self):
4953+ '''Returns a scan'''
4954+ if len(self.basenames) == 0: raise StopIteration
4955+ self.curname = self.basedir+'/'+self.basenames.pop(0)
4956+ print self.curname
4957+ xyzs = load(self.curname)
4958+ return xyzs
4959
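
A hypothetical usage sketch (not part of the branch) exercising the PointCloud API added above: build a cloud from an Nx3 array, crop it with boxfilter, and write it out as an ASCII .ply that meshlab or the point cloud library can open. The bare module import and the output path are assumptions made for the example.

import numpy as np
import pointcloud

xyzs = np.random.uniform(-2.0, 2.0, (1000, 3))
pc = pointcloud.PointCloud(xyzs)
pc.boxfilter(zmin=0.0)                 # keep only points above the z = 0 plane
print len(pc), 'points remain after filtering'
pc.save_ply('/tmp/example_cloud.ply')  # made-up path for illustration
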
4960=== removed file 'mrol_mapping/pointcloud.py'
4961--- mrol_mapping/pointcloud.py 2012-06-25 16:36:58 +0000
4962+++ mrol_mapping/pointcloud.py 1970-01-01 00:00:00 +0000
4963@@ -1,227 +0,0 @@
4964-'''
4965-Point cloud module
4966-Author: Julian Ryde and Nick Hillier
4967-'''
4968-#Copyright 2010-2011, Julian Ryde and Nicholas Hillier.
4969-#
4970-#This program is distributed in the hope that it will be useful,
4971-#but WITHOUT ANY WARRANTY; without even the implied warranty of
4972-#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
4973-#GNU Lesser General Public License for more details.
4974-#
4975-#You should have received a copy of the GNU Lesser General Public License
4976-#along with this program. If not, see <http://www.gnu.org/licenses/>.
4977-
4978-import numpy as np
4979-import os
4980-
4981-import util
4982-
4983-
4984-def nearest_neighbour(A, B):
4985- '''Determine the distances the nearest neighbours in B to the points in A.
4986- Uses a slow but accurate and hopefully bug free method
4987-
4988- Note that order is important.'''
4989- # TODO quicker version of this
4990- NN_dists = []
4991- NN_inds = []
4992- for i, a in enumerate(A):
4993- dists = np.sum((B - a) ** 2, axis=1)
4994- ind = np.argmin(dists)
4995- NN_dists.append(dists[ind])
4996- NN_inds.append(ind)
4997- return np.asarray(np.sqrt(NN_dists)), np.asarray(NN_inds)
4998-
4999-# TODO unit test for this?
5000-def equal(A, B):
The diff has been truncated for viewing.
