#Copyright 2010-2011, Julian Ryde and Nicholas Hillier.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Lesser General Public License for more details.
#
#You should have received a copy of the GNU Lesser General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import numpy as np
import pointcloud
import occupiedlist
import mrol
import poseutil
import util
import cPickle as pickle
import quantizer
import time
import log
import logging
logger = log.getLogger('mapper')
logger.setLevel(logging.INFO)
USE_BACKTRACING = True
_map_resolution_levels = 4 # number of different levels of resolution
def loadmap(fname):
    '''Loads a previously saved VoxelMap from a pickle file.'''
    with open(fname, 'rb') as infile:
        return pickle.load(infile)
class MapBuilder(object):
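    '''Convenience wrapper around VoxelMap that aligns and adds point clouds
    one scan at a time, keeping track of the best pose between calls. With
    odometry_map=True, scans are added at their guess poses without
    alignment.'''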
def __init__(self, resolution,
visualise=False,
levels=_map_resolution_levels,
odometry_map=False,
rotate=False):
self.vmap = VoxelMap(resolution, levels=levels,
rotate=rotate)
self.odometry_map = odometry_map
self.bestpose = poseutil.Pose3D()
self.scancount = 0
if visualise:
self.vmap.display()
    def add_pointcloud(self, pc):
        if self.scancount == 0:
            self.scancount = 1
            # default to the identity pose if the first scan has no guess pose
            pc.pose = poseutil.Pose3D() if pc.pose is None else pc.pose
            self.vmap.add_scan(pc, pc.pose)
            return self.vmap.get_voxels(level=-1), pc.pose
# reduce number of scan points to one per voxel
# sample = pc.volumetricsample(self.vmap.resolution)
logger.info('--------- Scan %d ---------' % self.scancount)
self.scancount += 1
if pc.pose is None:
guessPose = self.bestpose
else:
guessPose = pc.pose
        start = time.time()
        maxcost = None # remains None when alignment is skipped (odometry_map)
        if not self.odometry_map:
            self.bestpose, maxcost = self.vmap.align_points(pc, guessPose)
alignment_time = time.time() - start
start = time.time()
if self.odometry_map: # create map without aligning
self.vmap.add_scan(pc, guessPose)
else:
self.vmap.add_scan(pc, self.bestpose)
mapupdate_time = time.time() - start
logger.info('Map voxel count:{0}'.format( len(self.vmap)))
logger.info('Aligned pose:{0}'.format(self.bestpose))
logger.info('Cost value: {0}'.format(maxcost))
logger.info('Alignment time(s): %.2f' % alignment_time)
logger.info('Map update time(s): %.2f' % mapupdate_time)
return self.vmap.get_voxels(level=-1), self.bestpose
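# The sketch below illustrates the intended calling pattern for MapBuilder.
# It is not executed on import; the synthetic scan is purely illustrative and
# it assumes pointcloud.PointCloud accepts an Nx3 numpy array, exposes a
# writable .pose attribute and provides the .points/.userdata attributes that
# VoxelMap.add_scan reads.
def _example_mapbuilder_usage():
    xyzs = np.random.uniform(0.0, 2.0, (1000, 3)) # illustrative scan data
    builder = MapBuilder(resolution=0.05, visualise=False)
    for _ in range(3):
        pc = pointcloud.PointCloud(xyzs)
        pc.pose = None # fall back to the best pose from the previous scan
        (voxels, res), bestpose = builder.add_pointcloud(pc)
    return voxels, res, bestpose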
def buildmap(pointclouds, resolution, visualise=True, odometry_map=False,
rotate=False,
bestpose_queue=None,
voxel_queue=None):
    '''Takes an iterable of point clouds, which may carry guess poses, and
    produces a map. If a point cloud has no guess pose, the previous best
    pose is used as the estimate.
    bestpose_queue: a util.Queue object through which a mapper thread can
    optionally report the best pose found so far. This can be used to refine
    pc.pose, i.e. the initial guess pose.
    voxel_queue: a util.Queue object through which the current voxel data and
    resolution are published after each scan is added.
    '''
vmap = VoxelMap(resolution, levels=_map_resolution_levels, rotate=rotate)
pc = pointclouds.next()
vmap.add_scan(pc, pc.pose)
logger.info('--------- Scan 0 ---------')
logger.info('Map voxel count:{0}'.format(len(vmap)))
if visualise:
vmap.display()
bestpose = poseutil.Pose3D()
# in case this needs to be used as the first guess pose
i = 0
maxcost = None
while True:
try:
pc = pointclouds.next()
except StopIteration:
break
i += 1
# reduce number of scan points to one per voxel
sample = pc.volumetricsample(resolution)
logger.info('--------- Scan %d ---------' % i)
if pc.pose is None:
guessPose = bestpose
else:
guessPose = pc.pose
start = time.time()
if not odometry_map:
bestpose, maxcost = vmap.align_points(sample, guessPose)
if bestpose_queue:
bestpose_queue.put_force(bestpose)
alignment_time = time.time() - start
start = time.time()
if odometry_map: # create map without aligning
vmap.add_scan(pc, guessPose)
else:
vmap.add_scan(pc, bestpose)
if voxel_queue:
(vdata, vres) = vmap.get_voxels(level=-1)
voxel_queue.put_force((vdata, vres))
mapupdate_time = time.time() - start
logger.info('Map voxel count:{0}'.format( len(vmap)))
logger.info('Aligned pose:{0}'.format(bestpose))
logger.info('Cost value: {0}'.format(maxcost))
#logger.info('Execution times (s): %.2f\t%.2f' % (alignment_time, mapupdate_time))
logger.info('Alignment time(s): %.2f' % alignment_time)
logger.info('Map update time(s): %.2f' % mapupdate_time)
return vmap
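# A minimal sketch of driving buildmap() with a generator of point clouds
# (not executed on import). Only the buildmap() call reflects the API defined
# above; scan_arrays is a hypothetical iterable of Nx3 numpy arrays and the
# identity seed pose is illustrative.
def _example_buildmap_usage(scan_arrays, resolution=0.05):
    def clouds():
        for i, xyzs in enumerate(scan_arrays):
            pc = pointcloud.PointCloud(xyzs)
            # an identity pose seeds the map; later scans are aligned starting
            # from the previous best pose when pose is None
            pc.pose = poseutil.Pose3D() if i == 0 else None
            yield pc
    return buildmap(clouds(), resolution, visualise=False)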
def _build_Kinect_sensor_model(resolution, minR=0.5, maxR=5):
'''builds an occupiedlist sensor model aligned with the x-axis at the
origin for free space determination'''
fov = np.radians(75)
#hres = 640
#vres = 480
hres = 320
vres = 240
dtheta = fov / hres
A = 0.5 * hres / np.tan(0.5 * fov)
# perpendicular distance of camera image plane in pixel space
cols = np.arange(hres) - hres * 0.5
rows = np.arange(vres) - vres * 0.5
rows.shape = -1, 1
    # normalise by A so that the lateral offsets scale linearly with range
    # along the x-axis
rows /= A
cols /= A
Y = np.tile(cols, (vres, 1)).ravel()
Z = np.tile(rows, (1, hres)).ravel()
sensor_model_ol = occupiedlist.OccupiedList(resolution, maxvoxels=2e6)
# this way voxels that have multiple rays passing through them are not
# tested multiple times.
xyzs = np.empty((vres * hres, 3))
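    # Pinhole model: with A the focal length in pixels, the pixel at (row, col)
    # corresponds to the ray direction (1, col/A, row/A) in sensor coordinates,
    # so at range X along the x-axis the ray passes through (X, Y*X, Z*X).
    # The loop below sweeps X from minR to maxR in voxel-sized steps and adds
    # each slice of ray points to the occupied list.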
for X in np.arange(minR, maxR, resolution):
# TODO this is an approximation calculate the angles
xyzs[:, 0] = X
xyzs[:, 1] = Y * X
xyzs[:, 2] = Z * X
sensor_model_ol.add_points(xyzs)
        logger.debug('sensor model slice at X = %.3f' % X)
return sensor_model_ol
class VoxelMap:
    '''Mobile robot mapper capable of building 2D and 3D maps. It must be
    initialised with an initial pose and scan so that future scans can be
    aligned; poses are all poseutil objects. The recommended approach is to
    take a 3D scan whilst the robot is stationary to seed the map, and then
    match subsequent 2D or 3D scans to the map, either for localisation
    within the map or to grow the map incrementally.'''
# Special functions #############################################
# optional also include an estimate of the pose error
def __init__(self, resolution, levels=1, rotate=False):
# lattice_type='cubic', #alignment_method='OVL','Pt2Pt_ICP','Pt2Pl_ICP'
        self.mrol = mrol.MROL(resolution, levels)
        # The verbosity level. Higher verbosity gives more print statements.
        self.verbosity = 0
        # List of poses at which observations have been taken
        self.trajectory = []
        # For assistance in the discretisation of angular increments in the
        # gauss_siedel optimisation:
        #self.set_feature_range(100)
        self.lattice = 'cubic' # not used anywhere yet because cubic is default
        self._sensor_pts = None # the sensor model as voxel points
self._rv = None # the robot visualiser
self._rotate = rotate # if visualising, autorotate
def __len__(self):
''' return the number of voxels at the finest resolution '''
return len(self.mrol.mapvoxels[-1])
#def __str__(self)
# ''' return key statistics of the map as a string: e.g. mapped volume,
# extents, number of voxels, resolution levels, resolutions.
# Getter functions #############################################
def get_resolution(self):
return self.mrol.getfinest().resolution
def get_voxels(self, level=-1):
'''Returns the (almost) raw voxel information together
with the resolution of the voxelisation.'''
voxels = self.mrol.getvoxels(level=level)
resolution = self.mrol.getresolution(level=level)
return voxels, resolution
def get_points(self, level=-1):
'''Returns the map as a point cloud of the voxel centers. If the
map has been setup with multiple resolutions, return the voxels at the
desired resolution. Defaults to the finest resolution.'''
voxel_centers = self.mrol.getpoints(level=level)
pc = pointcloud.PointCloud(voxel_centers)
return pc
def get_overlap(self, pose, xyzs):
'''Return the overlap between a pointcloud at a given pose and the map.'''
ol = self.mrol.getfinest()
overlap = ol.calccollisions(pose, xyzs)
return overlap
# other functions #############################################
    def save(self, fname):
        '''Pickles this map to the given filename (binary pickle, protocol 2).'''
        self._rv = None # the visualiser cannot be pickled, so drop it
        with open(fname, 'wb') as outfile:
            pickle.dump(self, outfile, protocol=2)
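    # Round-trip sketch (illustrative only, not executed; 'map.pkl' is a
    # hypothetical filename):
    #   vmap.save('map.pkl')
    #   vmap2 = loadmap('map.pkl')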
    def display(self, npts=1e6, changes=False):
'''Shows a representation of the map in a gui'''
from visualiser.robotvisualiser import Visualiser
if self._rv is None:
self._rv_changes = changes
self._rv = Visualiser(npts=npts, Rotate=self._rotate)
self._rv.set_orthographic(Bool=True)
# TODO visualiser should take a point cloud object
self._rv.addmappts(self.get_points().points)
# add the trajectory points
for traj in self.trajectory:
self._rv.addtrajectorypoint(traj.getPosition())
#else:
#self._disp.se
def add_scan(self, pc, pose, timestamp=None):
'''Adds a new scan to the existing map.
Adds the occupied voxels and removes the freespace.
'''
userdata = pc.userdata
xyzs = pc.points
if USE_BACKTRACING:
self.clear_freespace(xyzs, pose)
self.add_points(xyzs, pose, timestamp, userdata)
    def clear_freespace(self, xyzs, pose):
        '''Removes the free space voxels from the occupancy list.
        Parameters:
        xyzs: the occupied points (Nx3 array) expressed in the sensor frame.
        pose: the pose of the sensor with respect to the global map frame.
        '''
xyzs, return_pose = poseutil.transformPoints(xyzs, pose)
sensor_loc = poseutil.transformPoints(np.array([[0, 0, 0]]), pose)
self.mrol.clear_freespace(xyzs, sensor_loc[0])
def add_points(self, xyzs, pose, timestamp=None, userdata=None):
'''Adds the pointcloud xyzs to the map, transforming them by the given
pose if required. Returns the overlap of the new points against the
existing map.'''
if isinstance(xyzs, pointcloud.PointCloud):
xyzs = xyzs.points
assert xyzs.shape[1] == 3, '3 dimensional points'
xyzs, return_pose = poseutil.transformPoints(xyzs, pose)
self.trajectory.append(return_pose)
if self._rv is not None: # visualisation
finest = self.mrol.getfinest()
# determine unique new voxels convert to points and add to the map
#ol = occupiedlist.OccupiedList(finest.resolution)
ol = finest
#ol.add_points(xyzs)
#ol = ol - finest
vox = ol.getvoxeldata()
self._rv.addmappts(vox)
#self._rv.setminmax(np.min(self.get_points().points[:,2]),np.max(self.get_points().points[:,2]))
self._rv.setrobotpose(return_pose.getMatrix())
self._rv.addtrajectorypoint(return_pose.getTuple()[0:3])
if self._rv_changes:
inliers, outliers = self.mrol.getfinest().segment_ovl(None,xyzs)
self._rv.setrightpts(inliers)
self._rv.setleftpts(outliers)
self.mrol.add_points(xyzs, userdata=userdata, timestamp=timestamp)
return self.get_overlap(return_pose.getTuple(), xyzs)
# TODO Consider a remove_points based on timestamps?
def remove_points(self, xyzs, pose):
'''Remove points from the map. In practice, the idea is
to decrement the probability of the voxel in the map having been
observed.
Returns the voxels at the finest resolution that are now empty.
'''
xyzs = poseutil.transformPoints(xyzs,pose)[0]
return self.mrol.removepoints(xyzs)
#def set_feature_range(self, feature_range):
# ''' Sets the 'feature_range' variable. Used for assistance in
# the discretisation of angular increments in the gauss_siedel
# optimisation '''
# self.feature_range = float(feature_range)
# self.mrol.feature_range = self.feature_range
# def set_options? instead of a long list of init options
# def get_options? so the user knows the current setup
#def some_sort_of_map_maintenance_interface
# - raytrace cleanup, time decay, or callable method based on number
# of observations.
# - perhaps as a parameter to the init for auto-maintenance?
# If we provide an interface to directly add voxels (e.g. from
# pre-made maps), it would be nice to be able to merge voxels of
# different resolutions and lattice types. This is a low priority
# feature
#def add_voxels(self, voxels, res, lattice_type=self.lattice_type) # perhaps rename add_map?
#def get_voxels(self, res/level, lattice_type=self.lattice_type) # perhaps rename get_map?
# return center coordinate in same format as points are added so it looks like a point cloud
#def get_voxel_data
# for things like number of observations. Can also be retrieved from
# get_voxels with the raw option?
#def get_voxel_primitive(self, res, lattice_type=self.lattice_type)
# return some representation of the voxel shape for visualisation purposes
# (tri/quad-mesh, corner points?)
#def sample_points(self, method='volumetric')
# to do some point cloud sampling (volumetric, random etc)
def align_points(self, xyzs, guess_pose, two_D=False, full_data=False):
        '''Align the point cloud xyzs with the data already contained in the
        map, using guess_pose as a seed for the optimisation. Returns the
        pose that best fits the point cloud data to the map and the value of
        the objective function at that pose.
        If the map is a multi-resolution structure, the alignment uses the
        data at the various resolutions, which is more expensive but more
        robust.
        By default the alignment is a 6DOF optimisation; this can be
        constrained to the XY plane by enabling the two_D parameter.'''
xyzs = xyzs.points[:,:3]
bestpose, cost = self.mrol.optimise_alignment(xyzs, guess_pose, full_data=full_data)
return bestpose, cost
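    # Usage sketch (illustrative only, not executed): localise a single scan
    # against a map that has already been seeded with at least one scan,
    # starting from an identity guess pose. xyzs is an Nx3 numpy array.
    #   pc = pointcloud.PointCloud(xyzs)
    #   bestpose, cost = vmap.align_points(pc, poseutil.Pose3D())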
#def align_points_to_map(self, xyzs, guess_pose):
# bestpose, cost = optimization.gauss_siedel(self._performance_objectiveFast, guess_pose, args=(xyzs, 1.0), quiet=True)
# xyzs_xformed = poseutil.transformPoints(xyzs, bestpose)
# return bestpose, cost, xyzs_xformed
    def generate_freespace(self, resolution, pose=None, minR=0.5, maxR=5):
        '''Generates an occupied list consisting of the observed free space at
        the specified resolution. This is useful for detecting object removal
        from a map.'''
assert resolution >= self.get_resolution()
        if self._sensor_pts is None:
            sensor_model_ol = _build_Kinect_sensor_model(resolution, minR=minR, maxR=maxR) # for simulated data
            self._sensor_pts = sensor_model_ol.getpoints()
            logger.info("Sensor model points: {0}".format(len(self._sensor_pts)))
#vis = visualiser.robotvisualiser.Visualiser(npts=1e7)
#vis.addmappts(self._sensor_pts)
base_ol = self.mrol.getfinest()
freespace_ol = occupiedlist.OccupiedList(resolution, maxvoxels=5e6)
# loop through the trajectory of poses
# ray trace from each pose and add voxels to the free space occupied voxel list
        if pose is None:
for pose in self.trajectory:
logger.info("Pose {0}".format(pose))
inliers, outliers = base_ol.segment_ovl(pose, self._sensor_pts)
# TODO some means of not adding points that are further away than the intersection (doesn't work very well without this!)
# Julian suggests a masking operation whilst stepping through the layers of the model in X
freespace_ol.add_points(outliers)
freespace_ol.removepoints(inliers)
else:
inliers, outliers = base_ol.segment_ovl(pose, self._sensor_pts)
# TODO some means of not adding points that are further away than the intersection
freespace_ol.add_points(outliers)
freespace_ol.removepoints(inliers)
return freespace_ol
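    # Usage sketch (illustrative only, not executed): build the observed free
    # space at four times the map resolution, e.g. to check whether previously
    # occupied voxels now fall within known free space (object removal).
    #   freespace_ol = vmap.generate_freespace(4 * vmap.get_resolution())
    #   freespace_pts = freespace_ol.getpoints()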
def _trajectory_objective(self, guess_pose, pts, foo):
return -occupiedlist.calccollisions(guess_pose, self._traj_alignment_voxel_set, self.res, pts)
# TODO add support for not specifying centrepose or spreadpose, looks
# through entire map
    def globally_localise(self, points, centrepose, spreadpose):
        '''Localises the point cloud in this map without requiring a good
        initial guess pose. Returns the best pose along with a measure of
        overlap.'''
return self.mrol.match(points.points, centrepose, spreadpose)