@phdthesis{losada2010motion,
  title       = {Motion Segmentation and {3D} Positioning of Mobile Robots in an Intelligent Space Using an Array of Static Cameras},
  author      = {Losada-Guti{\'e}rrez, Cristina},
  school      = {Universidad de Alcal{\'a}},
  year        = {2010},
  month       = jun,
  abstract    = {The main objective of this PhD thesis is to make contributions to motion segmentation and 3D localization of multiple mobile robots in an intelligent space using a multi-camera sensor system. In order to achieve this goal, a set of calibrated and synchronized cameras placed in fixed positions within the environment (intelligent space) are used. Moreover, the proposal in this thesis does not rely on previous knowledge about the robot structure or invasive landmarks on board the robots. The proposed solution for motion segmentation and 3D localization is based on the minimization of an objective function that combines information from all the cameras. This objective function depends on three groups of variables: the segmentation boundaries, the motion parameters (linear and angular velocities related to the world coordinate system) and the depth of each point in the scene. Since the objective function depends on three groups of variables, its minimization is carried out using a greedy iterative algorithm that consists on three steps that, after initialization of segmentation boundaries and depth, are repeated until convergence. In each of these steps, two of the three groups of variables are fixed, and the equation is solved for the remaining one. Before the minimization, it is necessary to initialize the curves that define the contours of the segmentation and the depth of each point that belongs to the mobile robots in the images acquired by each camera. Moreover, an estimation of the number of mobile robots included in the scene is required. Since cameras are located in fixed positions within the intelligent space, curve initialization is carried out by comparing each input image to a background model computed previously. Both, the background modelling and the comparison of each input image to the background model are carried out using GPCA. Regarding depth, Visual Hull 3D (VH3D) is used to relate the information from all the available cameras, obtaining a 3D reconstruction of the mobile robots in the world reference system. This 3D reconstruction provides an effective approximation for the initial depth of each point in each camera. Finally, an extended version of the k-means algorithm is used to estimate the number of mobile robots in the scene. After the motion segmentation and 3D position estimation of all mobile objects in the scene, the identification of the robots is obtained. It is possible because the mobile robots are controlled by the intelligent space, and the information of the odometry sensors onboard each robot is available. After that, in order to track the mobile robots, an eXtended Particle Filter with Classification Process (XPFCP) is used. The choice of XPFCP against the basic particle filter (PF) or the extended particle filter (XPF) is due to its multimodal nature. The XPFCP allows tracking a variable number of robots with a single estimator, without increasing the state vector. The results obtained from the XPFCP provide a good estimation for the position of each robot in the next image that is incorporated into the initialization step, reducing the processing time it requires. In order to validate the solutions proposed in this thesis, several experimental tests have been carried out. In these experimental tests, several image sequences acquired in the intelligent space of the University of Alcala (ISPACE-UAH) are used. These sequences have different features (different numbers of robots, people walking, objects inserted, illumination changes, etc.) in order to validate the solution under different conditions.},
  attachments = {https://geintra-uah.org/system/files/private/extended_abstract_cristina_losada.pdf , https://geintra-uah.org/system/files/phd_tesis_cristina_losada_.pdf , https://geintra-uah.org/system/files/private/phd_tesis_cristina_losada.pdf},
  editor      = {Mazo, Manuel and Palazuelos, Sira E.},
}