% generated by bibtexbrowser <http://www.monperrus.net/martin/bibtexbrowser/>
% 
% Encoding: UTF-8
@inproceedings{albrecht_imitating_2011,
 author = {S Albrecht and K Ramirez-Amaro and F Ruiz-Ugalde and D Weikersdorfer and M Leibold and M Ulbrich and M Beetz},
 title = {Imitating human reaching motions using physically inspired optimization
	principles},
 booktitle = {11th {IEEE-RAS} International Conference on Humanoid Robots},
 year = {2011},
 address = {Bled, Slovenia},
 month = {oct},
}

@article{aldoma_tutorial:_2012,
 author = {A Aldoma and ZC Marton and F Tombari and W Wohlkinger and C Potthast and B Zeisl and RB Rusu and S Gedikli and M Vincze},
 title = {Tutorial: Point Cloud Library: Three-Dimensional Object Recognition
	and 6 {DOF} Pose Estimation},
 journal = {{IEEE} Robotics \& Automation Magazine},
 year = {2012},
 volume = {19},
 pages = {80--91},
 number = {3},
}

@inproceedings{aldoma_cad-model_2011,
 author = {A Aldoma and M Vincze and N Blodow and D Gossow and S Gedikli and RB Rusu and GR Bradski},
 title = {{CAD-model} recognition and {6DOF} pose estimation using {3D} cues},
 booktitle = {{IEEE} International Conference on Computer Vision Workshops, {ICCV}
	2011 Workshops, Barcelona, Spain, November 6-13, 2011},
 year = {2011},
 pages = {585--592},
}

@inproceedings{amin_multi-view_2013,
 author = {S Amin and M Andriluka and M Rohrbach and B Schiele},
 title = {Multi-view Pictorial Structures for {3D} Human Pose Estimation},
 booktitle = {British Machine Vision Conference ({BMVC)}},
 year = {2013},
 address = {Bristol, {UK}},
}

@inproceedings{andreakis_incremental_2009,
 author = {A Andreakis and N von Hoyningen-Huene and M Beetz},
 title = {Incremental Unsupervised Time Series Analysis Using Merge Growing
	Neural Gas},
 booktitle = {{WSOM}},
 year = {2009},
 editor = {Príncipe, José Carlos and Miikkulainen, Risto},
 volume = {5629},
 series = {Lecture Notes in Computer Science},
 pages = {10--18},
 publisher = {Springer},
 abstract = {We propose Merge Growing Neural Gas ({MGNG)} as a novel unsupervised
	growing neural network for time series analysis. {MGNG} combines
	the state-of-the-art recursive temporal context of Merge Neural Gas
	({MNG)} with the incremental Growing Neural Gas ({GNG)} and enables
	thereby the analysis of unbounded and possibly infinite time series
	in an online manner. There is no need to define the number of neurons
	a priori and only constant parameters are used. In order to focus
	on frequent sequence patterns, an entropy maximization strategy is
	utilized which controls the creation of new neurons. Experimental
	results demonstrate reduced time complexity compared to {MNG} while
	retaining similar accuracy in time series representation.},
 isbn = {978-3-642-02396-5},
}

@inproceedings{arbuckle_controlling_1999,
 author = {T Arbuckle and M Beetz},
 title = {Controlling Image Processing: Providing Extensible, Run-time Configurable
	Functionality on Autonomous Robots},
 booktitle = {Proceedings of the 1999 {IEEE/RSJ} International Conference on Intelligent
	Robots and Systems},
 year = {1999},
 volume = {2},
 pages = {787--792},
 abstract = {The dynamic nature of autonomous robots' tasks requires that their
	image processing operations are tightly coupled to those actions
	within their control systems which require the visual information.
	While there are many image processing libraries that provide the
	raw image processing functionality required for autonomous robot
	applications, these libraries do not provide the additional functionality
	necessary for transparently binding image processing operations within
	a robot's control system. In particular such libraries lack facilities
	for process scheduling, sequencing, concurrent execution and resource
	management. The paper describes the design and implementation of
	an enabling, extensible system, {RECIPE}, for providing image processing
	functionality in a form that is convenient for robot control, together
	with concrete implementation examples.},
}

@inproceedings{arbuckle_extensible_1999,
 author = {T Arbuckle and M Beetz},
 title = {Extensible, Runtime-configurable Image Processing on Robots — the
	{RECIPE} system},
 booktitle = {Proceedings of the 1999 {IEEE/RSJ} International Conference on Intelligent
	Robots and Systems},
 year = {1999},
}

@inproceedings{arbuckle_recipe_1998,
 author = {T Arbuckle and M Beetz},
 title = {{RECIPE} - A System for Building Extensible, Run-time Configurable,
	Image Processing Systems},
 booktitle = {Proceedings of Computer Vision and Mobile Robotics ({CVMR)} Workshop},
 year = {1998},
 pages = {91--98},
 abstract = {This paper describes the design and implementation of {RECIPE}, an
	extensible, run-time configurable image capture and processing system
	specifically designed for use with robotic systems and currently
	under active development here at Bonn. Robotic systems, particularly
	autonomous robotic systems, present both challenges and opportunities
	to the implementors of their vision systems. On the one hand, robotic
	systems constrain the vision systems in terms of their available
	resources and in the specific form of the hardware to be employed.
	On the other hand, intelligent processes can employ sensory input
	to modify the image capture and image processing to fit the current
	context of the robot. {RECIPE} meets these challenges while facilitating
	the modular development of efficient image processing operations.
	Implementing all of its functionality within a platform and compiler
	neutral framework as scriptable, active objects which are dynamically
	loaded at run-time, {RECIPE} provides a common basis for the development
	of image processing systems on robots. At the same time, it permits
	the image processing operations being employed by the robot system
	to be monitored and adjusted according to all of the sensory information
	available to the robot, encouraging the deployment of efficient,
	context specific, algorithms. Finally, it has been designed to encourage
	robust, fault-tolerant approaches to the action of image processing.},
}

@inproceedings{balint-benczedi_efficient_2012,
 author = {F Balint-Benczedi and ZC Marton and M Beetz},
 title = {Efficient Part-Graph Hashes for Object Categorization},
 booktitle = {5th International Conference on Cognitive Systems ({CogSys)}},
 year = {2012},
}

@inproceedings{bandlow_agilo_1999,
 author = {T Bandlow and M Klupsch and R Hanek and T Schmitt},
 title = {Agilo {RoboCuppers:} {RoboCup} Team Description},
 booktitle = {3. {RoboCup} Workshop, {IJCAI} 99},
 year = {1999},
 pages = {691--694},
 abstract = {This paper describes the robot soccer team Munich Agilo {RoboCuppers},
	the {RoboCup} team of the image understanding group ({FG} {BV}) at
	the Technische Universität München. The name is derived from the
	Agilolfinger, who were the first Bavarian ruling dynasty in the
	8th century, with Tassilo as its most famous representative. With
	a team of five Pioneer 1 robots, equipped with {CCD} camera and a
	single board computer each and coordinated by a master {PC} outside
	the field we participate in the Middle Robot League of the Third
	International Workshop on {RoboCup} in Stockholm 1999. We use a multi-agent
	based approach to represent different robots and to encapsulate concurrent
	tasks within the robots. A fast feature extraction based on the image
	processing library {HALCON} provides the data necessary for the onboard
	scene interpretation. In addition, these features as well as the
	odometric data of the robots are sent over the net to the master
	{PC}, where they are verified with regard to consistency and plausibility
	and fused into one global view of the scene. The results are distributed
	to all robots supporting their local planning modules. This data
	is also used by the global planning module coordinating the team's
	behaviour.},
}

@inproceedings{bandlow_fast_1999,
 author = {T Bandlow and M Klupsch and R Hanek and T Schmitt},
 title = {Fast Image Segmentation, Object Recognition and Localization in a
	{RoboCup} Scenario},
 booktitle = {3. {RoboCup} Workshop, {IJCAI} 99},
 year = {1999},
 pages = {174--185},
 abstract = {This paper presents the vision system of the robot soccer team Agilo
	{RoboCuppers}, the {RoboCup} team of the image understanding group
	({FG} {BV}) at the Technische Universität München. The name is derived
	from the Agilolfinger, who were the first Bavarian ruling dynasty
	in the 8th century, with Tassilo as its most famous representative.
	We present a fast and robust color classification method yielding
	significant regions in the image. The boundaries between adjacent
	regions are used to localize objects like the ball or other robots
	on the field. Furthermore, for each player the free motion space is
	determined and its position and orientation on the field are estimated.
	All this is done completely vision-based, without any additional
	sensors.},
}

@phdthesis{bandouch_observing_2011,
 author = {J Bandouch},
 title = {Observing and Interpreting Complex Human Activities in Everyday Environments},
 school = {Technische Universität München},
 year = {2011},
 url = {http://nbn-resolving.de/urn/resolver.pl?urn:nbn:de:bvb:91-diss-20101028-973175-1-4},
}

@inproceedings{bandouch_tracking_2009,
 author = {J Bandouch and M Beetz},
 title = {Tracking Humans Interacting with the Environment Using Efficient
	Hierarchical Sampling and Layered Observation Models},
 booktitle = {{IEEE} International Workshop on Human-Computer Interaction ({HCI}),
	in conjunction with {ICCV} 2009},
 year = {2009},
 abstract = {We present a markerless tracking system for unconstrained human motions
	which are typical for everyday manipulation tasks. Our system is
	capable of tracking a high-dimensional human model (51 {DOF}) without
	constraining the type of motion and without the need for training sequences.
	The system reliably tracks humans that frequently interact with the
	environment, that manipulate objects, and that can be partially occluded
	by the environment. We describe and discuss two key components that
	substantially contribute to the accuracy and reliability of the system.
	First, a sophisticated hierarchical sampling strategy for recursive
	Bayesian estimation that combines partitioning with annealing strategies
	to enable efficient search in the presence of many local maxima.
	Second, a simple yet effective appearance model that allows for the
	combination of shape and appearance masks to implicitly deal with
	two cases of environmental occlusions by (1) subtracting dynamic
	non-human objects from the region of interest and (2) modeling objects
	(e.g. tables) that both occlude and can be occluded by human subjects.
	The appearance model is based on bit representations, which make our
	algorithm well suited for implementation on highly parallel hardware
	such as commodity {GPUs}. Extensive evaluations on the {HumanEva2}
	benchmarks show the potential of our method when compared to state-of-the-art
	Bayesian techniques. Besides the {HumanEva2} benchmarks, we present
	results on more challenging sequences, including table setting tasks
	in a kitchen environment and persons getting into and out of a car
	mock-up.},
}

@inproceedings{bandouch_accurate_2008,
 author = {J Bandouch and F Engstler and M Beetz},
 title = {Accurate Human Motion Capture Using an Ergonomics-Based Anthropometric
	Human Model},
 booktitle = {Proceedings of the Fifth International Conference on Articulated
	Motion and Deformable Objects ({AMDO)}},
 year = {2008},
 abstract = {In this paper we present our work on markerless model-based {3D} human
	motion capture using multiple cameras. We use an industry proven
	anthropometric human model that was modeled taking ergonomic considerations
	into account. The outer surface consists of a precise yet compact
	{3D} surface mesh that is mostly rigid on body part level apart from
	some small but important torsion deformations. Benefits are the ability
	to capture a great amount of possible human appearances with high
	accuracy while still having a simple-to-use and computationally efficient
	model. We have introduced special optimizations such as caching into
	the model to improve its performance in tracking applications. Available
	force and comfort measures within the model provide further opportunities
	for future research. {3D} articulated pose estimation is performed
	in a Bayesian framework, using a set of hierarchically coupled local
	particle filters for tracking. This makes it possible to sample efficiently
	from the high dimensional space of articulated human poses without
	constraining the allowed movements. Sequences of tracked upper-body
	as well as full-body motions captured by three cameras show promising
	results. Despite the high dimensionality of our model (51 {DOF)}
	we succeed at tracking using only silhouette overlap as the weighting
	function due to the precise outer appearance of our model and the
	hierarchical decomposition.},
}

@inproceedings{bandouch_evaluation_2008,
 author = {J Bandouch and F Engstler and M Beetz},
 title = {Evaluation of Hierarchical Sampling Strategies in {3D} Human Pose
	Estimation},
 booktitle = {Proceedings of the 19th British Machine Vision Conference ({BMVC)}},
 year = {2008},
 abstract = {A common approach to the problem of {3D} human pose estimation from
	video is to recursively estimate the most likely pose via particle
	filtering. However, standard particle filtering methods fail at this
	task due to the high dimensionality of the {3D} articulated human
	pose space. In this paper we present a thorough evaluation of two
	variants of particle filtering, namely Annealed Particle Filtering
	and Partitioned Sampling Particle Filtering, that have been proposed
	to make the problem feasible by exploiting the hierarchical structures
	inside the pose space. We evaluate both methods in the context of
	markerless model-based {3D} motion capture using silhouette shapes
	from multiple cameras. For that we created a simulation from ground
	truth sequences of human motions, which enables us to focus our evaluation
	on the sampling capabilities of the approaches, i.e. on how efficiently
	particles are spread towards the modes of the distribution. We show
	the behaviour with respect to the amount of cameras used, the amount
	of particles used, as well as the dimensionality of the search space.
	Especially the performance when using more complex human models (40
	{DOF} and above) that are able to capture human movements with higher
	precision compared to previous approaches is of interest in this
	work. In summary, we show that both methods have complementary strengths,
	and propose a combined method that is able to perform the tracking
	task with higher robustness despite reduced computational effort.},
}

@article{bandouch_self-training_2012,
 author = {J Bandouch and OC Jenkins and M Beetz},
 title = {A Self-Training Approach for Visual Tracking and Recognition of Complex
	Human Activity Patterns},
 journal = {International Journal of Computer Vision},
 year = {2012},
 volume = {99},
 pages = {166--189},
 number = {2},
}

@inproceedings{beetz_runtime_2001,
 author = {M Beetz},
 title = {Runtime Plan Adaptation in Structured Reactive Controllers},
 booktitle = {Proceedings of the Fourth International Conference on Autonomous
	Agents},
 year = {2001},
 editor = {Andre, E. and Sen, S.},
}

@incollection{beetz_towards_2005,
 author = {M Beetz},
 title = {Towards Comprehensive Computational Models for Plan-Based Control
	of Autonomous Robots},
 booktitle = {Mechanizing Mathematical Reasoning: Essays in Honor of Jörg H. Siekmann
	on the Occasion of His 60th Birthday},
 publisher = {Springer {LNAI} 2605},
 year = {2005},
 editor = {Dieter Hutter and Werner Stephan},
 pages = {514--527},
}

@inproceedings{beetz_structured_1999,
 author = {M Beetz},
 title = {Structured Reactive Controllers — A Computational Model of Everyday
	Activity},
 booktitle = {Proceedings of the Third International Conference on Autonomous Agents},
 year = {1999},
 editor = {Etzioni, O. and Müller, J. and Bradshaw, J.},
 pages = {228--235},
}

@inproceedings{beetz_runtime_2000,
 author = {M Beetz},
 title = {Runtime Plan Adaptation in Structured Reactive Controllers},
 booktitle = {Proceedings of the Fourth International Conference on Autonomous
	Agents},
 year = {2000},
 editor = {Gini, M. and Rosenschein, J.},
}

@book{beetz_plan-based_2002-1,
 title = {Plan-based Control of Robotic Agents},
 publisher = {Springer Publishers},
 year = {2002},
 author = {M Beetz},
 volume = {{LNAI} 2554},
 series = {Lecture Notes in Artificial Intelligence},
}

@inproceedings{beetz_plan_2002,
 author = {M Beetz},
 title = {Plan Representation for Robotic Agents},
 booktitle = {Proceedings of the Sixth International Conference on {AI} Planning
	and Scheduling},
 year = {2002},
 pages = {223--232},
 address = {Menlo Park, {CA}},
 publisher = {{AAAI} Press},
}

@incollection{beetz_towards_2002,
 author = {M Beetz},
 title = {Towards Integrated Computational Models for the Plan-based Control
	of Robotic Agents},
 booktitle = {Festschrift zum 60. Geburtstag von Prof. J. Siekmann},
 publisher = {Springer Publishers},
 year = {2002},
 series = {Lecture Notes in Artificial Intelligence},
}

@article{beetz_plan_2001,
 author = {M Beetz},
 title = {Plan Management for Robotic Agents},
 journal = {{KI} - Künstliche Intelligenz; Special Issue on Planning and Scheduling},
 year = {2001},
 volume = {15},
 pages = {12--17},
 number = {2},
 abstract = {Autonomous robots that perform complex jobs in changing environments
	must be capable of managing their plans as the environmental conditions
	or their tasks change. This raises the problem of deciding whether,
	when, where, and how to revise the plans as the robots' beliefs change.
	This article investigates an approach to execution time plan management
	in which the plans themselves specify the plan adaptation processes.
	In this approach, the robot makes strategic (farsighted) adaptations
	while it executes a plan using tactical (immediate) decisions, and
	overwrites tactical adaptations after strategic decisions have
	been reached (if necessary). We present experiments in which the
	plan adaptation technique is used for the control of two autonomous
	mobile robots. In one of them it controlled the course of action
	of a museum tour-guide robot that operated for thirteen days
	and performed about 3200 plan adaptations reliably.},
}

@article{beetz_structured_2001,
 author = {M Beetz},
 title = {Structured Reactive Controllers},
 journal = {Journal of Autonomous Agents and Multi-Agent Systems. Special Issue:
	Best Papers of the International Conference on Autonomous Agents
	'99},
 year = {2001},
 volume = {4},
 pages = {25--55},
 month = {jun},
}

@book{beetz_concurrent_2000,
 title = {Concurrent Reactive Plans: Anticipating and Forestalling Execution
	Failures},
 publisher = {Springer Publishers},
 year = {2000},
 author = {M Beetz},
 volume = {{LNAI} 1772},
 series = {Lecture Notes in Artificial Intelligence},
}

@phdthesis{beetz_plan-based_2000,
 author = {M Beetz},
 title = {Plan-based Control of Robotic Agents},
 school = {University of Bonn},
 year = {2000},
}

@phdthesis{beetz_anticipating_1996,
 author = {M Beetz},
 title = {Anticipating and Forestalling Execution Failures in Structured Reactive
	Plans},
 school = {Yale University},
 year = {1996},
 type = {Technical Report, {YALE/DCS/RR1097}},
}

@article{beetz_enabling_2000,
 author = {M Beetz and T Arbuckle and T Belker and M Bennewitz and A Cremers and D Hähnel and D Schulz},
 title = {Enabling Autonomous Robots to Perform Complex Tasks},
 journal = {{KI} - Künstliche Intelligenz; Special Issue on Autonomous Robots},
 year = {2000},
 abstract = {Recent extensions of the {RHINO} control system, a system for controlling
	autonomous mobile robots, have further enhanced its ability to perform
	complex, dynamically changing, tasks. We present an overview of the
	extended {RHINO} system, sketching the functionality of its main
	components and their inter-relationships as well as long-term experiments
	demonstrating the practicality of its approach. Pointers are also
	provided to the detailed technical references.},
}

@article{beetz_integrated_2001,
 author = {M Beetz and T Arbuckle and M Bennewitz and W Burgard and A Cremers and D Fox and H Grosskreutz and D Hähnel and D Schulz},
 title = {Integrated Plan-based Control of Autonomous Service Robots in Human
	Environments},
 journal = {{IEEE} Intelligent Systems},
 year = {2001},
 volume = {16},
 pages = {56--65},
 number = {5},
 abstract = {The authors extend the Rhino robot by adding the means for plan-based
	high-level control and plan transformation, further enhancing its
	probabilistic reasoning capabilities. The result: an autonomous robot
	capable of accomplishing prolonged, complex, and dynamically changing
	tasks in the real world.},
}

@inproceedings{beetz_transparent_1998,
 author = {M Beetz and T Arbuckle and A Cremers and M Mann},
 title = {Transparent, Flexible, and Resource-adaptive Image Processing for
	Autonomous Service Robots},
 booktitle = {Proceedings of the 13th European Conference on Artificial Intelligence
	({ECAI-98})},
 year = {1998},
 editor = {Prade, H.},
 pages = {632--636},
 abstract = {We present the design of a programming system for {IP} routines which
	satisfies the requirements above. Our solution consists of three
	components. The first is {RECIPE}, a dynamically loadable, modular
	architecture in a distributed robot control system that provides the
	basic {IP} functionality, manages images and other {IP} data structures,
	and provides a variety of standard {IP} routines such as edge detectors,
	convolutions, noise reduction, and segmentation. The second is {RPLIP},
	an extension of the abstract machine provided by the robot control/plan
	language {RPL}, which provides suitable abstractions for images, regions
	of interest, etc., and supports a tight integration of the vision
	routines into the robot control system. The third is Image Processing
	Plans, which provide various methods for combining {IP} methods into
	{IP} pipelines. {IP} plans support the implementation of robust vision
	routines and the integration of other sensors such as laser range
	finders and sonars for object recognition tasks and scene analysis.
	Since vision routines are {RPL} programs, they can be constructed,
	revised, and reasoned about while the robot control program is being
	executed.},
}

@inproceedings{beetz_camera-based_2006,
 author = {M Beetz and J Bandouch and S Gedikli and N von Hoyningen-Huene and B Kirchlechner and A Maldonado},
 title = {Camera-based Observation of Football Games for Analyzing Multi-agent
	Activities},
 booktitle = {Proceedings of the Fifth International Joint Conference on Autonomous
	Agents and Multiagent Systems ({AAMAS)}},
 year = {2006},
 abstract = {This paper describes a camera-based observation system for football
	games that is used for the automatic analysis of football games and
	reasoning about multi-agent activity. The observation system runs
	on video streams produced by cameras set up for {TV} broadcasting.
	The observation system achieves reliability and accuracy through
	various mechanisms for adaptation, probabilistic estimation, and
	exploiting domain constraints. It represents motions compactly and
	segments them into classified ball actions.},
 keywords = {soccer},
}

@inproceedings{beetz_towards_2009,
 author = {M Beetz and J Bandouch and D Jain and M Tenorth},
 title = {Towards Automated Models of Activities of Daily Life},
 booktitle = {First International Symposium on Quality of Life Technology – Intelligent
	Systems for Better Living},
 year = {2009},
 address = {Pittsburgh, Pennsylvania {USA}},
 abstract = {We propose automated probabilistic models of everyday activities ({AM-EvA)}
	as a novel technical means for the perception, interpretation, and
	analysis of everyday manipulation tasks and activities of daily life.
	{AM-EvAs} are based on action-related concepts in everyday activities
	such as action-related places (the place where cups are taken from
	the cupboard), capabilities (the objects that can be picked up single-handedly),
	etc. These concepts are probabilistically derived from a set of previous
	activities that are fully and automatically observed by computer
	vision and additional sensor systems. {AM-EvA} models enable robots
	and technical systems to analyze activities in the complete situation
	and activity context. They render the classification and the assessment
	of actions and situations objective and can justify the probabilistic
	interpretation with respect to the activities the concepts have been
	learned from. In this paper, we describe the current state of implementation
	of the system that realizes this idea of automated models of everyday
	activities and show example results from the observation and analysis
	of table setting episodes.},
}

@inproceedings{beetz_assistive_2007,
 author = {M Beetz and J Bandouch and A Kirsch and A Maldonado and A Müller and RB Rusu},
 title = {The Assistive Kitchen — A Demonstration Scenario for Cognitive Technical
	Systems},
 booktitle = {Proceedings of the 4th {COE} Workshop on Human Adaptive Mechatronics
	({HAM)}},
 year = {2007},
 abstract = {This paper introduces the Assistive Kitchen as a comprehensive demonstration
	and challenge scenario for technical cognitive systems. We describe
	its hardware and software infrastructure. Within the Assistive Kitchen
	application, we select particular domain activities as research subjects
	and identify the cognitive capabilities needed for perceiving, interpreting,
	analyzing, and executing these activities as research foci. We conclude
	by outlining open research issues that need to be solved to realize
	the scenarios successfully.},
}

@inproceedings{beetz_learning_2000,
 author = {M Beetz and T Belker},
 title = {Learning Structured Reactive Navigation Plans from Executing {MDP}
	Navigation Policies},
 booktitle = {8th International Symposium on Intelligent Robotic Systems, {SIRS}
	2000},
 year = {2000},
 editor = {Ferryman},
 abstract = {Autonomous robots, such as robot office couriers, need navigation
	routines that support flexible task execution and effective action
	planning. This paper describes {XfrmLearn}, a system that learns
	structured symbolic navigation plans. Given a navigation task, {XfrmLearn}
	learns to structure continuous navigation behavior and represents
	the learned structure as compact and transparent plans. The structured
	plans are obtained by starting with monolithic default plans that
	are optimized for average performance and adding subplans to improve
	the navigation performance for the given task. Compactness is achieved
	by incorporating only subplans that achieve significant performance
	gains. The resulting plans support action planning and opportunistic
	task execution. {XfrmLearn} is implemented and extensively evaluated
	on an autonomous mobile robot.},
}

@inproceedings{beetz_environment_2000,
 author = {M Beetz and T Belker},
 title = {Environment and Task Adaptation for Robotic Agents},
 booktitle = {Proceedings of the 14th European Conference on Artificial Intelligence
	({ECAI-2000})},
 year = {2000},
 editor = {Horn, W.},
 pages = {648--652},
 abstract = {This paper investigates the problem of improving the performance of
	general state-of-the-art robot control systems by autonomously adapting
	them to specific tasks and environments. We propose model- and test-based
	transformational learning ({MTTL)} as a computational model for performing
	this task. {MTTL} uses abstract models of control systems and environments
	in order to propose promising adaptations. To account for model deficiencies
	resulting from abstraction, hypotheses are statistically tested based
	on experimentation in the physical world. We describe {XfrmLearn},
	an implementation of {MTTL}, and apply it to the problem of indoor
	navigation. We present experiments in which {XfrmLearn} improves
	the navigation performance of a state-of-the-art high-speed navigation
	system for a given set of navigation tasks by up to 44 percent.},
}

@inproceedings{beetz_learning_2001,
 author = {M Beetz and T Belker},
 title = {Learning Structured Reactive Navigation Plans from Executing {MDP}
	policies},
 booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
 year = {2001},
 pages = {19--20},
 abstract = {Autonomous robots, such as robot office couriers, need navigation
	routines that support flexible task execution and effective action
	planning. This paper describes {XfrmLearn}, a system that learns
	structured symbolic navigation plans. Given a navigation task, {XfrmLearn}
	learns to structure continuous navigation behavior and represents
	the learned structure as compact and transparent plans. The structured
	plans are obtained by starting with monolithic default plans that
	are optimized for average performance and adding subplans to improve
	the navigation performance for the given task. Compactness is achieved
	by incorporating only subplans that achieve significant performance
	gains. The resulting plans support action planning and opportunistic
	task execution. {XfrmLearn} is implemented and extensively evaluated
	on an autonomous mobile robot.},
}

@inproceedings{beetz_experience-_1999,
 author = {M Beetz and T Belker},
 title = {Experience- and Model-based Transformational Learning of Symbolic
	Behavior Specifications},
 booktitle = {Proceedings of the {IJCAI} Workshop on Robot Action Planning},
 year = {1999},
 abstract = {The paper describes {XfrmLearn}, a system that learns symbolic behavior
	specifications to control and improve the continuous sensor-driven
	navigation behavior of an autonomous mobile robot. The robot is to
	navigate between a set of predefined locations in an office environment
	and employs a navigation system consisting of a path planner and
	a reactive collision avoidance system. {XfrmLearn} rationally reconstructs
	the continuous sensor-driven navigation behavior in terms of task
	hierarchies by identifying significant structures and commonalities
	in behaviors. It also constructs a statistical behavior model for
	typical navigation tasks. The behavior model together with a model
	of how the collision avoidance module should "perceive" the environment
	is used to detect behavior "flaws", diagnose them, and revise the
	plans to improve their performance. The learning method is implemented
	on an autonomous mobile robot.},
}

@inproceedings{beetz_planning_1998,
 author = {M Beetz and M Bennewitz},
 title = {Planning, Scheduling, and Plan Execution for Autonomous Robot Office
	Couriers},
 booktitle = {Proceedings of the workshop {“Integrating} Planning, Scheduling and
	Execution in Dynamic and Uncertain Environments” at the Fourth International
	Conference on {AI} in Planning Systems ({AIPS)}},
 year = {1998},
 editor = {Bergmann, R. and Kott, A.},
 volume = {Workshop Notes 98-02},
 publisher = {{AAAI} Press},
 abstract = {Scheduling the tasks of an autonomous robot office courier and carrying
	out the scheduled tasks reliably and efficiently pose challenging
	problems for autonomous robot control. The controller has to accomplish
	long-term efficiency rather than optimize problem-solving episodes.
	It also has to exploit opportunities and avoid problems flexibly
	because often the robot is forced to generate schedules based on
	partial information. We propose to implement the controller for scheduled
	activity by employing concurrent reactive plans that reschedule the
	course of action whenever necessary and while performing their actions.
	The plans are represented modularly and transparently to allow for
	easy transformation. Scheduling and schedule repair methods are implemented
	as plan transformation rules.},
}

@inproceedings{beetz_probabilistic_1999,
 author = {M Beetz and M Bennewitz and H Grosskreutz},
 title = {Probabilistic, Prediction-based Schedule Debugging for Autonomous
	Robot Office Couriers},
 booktitle = {Proceedings of the 23rd German Conference on Artificial Intelligence
	({KI} 99)},
 year = {1999},
 address = {Bonn, Germany},
 publisher = {Springer Verlag},
 abstract = {Acting efficiently and meeting deadlines requires autonomous robots
	to schedule their activities. It also requires them to act flexibly:
	to exploit opportunities and avoid problems as they occur. Scheduling
	activities to meet these requirements is an important research problem
	in its own right. In addition, it provides us with a problem domain
	where modern symbolic {AI} planning techniques can enable robots
	to exhibit better performance than they possibly could without planning.
	This paper describes {PPSD}, a novel planning technique that enables
	autonomous robots to impose order constraints on concurrent percept-driven
	plans to increase the plans' efficiency. The basic idea is to generate
	a schedule under simplified conditions and then to iteratively detect,
	diagnose, and eliminate behavior flaws caused by the schedule based
	on a small number of randomly sampled symbolic execution scenarios.
	The paper discusses the integration of {PPSD} into the controller
	of an autonomous robot office courier and gives an example of its
	use.},
}

@inproceedings{beetz_cop-man_2009,
 author = {M Beetz and N Blodow and U Klank and ZC Marton and D Pangercic and RB Rusu},
 title = {{CoP-Man} – Perception for Mobile Pick-and-Place in Human Living
	Environments},
 booktitle = {Proceedings of the 22nd {IEEE/RSJ} International Conference on Intelligent
	Robots and Systems ({IROS)} Workshop on Semantic Perception for Mobile
	Manipulation},
 year = {2009},
 address = {St. Louis, {MO}, {USA}},
 month = {oct},
}

@inproceedings{beetz_agilo_2002-1,
 author = {M Beetz and S Buck and R Hanek and A Hofhauser and T Schmitt},
 title = {{AGILO} {RoboCuppers} 2002: Applying Cooperative Game State Estimation,
	Experience-based Learning, and Plan-based Control to Autonomous Robot
	Soccer},
 booktitle = {{RoboCup} International Symposium 2002},
 year = {2002},
 series = {Lecture Notes in Computer Science},
 abstract = {This paper describes the computational model underlying the {AGILO}
	autonomous robot soccer team and its implementation. The most salient
	aspects of the {AGILO} control software are that it includes (1)
	a cooperative probabilistic game state estimator working with a simple
	off-the-shelf camera system; (2) a situated action selection module
	that makes ample use of experience-based learning and produces coherent
	team behavior even if inter-robot communication is perturbed; and
	(3) a playbook executor that can perform preprogrammed complex soccer
	plays in appropriate situations by employing plan-based control techniques.
	The use of such sophisticated state estimation and control techniques
	characterizes the {AGILO} software. The paper discusses the computational
	techniques and necessary extensions based on experimental data from
	the 2001 robot soccer world championship.},
}

@inproceedings{beetz_agilo_2002,
 author = {M Beetz and S Buck and R Hanek and T Schmitt and B Radig},
 title = {The {AGILO} Autonomous Robot Soccer Team: Computational Principles,
	Experiences, and Perspectives},
 booktitle = {International Joint Conference on Autonomous Agents and Multi Agent
	Systems ({AAMAS)} 2002},
 year = {2002},
 pages = {805--812},
 address = {Bologna, Italy},
 abstract = {This paper describes the computational model underlying the {AGILO}
	autonomous robot soccer team, its implementation, and our experiences
	with it. The most salient aspects of the {AGILO} control software
	are that it includes (1) a cooperative probabilistic game state estimator
	working with a simple off-the-shelf camera system; (2) a situated
	action selection module that makes ample use of experience-based
	learning and produces coherent team behavior even if inter-robot
	communication is perturbed; and (3) a playbook executor that can
	perform preprogrammed complex soccer plays in appropriate situations
	by employing plan-based control techniques. The use of such sophisticated
	state estimation and control techniques distinguishes the {AGILO}
	software from many others applied to mid-size autonomous robot soccer.
	The paper discusses the computational techniques and necessary extensions
	based on experimental data from the 2001 robot soccer world championship.},
}

@article{beetz_integrating_1998,
 author = {M Beetz and W Burgard and D Fox and A Cremers},
 title = {Integrating Active Localization into High-level Control Systems},
 journal = {Robotics and Autonomous Systems},
 year = {1998},
 volume = {23},
 pages = {205--220},
}

@article{beetz_learning_2010,
 author = {M Beetz and M Buss and B Radig},
 title = {Learning from Humans – Cognition-enabled Computational Models of
	Everyday Activity},
 journal = {Künstliche Intelligenz},
 year = {2010},
}

@inproceedings{beetz_cognitive_2007,
 author = {M Beetz and M Buss and D Wollherr},
 title = {Cognitive Technical Systems — What Is the Role of Artificial Intelligence?},
 booktitle = {Proceedings of the 30th German Conference on Artificial Intelligence
	({KI-2007)}},
 year = {2007},
 editor = {Hertzberg, J. and Beetz, M. and Englert, R.},
 pages = {19--42},
 abstract = {The newly established cluster of excellence {COTESYS} investigates
	the realization of cognitive capabilities such as perception, learning,
	reasoning, planning, and execution for technical systems including
	humanoid robots, flexible manufacturing systems, and autonomous vehicles.
	In this paper we describe cognitive technical systems using a sensor-equipped
	kitchen with a robotic assistant as an example. We will particularly
	consider the role of Artificial Intelligence in the research enterprise.
	Key research foci of Artificial Intelligence research in {COTESYS}
	include (*) symbolic representations grounded in perception and action,
	(*) first-order probabilistic representations of actions, objects,
	and situations, (*) reasoning about objects and situations in the
	context of everyday manipulation tasks, and (*) the representation
	and revision of robot plans for everyday activity.},
}

@inproceedings{beetz_watching_2004,
 author = {M. Beetz and F. Fischer and S. Flossmann and B. and UA. Kirchlechner and C. Holzer},
 title = {Watching Football with the Eyes of Experts: Integrated Intelligent
	Systems for the Automatic Analysis of (Simulated) Football Games},
 booktitle = {5th Annual Conference dvs-Section Computer Science in Sport},
 year = {2004},
 keywords = {soccer},
}

@inproceedings{beetz_motion_2004,
 author = {M Beetz and S Flossmann and T Stammeier},
 title = {Motion and Episode Models for (Simulated) Football Games: Acquisition,
	Representation, and Use},
 booktitle = {3rd International Joint Conference on Autonomous Agents \& Multi
	Agent Systems ({AAMAS)}},
 year = {2004},
 keywords = {soccer},
}

@inproceedings{beetz_visually_2007,
 author = {M Beetz and S Gedikli and J Bandouch and B Kirchlechner and N von Hoyningen-Huene and A Perzylo},
 title = {Visually Tracking Football Games Based on {TV} Broadcasts},
 booktitle = {Proceedings of the Twentieth International Joint Conference on Artificial
	Intelligence ({IJCAI)}},
 year = {2007},
 abstract = {This paper describes {ASPOGAMO}, a visual tracking system that determines
	the coordinates and trajectories of football players in camera view
	based on {TV} broadcasts. To do so, {ASPOGAMO} solves a complex probabilistic
	estimation problem that consists of three subproblems that interact
	in subtle ways: the estimation of the camera direction and zoom factor,
	the tracking and smoothing of player routes, and the disambiguation
	of tracked players after occlusions. The paper concentrates on system
	aspects that make it suitable for operating under unconstrained conditions
	and in (almost) realtime. We report on results obtained in a public
	demonstration at {RoboCup} 2006 where we conducted extensive experiments
	with real data from live coverage of World Cup 2006 games in Germany.},
 keywords = {soccer},
}

@inproceedings{beetz_agilo_2003,
 author = {M Beetz and S Gedikli and R Hanek and T Schmitt and F Stulp},
 title = {{AGILO} {RoboCuppers} 2003: Computational Principles and Research
	Directions},
 booktitle = {{RoboCup} International Symposium 2003},
 year = {2003},
 address = {Padova, Italy},
 abstract = {This paper gives an overview of the approaches chosen by the mid-size
	robot soccer team of the Munich University of Technology, the
	{AGILO} {RoboCuppers}. First, a brief system overview is given.
	Then the computational principles are described. Finally, the directions
	for further research are outlined.},
}

@inproceedings{beetz_semi-automatic_1999,
 author = {M Beetz and M Giesenschlag and R Englert and E Gülch and A Cremers},
 title = {Semi-automatic Acquisition of Symbolically-annotated {3D} Models
	of Office Environments},
 booktitle = {International Conference on Robotics and Automation ({ICRA-99)}},
 year = {1999},
}

@inproceedings{beetz_causal_1998,
 author = {M. Beetz and H. Grosskreutz},
 title = {Causal Models of Mobile Service Robot Behavior},
 booktitle = {Fourth International Conference on {AI} Planning Systems},
 year = {1998},
 editor = {Simmons, R. and Veloso, M. and Smith, S.},
 pages = {163--170},
 publisher = {Morgan Kaufmann},
}

@article{beetz_probabilistic_2005,
 author = {M Beetz and H Grosskreutz},
 title = {Probabilistic Hybrid Action Models for Predicting Concurrent Percept-driven
	Robot Behavior},
 journal = {Journal of Artificial Intelligence Research},
 year = {2005},
 volume = {24},
 pages = {799--849},
 abstract = {This article develops Probabilistic Hybrid Action Models ({PHAMs)},
	a realistic causal model for predicting the behavior generated by
	modern percept-driven robot plans. {PHAMs} represent aspects of robot
	behavior that cannot be represented by most action models used in
	{AI} planning: the temporal structure of continuous control processes,
	their non-deterministic effects, several modes of their interferences,
	and the achievement of triggering conditions in closed-loop robot
	plans. The main contributions of this article are: (1) {PHAMs}, a
	model of concurrent percept-driven behavior, its formalization, and
	proofs that the model generates probably, qualitatively accurate
	predictions; and (2) a resource-efficient inference method for {PHAMs}
	based on sampling projections from probabilistic action models and
	state descriptions. We show how {PHAMs} can be applied to planning
	the course of action of an autonomous robot office courier based
	on analytical and experimental results.},
}

@inproceedings{beetz_probabilistic_2000,
 author = {M Beetz and H Grosskreutz},
 title = {Probabilistic Hybrid Action Models for Predicting Concurrent Percept-driven
	Robot Behavior},
 booktitle = {Proceedings of the Sixth International Conference on {AI} Planning
	Systems},
 year = {2000},
 publisher = {{AAAI} Press},
 abstract = {This paper develops Probabilistic Hybrid Action Models ({PHAMs)},
	a realistic causal model for predicting the behavior generated by
	modern concurrent percept-driven robot plans. {PHAMs} represent aspects
	of robot behavior that cannot be represented by most action models
	used in {AI} planning: the temporal structure of continuous control
	processes, their non-deterministic effects, and several modes of
	their interferences. The main contributions of the paper are: (1)
	{PHAMs}, a model of concurrent percept-driven behavior, its formalization,
	and proofs that the model generates probably, qualitatively accurate
	predictions; and (2) a resource-efficient inference method for {PHAMs}
	based on sampling projections from probabilistic action models and
	state descriptions. We discuss how {PHAMs} can be applied to planning
	the course of action of an autonomous robot office courier based
	on analytical and experimental results.},
}

@book{beetz_advances_2002,
 title = {Advances in Plan-based Control of Robotic Agents},
 publisher = {Springer Publishers},
 year = {2002},
 author = {M Beetz and J Hertzberg and M Ghallab and M Pollack},
 volume = {{LNAI} 2466},
 series = {Lecture Notes in Artificial Intelligence},
}

@inproceedings{beetz_plan-based_2002,
 author = {M Beetz and A Hofhauser},
 title = {Plan-based Control for Autonomous Robot Soccer},
 booktitle = {Advances in Plan-based Control of Autonomous Robots. Selected Contributions
	of the Dagstuhl Seminar Plan-based Control of Robotic Agents, Lecture
	Notes in Artificial Intelligence ({LNAI)}},
 year = {2002},
 publisher = {Springer-Verlag},
}

@article{beetz_aspogamo:_2009,
 author = {M Beetz and N von Hoyningen-Huene and B Kirchlechner and S Gedikli and F Siles and M Durus and M Lames},
 title = {{ASpoGAMo:} Automated Sports Game Analysis Models},
 journal = {International Journal of Computer Science in Sport},
 year = {2009},
 volume = {8},
 number = {1},
 abstract = {We propose automated sport game models as a novel technical means
	for the analysis of team sport games. The basic idea is that automated
	sport game models are based on a conceptualization of key notions
	in such games and probabilistically derived from a set of previous
	games. In contrast to existing approaches, automated sport game models
	provide an analysis that is sensitive to their context and go beyond
	simple statistical aggregations allowing objective, transparent and
	meaningful concept definitions. Based on automatically gathered spatio-temporal
	data by a computer vision system, a model hierarchy is built bottom
	up, where context-sensitive concepts are instantiated by the application
	of machine learning techniques. We describe the current state of
	implementation of the {ASpoGAMo} system including its computer vision
	subsystem that realizes the idea of automated sport game models.
	Their usage is exemplified with an analysis of the final of the soccer
	World Cup 2006.},
 keywords = {soccer},
}

@article{beetz_towards_2010-1,
 author = {M Beetz and D Jain and L Mösenlechner and M Tenorth},
 title = {Towards Performing Everyday Manipulation Activities},
 journal = {Robotics and Autonomous Systems},
 year = {2010},
 volume = {58},
 pages = {1085--1095},
 number = {9},
}

@article{beetz_cognition-enabled_2012,
 author = {M Beetz and D Jain and L Mösenlechner and M Tenorth and L Kunze and N Blodow and D Pangercic},
 title = {Cognition-Enabled Autonomous Robot Control for the Realization of
	Home Chore Task Intelligence},
 journal = {Proceedings of the {IEEE}, Special Issue on Quality of Life Technology},
 year = {2012},
 volume = {100},
 pages = {2454--2471},
 number = {8},
}

@inproceedings{beetz_interpretation_2004,
 author = {M. Beetz and B. Kirchlechner and F. Fischer},
 title = {Interpretation and Processing of Position Data for the Empirical
	Study of the Behavior of Simulation League {RoboCup} Teams},
 booktitle = {{KI} 2004 Workshop},
 year = {2004},
}

@article{beetz_computerized_2005,
 author = {M Beetz and B Kirchlechner and M Lames},
 title = {Computerized Real-Time Analysis of Football Games},
 journal = {{IEEE} Pervasive Computing},
 year = {2005},
 volume = {4},
 pages = {33--39},
 number = {3},
 abstract = {The research reported in this article is part of an ambitious, mid-term
	project that studies the automated analysis of football games. The
	input for game analysis is position data provided by tiny microwave
	transmitters that are placed in the ball and the shin guards of football
	players. The main objectives of the project are (1) the investigation
	of novel computational mechanisms that enable computer systems to
	recognize intentional activities based on position data, (2) the
	development of an integrated software system to automate game interpretation
	and analysis, and (3) the demonstration of the impact of automatic
	game analysis on sport science, football coaching, and sports entertainment.
	The results are to be showcased in the form of an intelligent information
	system for the matches at the Football World Championship 2006 in
	Germany.},
 keywords = {soccer},
}

@article{beetz_special_2010,
 author = {M Beetz and A Kirsch},
 title = {Special Issue on Cognition for Technical Systems},
 journal = {Künstliche Intelligenz},
 year = {2010},
 volume = {24},
}

@inproceedings{beetz_rpl-learn:_2004,
 author = {M Beetz and A Kirsch and A Müller},
 title = {{RPL-LEARN:} Extending an Autonomous Robot Control Language to Perform
	Experience-based Learning},
 booktitle = {3rd International Joint Conference on Autonomous Agents \& Multi
	Agent Systems ({AAMAS)}},
 year = {2004},
 abstract = {In this paper, we extend the autonomous robot control and plan language
	{RPL} with constructs for specifying experiences, control tasks,
	learning systems and their parameterization, and exploration strategies.
	Using these constructs, the learning problems can be represented
	explicitly and transparently and become executable. With the extended
	language we rationally reconstruct parts of the {AGILO} autonomous
	robot soccer controllers and show the feasibility and advantages
	of our approach.},
}

@inproceedings{beetz_robotic_2011-1,
 author = {M Beetz and U Klank and I Kresse and A Maldonado and L Mösenlechner and D Pangercic and T Rühr and M Tenorth},
 title = {Robotic Roommates Making Pancakes},
 booktitle = {11th {IEEE-RAS} International Conference on Humanoid Robots},
 year = {2011},
 address = {Bled, Slovenia},
 month = {oct},
}

@inproceedings{beetz_robotic_2011,
 author = {M Beetz and U Klank and A Maldonado and D Pangercic and T Rühr},
 title = {Robotic Roommates Making Pancakes - Look Into Perception-Manipulation
	Loop},
 booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)},
	Workshop on Mobile Manipulation: Integrating Perception and Manipulation},
 year = {2011},
 pages = {529--536},
 month = {may},
}

@inproceedings{beetz_local_1996,
 author = {M. Beetz},
 title = {Local Planning of Ongoing Activities},
 booktitle = {Third International Conference on {AI} Planning Systems},
 year = {1996},
 editor = {Drabble, Brian},
 pages = {19--26},
 publisher = {Morgan Kaufmann},
}

@inproceedings{beetz_improving_1994,
 author = {M. Beetz},
 title = {Improving Robot Plans During Their Execution},
 booktitle = {Second International Conference on {AI} Planning Systems},
 year = {1994},
 editor = {Hammond, K.},
 pages = {3--12},
 publisher = {Morgan Kaufmann},
}

@inproceedings{beetz_declarative_1992,
 author = {M. Beetz},
 title = {Declarative Goals in Reactive Plans},
 booktitle = {First International Conference on {AI} Planning Systems},
 year = {1992},
 editor = {Hendler, J.},
 pages = {3--12},
 publisher = {Morgan Kaufmann},
}

@inproceedings{beetz_executing_1996,
 author = {M. Beetz},
 title = {Executing Structured Reactive Plans},
 booktitle = {{AAAI} Fall Symposium: Issues in Plan Execution},
 year = {1996},
 editor = {Pryor, L. and Steel, S.},
}

@inproceedings{beetz_expressing_1997,
 author = {M. Beetz},
 title = {Expressing Transformations of Structured Reactive Plans},
 booktitle = {Recent Advances in {AI} Planning. Proceedings of the 1997 European
	Conference on Planning},
 year = {1997},
 pages = {64--76},
 publisher = {Springer Publishers},
}

@inproceedings{beetz_fast_1997,
 author = {M. Beetz},
 title = {Fast Probabilistic Plan Debugging},
 booktitle = {Recent Advances in {AI} Planning. Proceedings of the 1997 European
	Conference on Planning},
 year = {1997},
 pages = {77--90},
 publisher = {Springer Publishers},
}

@inproceedings{beetz_cram_2010,
 author = {M Beetz and L Mösenlechner and M Tenorth},
 title = {{CRAM} – A Cognitive Robot Abstract Machine for Everyday Manipulation
	in Human Environments},
 booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems},
 year = {2010},
 pages = {1012--1017},
 address = {Taipei, Taiwan},
 month = {oct},
}

@inproceedings{beetz_cram_2012,
 author = {M Beetz and L Mösenlechner and M Tenorth and T Rühr},
 title = {{CRAM} – A Cognitive Robot Abstract Machine},
 booktitle = {5th International Conference on Cognitive Systems ({CogSys} 2012)},
 year = {2012},
}

@inproceedings{beetz_structured_1998,
 author = {M Beetz and H Peters},
 title = {Structured Reactive Communication Plans — Integrating Conversational
	Actions into High-level Robot Control Systems},
 booktitle = {Proceedings of the 22nd German Conference on Artificial Intelligence
	({KI} 98), Bremen, Germany},
 year = {1998},
 publisher = {Springer Verlag},
}

@article{beetz_agilo_2004,
 author = {M Beetz and T Schmitt and R Hanek and S Buck and F Stulp and D Schröter and B Radig},
 title = {The {AGILO} Robot Soccer Team – Experience-based Learning and Probabilistic
	Reasoning in Autonomous Robot Control},
 journal = {Autonomous Robots},
 year = {2004},
 volume = {17},
 pages = {55--77},
 number = {1},
 abstract = {This article describes the computational model underlying the {AGILO}
	autonomous robot soccer team, its implementation, and our experiences
	with it. According to our model the control system of an autonomous
	soccer robot consists of a probabilistic game state estimator and
	a situated action selection module. The game state estimator computes
	the robot's belief state with respect to the current game situation
	using a simple off-the-shelf camera system. The estimated game state
	comprises the positions and dynamic states of the robot itself and
	its teammates as well as the positions of the ball and the opponent
	players. Employing sophisticated probabilistic reasoning techniques
	and exploiting the cooperation between teammates, the robot can
	estimate complex game states reliably and accurately despite incomplete
	and inaccurate state information. The action selection module selects
	actions according to specified selection criteria as well as learned
	experiences. Automatic learning techniques made it possible to develop
	fast and skillful routines for approaching the ball, assigning roles,
	and performing coordinated plays. The paper discusses the computational
	techniques based on experimental data from the 2001 robot soccer
	world championship.},
}

@inproceedings{beetz_perspectives_2000,
 author = {M Beetz and J Schumacher and A Cremers and B Hellingrath and C Mazzocco},
 title = {Perspectives on Plan-based Multiagent Systems for Distributed Supply
	Chain Management in the Steel Industry},
 booktitle = {Proceedings of the {ECAI2000} Workshop on Agent Technologies and
	Their Application Scenarios in Logistics},
 year = {2000},
 editor = {Timm, I.},
}

@article{beetz_generality_2010,
 author = {M Beetz and F Stulp and P Esden-Tempski and A Fedrizzi and U Klank and I Kresse and A Maldonado and F Ruiz},
 title = {Generality and Legibility in Mobile Manipulation},
 journal = {Autonomous Robots Journal (Special Issue on Mobile Manipulation)},
 year = {2010},
 volume = {28},
 pages = {21--44},
 number = {1},
}

@inproceedings{beetz_autonomous_2003,
 author = {M Beetz and F Stulp and A Kirsch and A Müller and S Buck},
 title = {Autonomous Robot Controllers Capable of Acquiring Repertoires of
	Complex Skills},
 booktitle = {{RoboCup} International Symposium 2003},
 year = {2003},
 address = {Padova, Italy},
 month = {jul},
 abstract = {Due to the complexity and sophistication of the skills needed in real
	world tasks, the development of autonomous robot controllers requires
	an ever increasing application of learning techniques. To date, however,
	learning steps are mainly executed in isolation and only the learned
	code pieces become part of the controller. This approach has several
	drawbacks: the learning steps themselves are undocumented and not
	executable. In this paper, we extend an existing control language
	with constructs for specifying control tasks, process models, learning
	problems, exploration strategies, etc. Using these constructs, the
	learning problems can be represented explicitly and transparently
	and, as they are part of the overall program implementation, become
	executable. With the extended language we rationally reconstruct
	large parts of the action selection module of the {AGILO2001} autonomous
	soccer robots.},
}

@inproceedings{beetz_assistive_2008,
 author = {M Beetz and F Stulp and B Radig and J Bandouch and N Blodow and M Dolha and A Fedrizzi and D Jain and U Klank and I Kresse and A Maldonado and Z Marton and L Mösenlechner and F Ruiz and RB Rusu and M Tenorth},
 title = {The Assistive Kitchen – A Demonstration Scenario for Cognitive Technical
	Systems},
 booktitle = {{IEEE} 17th International Symposium on Robot and Human Interactive
	Communication ({RO-MAN}), München, Germany},
 year = {2008},
 pages = {1--8},
}

@article{beetz_towards_2010,
 author = {M Beetz and M Tenorth and D Jain and J Bandouch},
 title = {Towards Automated Models of Activities of Daily Life},
 journal = {Technology and Disability},
 year = {2010},
 volume = {22},
 pages = {27–40},
 number = {1-2},
}

@inproceedings{beetz_semantic_2012,
 author = {M Beetz and M Tenorth and D Pangercic and B Pitzer},
 title = {Semantic Object Maps for Household Tasks},
 booktitle = {5th International Conference on Cognitive Systems ({CogSys} 2012)},
 year = {2012},
}

@inproceedings{belker_learning_2001,
 author = {T Belker and M Beetz},
 title = {Learning to Execute Robot Navigation Plans},
 booktitle = {Proceedings of the 25th German Conference on Artificial Intelligence
	({KI} 01)},
 year = {2001},
 address = {Wien, Austria},
 publisher = {Springer Verlag},
 abstract = {Most state-of-the-art navigation systems for autonomous service robots
	decompose navigation into global navigation planning and local reactive
	navigation. While the methods for navigation planning and local navigation
	are well understood, the plan execution problem, the problem of how
	to generate and parameterize local navigation tasks from a given
	navigation plan, is largely unsolved. This article describes how
	a robot can autonomously learn to execute navigation plans. We formalize
	the problem as a Markov Decision Problem ({MDP)}, discuss how it
	can be simplified to make its solution feasible, and describe how
	the robot can acquire the necessary action models. We show, both
	in simulation and on a {RWI} B21 mobile robot, that the learned models
	are able to produce competent navigation behavior.},
}

@article{belker_learning_2002,
 author = {T Belker and M Beetz and A Cremers},
 title = {Learning Action Models for the Improved Execution of Navigation Plans},
 journal = {Robotics and Autonomous Systems},
 year = {2002},
 volume = {38},
 pages = {137–148},
 number = {3–4},
 month = {mar},
 abstract = {Most state-of-the-art navigation systems for autonomous service robots
	decompose navigation into global navigation planning and local reactive
	navigation. While the methods for navigation planning and local navigation
	themselves are well understood, the plan execution problem, the problem
	of how to generate and parameterize local navigation tasks from a
	given navigation plan, is largely unsolved. This article describes
	how a robot can autonomously learn to execute navigation plans. We
	formalize the problem as a Markov Decision Process ({MDP)} and derive
	a decision theoretic action selection function from it. The action
	selection function employs models of the robot's navigation actions,
	which are autonomously acquired from experience using neural network
	or regression tree learning algorithms. We show, both in simulation
	and on a {RWI} B21 mobile robot, that the learned models together
	with the derived action selection function achieve competent navigation
	behavior.},
}

@inproceedings{bersch_segmentation_2012,
 author = {C Bersch and D Pangercic and S Osentoski and K Hausman and ZC Marton and R Ueda and K Okada and M Beetz},
 title = {Segmentation of Textured and Textureless Objects through Interactive
	Perception},
 booktitle = {{RSS} Workshop on Robots in Clutter: Manipulation, Perception and
	Navigation in Human Environments},
 year = {2012},
 address = {Sydney, Australia},
 month = {jul},
}

@inproceedings{bertelsmeier_kontextunterstutzte_1977,
 author = {R. Bertelsmeier and B Radig},
 title = {Kontextunterstützte Analyse von Szenen mit bewegten Objekten.},
 booktitle = {Digital Bildverarbeitung - Digital Image Processing, {GI/NTG} Fachtagung,
	München, 28.-30. März 1977},
 year = {1977},
 editor = {Nagel, Hans-Hellmut},
 pages = {101--128},
 publisher = {Springer},
 isbn = {3-540-08169-0},
}

@inproceedings{bigontina_pose_OGRW_2014,
 author = {A Bigontina and M Herrmann and M Hoernig and B Radig},
 title = {Human Body Part Classification in Monocular Soccer Images},
 booktitle = {9th Open German-Russian Workshop on Pattern Recognition and Image
	Understanding},
 year = {2014},
 address = {Koblenz},
 month = {dec},
 keywords = {Articulated Pose Estimation, Human Body Pose Estimation, Pixel-based
	Classification, Random Forests, soccer},
}

@inproceedings{blas_fault-tolerant_2009,
 author = {MR Blas and RB Rusu and M Blanke and M Beetz},
 title = {Fault-tolerant {3D} Mapping with Application to an Orchard Robot},
 booktitle = {Proceedings of the 7th {IFAC} International Symposium on Fault Detection,
	Supervision and Safety of Technical Processes ({SAFEPROCESS'09)},
	Barcelona, Spain, June 30 - July 3},
 year = {2009},
}

@inproceedings{blodow_autonomous_2011,
 author = {N Blodow and LC Goron and ZC Marton and D Pangercic and T Rühr and M Tenorth and M Beetz},
 title = {Autonomous Semantic Mapping for Robots Performing Everyday Manipulation
	Tasks in Kitchen Environments},
 booktitle = {Proceedings of the {IEEE/RSJ} International Conference on Intelligent
	Robots and Systems ({IROS)}},
 year = {2011},
 address = {San Francisco, {CA}, {USA}},
 month = {sep},
}

@inproceedings{blodow_perception_2010,
 author = {N Blodow and D Jain and ZC Marton and M Beetz},
 title = {Perception and Probabilistic Anchoring for Dynamic World State Logging},
 booktitle = {10th {IEEE-RAS} International Conference on Humanoid Robots},
 year = {2010},
 pages = {160--166},
 address = {Nashville, {TN}, {USA}},
 month = {dec},
}

@inproceedings{blodow_making_2010,
 author = {N Blodow and ZC Marton and D Pangercic and M Beetz},
 title = {Making Sense of {3D} Data},
 booktitle = {Robotics: Science and Systems Conference ({RSS)}, Workshop on Strategies
	and Evaluation for Mobile Manipulation in Household Environments},
 year = {2010},
}

@inproceedings{blodow_inferring_2011,
 author = {N Blodow and ZC Marton and D Pangercic and T Rühr and M Tenorth and M Beetz},
 title = {Inferring Generalized Pick-and-Place Tasks from Pointing Gestures},
 booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)},
	Workshop on Semantic Perception, Mapping and Exploration},
 year = {2011},
 month = {may},
}

@inproceedings{blodow_partial_2009,
 author = {N Blodow and RB Rusu and ZC Marton and M Beetz},
 title = {Partial View Modeling and Validation in {3D} Laser Scans for Grasping},
 booktitle = {9th {IEEE-RAS} International Conference on Humanoid Robots (Humanoids)},
 year = {2009},
 address = {Paris, France},
 month = {dec},
}

@techreport{brscic_multi_2010,
 author = {D. Brščić and M Eggers and F. Rohrmüller and O. Kourakos and S. Sosnowski and D. Althoff and M. Lawitzky and A. Mörtl and M. Rambow and V. Koropouli and J. R. Medina Hernández and X. Zang and W. Wang and D. Wollherr and K. Kühnlenz and C Mayer and T. Kruse and A. Kirsch and J. Blume and A. Bannat and T. Rehrl and F. Wallhoff and T. Lorenz and P. Basili and C. Lenz and T. Röder and G. Panin and W. Maier and S. Hirche and M. Buss and M. Beetz and B Radig and A. Schubö and S. Glasauer and A. Knoll and E. Steinbach},
 title = {Multi Joint Action in {CoTeSys} — Setup and Challenges},
 institution = {{CoTeSys} Cluster of Excellence: Technische Universität München \&
	Ludwig-Maximilians-Universität München},
 year = {2010},
 number = {{CoTeSys-TR-10-01}},
 address = {Munich, Germany},
 month = {jun},
}

@phdthesis{buck_experience-based_2003,
 author = {S Buck},
 title = {Experience-Based Control and Coordination of Autonomous Mobile Systems
	in Dynamic Environments},
 school = {Department of Informatics, Technische Universität München},
 year = {2003},
 url = {http://tumb1.biblio.tu-muenchen.de/publ/diss/in/2003/buck.html},
}

@inproceedings{buck_m-rose:_2002,
 author = {S Buck and M Beetz and T Schmitt},
 title = {M-{ROSE:} A Multi Robot Simulation Environment for Learning Cooperative
	Behavior},
 booktitle = {Distributed Autonomous Robotic Systems 5, Lecture Notes in Artificial
	Intelligence},
 year = {2002},
 editor = {Asama, H. and Arai, T. and Fukuda, T. and Hasegawa, T.},
 series = {{LNAI}},
 publisher = {Springer-Verlag},
}

@inproceedings{buck_reliable_2002,
 author = {S Buck and M Beetz and T Schmitt},
 title = {Reliable Multi Robot Coordination Using Minimal Communication and
	Neural Prediction},
 booktitle = {Advances in Plan-based Control of Autonomous Robots. Selected Contributions
	of the Dagstuhl Seminar {“Plan-based} Control of Robotic Agents”},
 year = {2002},
 editor = {Beetz, M. and Hertzberg, J. and Ghallab, M. and Pollack, M.},
 series = {Lecture Notes in Artificial Intelligence},
 publisher = {Springer},
}

@inproceedings{buck_approximating_2002,
 author = {S Buck and M Beetz and T Schmitt},
 title = {Approximating the Value Function for Continuous Space Reinforcement
	Learning in Robot Control},
 booktitle = {Proc. of the {IEEE} Intl. Conf. on Intelligent Robots and Systems},
 year = {2002},
 abstract = {Many robot learning tasks are very difficult to solve: their state
	spaces are high dimensional, variables and command parameters are
	continuously valued, and system states are only partly observable.
	In this paper, we propose to learn a continuous space value function
	for reinforcement learning using neural networks trained from data
	of exploration runs. The learned function is guaranteed to be a lower
	bound for, and reproduces the characteristic shape of, the accurate
	value function. We apply our approach to two robot navigation tasks,
	discuss how to deal with possible problems occurring in practice,
	and assess its performance.},
}

@inproceedings{buck_planning_2001,
 author = {S Buck and M Beetz and T Schmitt},
 title = {Planning and Executing Joint Navigation Tasks in Autonomous Robot
	Soccer},
 booktitle = {5th International Workshop on {RoboCup} (Robot World Cup Soccer Games
	and Conferences)},
 year = {2001},
}

@inproceedings{buck_agilo_2000,
 author = {S Buck and R Hanek and M Klupsch and T Schmitt},
 title = {Agilo {RoboCuppers:} {RoboCup} Team Description},
 booktitle = {The Fourth Robot World Cup Soccer Games and Conferences},
 year = {2000},
 address = {Melbourne, Australia},
 abstract = {This paper describes the Agilo {RoboCuppers}, the team of the image
	understanding group ({FG} {BV)} at the Technische Universität München.
	With a team of four Pioneer 1 robots, each equipped with a {CCD} camera
	and a single-board computer and coordinated by a master {PC} outside
	the field, we participate in the Middle Size League of the fourth international
	{RoboCup} Tournament in Melbourne 2000. We use a multi-agent based
	approach to represent different robots and to encapsulate concurrent
	tasks within the robots. A fast feature extraction based on the image
	processing library {HALCON} provides the data necessary for the on-board
	scene interpretation. All robot observations are fused to one single
	consistent view. Decision making is done on this fused data.},
}

@inproceedings{buck_learning_2000,
 author = {S Buck and M Riedmiller},
 title = {Learning Situation Dependent Success Rates Of Actions In A {RoboCup}
	Scenario},
 booktitle = {Pacific Rim International Conference on Artificial Intelligence},
 year = {2000},
 pages = {809},
 abstract = {A quickly changing, unpredictable environment complicates autonomous
	decision making in a system of mobile robots. To simplify action
	selection we suggest a suitable reduction of decision space by restricting
	the number of executable actions the agent can choose from. We use
	supervised neural learning to automatically learn success rates of
	actions to facilitate decision making. To determine probabilities
	of success each agent relies on its sensory data. We show that using
	our approach it is possible to compute probabilities of success close
	to the real success rates of actions and further we give a few results
	of games of a {RoboCup} simulation team based on this approach.},
}

@inproceedings{buck_machine_2002,
 author = {S Buck and F Stulp and M Beetz and T Schmitt},
 title = {Machine Control Using Radial Basis Value Functions and Inverse State
	Projection},
 booktitle = {Proc. of the {IEEE} Intl. Conf. on Automation, Robotics, Control,
	and Vision},
 year = {2002},
 abstract = {Typical real-world machine control tasks have characteristics
	that make them difficult to solve: their state spaces are high-dimensional
	and continuous, and it may be impossible to reach a satisfying target
	state by exploration or human control. To overcome these problems,
	in this paper, we propose (1) to use radial basis functions for value
	function approximation in continuous space reinforcement learning
	and (2) the use of learned inverse projection functions for state
	space exploration. We apply our approach to path planning in dynamic
	environments and to an aircraft autolanding simulation, and evaluate
	its performance.},
}

@inproceedings{buck_multi_2001,
 author = {S Buck and U. Weber and M Beetz and T Schmitt},
 title = {Multi Robot Path Planning for Dynamic Environments: A Case Study},
 booktitle = {Proc. of the {IEEE} Intl. Conf. on Intelligent Robots and Systems},
 year = {2001},
}

@article{buss_cotesys_2010,
 author = {M Buss and M Beetz},
 title = {{CoTeSys} – Cognition for Technical Systems},
 journal = {Künstliche Intelligenz},
 year = {2010},
}

@article{buss_cotesys_2007,
 author = {M Buss and M Beetz and D Wollherr},
 title = {{CoTeSys} — Cognition for Technical Systems},
 journal = {International Journal of Assistive Robotics and Mechatronics},
 year = {2007},
 volume = {8},
 pages = {25--36},
 number = {4},
 abstract = {The {CoTeSys} cluster of excellence investigates cognition for technical
	systems such as vehicles, robots, and factories. Cognitive technical
	systems ({CTS)} are information processing systems equipped with
	artificial sensors and actuators, integrated and embedded into physical
	systems, and acting in a physical world. They differ from other technical
	systems as they perform cognitive control and have cognitive capabilities.
	Cognitive control orchestrates reflexive and habitual behavior in
	accord with long-term intentions. Cognitive capabilities such as perception,
	reasoning, learning, and planning turn technical systems into systems
	that “know what they are doing”. The cognitive capabilities will
	result in systems of higher reliability, flexibility, adaptivity,
	and better performance. They will be easier to interact and cooperate
	with.},
}

@inproceedings{buss_cotesys_2007-1,
 author = {M Buss and M Beetz and D Wollherr},
 title = {{CoTeSys} — Cognition for Technical Systems},
 booktitle = {Proceedings of the 4th {COE} Workshop on Human Adaptive Mechatronics
	({HAM)}},
 year = {2007},
 abstract = {The {CoTeSys} cluster of excellence investigates cognition for technical
	systems such as vehicles, robots, and factories. Cognitive technical
	systems ({CTS)} are information processing systems equipped with
	artificial sensors and actuators, integrated and embedded into physical
	systems, and acting in a physical world. They differ from other technical
	systems as they perform cognitive control and have cognitive capabilities.
	Cognitive control orchestrates reflexive and habitual behavior in
	accord with long-term intentions. Cognitive capabilities such as perception,
	reasoning, learning, and planning turn technical systems into systems
	that “know what they are doing”. The cognitive capabilities will
	result in systems of higher reliability, flexibility, adaptivity,
	and better performance. They will be easier to interact and cooperate
	with.},
}

@phdthesis{durus_ball_tracking_2014,
 author = {M Durus},
 title = {Ball Tracking and Action Recognition of Soccer Players in TV Broadcast
	Videos},
 school = {Technische Universität München},
 year = {2014},
 address = {München},
 keywords = {soccer},
 url = {http://nbn-resolving.de/urn/resolver.pl?urn:nbn:de:bvb:91-diss-20140414-1145077-0-1},
}

@phdthesis{eggers_perspective_2014,
 author = {M Eggers},
 title = {Perspective-Adjusting Appearance Model for Distributed Multi-View
	Person Tracking},
 school = {Technische Universität München},
 year = {2014},
 url = {http://nbn-resolving.de/urn/resolver.pl?urn:nbn:de:bvb:91-diss-20141104-1219467-0-9},
}

@article{eggers_setup_2013,
 author = {M Eggers and V Dikov and C Mayer and C Steger and B Radig},
 title = {Setup and calibration of a distributed camera system for surveillance
	of laboratory space},
 journal = {Pattern Recognition and Image Analysis},
 year = {2013},
 volume = {23},
 pages = {481--487},
 number = {4},
 month = {oct},
 doi = {10.1134/S1054661813040032},
 issn = {1054-6618, 1555-6212},
 language = {en},
 url = {http://link.springer.com/10.1134/S1054661813040032},
 urldate = {2014-05-15},
}

@inproceedings{engstler_memoman_2009,
 author = {F Engstler and J Bandouch and H Bubb},
 title = {{MeMoMan} - Model Based Markerless Capturing of Human Motion},
 booktitle = {The 17th World Congress on Ergonomics (International Ergonomics Association,
	{IEA)}},
 year = {2009},
 address = {Beijing, China},
}

@inproceedings{ertelt_integration_2009,
 author = {C Ertelt and T Rühr and D Pangercic and K Shea and M Beetz},
 title = {Integration of Perception, Global Planning and Local Planning in
	the Manufacturing Domain},
 booktitle = {Proceedings of Emerging Technologies and Factory Automation ({ETFA)}},
 year = {2009},
}

@inproceedings{fedrizzi_transformational_2009,
 author = {A Fedrizzi and L Moesenlechner and F Stulp and M Beetz},
 title = {Transformational Planning for Mobile Manipulation based on Action-related
	Places},
 booktitle = {Proceedings of the International Conference on Advanced Robotics
	({ICAR)}},
 year = {2009},
 pages = {1–8},
}

@inproceedings{fischer_experiences_2004,
 author = {S Fischer and S Döring and M Wimmer and A Krummheuer},
 title = {Experiences with an Emotional Sales Agent},
 booktitle = {Affective Dialogue Systems},
 year = {2004},
 editor = {André, Elisabeth and Dybkjær, Laila and Minker, Wolfgang and
	Heisterkamp, Paul},
 volume = {3068},
 series = {Lecture Notes in Computer Science},
 pages = {309--312},
 address = {Kloster Irsee, Germany},
 month = {jun},
 publisher = {Springer},
 abstract = {With {COSIMAB2B} we demonstrate a prototype of a complex and visionary
	e-procurement application. The embodied character agent named {COSIMA}
	is able to respect a customer's preferences and deals with him or
	her via natural speech. She expresses various emotions via facial
	expressions and gestures, combined with speech output, and {COSIMA} is
	even able to consider the customer's emotions via facial expression
	recognition. As first
	observations show, this is a very promising approach to improve the
	bargaining with the customer or the recommendation of products.},
 isbn = {3-540-22143-3},
}

@inproceedings{friesdorf_mutually_2009,
 author = {F Friesdorf and D Pangercic and H Bubb and M Beetz},
 title = {Mutually Augmented Cognition},
 booktitle = {Proceedings of the International Conference on Social Robotics ({ICSR)}},
 year = {2009},
}

@inproceedings{gast_did_2009,
 author = {J Gast and A Bannat and T Rehrl and C Mayer and F Wallhoff and G Rigoll and B Radig},
 title = {Did I Get it Right: Head Gesture Analysis for Human-Machine Interaction},
 booktitle = {Human-Computer Interaction. Novel Interaction Methods and Techniques},
 year = {2009},
 series = {Lecture Notes in Computer Science},
 publisher = {Springer},
}

@phdthesis{gedikli_continual_2009,
 author = {S Gedikli},
 title = {Continual and Robust Estimation of Camera Parameters in Broadcasted
	Sports Games},
 school = {Technische Universität München},
 year = {2009},
 keywords = {soccer},
}

@inproceedings{gedikli_adaptive_2007,
 author = {S Gedikli and J Bandouch and N von Hoyningen-Huene and B Kirchlechner and M Beetz},
 title = {An Adaptive Vision System for Tracking Soccer Players from Variable
	Camera Settings},
 booktitle = {Proceedings of the 5th International Conference on Computer Vision
	Systems ({ICVS)}},
 year = {2007},
 abstract = {In this paper we present {ASpoGAMo}, a vision system capable of estimating
	motion trajectories of soccer players taped on video. The system
	performs well in a multitude of application scenarios because of
	its adaptivity to various camera setups, such as single or multiple
	camera settings, static or dynamic ones. Furthermore, {ASpoGAMo}
	can directly process image streams taken from {TV} broadcast, and
	extract all valuable information despite scene interruptions and
	cuts between different cameras. The system achieves a high level
	of robustness through the use of model-based vision algorithms for
	camera estimation and player recognition and a probabilistic multi-player
	tracking framework capable of dealing with occlusion situations typical
	in team-sports. The continuous interplay between these submodules
	adds to both the reliability and the efficiency of the overall
	system.},
 keywords = {soccer},
}

@inproceedings{geipel_learning_2006,
 author = {M Geipel and M Beetz},
 title = {Learning to Shoot Goals: Analysing the Learning Process and the
	Resulting Policies},
 booktitle = {{RoboCup-2006:} Robot Soccer World Cup X},
 year = {2006},
 editor = {Lakemeyer, Gerhard and Sklar, Elizabeth and Sorrenti, Domenico G. and
	Takahashi, Tomoichi},
 publisher = {Springer Verlag, Berlin},
 abstract = {Reinforcement learning is a very general unsupervised learning mechanism.
	Due to its generality, reinforcement learning does not scale very
	well for tasks that involve inferring subtasks, in particular when
	the subtasks are dynamically changing and the environment is adversarial.
	One of the most challenging reinforcement learning tasks so far has
	been the 3 to 2 keepaway task in the {RoboCup} simulation league.
	In this paper we apply reinforcement learning to an even more challenging
	task: attacking the opponent's goal. The main contribution of this
	paper is the empirical analysis of a portfolio of mechanisms for
	scaling reinforcement learning towards learning attack policies in
	simulated robot soccer.},
}

@inproceedings{gonsior_improving_2011,
 author = {B Gonsior and S Sosnowski and C Mayer and J Blume and B Radig and D Wollherr and K Kühnlenz},
 title = {Improving Aspects of Empathy and Subjective Performance for {HRI}
	through Mirroring Facial Expressions},
 booktitle = {Proceedings of the 19th {IEEE} International Symposium on Robot and
	Human Interactive Communication},
 year = {2011},
 keywords = {facial expressions},
}

@inproceedings{goron_segmenting_2012,
 author = {LC Goron and ZC Marton and G Lazea and M Beetz},
 title = {Segmenting Cylindrical and Box-like Objects in Cluttered {3D} Scenes},
 booktitle = {7th German Conference on Robotics ({ROBOTIK)}},
 year = {2012},
 address = {Munich, Germany},
 month = {may},
}

@inproceedings{goron_automatic_2010,
 author = {LC Goron and ZC Marton and G Lazea and M Beetz},
 title = {Automatic Layered {3D} Reconstruction of Simplified Object Models
	for Grasping},
 booktitle = {Joint 41st International Symposium on Robotics ({ISR)} and 6th German
	Conference on Robotics ({ROBOTIK)}},
 year = {2010},
 address = {Munich, Germany},
}

@inproceedings{gossow_distinctive_2012,
 author = {D Gossow and D Weikersdorfer and M Beetz},
 title = {Distinctive Texture Features from Perspective-Invariant Keypoints},
 booktitle = {21st International Conference on Pattern Recognition},
 year = {2012},
}

@book{grotzinger_learning_2011,
 title = {Learning Probabilistic Models of Robot Behaviour from Logged Execution
	Traces},
 year = {2011},
 author = {S Grötzinger},
}

@phdthesis{hanek_fitting_2004,
 author = {R Hanek},
 title = {Fitting Parametric Curve Models to Images Using Local Self-adapting
	Separation Criteria},
 school = {Department of Informatics, Technische Universität München},
 year = {2004},
 url = {http://tumb1.biblio.tu-muenchen.de/publ/diss/in/2004/hanek.html},
}

@inproceedings{hanek_contracting_2001,
 author = {R Hanek},
 title = {The Contracting Curve Density Algorithm and its Application to Model-based
	Image Segmentation},
 booktitle = {{IEEE} Conf. Computer Vision and Pattern Recognition},
 year = {2001},
 address = {Kauai, Hawaii, {USA}},
 pages = {I:797--804},
 url = {http://www9.in.tum.de/papers/2001/CVPR-2001-Hanek.abstract.html},
}

@article{hanek_contracting_2004,
 author = {R Hanek and M Beetz},
 title = {The Contracting Curve Density Algorithm: Fitting Parametric Curve
	Models to Images Using Local Self-adapting Separation Criteria},
 journal = {International Journal of Computer Vision},
 year = {2004},
 volume = {59},
 pages = {233--258},
 number = {3},
 abstract = {The task of fitting parametric curve models to the boundaries of perceptually
	meaningful image regions is a key problem in computer vision with
	numerous applications, such as image segmentation, pose estimation,
	object tracking, and 3-D reconstruction. In this article, we propose
	the Contracting Curve Density ({CCD)} algorithm as a solution to
	the curve-fitting problem. The {CCD} algorithm extends the state-of-the-art
	in two important ways. First, it applies a novel likelihood function
	for the assessment of a fit between the curve model and the image
	data. This likelihood function can cope with highly inhomogeneous
	image regions, because it is formulated in terms of local image statistics.
	The local image statistics are learned on the fly from the vicinity
	of the expected curve. They provide therefore locally adapted criteria
	for separating the adjacent image regions. These local criteria replace
	often used predefined fixed criteria that rely on homogeneous image
	regions or specific edge properties. The second contribution is the
	use of blurred curve models as efficient means for iteratively optimizing
	the posterior density over possible model parameters. These blurred
	curve models enable the algorithm to trade off two conflicting objectives,
	namely having a large area of convergence and achieving high accuracy.
	We apply the {CCD} algorithm to several challenging image segmentation
	and 3-D pose estimation problems. Our experiments with {RGB} images
	show that the {CCD} algorithm achieves a high level of robustness
	and subpixel accuracy even in the presence of severe texture, shading,
	clutter, partial occlusion, and strong changes of illumination.},
}

@inproceedings{hanek_vision-based_2000,
 author = {R Hanek and T Schmitt},
 title = {Vision-Based Localization and Data Fusion in a System of Cooperating
	Mobile Robots},
 booktitle = {Proc. of the {IEEE} Intl. Conf. on Intelligent Robots and Systems},
 year = {2000},
 pages = {1199–1204},
 publisher = {{IEEE/RSJ}},
 abstract = {The approach presented in this paper allows a team of mobile robots
	to estimate cooperatively their poses, i.e. positions and orientations,
	and the poses of other observed objects from images. The images are
	obtained by calibrated color cameras mounted on the robots. Model
	knowledge of the robots' environment, the geometry of observed objects,
	and the characteristics of the cameras are represented in curve functions
	which describe the relation between model curves in the image and
	the sought pose parameters. The pose parameters are estimated by
	minimizing the distance between model curves and actual image curves.
	Observations from possibly different view points obtained at different
	times are fused by a method similar to the extended Kalman filter.
	In contrast to the extended Kalman filter, which is based on a linear
	approximation of the measurement equations, we use an iterative optimization
	technique which takes non-linearities into account. The approach
	has been successfully used in robot soccer, where it reliably maintained
	a joint pose estimate for the players and the ball.},
}

@article{hanek_towards_2003,
 author = {R Hanek and T Schmitt and S Buck and M Beetz},
 title = {Towards {RoboCup} without color labeling},
 journal = {{AI} Magazine},
 year = {2003},
 volume = {24},
 pages = {37–40},
 number = {2},
}

@inproceedings{hanek_fast_2002,
 author = {R Hanek and T Schmitt and S Buck and M Beetz},
 title = {Fast Image-based Object Localization in Natural Scenes},
 booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems
	({IROS)} 2002},
 year = {2002},
 address = {Lausanne, Switzerland},
 pages = {116–122},
 abstract = {In many robot applications, autonomous robots must be capable of localizing
	the objects they are to manipulate. In this paper we address the
	object localization problem by fitting a parametric curve model to
	the object contour in the image. The initial prior of the object
	pose is iteratively refined to the posterior distribution by optimizing
	the separation of the object and the background. The local separation
	criteria are based on local statistics which are iteratively computed
	from the object and the background region. No prior knowledge on
	color distributions is needed. Experiments show that the method is
	capable of localizing objects in a cluttered and textured scene even
	under strong variations of illumination. The method is able to localize
	a soccer ball at frame rate.},
}

@inproceedings{hanek_towards_2002,
 author = {R Hanek and T Schmitt and S Buck and M Beetz},
 title = {Towards {RoboCup} without Color Labeling},
 booktitle = {{RoboCup} International Symposium 2002},
 year = {2002},
 series = {Lecture Notes in Artificial Intelligence ({LNAI)}},
 address = {Fukuoka, Japan},
 publisher = {Springer Publishers},
}

@inproceedings{hanek_multiple_2000,
 author = {R Hanek and T Schmitt and M Klupsch and S Buck},
 title = {From Multiple Images to a Consistent View},
 booktitle = {The Fourth Robot World Cup Soccer Games and Conferences, {RoboCup-2000}
	Melbourne},
 year = {2000},
 pages = {288–296},
 publisher = {Springer},
 abstract = {The approach presented in this paper allows a team of mobile robots
	to estimate cooperatively their poses, i.e. positions and orientations,
	and the poses of other observed objects from images. The images are
	obtained by calibrated color cameras mounted on the robots. Model
	knowledge of the robots' environment, the geometry of observed objects,
	and the characteristics of the cameras are represented in curve functions
	which describe the relation between model curves in the image and
	the sought pose parameters. The pose parameters are estimated by
	minimizing the distance between model curves and actual image curves.
	Observations from possibly different view points obtained at different
	times are fused by a method similar to the extended Kalman filter.
	In contrast to the extended Kalman filter, which is based on a linear
	approximation of the measurement equations, we use an iterative optimization
	technique which takes non-linearities into account. The approach
	has been successfully used in robot soccer, where it reliably maintained
	a joint pose estimate for the players and the ball.},
}

@phdthesis{hansen_modellgetriebene_2002,
 author = {C Hansen},
 title = {Modellgetriebene Verfolgung formvariabler Objekte in Videobildfolgen},
 school = {Department of Informatics, Technische Universität München},
 year = {2002},
}

@inproceedings{hausman_tracking-based_2013,
 author = {K Hausman and F Balint-Benczedi and D Pangercic and ZC Marton and R Ueda and K Okada and M Beetz},
 title = {Tracking-based Interactive Segmentation of Textureless Objects},
 booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
 year = {2013},
 address = {Karlsruhe, Germany},
 month = {may},
}

@inproceedings{hausman_segmentation_2012,
 author = {K Hausman and C Bersch and D Pangercic and S Osentoski and ZC Marton and M Beetz},
 title = {Segmentation of Cluttered Scenes through Interactive Perception},
 booktitle = {{ICRA} 2012 Workshop on Semantic Perception and Mapping for Knowledge-enabled
	Service Robotics},
 year = {2012},
 address = {St. Paul, {MN}, {USA}},
 month = {may},
}

@techreport{heinz_using_2008,
 author = {S Heinz and M Sachenbacher},
 title = {Using Model Counting to Find Optimal Distinguishing Tests},
 institution = {Zuse Institute Berlin},
 year = {2008},
 number = {08-32},
}

@inproceedings{heinz_using_2008-1,
 author = {S Heinz and M Sachenbacher},
 title = {Using Model Counting to Find Optimal Distinguishing Tests},
 booktitle = {Proc. First International Workshop on Counting Problems in {CSP}
	and {SAT}, and other neighbouring problems (Counting'08)},
 year = {2008},
}

@article{herrmmic_tracking_2014,
 author = {M Herrmann and M Hoernig and B Radig},
 title = {Online Multi-player Tracking in Monocular Soccer Videos},
 journal = {{AASRI} Procedia},
 year = {2014},
 volume = {8},
 pages = {30--37},
 issn = {2212-6716},
 keywords = {computer vision; soccer},
 url = {http://www.sciencedirect.com/science/article/pii/S2212671614000730},
}

@article{herrmann_automatic_2014,
 author = {M Herrmann and C Mayer and B Radig},
 title = {Automatic Generation of Image Analysis Programs},
 journal = {Pattern Recognition and Image Analysis},
 year = {2014},
 volume = {24},
 pages = {400-408},
 number = {3},
 doi = {10.1134/S1054661814030079},
 issn = {1054-6618},
 keywords = {automatic programming; inductive programming; generate-and-search;
	machine learning; computer vision; image analysis; object detection},
 language = {English},
 publisher = {Pleiades Publishing},
 url = {http://dx.doi.org/10.1134/S1054661814030079},
}

@inproceedings{herrmann_automatic_2013,
 author = {M Herrmann and C Mayer and B Radig},
 title = {Automatic Generation of Image Analysis Programs},
 booktitle = {11th International Conference on Pattern Recognition and Image Analysis
	({PRIA-11-2013)}},
 year = {2013},
 volume = {1},
 pages = {36--39},
 address = {Samara},
 month = {sep},
 publisher = {The Russian Academy of Sciences},
 keywords = {automatic programming; inductive programming; generate-and-search;
	machine learning; computer vision; image analysis; object detection},
}

@article{OJWT-v1i2n01_Hoernig,
 author = {M Hoernig and A Bigontina and B Radig},
 title = {A Comparative Evaluation of Current HTML5 Web Video Implementations},
 journal = {Open Journal of Web Technologies (OJWT)},
 year = {2014},
 volume = {1},
 pages = {1--9},
 number = {2},
 bibsource = {RonPub UG (haftungsbeschr{\"a}nkt)},
 issn = {2199-188X},
 publisher = {RonPub UG (haftungsbeschr{\"a}nkt)},
 url = {http://www.ronpub.com/publications/OJWT-v1i2n01_Hoernig.pdf},
}

@article{hoernig_real-time_2014,
 author = {M Hoernig and M Herrmann and B Radig},
 title = {Real-Time Segmentation Methods for Monocular Soccer Videos},
 journal = {Pattern Recognition and Image Analysis (to appear)},
 year = {2015},
 keywords = {soccer},
}

@inproceedings{hoernig_shot_detection_2014,
 author = {M Hoernig and M Herrmann and B Radig},
 title = {Multi Temporal Distance Images for Shot Detection in Soccer Games},
 booktitle = {22nd European Signal Processing Conference ({EUSIPCO} 2014)},
 year = {2014},
 address = {Lisbon, Portugal},
 month = {sep},
 abstract = {We present a new approach for video shot detection and introduce multi
	temporal distance images (MTDIs), formed by chi-square based similarity
	measures that are calculated pairwise within a floating window of
	video frames. By using MTDI-based boundary detectors, various cuts
	and transitions in various shapes (dissolves, overlayed effects,
	fades, and others) can be determined. The algorithm has been developed
	within the special context of soccer game TV broadcasts, where a
	particular interest in long view shots is intrinsic. With a correct
	shot detection rate in camera 1 shots of 98.2\% within our representative
	test data set, our system outperforms competing state-of-the-art
	systems.},
 keywords = {soccer video analysis; video indexing; multi temporal distance image
	(MTDI); video segmentation; video shot boundary detection; soccer},
}

@inproceedings{hoernig_real_2013,
 author = {M Hoernig and M Herrmann and B Radig},
 title = {Real Time Soccer Field Analysis from Monocular {TV} Video Data},
 booktitle = {11th International Conference on Pattern Recognition and Image Analysis
	({PRIA-11-2013)}},
 year = {2013},
 volume = {2},
 pages = {567--570},
 address = {Samara},
 month = {sep},
 publisher = {The Russian Academy of Sciences},
 keywords = {soccer},
}

@phdthesis{von_hoyningen-huene_real-time_2011,
 author = {N von Hoyningen-Huene},
 title = {Real-time Tracking of Player Identities in Team Sports},
 school = {Technische Universität München},
 year = {2011},
 keywords = {soccer},
}

@incollection{hoyningen-huene_importance_2010,
 author = {N von Hoyningen-Huene and M Beetz},
 title = {Importance Sampling as One Solution to the Data Association Problem
	in Multi-target Tracking},
 booktitle = {{VISIGRAPP} 2009},
 publisher = {Springer-Verlag Berlin Heidelberg},
 year = {2010},
 editor = {Ranchordas, {AlpeshKumar} and Araujo, Helder},
 number = {68},
 series = {Communications in Computer and Information Science ({CCIS)}},
 pages = {309–325},
 abstract = {Tracking multiple targets with similar appearance is a common task
	in many computer vision applications as surveillance or sports analysis.
	We propose a Rao-Blackwellized Resampling Particle Filter ({RBRPF)}
	as a real-time multi-target tracking method that solves the data
	association problem by a Monte Carlo approach. Each particle containing
	the whole target configuration is predicted by using a process model
	and resampled by sampling associations and fusing of the predicted
	state with the assigned measurement(s) instead of the common dispersion.
	As each target state is modeled as a Gaussian, Rao-Blackwellization
	can be used to solve some of these steps analytically. The sampling
	of associations splits the multi-target tracking problem in multiple
	single target tracking problems, which can be handled by Kalman filters
	in an optimal way. The method is independent of the order of measurements
	which is mostly predetermined by the measuring process in contrast
	to other state-of-the-art approaches. Smart resampling and memoization
	are introduced to equip the tracking method with real-time capabilities
	in the first place, exploiting the discreteness of the associations.
	The probabilistic framework allows for consideration of appearance
	models and the fusion of different sensors. A way to constrain the
	multiplicity of measurements associated with a single target is proposed
	and – along with the ability to cope with a high number of targets
	in clutter – evaluated in a simulation experiment. We demonstrate
	the applicability of the proposed method to real world applications
	by tracking soccer players captured by multiple cameras through occlusions
	in real-time.},
}

@inproceedings{hoyningen-huene_rao-blackwellized_2009,
 author = {N von Hoyningen-Huene and M Beetz},
 title = {Rao-Blackwellized Resampling Particle Filter for Real-Time Player
	Tracking in Sports},
 booktitle = {Fourth International Conference on Computer Vision Theory and Applications
	({VISAPP)}},
 year = {2009},
 editor = {Ranchordas, {AlpeshKumar} and Araujo, Helder},
 volume = {1},
 pages = {464--470},
 address = {Lisboa, Portugal},
 month = {feb},
 publisher = {{INSTICC} press},
 abstract = {Tracking multiple targets with similar appearance is a common task
	in computer vision applications, especially in sports games. We propose
	a Rao-Blackwellized Resampling Particle Filter ({RBRPF)} as an implementable
	real-time continuation of a state-of-the-art multi-target tracking
	method. Target configurations are tracked by sampling associations
	and solving single-target tracking problems by Kalman filters. As
	an advantage of the new method the independence assumption between
	data associations is relaxed to increase the robustness in the sports
	domain. Smart resampling and memoization are introduced to equip the
	tracking method with real-time capabilities in the first place. The
	probabilistic framework allows for consideration of appearance models
	and the fusion of different sensors. We demonstrate its applicability
	to real world applications by tracking soccer players captured by
	multiple cameras through occlusions in real-time.},
 keywords = {soccer},
}

@inproceedings{hoyningen-huene_robust_2009,
 author = {N von Hoyningen-Huene and M Beetz},
 title = {Robust real-time multiple target tracking},
 booktitle = {Ninth Asian Conference on Computer Vision ({ACCV)}},
 year = {2009},
 address = {Xi'an, China},
 month = {sep},
 abstract = {We propose a novel efficient algorithm for robust tracking of a fixed
	number of targets in real-time with low failure rate. The method
	is an instance of Sequential Importance Resampling filters approximating
	the posterior of complete target configurations as a mixture of Gaussians.
	Using predicted target positions by Kalman filters, data associations
	are sampled for each measurement sweep according to their likelihood,
	allowing the number of associations per target to be constrained. Updated
	target configurations are weighted for resampling pursuant to their
	explanatory power for former positions and measurements. Fixed-lag
	of the resulting positions increases the tracking quality while smart
	resampling and memoization decrease the computational demand. A negative
	information handling exploits missing measurements for a target outside
	the monitored area. We present both qualitative and quantitative
	experimental results on two demanding real-world applications with
	occluded and highly confusable targets, demonstrating the robustness
	and real-time performance of our approach outperforming current state-of-the-art
	{MCMC} methods.},
}

@inproceedings{hoyningen-huene_gram:_2007,
 author = {N von Hoyningen-Huene and B Kirchlechner and M Beetz},
 title = {{GrAM:} Reasoning with Grounded Action Models by Combining Knowledge
	Representation and Data Mining},
 booktitle = {Towards Affordance-based Robot Control},
 year = {2007},
 abstract = {This paper proposes {GrAM} (Grounded Action Models), a novel integration
	of actions and action models into the knowledge representation and
	inference mechanisms of agents. In {GrAM} action models accord to
	agent behavior and can be specified explicitly and implicitly. The
	explicit representation is an action class specific set of Markov
	logic rules that predict action properties. Stated implicitly an
	action model defines a data mining problem that, when executed, computes
	the model's explicit representation. When inferred from an implicit
	representation the prediction rules predict typical behavior and
	are learned from a set of training examples, or, in other words,
	grounded in the respective experience of the agents. Therefore, {GrAM}
	allows for the functional and thus adaptive specification of concepts
	such as the class of situations in which a special action is typically
	executed successfully or the concept of agents that tend to execute
	certain kinds of actions. {GrAM} represents actions and their models
	using an upgrading of the representation language {OWL} and equips
	the Java Theorem Prover ({JTP)}, a hybrid reasoner for {OWL}, with
	additional mechanisms that allow for the automatic acquisition of
	action models and solving a variety of inference tasks for actions,
	action models and functional descriptions.},
}

@inproceedings{hughes_action_2013,
 author = {CML Hughes and M Tenorth and M Bienkiewicz and J Hermsdörfer},
 title = {Action sequencing and error production in stroke patients with apraxia
	– Behavioral modeling using Bayesian Logic Networks},
 booktitle = {6th International Conference on Health Informatics ({HEALTHINF} 2013)},
 year = {2013},
 address = {Barcelona, Spain},
 month = {feb},
}

@inproceedings{hammerle_sensor-based_2005,
 author = {S Hämmerle and M Wimmer and B Radig and M Beetz},
 title = {Sensor-based Situated, Individualized, and Personalized Interaction
	in Smart Environments},
 booktitle = {{INFORMATIK} 2005 - Informatik {LIVE!} Band 1, Beiträge der 35. Jahrestagung
	der Gesellschaft für Informatik ({GI)}},
 year = {2005},
 editor = {Cremers, Armin B. and Manthey, Rainer and Martini, Peter and Steinhage,
	Volker},
 volume = {67},
 series = {{LNI}},
 pages = {261--265},
 address = {Bonn, Germany},
 month = {sep},
 publisher = {{GI}},
 abstract = {Smart environments are sensor-equipped areas that know about their
	environment and are thus able to adapt to the user. We present {sHOME},
	a multiagent based platform for integrating situated, individualized,
	and personalized information. {sHOME} acquires sensor data to determine
	the user's identity, his location, his gesture, and natural language
	commands and stores it in a central knowledge base.},
 isbn = {3-88579-396-2},
}

@book{intelligent_autonomous_systems_group_tum-ros_2012,
 title = {{TUM-ROS} code repository},
 year = {2012},
 author = {TUM Intelligent Autonomous Systems Group},
 url = {http://www.ros.org/wiki/tum-ros-pkg},
}

@inproceedings{isik_coordination_2006,
 author = {M Isik and F Stulp and G Mayer and H Utz},
 title = {Coordination without Negotiation in Teams of Heterogeneous Robots},
 booktitle = {Proceedings of the {RoboCup} Symposium},
 year = {2006},
 pages = {355–362},
 address = {Bremen, Germany},
}

@phdthesis{jain_probabilistic_2012,
 author = {D Jain},
 title = {Probabilistic Cognition for Technical Systems: Statistical Relational
	Models for High-Level Knowledge Representation, Learning and Reasoning},
 school = {Technische Universität München},
 year = {2012},
 url = {http://mediatum.ub.tum.de/node?id=1096684&change_language=en},
}

@inproceedings{jain_knowledge_2011,
 author = {D Jain},
 title = {Knowledge Engineering with Markov Logic Networks: A Review},
 booktitle = {{DKB} 2011: Proceedings of the Third Workshop on Dynamics of Knowledge
	and Belief},
 year = {2011},
}

@inproceedings{jain_adaptive_2010,
 author = {D Jain and A Barthels and M Beetz},
 title = {Adaptive Markov Logic Networks: Learning Statistical Relational Models
	with Dynamic Parameters},
 booktitle = {19th European Conference on Artificial Intelligence ({ECAI)}},
 year = {2010},
 pages = {937--942},
}

@inproceedings{jain_soft_2010,
 author = {D Jain and M Beetz},
 title = {Soft Evidential Update via Markov Chain Monte Carlo Inference},
 booktitle = {{KI} 2010: Advances in Artificial Intelligence, 33rd Annual German
	Conference on {AI}},
 year = {2010},
 volume = {6359},
 series = {Lecture Notes in Computer Science},
 pages = {280--290},
 address = {Karlsruhe, Germany},
 publisher = {Springer},
 isbn = {978-3-642-16110-0},
}

@inproceedings{jain_bayesian_2011,
 author = {D Jain and Kvon Gleissenthall and M Beetz},
 title = {Bayesian Logic Networks and the Search for Samples with Backward
	Simulation and Abstract Constraint Learning},
 booktitle = {{KI} 2011: Advances in Artificial Intelligence, 34th Annual German
	Conference on {AI}},
 year = {2011},
 volume = {7006},
 series = {Lecture Notes in Computer Science},
 pages = {144--156},
 address = {Berlin, Germany},
 month = {oct},
 publisher = {Springer},
 isbn = {978-3-642-24454-4},
}

@inproceedings{jain_extending_2007,
 author = {D Jain and B Kirchlechner and M Beetz},
 title = {Extending Markov Logic to Model Probability Distributions in Relational
	Domains},
 booktitle = {{KI} 2007: Advances in Artificial Intelligence, 30th Annual German
	Conference on {AI}},
 year = {2007},
 volume = {4667},
 series = {Lecture Notes in Computer Science},
 pages = {129–143},
 publisher = {Springer},
 isbn = {978-3-540-74564-8},
}

@inproceedings{jain_markov_2009,
 author = {D Jain and P Maier and G Wylezich},
 title = {Markov Logic as a Modelling Language for Weighted Constraint Satisfaction
	Problems},
 booktitle = {Eighth International Workshop on Constraint Modelling and Reformulation,
	in conjunction with {CP2009}},
 year = {2009},
 abstract = {Many real-world problems, for example resource allocation, can be
	formalized as soft constraint optimization problems. A fundamental
	issue is the compact and precise declaration of such problems. We
	propose Markov logic networks ({MLNs)}, a representation formalism
	well-known from statistical relational learning, as a simple yet
	highly expressive modelling framework, for {MLNs} enable the representation
	of general principles that abstract away from concrete entities in
	order to achieve a separation between the model and the data to which
	it is applied. {MLNs} provide the full power of first-order logic
	and combine it with probabilistic semantics, thus allowing a flexible
	representation of soft constraints. We introduce an automatic conversion
	of maximum a posteriori ({MAP)} inference problems in {MLNs} to weighted
	constraint satisfaction problems to leverage a large body of available
	solving methods, and we make our software suite available to the
	public. We demonstrate the soundness of our approach on a real-world
	room allocation problem, providing experimental results.},
}

@inproceedings{jain_equipping_2009,
 author = {D Jain and L Mösenlechner and M Beetz},
 title = {Equipping Robot Control Programs with First-Order Probabilistic Reasoning
	Capabilities},
 booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
 year = {2009},
 pages = {3626--3631},
}

@inproceedings{jain_equipping_2008,
 author = {D Jain and L Mösenlechner and M Beetz},
 title = {Equipping Robot Control Programs with First-Order Probabilistic Reasoning
	Capabilities},
 booktitle = {Proceedings of the 1st International Workshop on Cognition for Technical
	Systems},
 year = {2008},
 address = {München, Germany},
 month = {oct},
}

@techreport{jain_bayesian_2009,
 author = {D Jain and S Waldherr and M Beetz},
 title = {Bayesian Logic Networks},
 institution = {{IAS} Group, Fakultät für Informatik, Technische Universität München},
 year = {2009},
}

@inproceedings{kammerl_real-time_2012,
 author = {J Kammerl and N Blodow and RB Rusu and S Gedikli and M Beetz and E Steinbach},
 title = {Real-time Compression of Point Cloud Streams},
 booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
 year = {2012},
 address = {Minnesota, {USA}},
 month = {may},
}

@inproceedings{kanezaki_voxelized_2011,
 author = {A Kanezaki and ZC Marton and D Pangercic and T Harada and Y Kuniyoshi and M Beetz},
 title = {Voxelized Shape and Color Histograms for {RGB-D}},
 booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems
	({IROS)}, Workshop on Active Semantic Perception and Object Search
	in the Real World},
 year = {2011},
 address = {San Francisco, {CA}, {USA}},
 month = {sep},
}

@inproceedings{kidson_elaborative_2012,
 author = {R Kidson and D Stanimirovic and D Pangercic and M Beetz},
 title = {Elaborative Evaluation of {RGB-D} based Point Cloud Registration
	for Personal Robots},
 booktitle = {{ICRA} 2012 Workshop on Semantic Perception and Mapping for Knowledge-enabled
	Service Robotics},
 year = {2012},
 address = {St. Paul, {MN}, {USA}},
 month = {may},
}

@techreport{kirsch_be_2010,
 author = {A Kirsch},
 title = {Be a Robot — A Study on Everyday Activities Performed in Real and
	Virtual Worlds},
 institution = {Technische Universität München},
 year = {2010},
 number = {{TUM-I1006}},
 abstract = {This report presents a user study, in which we compare the behaviour
	for setting and clearing the table in reality and in a simulated,
	computer-game-like environment. The aim was to examine the potential
	of using a computer-game-like simulation for user studies on cognition,
	in particular for robot-centred studies on human-robot interaction,
	but also other areas such as studies about context-specific and context-independent
	behaviour. A simulation allows the creation of a large number of
	environments at low cost and enables comparisons of behaviour in
	reality and simulation. In the present pilot study we have considered
	three points of interest: 1) the differences in user skills with
	the used simulation, 2) comparison of human behaviour in simulation
	and reality performing everyday activities, and 3) comparison of
	behaviour in different simulated environments.},
}

@article{kirsch_robot_2009,
 author = {A Kirsch},
 title = {Robot Learning Language – Integrating Programming and Learning for
	Cognitive Systems},
 journal = {Robotics and Autonomous Systems Journal},
 year = {2009},
 volume = {57},
 pages = {943–954},
 number = {9},
 url = {http://dx.doi.org/10.1016/j.robot.2009.05.001},
}

@phdthesis{kirsch_integration_2008,
 author = {A Kirsch},
 title = {Integration of Programming and Learning in a Control Language for
	Autonomous Robots Performing Everyday Activities},
 school = {Technische Universität München},
 year = {2008},
 abstract = {Robots performing complex tasks in changing, everyday environments
	and required to improve with experience must continually monitor
	the way they execute their routines and revise them if necessary.
	Existing approaches, which use either monolithic or isolated, nonrecurring
	learning processes, cannot sufficiently focus their learning processes
	to satisfy these requirements. To meet this challenge we propose
	to make learning an integral part of the control program by providing
	a control language that includes constructs for specifying and executing
	learning problems. Our Robot Learning Language ({RoLL)} makes learning
	tasks executable within the control program. It allows for the specification
	of complete learning processes including the acquisition of experience,
	the execution of learning algorithms and the integration of learning
	results into the program. {RoLL} is built upon the concept of experience,
	which is a learning task specific symbolic summary of a problem solving
	episode. This means that experiences do not only record the observed
	data, but also include the robot's intentions and the perceived execution
	context. The experience acquisition in {RoLL} is designed in a way
	that experiences can be defined outside the primary control program,
	using hybrid automata as a tool for declaratively specifying experience
	and anchoring it to the program. The rich experience concept enables
	convenient abstraction and an economic use of experiences. {RoLL's}
	design allows the inclusion of arbitrary experience-based learning
	algorithms. Upon the completion of the learning process {RoLL} automatically
	integrates the learned function into the control program without
	interrupting program execution. {RoLL} enables the plug-and-play
	addition of new learning problems and keeps the control program modular
	and transparent. {RoLL's} control structures make learning an integral
	part of the control program and can serve as a powerful implementational
	platform for comprehensive learning approaches such as developmental,
	life-long and imitation learning.},
 url = {http://mediatum2.ub.tum.de/node?id=625553},
}

@inproceedings{kirsch_towards_2005,
 author = {A Kirsch},
 title = {Towards High-performance Robot Plans with Grounded Action Models:
	Integrating Learning Mechanisms into Robot Control Languages},
 booktitle = {{ICAPS} Doctoral Consortium},
 year = {2005},
 abstract = {For planning in the domain of autonomous robots, abstraction of state
	and actions is indispensable. This abstraction however comes at the
	cost of suboptimal execution, as relevant information is ignored.
	A solution is to maintain abstractions for planning, but to fill
	in precise information on the level of execution. To do so, the control
	program needs models of its own behavior, which could be learned
	by the robot automatically. In my dissertation I develop a robot
	control and plan language, which provides mechanisms for the representation
	of state variables, goals and actions, and integrates learning into
	the language.},
}

@inproceedings{kirsch_training_2007,
 author = {A Kirsch and M Beetz},
 title = {Training on the Job — Collecting Experience with Hierarchical Hybrid
	Automata},
 booktitle = {Proceedings of the 30th German Conference on Artificial Intelligence
	({KI-2007)}},
 year = {2007},
 editor = {Hertzberg, J. and Beetz, M. and Englert, R.},
 pages = {473–476},
 abstract = {We propose a novel approach to experience collection for autonomous
	service robots performing complex activities. This approach enables
	robots to collect data for many learning problems at a time, abstract
	it and transform it into information specific to the learning tasks,
	thereby speeding up the learning process. The approach is based
	on the concept of hierarchical hybrid automata, which are used as
	transparent and expressive representational mechanisms that allow
	for the specification of these experience related capabilities independent
	of the program itself. The suitability of the approach is demonstrated
	through experiments in which a robot doing household chores performs
	experience-based learning.},
}

@inproceedings{kirsch_combining_2005,
 author = {A Kirsch and M Beetz},
 title = {Combining Learning and Programming for High-Performance Robot Controllers},
 booktitle = {Tagungsband Autonome Mobile Systeme 2005},
 year = {2005},
 series = {Reihe Informatik aktuell},
 publisher = {Springer Verlag},
 abstract = {The implementation of high-performance robot controllers for complex
	control tasks such as playing autonomous robot soccer is tedious,
	error-prone, and a never-ending programming task. In this paper we
	propose that programmers write autonomous controllers that optimize
	and automatically adapt themselves to changing circumstances of task
	execution using explicit perception, dynamics and action models.
	To this end we develop {ROLL} (Robot Learning Language), a control
	language allowing for model-based robot programming. {ROLL} provides
	language constructs for specifying executable code pieces of how
	to learn and update these models. We are currently using {ROLL's}
	mechanisms for implementing a rational reconstruction of our soccer
	robot controllers.},
}

@inproceedings{kirsch_testbed_2010,
 author = {A Kirsch and Y Chen},
 title = {A Testbed for Adaptive Human-Robot Collaboration},
 booktitle = {33rd Annual German Conference on Artificial Intelligence ({KI} 2010)},
 year = {2010},
 abstract = {This paper presents a novel method for developing and evaluating intelligent
	robot behavior for joint human-robot activities. We extended a physical
	simulation of an autonomous robot to interact with a second, human-controlled
	agent as in a computer game. We have conducted a user study to demonstrate
	the viability of the approach for adaptive human-aware planning for
	collaborative everyday activities. The paper presents the details
	of our simulation and its control for human subjects as well as results
	of the user study.},
}

@inproceedings{kirsch_learning_2010,
 author = {A Kirsch and F Cheng},
 title = {Learning Ability Models for Human-Robot Collaboration},
 booktitle = {Robotics: Science and Systems ({RSS)} — Workshop on Learning for
	Human-Robot Interaction Modeling},
 year = {2010},
 abstract = {Our vision is a pro-active robot that assists elderly or disabled
	people in everyday activities. Such a robot needs knowledge in the
	form of prediction models about a person's abilities, preferences
	and expectations in order to decide on the best way to assist. We
	are interested in learning such models from observation. We report
	on a first approach to learn ability models for manipulation tasks
	and identify some general challenges for the acquisition of human
	models.},
}

@inproceedings{kirsch_integrated_2009,
 author = {A Kirsch and T Kruse and L Mösenlechner},
 title = {An Integrated Planning and Learning Framework for Human-Robot Interaction},
 booktitle = {4th Workshop on Planning and Plan Execution for Real-World Systems
	(held in conjunction with {ICAPS} 09)},
 year = {2009},
}

@article{kirsch_plan-based_2010,
 author = {A Kirsch and T Kruse and E. A Sisbot and R Alami and M Lawitzky and D Brščić and S Hirche and P Basili and S Glasauer},
 title = {Plan-based Control of Joint Human-Robot Activities},
 journal = {Künstliche Intelligenz},
 year = {2010},
 volume = {24},
 pages = {223–231},
 number = {3},
 abstract = {Cognition in technical systems is especially relevant for the interaction
	with humans. We present a newly emerging application for autonomous
	robots: companion robots that are not merely machines performing
	tasks for humans, but assistants that achieve joint goals with humans.
	This collaborative aspect entails specific challenges for {AI} and
	robotics. In this article, we describe several planning and action-related
	problems for human-robot collaboration and point out the challenges
	to implement cognitive robot assistants.},
}

@inproceedings{kirsch_making_2005,
 author = {A Kirsch and M Schweitzer and M Beetz},
 title = {Making Robot Learning Controllable: A Case Study in Robot Navigation},
 booktitle = {Proceedings of the {ICAPS} Workshop on Plan Execution: A Reality
	Check},
 year = {2005},
 abstract = {In many applications the performance of learned robot controllers
	lags behind that of their hand-coded counterparts. In our view,
	this situation is caused not mainly by deficiencies of the learning
	algorithms but rather by an insufficient embedding of learning in
	robot control programs. This paper presents a case study in which
	{RoLL}, a robot control language that allows for explicit representations
	of learning problems, is applied to learning robot navigation tasks.
	The case study shows that {RoLL's} constructs for specifying learning
	problems (1) make aspects of autonomous robot learning explicit and
	controllable; (2) have an enormous impact on the performance of the
	learned controllers and therefore encourage the engineering of high
	performance learners; (3) make the learning processes repeatable
	and allow for writing bootstrapping robot controllers. Taken together
	the approach constitutes an important step towards engineering controllers
	of autonomous learning robots.},
}

@phdthesis{klank_everyday_2012,
 author = {U Klank},
 title = {Everyday Perception for Mobile Manipulation in Human Environments},
 school = {Technische Universität München},
 year = {2012},
 url = {http://nbn-resolving.de/urn:nbn:de:bvb:91-diss-20120412-1080039-1-7},
}

@inproceedings{klank_transparent_2011,
 author = {U Klank and D Carton and M Beetz},
 title = {Transparent Object Detection and Reconstruction on a Mobile Platform},
 booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
 year = {2011},
 address = {Shanghai, China},
 month = {may},
}

@inproceedings{klank_robots_2012,
 author = {U Klank and L Mösenlechner and A Maldonado and M Beetz},
 title = {Robots that Validate Learned Perceptual Models},
 booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
 year = {2012},
 address = {St. Paul, {MN}, {USA}},
 month = {may},
}

@article{klank_automatic_2008,
 author = {U Klank and N. Padoy and H. Feussner and N. Navab},
 title = {Automatic feature generation in endoscopic images},
 journal = {International Journal of Computer Assisted Radiology and Surgery},
 year = {2008},
 volume = {3},
 pages = {331–339},
 number = {3},
}

@inproceedings{klank_real-time_2009,
 author = {U Klank and D Pangercic and RB Rusu and M Beetz},
 title = {Real-time {CAD} Model Matching for Mobile Manipulation and Grasping},
 booktitle = {9th {IEEE-RAS} International Conference on Humanoid Robots},
 year = {2009},
 pages = {290–296},
 address = {Paris, France},
 month = {dec},
}

@inproceedings{klank_3d_2009,
 author = {U Klank and MZ Zia and M Beetz},
 title = {{3D} Model Selection from an Internet Database for Robotic Vision},
 booktitle = {International Conference on Robotics and Automation ({ICRA)}},
 year = {2009},
 pages = {2406–2411},
 abstract = {We propose a new method for automatically accessing an internet database
	of {3D} models that are searchable only by their user-annotated labels,
	in order to use them for vision and robotic manipulation. Instead
	of having only a local database containing already seen objects,
	we want to use shared databases available over the internet. This
	approach, while having the potential to dramatically increase the
	visual recognition capability of robots, also poses certain problems,
	like wrong annotation due to the open nature of the database, or
	overwhelming amounts of data (many {3D} models) or the lack of relevant
	data (no models matching a specified label). To solve those problems
	we propose the following: First, we present an outlier/inlier classification
	method for reducing the number of results and discarding invalid
	{3D} models that do not match our query. Second, we adapt the so-called
	'morphing' approach from computer graphics to this application in order
	to specialize the models so that they describe more objects. Third,
	we search for {3D} models using a restricted search space, as obtained
	from our knowledge of the environment. We show our classification
	and matching results and finally show how we can recover the correct
	scaling with the stereo setup of our robot.},
}

@inproceedings{klapfer_pouring_2012,
 author = {R Klapfer and L Kunze and M Beetz},
 title = {Pouring and Mixing Liquids — Understanding the Physical Effects of
	Everyday Robot Manipulation Actions},
 booktitle = {35th German Conference on Artificial Intelligence ({KI-2012)}, Workshop
	on Human Reasoning and Automated Deduction},
 year = {2012},
 address = {Saarbrücken, Germany},
 month = {sep},
}

@inproceedings{klupsch_object-oriented_1998,
 author = {M Klupsch},
 title = {Object-Oriented Representation of Time-Varying Data Sequences in
	Multiagent Systems},
 booktitle = {World Multiconference on Systemics, Cybernetics and Informatics ({SCI}
	'98) - 4th International Conference on Information Systems, Analysis
	and Synthesis ({ISAS} '98)},
 year = {1998},
 editor = {Callaos, Nagib C.},
 volume = {2},
 pages = {33–40},
 address = {Orlando, {FL}},
 month = {jul},
 publisher = {International Institute of Informatics and Systemics ({IIIS)}},
}

@phdthesis{klupsch_objektorientierte_2000,
 author = {M Klupsch},
 title = {Objektorientierte Daten- und Zeitmodelle für die Echtzeit-Bildfolgenauswertung},
 school = {Fakultät für Informatik, Technische Universität München},
 year = {2000},
 abstract = {This work describes new concepts for the object-oriented modeling
	and representation of time-varying image and sensor data sequences
	as well as the functions which process these data sequences. Different
	frameworks for developing sensor data modules from function and data
	objects are presented. These allow such modules to be developed, configured,
	and controlled easily, and to be integrated transparently into complex
	real-time program systems as logical sensors. The aim of this
	work is to provide a software system which supports the design and
	implementation process of efficient and scalable program components
	and applications for real-time processing of image sequences and
	distributed sensor data analysis on standard computer systems. One
	of the fundamentals of this work is a consistent, explicit modeling
	of time. This concerns the sensor-based data capturing and modeling
	of the outer process as well as the description of the data processing
	system itself. The first aspect allows the data to be related to the
	course of events in the real world and the dynamic aspects of the
	scene to be modeled; the latter provides mechanisms for analysing the performance
	of the data processing methods. Data sequences are modelled as autonomous
	objects ({'Sequence')} collecting the individual measurements of
	a specific scene state like images or other sensor data, and the
	features derived from these. In addition, they represent general
	properties and methods, which are common for all kinds of data sequences,
	such as data initialization, access to current and old values, access
	to their temporal properties, and methods for updating the data sequence
	or interpolating values. Sensors and operators are modelled as {'Functor'}
	objects, which on an abstract level provide the functionality for
	continuously capturing, transforming, or analysing the dynamic data
	sequences. They encapsulate concrete sensor integrations and operator
	sequences including their static parameters. In addition, they represent
	general, application independent operator properties, e.g., connections
	to the input and output data sequences, attributes and methods for
	analysing the time consumption, or a general interface for the cyclic
	operator execution. With the help of these Sequence and Functor objects
	the data flow representation of a sensor data module is easy to implement
	without the need for an explicit program control specification. Instead,
	the program components are executed locally, triggered by new input data or
	by access to the output data. That behavior can be modified according
	to current requirements and can be controlled by software agents,
	making it easy to adapt the program control and the level of concurrency.
	The presented concepts were prototyped as a C++ class library, which
	provides a framework for the representation of data Sequences, Functors,
	software agents, and temporal expressions. Based on this library
	an extensive distributed robotic application - a team of soccer playing
	robots - was developed and successfully employed and tested at different
	international {RoboCup} competitions.},
}

@inproceedings{klupsch_agilo_1998,
 author = {M Klupsch and M Lückenhaus and Cand LI Zierl and T Bandlow and M Grimme and I Kellerer and F Schwarzer},
 title = {Agilo {RoboCuppers:} {RoboCup} Team Description},
 booktitle = {Proceedings of the Second {RoboCup} Workshop, {RoboCup-98}},
 year = {1998},
 editor = {Asada, Minoru},
 pages = {431–438},
 address = {Paris},
 month = {jul},
 abstract = {This paper describes the Agilo {RoboCuppers} - the {RoboCup} team
	of the image understanding group ({FG} {BV)} at the Technische Universität
	München. With a team of five Pioneer 1 robots, each equipped with a {CCD}
	camera and a single-board computer and coordinated by a master
	{PC} outside the field, we participated in the medium-size {RoboCup}
	league in Paris in 1998. We use a multi-agent based approach to represent
	different robots and to encapsulate concurrent tasks within the robots.
	A fast feature extraction based on the image processing library {HALCON}
	provides the necessary data for the onboard scene interpretation.
	These features as well as the odometric data are checked on the master
	{PC} with regard to consistency and plausibility. The results are
	distributed to all robots as a basis for their local planning modules
	and also used by a coordinating global planning module.},
}

@inproceedings{kranz_knife_2007,
 author = {M Kranz and A Maldonado and B Hoernler and RB Rusu and M Beetz and G Rigoll and A Schmidt},
 title = {A Knife and a Cutting Board as Implicit User Interface - Towards
	Context-Aware Kitchen Utilities},
 booktitle = {Proceedings of First International Conference on Tangible and Embedded
	Interaction 2007, {TEI} 2007, February 15-17 Baton Rouge, Louisiana,
	{USA}},
 year = {2007},
}

@inproceedings{kranz_sensing_2007,
 author = {M Kranz and A Maldonado and RB Rusu and B Hoernler and G Rigoll and M Beetz and A Schmidt},
 title = {Sensing Technologies and the Player-Middleware for Context-Awareness
	in Kitchen Environments},
 booktitle = {Proceedings of Fourth International Conference on Networked Sensing
	Systems, June 6 - 8, 2007, Braunschweig, Germany},
 year = {2007},
}

@inproceedings{kranz_player-stage_2006,
 author = {M Kranz and RB Rusu and A Maldonado and M Beetz and A Schmidt},
 title = {A {Player/Stage} System for Context-Aware Intelligent Environments},
 booktitle = {Proceedings of {UbiSys'06}, System Support for Ubiquitous Computing
	Workshop, at the 8th Annual Conference on Ubiquitous Computing (Ubicomp
	2006), Orange County California, September 17-21, 2006},
 year = {2006},
 abstract = {We propose {Player/Stage}, a well-known platform widely used in robotics,
	as middleware for ubiquitous computing. {Player/Stage} provides uniform
	interfaces to sensors and actuators and allows the computational
	matching of input and output. {Player/Stage} addresses exactly the
	issue of dealing with heterogeneous hardware, but currently only
	with a focus on robotics. We show how to integrate ubiquitous
	computing platforms into {Player/Stage} and propose {Player/Stage}
	as middleware for ubiquitous computing projects.},
}

@inproceedings{kresse_movement-aware_2012,
 author = {I Kresse and M Beetz},
 title = {Movement-aware Action Control – Integrating Symbolic and Control-theoretic
	Action Execution},
 booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
 year = {2012},
 address = {St. Paul, {MN}, {USA}},
 month = {may},
}

@inproceedings{kresse_multimodal_2011,
 author = {I Kresse and U Klank and M Beetz},
 title = {Multimodal Autonomous Tool Analyses and Appropriate Application},
 booktitle = {11th {IEEE-RAS} International Conference on Humanoid Robots},
 year = {2011},
 address = {Bled, Slovenia},
 month = {oct},
}

@inproceedings{kriegel_combining_2013,
 author = {S Kriegel and M Brucker and ZC Marton and T Bodenmuller and M Suppa},
 title = {Combining object modeling and recognition for active scene exploration},
 booktitle = {Intelligent Robots and Systems ({IROS)}, 2013 {IEEE/RSJ} International
	Conference on},
 year = {2013},
 pages = {2384–2391},
 publisher = {{IEEE}},
}

@inproceedings{kruse_towards_2010,
 author = {T Kruse and A Kirsch},
 title = {Towards Opportunistic Action Selection in Human-Robot Cooperation},
 booktitle = {33rd Annual German Conference on Artificial Intelligence ({KI} 2010)},
 year = {2010},
}

@inproceedings{kruse_dynamic_2010,
 author = {T Kruse and A Kirsch and E. A Sisbot and R Alami},
 title = {Dynamic Generation and Execution of Human Aware Navigation Plans},
 booktitle = {Proceedings of the Ninth International Conference on Autonomous Agents
	and Multiagent Systems ({AAMAS)}},
 year = {2010},
}

@inproceedings{kruse_exploiting_2010,
 author = {T Kruse and A Kirsch and E. A Sisbot and R Alami},
 title = {Exploiting Human Cooperation in Human-Centered Robot Navigation},
 booktitle = {{IEEE} International Symposium in Robot and Human Interactive Communication
	(Ro-Man)},
 year = {2010},
}

@phdthesis{kunze_robot_2014,
 author = {L Kunze},
 title = {Naïve Physics and Commonsense Reasoning for Everyday Robot Manipulation},
 school = {Technische Universität München},
 year = {2014},
 address = {München},
 url = {http://nbn-resolving.de/urn/resolver.pl?urn:nbn:de:bvb:91-diss-20140214-1138034-0-5},
}

@inproceedings{kunze_searching_2012,
 author = {L Kunze and M Beetz and M Saito and H Azuma and K Okada and M Inaba},
 title = {Searching Objects in Large-scale Indoor Environments: A Decision-theoretic
	Approach},
 booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
 year = {2012},
 address = {St. Paul, {MN}, {USA}},
 month = {may},
}

@inproceedings{kunze_logic_2011,
 author = {L Kunze and ME Dolha and M Beetz},
 title = {Logic Programming with Simulation-based Temporal Projection for Everyday
	Robot Object Manipulation},
 booktitle = {2011 {IEEE/RSJ} International Conference on Intelligent Robots and
	Systems ({IROS)}},
 year = {2011},
 address = {San Francisco, {CA}, {USA}},
 month = {sep},
}

@inproceedings{kunze_simulation-based_2011,
 author = {L Kunze and ME Dolha and E Guzman and M Beetz},
 title = {Simulation-based Temporal Projection of Everyday Robot Object Manipulation},
 booktitle = {Proc. of the 10th Int. Conf. on Autonomous Agents and Multiagent
	Systems ({AAMAS} 2011)},
 year = {2011},
 editor = {Yolum and Tumer and Stone and Sonenberg},
 address = {Taipei, Taiwan},
 month = {may},
 publisher = {{IFAAMAS}},
}

@inproceedings{kunze_making_2012,
 author = {L Kunze and A Haidu and M Beetz},
 title = {Making Virtual Pancakes — Acquiring and Analyzing Data of Everyday
	Manipulation Tasks through Interactive Physics-based Simulations},
 booktitle = {Poster and Demo Track of the 35th German Conference on Artificial
	Intelligence ({KI-2012)}},
 year = {2012},
 address = {Saarbrücken, Germany},
 month = {sep},
}

@inproceedings{kunze_salient_2007,
 author = {L Kunze and K Lingemann and A Nüchter and J Hertzberg},
 title = {Salient Visual Features to Help Close the Loop in {6D} {SLAM}},
 booktitle = {The 5th International Conference on Computer Vision Systems, 2007},
 year = {2007},
}

@inproceedings{kunze_towards_2011,
 author = {L Kunze and T Roehm and M Beetz},
 title = {Towards Semantic Robot Description Languages},
 booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
 year = {2011},
 pages = {5589–5595},
 address = {Shanghai, China},
 month = {may},
}

@inproceedings{kunze_putting_2010,
 author = {L Kunze and M Tenorth and M Beetz},
 title = {Putting People's Common Sense into Knowledge Bases of Household Robots},
 booktitle = {33rd Annual German Conference on Artificial Intelligence ({KI} 2010)},
 year = {2010},
 pages = {151–159},
 address = {Karlsruhe, Germany},
 month = {sep},
 publisher = {Springer},
}

@phdthesis{lanser_modellbasierte_1997,
 author = {S Lanser},
 title = {Modellbasierte Lokalisation gestützt auf monokulare Videobilder},
 school = {Technische Universität München},
 year = {1997},
}

@inproceedings{leha_optimization_2009,
 author = {A Leha and D Pangercic and T Rühr and M Beetz},
 title = {Optimization of Simulated Production Process Performance using Machine
	Learning},
 booktitle = {Proceedings of Emerging Technologies and Factory Automation ({ETFA).}},
 year = {2009},
}

@inproceedings{lemaignan_oro_2010,
 author = {S Lemaignan and R Ros and L Mösenlechner and R Alami and M Beetz},
 title = {{ORO}, a knowledge management module for cognitive architectures
	in robotics},
 booktitle = {Proceedings of the 2010 {IEEE/RSJ} International Conference on Intelligent
	Robots and Systems},
 year = {2010},
 pages = {3548–3553},
 address = {Taipei, Taiwan},
 month = {oct},
}

@article{lemaignan_grounding_2011,
 author = {S Lemaignan and R Ros and E. A Sisbot and R Alami and M Beetz},
 title = {Grounding the Interaction: Anchoring Situated Discourse in Everyday
	Human-Robot Interaction},
 journal = {International Journal of Social Robots},
 year = {2011},
 pages = {1--19},
 issn = {1875-4791},
 url = {http://dx.doi.org/10.1007/s12369-011-0123-x},
}

@inproceedings{lenz_distributed_2010,
 author = {C. Lenz and T. Röder and M Eggers and S. Amin and T. Kisler and B Radig and G. Panin and A. Knoll},
 title = {A Distributed Many-Camera System for Multi-Person Tracking},
 booktitle = {Proceedings of the First International Joint Conference on Ambient
	Intelligence ({AmI} 2010)},
 year = {2010},
 editor = {Wichert, R. and Ruyter, B. de},
 month = {nov},
 publisher = {Springer Lecture Notes in Computer Science},
}

@inproceedings{li_obstacle_2009,
 author = {J Li and A Maldonado and M Beetz and A Schuboe},
 title = {Obstacle avoidance in a pick-and-place task},
 booktitle = {Proceedings of the 2009 {IEEE} Conference on Robotics and Biomimetics},
 year = {2009},
 address = {Guilin, Guangxi, China},
 month = {dec},
}

@inproceedings{maier_self-diagnosis_2009,
 author = {P Maier},
 title = {Self-Diagnosis and Self-Planning with Constraint-based Hybrid Models},
 booktitle = {Proc. First International Conference on Prognostics and Health Management
	({PHM'09)/Doctoral} Consortium},
 year = {2009},
 address = {San Diego, {CA}, {USA}},
 month = {sep},
}

@inproceedings{maier_adaptive_2008,
 author = {P Maier},
 title = {Adaptive Abstraction of Constraint-Based Models for Self-Diagnosis
	and Planning},
 booktitle = {Proc. {AAAI/SIGART} Doctoral Consortium},
 year = {2008},
 pages = {1859–1860},
 address = {Menlo Park, California},
 month = {jul},
 publisher = {The {AAAI} Press},
}

@inproceedings{maier_compiling_2011,
 author = {P Maier and D Jain and M Sachenbacher},
 title = {Compiling {AI} Engineering Models for Probabilistic Inference},
 booktitle = {{KI} 2011: Advances in Artificial Intelligence, 34th Annual German
	Conference on {AI}},
 year = {2011},
 volume = {7006},
 series = {Lecture Notes in Computer Science},
 pages = {191--203},
 address = {Berlin, Germany},
 month = {oct},
 publisher = {Springer},
 isbn = {978-3-642-24454-4},
}

@inproceedings{maier_diagnostic_2011,
 author = {P Maier and D Jain and M Sachenbacher},
 title = {Diagnostic Hypothesis Enumeration vs. Probabilistic Inference for
	Hierarchical Automata Models},
 booktitle = {Proceedings of the 22nd International Workshop on Principles of Diagnosis
	({DX-2011)}},
 year = {2011},
 address = {Murnau, Germany},
}

@inproceedings{maier_plan_2010,
 author = {P Maier and D Jain and S Waldherr and M Sachenbacher},
 title = {Plan Assessment for Autonomous Manufacturing as Bayesian Inference},
 booktitle = {{KI} 2010: Advances in Artificial Intelligence, 33rd Annual German
	Conference on {AI}},
 year = {2010},
 volume = {6359},
 series = {Lecture Notes in Computer Science},
 pages = {263--271},
 address = {Karlsruhe, Germany},
 publisher = {Springer},
 isbn = {978-3-642-16110-0},
}

@inproceedings{maier_diagnosis_2009,
 author = {P Maier and M Sachenbacher},
 title = {Diagnosis and Fault-adaptive Control for Mechatronic Systems using
	Hybrid Constraint Automata},
 booktitle = {Proc. First International Conference on Prognostics and Health Management
	({PHM'09)}},
 year = {2009},
 address = {San Diego, {CA}, {USA}},
 month = {sep},
}

@inproceedings{maier_factory_2009,
 author = {P Maier and M Sachenbacher},
 title = {Factory Monitoring and Control with Mixed {Hardware/Software}, {Discrete/Continuous}
	Models},
 booktitle = {Proc. of 14th {IEEE} International Conference on Emerging Technologies
	and Factory Automation ({ETFA-2009)}},
 year = {2009},
}

@inproceedings{maier_self-monitoring_2009,
 author = {P Maier and M Sachenbacher},
 title = {Self-Monitoring and Control for Embedded Systems using Hybrid Constraint
	Automata},
 booktitle = {Proc. Workshop on Self-X in Mechatronics and other Engineering Applications},
 year = {2009},
 address = {Paderborn, Germany},
 month = {sep},
}

@inproceedings{maier_adaptive_2008-1,
 author = {P Maier and M Sachenbacher},
 title = {Adaptive Domain Abstraction in a Soft-Constraint Message-Passing
	Algorithm},
 booktitle = {Proc. Ninth International Workshop on Preferences and Soft Constraints
	(Soft'08)},
 year = {2008},
}

@inproceedings{maier_constraint_2008,
 author = {P Maier and M Sachenbacher},
 title = {Constraint Optimization and Abstraction for Embedded Intelligent
	Systems},
 booktitle = {Proc. Fifth International Conference on Integration of {AI} and {OR}
	Techniques in Constraint Programming for Combinatorial Optimization
	Problems ({CPAIOR'08)}},
 year = {2008},
 pages = {338–342},
 address = {Paris, France},
}

@inproceedings{maier_constraint-based_2009,
 author = {P Maier and M Sachenbacher and T Rühr and L Kuhn},
 title = {Constraint-Based Integration of Plan Tracking and Prognosis for Autonomous
	Production},
 booktitle = {{KI} 2009: Advances in Artificial Intelligence, 32nd Annual German
	Conference on {AI}},
 year = {2009},
 volume = {5803},
 series = {Lecture Notes in Computer Science},
 pages = {403--410},
 address = {Paderborn, Germany},
 month = {sep},
 publisher = {Springer},
}

@inproceedings{maier_integrated_2009,
 author = {P Maier and M Sachenbacher and T Rühr and L Kuhn},
 title = {Integrated Plan Tracking and Prognosis for Autonomous Production
	Processes},
 booktitle = {Proc. of 14th {IEEE} International Conference on Emerging Technologies
	and Factory Automation ({ETFA-2009)}},
 year = {2009},
 month = {sep},
}

@inproceedings{maier_integrated_2009-1,
 author = {P Maier and M Sachenbacher and T Rühr and L Kuhn},
 title = {Integrated Diagnosis and Plan Assessment for Autonomous Production
	Processes},
 booktitle = {Proc. of The {IJCAI-09} Workshop on Self-* and Autonomous Systems
	({SAS-2009)}},
 year = {2009},
 month = {jul},
}

@inproceedings{maier_integrating_2009,
 author = {P Maier and M Sachenbacher and T Rühr and L Kuhn},
 title = {Integrating Model-based Diagnosis and Prognosis in Autonomous Production},
 booktitle = {Proc. First International Conference on Prognostics and Health Management
	({PHM'09)}},
 year = {2009},
 address = {San Diego, {CA}, {USA}},
 month = {sep},
}

@inproceedings{malaka_solving_2000,
 author = {R Malaka and S Buck},
 title = {Solving Nonlinear Optimization Problems Using Networks Of Spiking
	Neurons},
 booktitle = {{IEEE} International Joint Conference on Neural Networks},
 year = {2000},
 volume = {6},
 pages = {486–491},
 abstract = {Most artificial neural networks used in practical applications are
	based on simple neuron types in a multi-layer architecture. Here,
	we propose to solve optimization problems using a fully recurrent
	network of spiking neurons mimicking the response behavior of biological
	neurons. Such networks can compute a series of different solutions
	for a given problem and converge into a periodical sequence of such
	solutions. The goal of this paper is to prove that neural networks
	like the {SRM} (Spike Response Model) are able to solve nonlinear
	optimization problems. We demonstrate this for the traveling salesman
	problem. Our network model is able to compute multiple solutions
	and can use its dynamics to leave local minima in which classical
	models would be stuck. For adapting the model, we introduce a suitable
	network architecture and show how to encode the problem directly
	into the network weights.},
}

@inproceedings{maldonado_improving_2012,
 author = {A Maldonado and H Alvarez-Heredia and M Beetz},
 title = {Improving robot manipulation through fingertip perception},
 booktitle = {{IEEE} International Conference on Intelligent Robots and Systems
	({IROS)}},
 year = {2012},
 address = {Vilamoura, Algarve, Portugal},
 month = {oct},
}

@inproceedings{maldonado_robotic_2010,
 author = {A Maldonado and U Klank and M Beetz},
 title = {Robotic grasping of unmodeled objects using time-of-flight range
	data and finger torque information},
 booktitle = {2010 {IEEE/RSJ} International Conference on Intelligent Robots and
	Systems ({IROS)}},
 year = {2010},
 pages = {2586–2591},
 address = {Taipei, Taiwan},
 month = {oct},
}

@book{marco_creating_2012,
 title = {Creating and using {RoboEarth} object models},
 year = {2012},
 author = {D Di Marco and A Koch and O Zweigle and K Häussermann and B Schießle and P Levi and D Gálvez-López and L Riazuelo and J Civera and J. M. M. Montiel and M Tenorth and AC Perzylo and M Waibel and MJG van de Molengraft},
 address = {St. Paul, {MN}, {USA}},
 month = {may},
}

@inproceedings{marco_roboearth_2012,
 author = {D di Marco and M Tenorth and K Häussermann and O Zweigle and P Levi},
 title = {{RoboEarth} Action Recipe Execution},
 booktitle = {12th International Conference on Intelligent Autonomous Systems},
 year = {2012},
}

@inproceedings{marconi_sherpa_2012,
 author = {L. Marconi and C. Melchiorri and M. Beetz and D. Pangercic and R. Siegwart and S. Leutenegger and R. Carloni and S. Stramigioli and H. Bruyninckx and P. Doherty and A. Kleiner and V. Lippiello and A. Finzi and B. Siciliano and A. Sala and N. Tomatis},
 title = {The {SHERPA} project: smart collaboration between humans and ground-aerial
	robots for improving rescuing activities in alpine environments},
 booktitle = {{IEEE} International Symposium on Safety, Security, and Rescue Robotics
	({SSRR)}},
 year = {2012},
 address = {College Station, Texas, {USA}},
 month = {nov},
}

@inproceedings{marton_autonomous_2008,
 author = {ZC Marton and N Blodow and M Dolha and M Tenorth and RB Rusu and M Beetz},
 title = {Autonomous Mapping of Kitchen Environments and Applications},
 booktitle = {Proceedings of the 1st International Workshop on Cognition for Technical
	Systems, Munich, Germany, 6-8 October},
 year = {2008},
}

@inproceedings{marton_reconstruction_2009,
 author = {ZC Marton and LC Goron and RB Rusu and M Beetz},
 title = {Reconstruction and Verification of {3D} Object Models for Grasping},
 booktitle = {Proceedings of the 14th International Symposium on Robotics Research
	({ISRR09)}},
 year = {2009},
 address = {Lucerne, Switzerland},
 month = {sep},
}

@article{marton_combined_2011,
 author = {ZC Marton and D Pangercic and N Blodow and M Beetz},
 title = {Combined {2D-3D} Categorization and Classification for Multimodal
	Perception Systems},
 journal = {The International Journal of Robotics Research},
 year = {2011},
 volume = {30},
 pages = {1378–1402},
 number = {11},
 month = {sep},
}

@inproceedings{marton_fast_2009,
 author = {ZC Marton and RB Rusu and M Beetz},
 title = {On Fast Surface Reconstruction Methods for Large and Noisy Datasets},
 booktitle = {Proceedings of the {IEEE} International Conference on Robotics and
	Automation ({ICRA)}},
 year = {2009},
 address = {Kobe, Japan},
 month = {may},
}

@inproceedings{marton_probabilistic_2009,
 author = {ZC Marton and RB Rusu and D Jain and U Klank and M Beetz},
 title = {Probabilistic Categorization of Kitchen Objects in Table Settings
	with a Composite Sensor},
 booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems
	({IROS)}},
 year = {2009},
 pages = {4777--4784},
 address = {St. Louis, {MO}, {USA}},
 month = {oct},
}

@inproceedings{marton_object_2012,
 author = {ZC Marton and F Balint-Benczedi and N Blodow and LC Goron and M Beetz},
 title = {Object Categorization in Clutter using Additive Features and Hashing
	of Part-graph Descriptors},
 booktitle = {Proceedings of Spatial Cognition ({SC)}},
 year = {2012},
 address = {Abbey Kloster Seeon, Germany},
}

@inproceedings{marton_advantages_2011,
 author = {ZC Marton and N Blodow and M Beetz},
 title = {Advantages of Spatial-temporal Object Maps for Service Robotics},
 booktitle = {{IEEE} Workshop on Advanced Robotics and its Social Impacts ({ARSO)}},
 year = {2011},
 address = {Half-Moon Bay, {CA}, {USA}},
 month = {oct},
}

@inproceedings{marton_efficient_2011,
 author = {ZC Marton and D Pangercic and M Beetz},
 title = {Efficient Surface and Feature Estimation in {RGBD}},
 booktitle = {{RGB-D} Workshop on {3D} Perception in Robotics at the European Robotics
	({euRobotics)} Forum},
 year = {2011},
 address = {Västerås, Sweden},
 month = {apr},
}

@inproceedings{marton_general_2010,
 author = {ZC Marton and D Pangercic and N Blodow and J Kleinehellefort and M Beetz},
 title = {General {3D} Modelling of Novel Objects from a Single View},
 booktitle = {Proceedings of the {IEEE/RSJ} International Conference on Intelligent
	Robots and Systems ({IROS)}},
 year = {2010},
 address = {Taipei, Taiwan},
 month = {oct},
}

@inproceedings{marton_hierarchical_2010,
 author = {ZC Marton and D Pangercic and RB Rusu and A Holzbach and M Beetz},
 title = {Hierarchical Object Geometric Categorization and Appearance Classification
	for Mobile Manipulation},
 booktitle = {Proceedings of the {IEEE-RAS} International Conference on Humanoid
	Robots},
 year = {2010},
 address = {Nashville, {TN}, {USA}},
 month = {dec},
}

@article{marton_ensembles_2012,
 author = {ZC Marton and F Seidel and F Balint-Benczedi and M Beetz},
 title = {Ensembles of Strong Learners for Multi-cue Classification},
 journal = {Pattern Recognition Letters ({PRL)}, Special Issue on Scene Understanding
	and Behaviour Analysis},
 year = {2012},
}

@inproceedings{marton_towards_2012,
 author = {ZC Marton and F Seidel and M Beetz},
 title = {Towards Modular Spatio-temporal Perception for Task-adapting Robots},
 booktitle = {Postgraduate Conference on Robotics and Development of Cognition
	({RobotDoC-PhD)}, a satellite event of the 22nd International Conference
	on Artificial Neural Networks ({ICANN)}},
 year = {2012},
 address = {Lausanne, Switzerland},
}

@phdthesis{mayer_facial_2012,
 author = {C Mayer},
 title = {Facial Expression Recognition With A Three-Dimensional Face Model},
 school = {Technische Universität München},
 year = {2012},
 address = {München},
 keywords = {facial expressions},
 url = {http://nbn-resolving.de/urn/resolver.pl?urn:nbn:de:bvb:91-diss-20120110-1080232-1-5},
}

@article{mayer_cross-database_2014,
 author = {C Mayer and M Eggers and B Radig},
 title = {Cross-database evaluation for facial expression recognition},
 journal = {Pattern Recognition and Image Analysis},
 year = {2014},
 volume = {24},
 pages = {124--132},
 number = {1},
 month = {jan},
 doi = {10.1134/S1054661814010106},
 issn = {1054-6618, 1555-6212},
 keywords = {facial expressions},
 language = {en},
 url = {http://link.springer.com/10.1134/S1054661814010106},
 urldate = {2014-05-15},
}

@article{mayer_face_2013,
 author = {C Mayer and B Radig},
 title = {Face model fitting with learned displacement experts and multi-band
	images},
 journal = {Pattern Recognition and Image Analysis},
 year = {2013},
 volume = {23},
 pages = {287--295},
 number = {2},
 month = {apr},
 doi = {10.1134/S1054661813020119},
 issn = {1054-6618, 1555-6212},
 keywords = {facial expressions},
 language = {en},
 url = {http://link.springer.com/10.1134/S1054661813020119},
 urldate = {2014-05-15},
}

@article{mayer_face_2011,
 author = {C Mayer and B Radig},
 title = {Face model fitting with learned displacement experts and multi-band
	images},
 journal = {Pattern Recognition and Image Analysis},
 year = {2011},
 volume = {21},
 pages = {526--529},
 number = {3},
 month = {sep},
 doi = {10.1134/S1054661811020738},
 issn = {1054-6618, 1555-6212},
 keywords = {facial expressions},
 language = {en},
 url = {http://link.springer.com/10.1134/S1054661811020738},
 urldate = {2014-05-15},
}

@inproceedings{mayer_learning_2011,
 author = {C Mayer and B Radig},
 title = {Learning Displacement Experts from Multi-band Images for Face Model
	Fitting},
 booktitle = {International Conference on Advances in Computer-Human Interaction},
 year = {2011},
 month = {feb},
 keywords = {facial expressions},
}

@inproceedings{mayer_towards_2010,
 author = {C Mayer and S Sosnowski and K Kühnlenz and B Radig},
 title = {Towards robotic facial mimicry: system development and evaluation},
 booktitle = {Proceedings of the 19th {IEEE} International Symposium on Robot and
	Human Interactive Communication},
 year = {2010},
 keywords = {facial expressions},
}

@inproceedings{mayer_facial_2009,
 author = {C Mayer and M Wimmer and M Eggers and B Radig},
 title = {Facial Expression Recognition with {3D} Deformable Models},
 booktitle = {Proceedings of the 2nd International Conference on Advancements Computer-Human
	Interaction ({ACHI)}},
 year = {2009},
 publisher = {Springer},
 keywords = {facial expressions},
}

@article{mayer_adjusted_2009,
 author = {C Mayer and M Wimmer and B Radig},
 title = {Adjusted Pixel Features for Facial Component Classification},
 journal = {Image and Vision Computing Journal},
 year = {2009},
 keywords = {facial expressions},
}

@inproceedings{mayer_interpreting_2008,
 author = {C Mayer and M Wimmer and F Stulp and Z Riaz and A Roth and M Eggers and B Radig},
 title = {Interpreting the Dynamics of Facial Expressions in Real Time Using
	Model-based Techniques},
 booktitle = {Proceedings of the 3rd Workshop on Emotion and Computing: Current
	Research and Future Impact},
 year = {2008},
 pages = {45--46},
 address = {Kaiserslautern, Germany},
 month = {sep},
 keywords = {facial expressions},
}

@inproceedings{mayer_real_2008,
 author = {C Mayer and M Wimmer and F Stulp and Z Riaz and A Roth and M Eggers and B Radig},
 title = {A Real Time System for Model-based Interpretation of the Dynamics
	of Facial Expressions},
 booktitle = {Proc. of the International Conference on Automatic Face and Gesture
	Recognition ({FGR08)}},
 year = {2008},
 address = {Amsterdam, Netherlands},
 month = {sep},
 keywords = {facial expressions},
}

@article{meyer_discrete_2002,
 author = {M Meyer and M. Desbrun and P. Schröder},
 title = {Discrete differential-geometry operators for triangulated 2-manifolds},
 journal = {Visualization and mathematics},
 year = {2002},
 volume = {3},
 pages = {34–57},
 number = {7},
}

@inproceedings{morisset_leaving_2009,
 author = {B Morisset and RB Rusu and A Sundaresan and K Hauser and M Agrawal and JC Latombe and M Beetz},
 title = {Leaving Flatland: Toward Real-Time {3D} Navigation},
 booktitle = {Proceedings of the {IEEE} International Conference on Robotics and
	Automation ({ICRA)}, Kobe, Japan, May 12-17},
 year = {2009},
}

@article{mozos_furniture_2011,
 author = {OM Mozos and ZC Marton and M Beetz},
 title = {Furniture Models Learned from the {WWW} – Using Web Catalogs to Locate
	and Categorize Unknown Furniture Pieces in {3D} Laser Scans},
 journal = {Robotics \& Automation Magazine},
 year = {2011},
 volume = {18},
 pages = {22–32},
 number = {2},
 month = {jun},
}

@inproceedings{murray_modeling_2011,
 author = {WR Murray and D Jain},
 title = {Modeling Cognitive Frames for Situations with Markov Logic Networks},
 booktitle = {Proceedings of the 8th International {NLPCS} Workshop: Human-Machine
	Interaction in Translation, Copenhagen Studies in Language 41},
 year = {2011},
 pages = {167–178},
 month = {aug},
 publisher = {Samfundslitteratur},
}

@inproceedings{mosenlechner_fast_2013,
 author = {L Mösenlechner and M Beetz},
 title = {Fast Temporal Projection Using Accurate Physics-Based Geometric Reasoning},
 booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
 year = {2013},
 address = {Karlsruhe, Germany},
 month = {may},
}

@inproceedings{mosenlechner_parameterizing_2011,
 author = {L Mösenlechner and M Beetz},
 title = {Parameterizing Actions to have the Appropriate Effects},
 booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems
	({IROS)}},
 year = {2011},
 address = {San Francisco, {CA}, {USA}},
 month = {sep},
}

@inproceedings{mosenlechner_using_2009,
 author = {L Mösenlechner and M Beetz},
 title = {Using Physics- and Sensor-based Simulation for High-fidelity Temporal
	Projection of Realistic Robot Behavior},
 booktitle = {19th International Conference on Automated Planning and Scheduling
	({ICAPS'09).}},
 year = {2009},
 abstract = {Planning means deciding on the future course of action based on predictions
	of what will happen when an activity is carried out in one way or
	the other. As we apply action planning to autonomous, sensor-guided
	mobile robots with manipulators or even to humanoid robots we need
	very realistic and detailed predictions of the behavior generated
	by a plan in order to improve the robot's performance substantially.
	In this paper we investigate the high-fidelity temporal projection
	of realistic robot behavior based on physics- and sensor-based simulation
	systems. We equip a simulator and interpreter with means to log simulated
	plan executions into a database. A logic-based query and inference
	mechanism then retrieves and reconstructs the necessary information
	from the database and translates the information into a first-order
	representation of robot plans and the behavior they generate. The
	query language enables the robot planning system to infer the intentions,
	the beliefs, and the world state at any projected time. It also allows
	the planning system to recognize, diagnose, and analyze various plan
	failures typical for performing everyday manipulation tasks.},
}

@inproceedings{mosenlechner_becoming_2010,
 author = {L Mösenlechner and N Demmel and M Beetz},
 title = {Becoming Action-aware through Reasoning about Logged Plan Execution
	Traces},
 booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems},
 year = {2010},
 pages = {2231–2236},
 address = {Taipei, Taiwan},
 month = {oct},
}

@inproceedings{mosenlechner_high_2008,
 author = {L Mösenlechner and A Müller and M Beetz},
 title = {High Performance Execution of Everyday Pick-and-Place Tasks by Integrating
	Transformation Planning and Reactive Execution},
 booktitle = {Proceedings of the 1st International Workshop on Cognition for Technical
	Systems, München, Germany, 6-8 October},
 year = {2008},
 abstract = {We investigate the plan-based control of physically and sensorically
	realistic simulated autonomous mobile robots performing everyday
	pick-and-place tasks in human environments, such as table setting.
	Our approach applies {AI} planning techniques to transform default
	plans that can be inferred from instructions for activities of daily
	life into flexible, high-performance robot plans. To find high performance
	plans the planning system applies transformations such as carrying
	plates to the table by stacking them or leaving cabinet doors open
	while setting the table, which require substantial changes of the
	control structure of the intended activities. We argue and demonstrate
	that applying {AI} planning techniques directly to concurrent reactive
	plan languages, instead of using layered software architectures with
	different languages, enables the robot action planner to achieve
	substantial performance improvements (23\% - 45\% depending on the
	tasks). We also argue that the transformation of concurrent reactive
	plans is necessary to obtain the results. Our claims are supported
	by extensive empirical investigations in realistic simulations.},
}

@phdthesis{muller_transformational_2008,
 author = {A Müller},
 title = {Transformational Planning for Autonomous Household Robots using Libraries
	of Robust and Flexible Plans},
 school = {Technische Universität München},
 year = {2008},
 abstract = {One of the oldest dreams of Artificial Intelligence is the realization
	of autonomous robots that achieve a level of problem-solving competency
	comparable to humans. Human problem-solving capabilities are particularly
	impressive in the context of everyday activities such as performing
	household chores: people are able to deal with ambiguous and incomplete
	information, they adapt their plans to different environments and
	specific situations achieving intuitively almost optimal behavior,
	they cope with interruptions and failures and manage multiple interfering
	jobs. The investigations presented in this work make substantial
	progress in the direction of building robots that show similar behavior.
	This thesis addresses the problem of competently accomplishing everyday
	manipulation activities, such as setting the table and preparing
	meals, as a plan-based control problem. In plan-based control, robots
	do not only execute their programs but also reason about and modify
	them. We propose {TRANER} (Transformational Planner) as a suitable
	planning system for the optimization of everyday manipulation activities.
	{TRANER} realizes planning through a generate-test cycle in which
	plan revision rules propose alternative plans and new plans are simulated
	in order to test and evaluate them. The unique features of {TRANER}
	are that it can realize very general and abstract plan revisions
	such as "stack objects before carrying them instead of handling them
	one by one" and that it successfully operates on plans in a way that
	they generate reliable, flexible, and efficient robot behavior in
	realistic simulations. The key contributions of this dissertation
	are threefold. First, it extends the plan representation to support
	the specification of robust and transformable plans. Second, it proposes
	a library of general and flexible plans for a household robot, using
	the extended plan representation. Third, it establishes a powerful,
	yet intuitive syntax for transformation rules together with a set
	of general transformation rules for optimizing pick-and-place tasks
	in an everyday setting using the rule language. The viability and
	strength of the approach is empirically demonstrated in comprehensive
	and extensive experiments in a simulation environment with realistically
	simulated action and sensing mechanisms. The experiments show that
	transformational planning is necessary to tailor the robot's activities
	and that it is capable of substantially improving the robot's performance.},
 url = {http://mediatum2.ub.tum.de/node?id=645588},
}

@inproceedings{muller_designing_2006,
 author = {A Müller and M Beetz},
 title = {Designing and Implementing a Plan Library for a Simulated Household
	Robot},
 booktitle = {Cognitive Robotics: Papers from the {AAAI} Workshop},
 year = {2006},
 editor = {Beetz, Michael and Rajan, Kanna and Thielscher, Michael and Rusu,
	Radu Bogdan},
 series = {Technical Report {WS-06-03}},
 pages = {119–128},
 address = {Menlo Park, California},
 publisher = {American Association for Artificial Intelligence},
 abstract = {As we are deploying planning mechanisms in real-world applications,
	such as the control of autonomous robots, it becomes apparent that
	the performance of plan-based controllers critically depends on the
	design and implementation of plan libraries. Despite its importance
	the investigation of designs of plan libraries and plans has been
	largely ignored. In this paper we describe parts of a plan library
	that we are currently developing and applying to the control of a
	simulated household robot. The salient features of our plans are
	that they are designed for reliable, flexible, and optimized execution,
	and are grounded into sensor data and action routines. We provide
	empirical evidence that design criteria that we are proposing have
	considerable impact on the performance level of robots.},
 isbn = {978-1-57735-285-3},
}

@inproceedings{muller_towards_2007,
 author = {A Müller and M Beetz},
 title = {Towards a Plan Library for Household Robots},
 booktitle = {Proceedings of the {ICAPS'07} Workshop on Planning and Plan Execution
	for Real-World Systems: Principles and Practices for Planning in
	Execution},
 year = {2007},
 address = {Providence, {USA}},
 month = {sep},
 abstract = {This paper describes the structure for a plan library of a service
	robot intended to perform household chores. The plans in the library
	are particularly designed to enable reliable, flexible, and efficient
	robot control, to learn control heuristics, to generalize the plans
	to cope with new objects and situations. We believe that plans with
	these characteristics are required for competent autonomous robots
	performing skilled manipulation tasks in human environments.},
}

@inproceedings{muller_transformational_2007,
 author = {A Müller and A Kirsch and M Beetz},
 title = {Transformational Planning for Everyday Activity},
 booktitle = {Proceedings of the 17th International Conference on Automated Planning
	and Scheduling ({ICAPS'07)}},
 year = {2007},
 pages = {248–255},
 address = {Providence, {USA}},
 month = {sep},
 abstract = {We propose an approach to transformational planning and learning of
	everyday activity. This approach is targeted at autonomous robots
	that are to perform complex activities such as household chores. Our
	approach operates on flexible and reliable plans suited for long-term
	activity and applies plan transformations that generate competent
	and high-performance robot behavior. We show as a proof of concept
	that general transformation rules can be formulated that achieve
	substantially and significantly improved performance using table
	setting as an example.},
}

@inproceedings{muller_object-oriented_2004,
 author = {A Müller and A Kirsch and M Beetz},
 title = {Object-oriented Model-based Extensions of Robot Control Languages},
 booktitle = {27th German Conference on Artificial Intelligence},
 year = {2004},
 abstract = {More than a decade after mobile robots arrived in many research labs
	it is still difficult to find plan-based autonomous robot controllers
	that perform, beyond doubt, better than they possibly could without
	applying {AI} methods. One of the main reasons for this situation
	is abstraction. {AI} based control techniques typically abstract
	away from the mechanisms that generate the physical behavior and
	refuse the use of control structures that have proven to be necessary
	for producing flexible and reliable robot behavior. The consequence
	is: {AI-based} control mechanisms can neither explain and diagnose
	how a certain behavior resulted from a given plan nor can they revise
	the plans to improve its physical performance. In our view, a substantial
	improvement on this situation is not possible without having a new
	generation of robot control languages. These languages must, on the
	one hand, be expressive enough for specifying and producing high
	performance robot behavior and, on the other hand, be transparent
	and explicit enough to enable execution time inference mechanisms
	to reason about, and manipulate these control programs. This paper
	reports on aspects of the design of {RPL-II}, which we propose as
	such a next generation control language. We describe the nuts and
	bolts of extending our existing language {RPL} to support explicit
	models of physical systems, and object-oriented modeling of control
	tasks and programs. We show the application of these concepts in
	the context of autonomous robot soccer.},
}

@inproceedings{nissler_sample_2013,
 author = {C Nissler and ZC Marton and M Suppa},
 title = {Sample consensus fitting of bivariate polynomials for initializing
	{EM-based} modeling of smooth {3D} surfaces},
 booktitle = {Intelligent Robots and Systems ({IROS)}, 2013 {IEEE/RSJ} International
	Conference on},
 year = {2013},
 pages = {4228–4234},
 publisher = {{IEEE}},
}

@inproceedings{nyga_everything_2012,
 author = {D Nyga and M Beetz},
 title = {Everything Robots Always Wanted to Know about Housework (But were
	afraid to ask)},
 booktitle = {2012 {IEEE/RSJ} International Conference on Intelligent Robots and
	Systems ({IROS)}},
 year = {2012},
 address = {Vilamoura, Portugal},
 month = {oct},
}

@inproceedings{nyga_how-models_2011,
 author = {D Nyga and M Tenorth and M Beetz},
 title = {How-Models of Human Reaching Movements in the Context of Everyday
	Manipulation Activities},
 booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
 year = {2011},
 address = {Shanghai, China},
 month = {may},
}

@inproceedings{pangercic_fast_2011,
 author = {D Pangercic and V Haltakov and M Beetz},
 title = {Fast and Robust Object Detection in Household Environments Using
	Vocabulary Trees with {SIFT} Descriptors},
 booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems
	({IROS)}, Workshop on Active Semantic Perception and Object Search
	in the Real World},
 year = {2011},
 address = {San Francisco, {CA}, {USA}},
 month = {sep},
}

@book{pangercic_robot_2011,
 title = {A Robot that Shops for and Stores Groceries},
 year = {2011},
 author = {D Pangercic and K Mathe and ZC Marton and LC Goron and MS Opris and M Schuster and M Tenorth and D Jain and T Ruehr and M Beetz},
 address = {San Francisco, {CA}, {USA}},
 month = {aug},
 url = {http://youtu.be/x0Ybod_6ADA},
}

@inproceedings{pangercic_3d-based_2008,
 author = {D Pangercic and RB Rusu and M Beetz},
 title = {{3D-Based} Monocular {SLAM} for Mobile Agents Navigating in Indoor
	Environments},
 booktitle = {Proceedings of the 13th {IEEE} International Conference on Emerging
	Technologies and Factory Automation ({ETFA)}, Hamburg, Germany, September
	15-18},
 year = {2008},
}

@inproceedings{pangercic_visual_2009,
 author = {D Pangercic and R Tavcar and M Tenorth and M Beetz},
 title = {Visual Scene Detection and Interpretation using Encyclopedic Knowledge
	and Formal Description Logic},
 booktitle = {Proceedings of the International Conference on Advanced Robotics
	({ICAR).}},
 year = {2009},
 address = {Munich, Germany},
 month = {jun},
}

@inproceedings{pangercic_combining_2010,
 author = {D Pangercic and M Tenorth and D Jain and M Beetz},
 title = {Combining Perception and Knowledge Processing for Everyday Manipulation},
 booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems
	({IROS)}},
 year = {2010},
 pages = {1065--1071},
 address = {Taipei, Taiwan},
 month = {oct},
}

@inproceedings{pangercic_semantic_2012,
 author = {D Pangercic and M Tenorth and B Pitzer and M Beetz},
 title = {Semantic Object Maps for Robotic Housework - Representation, Acquisition
	and Use},
 booktitle = {2012 {IEEE/RSJ} International Conference on Intelligent Robots and
	Systems ({IROS)}},
 year = {2012},
 address = {Vilamoura, Portugal},
 month = {oct},
}

@inproceedings{pietzsch_face_2008,
 author = {S Pietzsch and M Wimmer and F Stulp and B Radig},
 title = {Face Model Fitting with Generic, Group-specific, and Person-specific
	Objective Functions},
 booktitle = {3rd International Conference on Computer Vision Theory and Applications
	({VISAPP)}},
 year = {2008},
 volume = {2},
 pages = {5--12},
 address = {Madeira, Portugal},
 month = {jan},
 abstract = {In model-based fitting, the model parameters that best fit the image
	are determined by searching for the optimum of an objective function.
	Often, this function is designed manually, based on implicit and
	domain-dependent knowledge. We acquire more robust objective functions
	by learning them from annotated images, in which many critical decisions
	are automated, and the remaining manual steps do not require domain
	knowledge. Still, the trade-off between generality and accuracy remains.
	General functions can be applied to a large range of objects, whereas
	specific functions describe a subset of objects more accurately.
	Gross et al. have demonstrated this principle by comparing generic
	to person-specific Active Appearance Models. As it is impossible
	to learn a person-specific objective function for the entire human
	population, we automatically partition the training images and then
	learn partition-specific functions. The number of groups influences
	the specificity of the learned functions. We automatically determine
	the optimal partitioning given the number of groups, by minimizing
	the expected fitting error. Our empirical evaluation demonstrates
	that the group-specific objective functions more accurately describe
	the images of the corresponding group. The results of this paper
	are especially relevant to face model tracking, as individual faces
	will not change throughout an image sequence.},
 keywords = {facial expressions},
}

@article{radig_perception_2011,
 author = {B Radig and C Mayer},
 title = {Perception as a key component for cognitive technical systems},
 journal = {Pattern Recognition and Image Analysis},
 year = {2011},
 volume = {21},
 pages = {160--163},
 number = {2},
 month = {jun},
 doi = {10.1134/S1054661811020921},
 issn = {1054-6618, 1555-6212},
 language = {en},
 url = {http://link.springer.com/10.1134/S1054661811020921},
 urldate = {2014-05-15},
}

@inproceedings{riaz_image_2009,
 author = {Z Riaz and M Beetz and B Radig},
 title = {Image Normalization for Face Recognition using {3D} Model},
 booktitle = {International Conference of Information and Communication Technologies,
	Karachi, Pakistan},
 year = {2009},
 publisher = {{IEEE}},
 keywords = {facial expressions},
}

@inproceedings{riaz_shape_2008,
 author = {Z Riaz and M Beetz and B Radig},
 title = {Shape Invariant Recognition of Segmented Human Faces using Eigenfaces},
 booktitle = {Proceedings of the 12th International Multitopic Conference},
 year = {2008},
 publisher = {{IEEE}},
 keywords = {facial expressions},
}

@inproceedings{riaz_unified_2009,
 author = {Z Riaz and S Gedikli and M Beetz and B Radig},
 title = {A Unified Features Approach to Human Face Image Analysis and Interpretation},
 booktitle = {Affective Computing and Intelligent Interaction, Amsterdam, Netherlands},
 year = {2009},
 publisher = {{IEEE}},
 keywords = {facial expressions},
}

@inproceedings{riaz_3d_2009,
 author = {Z Riaz and C Mayer and M Beetz and B Radig},
 title = {{3D} Model for Face Recognition across Facial Expressions},
 booktitle = {Biometric {ID} Management and Multimodal Communication, Madrid, Spain},
 year = {2009},
 publisher = {Springer},
 keywords = {facial expressions},
}

@inproceedings{riaz_facial_2009,
 author = {Z Riaz and C Mayer and M Beetz and B Radig},
 title = {Facial Expressions Recognition from Image Sequences},
 booktitle = {2nd International Conference on Cross-Modal Analysis of Speech, Gestures,
	Gaze and Facial Expressions, Prague, Czech Republic},
 year = {2009},
 publisher = {Springer},
 keywords = {facial expressions},
}

@inproceedings{riaz_model_2009-1,
 author = {Z Riaz and C Mayer and M Beetz and B Radig},
 title = {Model Based Analysis of Face Images for Facial Feature Extraction},
 booktitle = {Computer Analysis of Images and Patterns, Münster, Germany},
 year = {2009},
 publisher = {Springer},
 keywords = {facial expressions},
}

@inproceedings{riaz_multi-feature_2009,
 author = {Z Riaz and C Mayer and S Sarfraz and Mand RB Beetz},
 title = {Multi-Feature Fusion in Advanced Robotics Applications},
 booktitle = {International Conference on Frontiers of Information Technology},
 year = {2009},
 publisher = {{ACM}},
}

@inproceedings{riaz_model_2009,
 author = {Z Riaz and C Mayer and M Wimmer and M Beetz and B Radig},
 title = {A Model Based Approach for Expression Invariant Face Recognition},
 booktitle = {3rd International Conference on Biometrics, Alghero Italy},
 year = {2009},
 publisher = {Springer},
 keywords = {facial expressions},
}

@article{riaz_model_2008,
 author = {Z Riaz and C Mayer and M Wimmer and B Radig},
 title = {Model Based Face Recognition Across Facial Expressions},
 journal = {Journal of Information and Communication Technology},
 year = {2008},
 month = {dec},
 keywords = {facial expressions},
}

@phdthesis{ridder_interpretation_2000,
 author = {C Ridder},
 title = {Interpretation von Videobildfolgen zur Beobachtung artikulärer Bewegung
	von Personen anhand eines generischen {3D} Objektmodells},
 school = {Technische Universität München, Fachbereich Informatik},
 year = {2000},
}

@inproceedings{rink_feature_2013,
 author = {C Rink and ZC Marton and D Seth and T Bodenmüller and M Suppa},
 title = {Feature based particle filter registration of {3D} surface models
	and its application in robotics},
 booktitle = {2013 {IEEE/RSJ} International Conference on Intelligent Robots
	and Systems ({IROS)}},
 year = {2013},
 pages = {3187--3194},
 publisher = {{IEEE}},
}

@inproceedings{rohrbach_database_2012,
 author = {M Rohrbach and S Amin and M Andriluka and B Schiele},
 title = {A Database for Fine Grained Activity Detection of Cooking Activities},
 booktitle = {2012 {IEEE} Conference on Computer Vision and Pattern Recognition
	({CVPR)}},
 year = {2012},
 address = {Providence, United States},
 month = {jun},
}

@inproceedings{rohrbach_script_2012,
 author = {M Rohrbach and M Regneri and M Andriluka and S Amin and M Pinkal and B Schiele},
 title = {Script Data for Attribute-based Recognition of Composite Activities},
 booktitle = {Computer Vision - {ECCV} 2012 : 12th European Conference on Computer
	Vision},
 year = {2012},
 volume = {2012},
 series = {Lecture Notes in Computer Science},
 address = {Firenze, Italy},
 month = {oct},
 publisher = {Springer},
}

@inproceedings{ruiz-ugalde_fast_2011,
 author = {F Ruiz-Ugalde and G Cheng and M Beetz},
 title = {Fast adaptation for effect-aware pushing},
 booktitle = {11th {IEEE-RAS} International Conference on Humanoid Robots},
 year = {2011},
 address = {Bled, Slovenia},
 month = {oct},
}

@inproceedings{ruiz-ugalde_prediction_2010,
 author = {F Ruiz-Ugalde and G Cheng and M Beetz},
 title = {Prediction of action outcomes using an object model},
 booktitle = {2010 {IEEE/RSJ} International Conference on Intelligent Robots and
	Systems ({IROS)}},
 year = {2010},
 address = {Taipei, Taiwan},
 month = {oct},
}

@article{rusu_acquiring_2006,
 author = {RB Rusu},
 title = {Acquiring Models of Everyday Activities for Robotic Control in 'Current
	{PhD} Research in Pervasive Computing'},
 journal = {Technical Reports - University of Munich, Department of Computer
	Science, Media Informatics Group},
 year = {2006},
 volume = {{LMU-MI-2005-3}},
 month = {mar},
 abstract = {Intelligent sensor equipped environments can be of much greater help
	if they are capable of recognizing the actions and activities of
	their users, and inferring their intentions. An intelligent kitchen
	that recognizes what a person is looking for can highlight the target
	object. An oven noticing that the cook is on the phone can reduce
	the heating temperature, in order to avoid the meal getting burnt.
	In my dissertation research, I investigate the representation of
	models of everyday activities and study how such models can be learned
	from sensory data.},
 editor = {Ferscha, A. and Langheinrich, M. and Schmidt, A.},
 issn = {1862-5207},
}

@phdthesis{rusu_semantic_2009,
 author = {RB Rusu},
 title = {Semantic {3D} Object Maps for Everyday Manipulation in Human Living
	Environments},
 school = {Technische Universität München},
 year = {2009},
}

@inproceedings{rusu_action_2008,
 author = {RB Rusu and J Bandouch and ZC Marton and N Blodow and M Beetz},
 title = {Action Recognition in Intelligent Environments using Point Cloud
	Features Extracted from Silhouette Sequences},
 booktitle = {{IEEE} 17th International Symposium on Robot and Human Interactive
	Communication ({RO-MAN)}, Muenchen, Germany},
 year = {2008},
 abstract = {In this paper we present our work on human action recognition in intelligent
	environments. We classify actions by looking at a time-sequence of
	silhouettes extracted from various camera images. By treating time
	as the third spatial dimension we generate so-called space-time shapes
	that contain rich information about the actions. We propose a novel
	approach for recognizing actions, by representing the shapes as {3D}
	point clouds and estimating feature histograms for them. Preliminary
	results show that our method robustly derives different classes of
	actions, even in the presence of large variability in the data, coming
	from different persons at different time intervals.},
}

@article{rusu_human_2009,
 author = {RB Rusu and J Bandouch and F Meier and I Essa and M Beetz},
 title = {Human Action Recognition using Global Point Feature Histograms and
	Action Shapes},
 journal = {Advanced Robotics Journal, Robotics Society of Japan ({RSJ)}},
 year = {2009},
 abstract = {This article investigates the recognition of human actions from {3D}
	point clouds that encode the motions of people acting in sensor-distributed
	indoor environments. Data streams are time-sequences of silhouettes
	extracted from cameras in the environment. From the {2D} silhouette
	contours we generate space-time streams by continuously aligning
	and stacking the contours along the time axis as third spatial dimension.
	The space-time stream of an observation sequence is segmented into
	parts corresponding to subactions using a pattern matching technique
	based on suffix trees and interval scheduling. Then, the segmented
	space-time shapes are processed by treating the shapes as {3D} point
	clouds and estimating global point feature histograms for them. The
	resultant models are clustered using statistical analysis, and our
	experimental results indicate that the presented methods robustly
	derive different action classes. This holds despite large intra-class
	variance in the recorded datasets due to performances from different
	persons at different time intervals.},
}

@inproceedings{rusu_fast_2009-1,
 author = {RB Rusu and N Blodow and M Beetz},
 title = {Fast Point Feature Histograms ({FPFH)} for {3D} Registration},
 booktitle = {Proceedings of the {IEEE} International Conference on Robotics and
	Automation ({ICRA)}, Kobe, Japan, May 12-17},
 year = {2009},
}

@inproceedings{rusu_close-range_2009,
 author = {RB Rusu and N Blodow and ZC Marton and M Beetz},
 title = {Close-range Scene Segmentation and Reconstruction of {3D} Point Cloud
	Maps for Mobile Manipulation in Human Environments},
 booktitle = {Proceedings of the {IEEE/RSJ} International Conference on Intelligent
	Robots and Systems ({IROS)}},
 year = {2009},
 address = {St. Louis, {MO}, {USA}},
 month = {oct},
}

@inproceedings{rusu_aligning_2008,
 author = {RB Rusu and N Blodow and ZC Marton and M Beetz},
 title = {Aligning Point Cloud Views using Persistent Feature Histograms},
 booktitle = {Proceedings of the 21st {IEEE/RSJ} International Conference on Intelligent
	Robots and Systems ({IROS)}, Nice, France, September 22-26},
 year = {2008},
}

@inproceedings{rusu_towards_2007,
 author = {RB Rusu and N Blodow and ZC Marton and A Soos and M Beetz},
 title = {Towards {3D} Object Maps for Autonomous Household Robots},
 booktitle = {Proceedings of the 20th {IEEE} International Conference on Intelligent
	Robots and Systems ({IROS)}},
 year = {2007},
 address = {San Diego, {CA}, {USA}},
 abstract = {This paper describes a mapping system that acquires {3D} object models
	of man-made indoor environments such as kitchens. The system segments
	and geometrically reconstructs cabinets with doors, tables, drawers,
	and shelves, objects that are important for robots retrieving and
	manipulating objects in these environments. The system also acquires
	models of objects of daily use such as glasses, plates, and ingredients.
	The models enable the recognition of the objects in cluttered scenes
	and the classification of newly encountered objects. Key technical
	contributions include (1) a robust, accurate, and efficient algorithm
	for constructing complete object models from {3D} point clouds constituting
	partial object views, (2) feature-based recognition procedures for
	cabinets, tables, and other task-relevant furniture objects, and
	(3) automatic inference of object instance and class signatures for
	objects of daily use that enable robots to reliably recognize the
	objects in cluttered and real task contexts. We present results from
	the sensor-based mapping of a real kitchen.},
}

@article{rusu_robots_2008,
 author = {RB Rusu and B Gerkey and M Beetz},
 title = {Robots in the kitchen: Exploiting ubiquitous sensing and actuation},
 journal = {Robotics and Autonomous Systems Journal (Special Issue on Network
	Robot Systems)},
 year = {2008},
 abstract = {Our goal is to develop intelligent service robots that operate in
	standard human environments, automating common tasks. In pursuit
	of this goal, we follow the ubiquitous robotics paradigm, in which
	intelligent perception and control are combined with ubiquitous computing.
	By exploiting sensors and effectors in its environment, a robot can
	perform more complex tasks without becoming overly complex itself.
	Following this insight, we have developed a service robot that operates
	autonomously in a sensor-equipped kitchen. The robot learns from
	demonstration and performs sophisticated tasks in concert with the
	network of devices in its environment. We report on the design, implementation,
	and usage of this system, which is freely available for use and improvement
	by others in the research community.},
}

@inproceedings{rusu_fast_2009,
 author = {RB Rusu and A Holzbach and N Blodow and M Beetz},
 title = {Fast Geometric Point Labeling using Conditional Random Fields},
 booktitle = {Proceedings of the {IEEE/RSJ} International Conference on Intelligent
	Robots and Systems ({IROS)}},
 year = {2009},
 address = {St. Louis, {MO}, {USA}},
 month = {oct},
}

@inproceedings{rusu_detecting_2009,
 author = {RB Rusu and A Holzbach and G Bradski and M Beetz},
 title = {Detecting and Segmenting Objects for Mobile Manipulation},
 booktitle = {Proceedings of {IEEE} Workshop on Search in {3D} and Video ({S3DV)},
	held in conjunction with the 12th {IEEE} International Conference
	on Computer Vision ({ICCV)}},
 year = {2009},
 address = {Kyoto, Japan},
 month = {sep},
}

@inproceedings{rusu_perception_2009,
 author = {RB Rusu and A Holzbach and R Diankov and G Bradski and M Beetz},
 title = {Perception for Mobile Manipulation and Grasping using Active Stereo},
 booktitle = {9th {IEEE-RAS} International Conference on Humanoid Robots (Humanoids)},
 year = {2009},
 address = {Paris, France},
 month = {dec},
}

@inproceedings{rusu_extending_2007,
 author = {RB Rusu and A Maldonado and M Beetz and B Gerkey},
 title = {Extending {Player/Stage/Gazebo} towards Cognitive Robots Acting in
	Ubiquitous Sensor-equipped Environments},
 booktitle = {Proceedings of the {IEEE} International Conference on Robotics and
	Automation ({ICRA)} Workshop for Network Robot Systems, 2007, April
	14, Rome, Italy},
 year = {2007},
 abstract = {Standardized middleware for autonomous robot control has proven itself
	to enable faster deployment of robots, to make robot control code
	more interchangeable, and experiments easier to replicate. Unfortunately,
	the support provided by current middleware is in most cases limited
	to what current robots do: navigation. However, as we tackle more
	ambitious service robot applications, more comprehensive middleware
	support is needed. We increasingly need the middleware to support
	ubiquitous sensing infrastructures, robot manipulation tasks, and
	cognitive capabilities. In this paper we describe and discuss current
	extensions of the {Player/Stage/Gazebo} ({P/S/G)} middleware, one
	of the most widely used robot middlewares, of which we are active
	developers, that satisfy these requirements.},
}

@inproceedings{rusu_player-stage_2006,
 author = {RB Rusu and A Maldonado and M Beetz and M Kranz and L Mösenlechner and P Holleis and A Schmidt},
 title = {{Player/Stage} as Middleware for Ubiquitous Computing},
 booktitle = {Proceedings of the 8th Annual Conference on Ubiquitous Computing
	(Ubicomp 2006), Orange County California, September 17-21},
 year = {2006},
 abstract = {The effective development and deployment of comprehensive and heterogeneous
	ubiquitous computing applications is hindered by the lack of a comprehensive
	middleware infrastructure: interfaces to sensors are company specific
	and sometimes even product specific. Typically, these interfaces
	also do not sustain the development of robust systems that make use
	of sensor data fusion. In this paper, we propose the use of {Player/Stage},
	a middleware commonly used as a de facto standard by the robotics
	community, as the backbone of a heterogeneous ubiquitous system.
	{Player/Stage} offers many features needed in ubicomp, mostly because
	dealing with uncertainty and many different sensor and actuator systems
	has been a long term problem in robotics as well. We emphasize the
	key features of the {Player/Stage} project, and show how ubicomp
	devices can be integrated into the system, as well as how existing
	devices can be used. On top of that, we present our sensor-enabled
	{AwareKitchen} environment which makes use of automatic data analysis
	algorithms integrated as drivers in the {Player/Stage} platform.
	All our work is released as open source software under the {Player/Stage}
	package, of which we are active developers.},
}

@inproceedings{rusu_interpretation_2008,
 author = {RB Rusu and ZC Marton and N Blodow and M Beetz},
 title = {Interpretation of Urban Scenes based on Geometric Features},
 booktitle = {Proceedings of the 21st {IEEE/RSJ} International Conference on Intelligent
	Robots and Systems ({IROS)} Workshop on {3D} Mapping, Nice, France,
	September 26},
 year = {2008},
}

@inproceedings{rusu_learning_2008,
 author = {RB Rusu and ZC Marton and N Blodow and M Beetz},
 title = {Learning Informative Point Classes for the Acquisition of Object
	Model Maps},
 booktitle = {Proceedings of the 10th International Conference on Control, Automation,
	Robotics and Vision ({ICARCV)}, Hanoi, Vietnam, December 17-20},
 year = {2008},
}

@inproceedings{rusu_persistent_2008,
 author = {RB Rusu and ZC Marton and N Blodow and M Beetz},
 title = {Persistent Point Feature Histograms for {3D} Point Clouds},
 booktitle = {Proceedings of the 10th International Conference on Intelligent Autonomous
	Systems ({IAS-10)}, Baden-Baden, Germany},
 year = {2008},
 abstract = {This paper proposes a novel way of characterizing the local geometry
	of {3D} points, using persistent feature histograms. The relationships
	between the neighbors of a point are analyzed and the resulting values
	are stored in a 16-bin histogram. The histograms are pose and point
	cloud density invariant and cope well with noisy datasets. We show
	that geometric primitives have unique signatures in this feature
	space, preserved even in the presence of additive noise. To extract
	a compact subset of points which characterizes a point cloud dataset,
	we perform an in-depth analysis of all point feature histograms using
	different distance metrics. Preliminary results show that point clouds
	can be roughly segmented based on the uniqueness of geometric primitives
	feature histograms. We validate our approach on datasets acquired
	from laser sensors in indoor (kitchen) environments.},
}

@article{rusu_towards_2008,
 author = {RB Rusu and ZC Marton and N Blodow and M Dolha and M Beetz},
 title = {Towards {3D} Point Cloud Based Object Maps for Household Environments},
 journal = {Robotics and Autonomous Systems Journal (Special Issue on Semantic
	Knowledge in Robotics)},
 year = {2008},
 volume = {56},
 pages = {927--941},
 number = {11},
 month = {nov},
}

@inproceedings{rusu_functional_2008,
 author = {RB Rusu and ZC Marton and N Blodow and ME Dolha and M Beetz},
 title = {Functional Object Mapping of Kitchen Environments},
 booktitle = {Proceedings of the 21st {IEEE/RSJ} International Conference on Intelligent
	Robots and Systems ({IROS)}, Nice, France, September 22-26},
 year = {2008},
}

@inproceedings{rusu_model-based_2009,
 author = {RB Rusu and ZC Marton and N Blodow and A Holzbach and M Beetz},
 title = {Model-based and Learned Semantic Object Labeling in {3D} Point Cloud
	Maps of Kitchen Environments},
 booktitle = {Proceedings of the {IEEE/RSJ} International Conference on Intelligent
	Robots and Systems ({IROS)}},
 year = {2009},
 address = {St. Louis, {MO}, {USA}},
 month = {oct},
}

@inproceedings{rusu_laser-based_2009,
 author = {RB Rusu and W Meeussen and S Chitta and M Beetz},
 title = {Laser-based Perception for Door and Handle Identification},
 booktitle = {Proceedings of the International Conference on Advanced Robotics
	({ICAR)}},
 year = {2009},
 address = {Munich},
 month = {jun},
}

@inproceedings{rusu_real-time_2009,
 author = {RB Rusu and IA Sucan and B Gerkey and S Chitta and M Beetz and LE Kavraki},
 title = {Real-time Perception-Guided Motion Planning for a Personal Robot},
 booktitle = {Proceedings of the {IEEE/RSJ} International Conference on Intelligent
	Robots and Systems ({IROS)}},
 year = {2009},
 pages = {4245--4252},
 address = {St. Louis, {MO}, {USA}},
 month = {oct},
}

@inproceedings{rusu_leaving_2008,
 author = {RB Rusu and A Sundaresan and B Morisset and M Agrawal and M Beetz},
 title = {Leaving Flatland: Realtime {3D} Stereo Semantic Reconstruction},
 booktitle = {Proceedings of the International Conference on Intelligent Robotics
	and Applications ({ICIRA)} 2008, October 15-17, Wuhan, China},
 year = {2008},
}

@inproceedings{rusu_realtime_2008,
 author = {RB Rusu and A Sundaresan and B Morisset and M Agrawal and M Beetz and K Konolige},
 title = {Realtime Extended {3D} Reconstruction from Stereo for Navigation},
 booktitle = {Proceedings of the 21st {IEEE/RSJ} International Conference on Intelligent
	Robots and Systems ({IROS)} Workshop on {3D} Mapping, Nice, France,
	September 26},
 year = {2008},
}

@article{rusu_leaving_2009,
 author = {RB Rusu and A Sundaresan and B Morisset and K Hauser and M Agrawal and JC Latombe and M Beetz},
 title = {Leaving Flatland: Efficient Real-Time {3D} Navigation},
 journal = {Journal of Field Robotics ({JFR)}},
 year = {2009},
}

@inproceedings{ruhr_structured_2008,
 author = {T Rühr and D Pangercic and M Beetz},
 title = {Structured Reactive Controllers and Transformational Planning for
	Manufacturing},
 booktitle = {Proceedings of the 13th {IEEE} International Conference on Emerging
	Technologies and Factory Automation ({ETFA)}, Hamburg, Germany, September
	15-18},
 year = {2008},
}

@inproceedings{ruhr_generalized_2012,
 author = {T Rühr and J Sturm and D Pangercic and M Beetz and D Cremers},
 title = {A Generalized Framework for Opening Doors and Drawers in Kitchen
	Environments},
 booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
 year = {2012},
 address = {St. Paul, {MN}, {USA}},
 month = {may},
}

@inproceedings{sachenbacher_test_2008,
 author = {M Sachenbacher and P Maier},
 title = {Test Strategy Generation using Quantified {CSPs}},
 booktitle = {Proc. International Conference on Principles and Practice of Constraint
	Programming ({CP'08)}},
 year = {2008},
}

@inproceedings{sachenbacher_model-based_2008,
 author = {M Sachenbacher and S Schwoon},
 title = {Model-based Testing Using Quantified {CSPs:} A Map},
 booktitle = {Proc. Workshop on Model-based Systems ({MBS-2008)}},
 year = {2008},
 pages = {37--41},
 address = {Patras, Greece},
}

@inproceedings{sachenbacher_model-based_2008-1,
 author = {M Sachenbacher and S Schwoon},
 title = {Model-based Test Generation Using Quantified {CSPs}},
 booktitle = {Proc. International Workshop on Principles of Diagnosis ({DX'08)}},
 year = {2008},
}

@inproceedings{saito_semantic_2011,
 author = {M Saito and H Chen and K Okada and M Inaba and L Kunze and M Beetz},
 title = {Semantic Object Search in Large-scale Indoor Environments},
 booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems
	({IROS)}, Workshop on Active Semantic Perception and Object Search
	in the Real World},
 year = {2011},
 address = {San Francisco, {CA}, {USA}},
 month = {sep},
}

@inproceedings{sarfraz_bayesian_2009,
 author = {M. S. Sarfraz and A. Saeed and M. H. Khan and Z Riaz},
 title = {Bayesian Prior Models for Vehicle Make and Model Recognition},
 booktitle = {International Conference on Frontiers of Information Technology},
 year = {2009},
 publisher = {{ACM}},
}

@phdthesis{schmitt_vision-based_2004,
 author = {T Schmitt},
 title = {Vision-based Probabilistic State Estimation for Cooperating Autonomous
	Robots},
 school = {Department of Informatics, Technische Universität München},
 year = {2004},
 url = {http://tumb1.biblio.tu-muenchen.de/publ/diss/in/2004/schmitt.html},
}

@inproceedings{schmitt_designing_2003,
 author = {T Schmitt and M Beetz},
 title = {Designing Probabilistic State Estimators for Autonomous Robot Control},
 booktitle = {{IEEE/RSJ} Intl. Conf. on Intelligent Robots and Systems ({IROS)}},
 year = {2003},
 abstract = {This paper sketches and discusses design options for complex probabilistic
	state estimators and investigates their interactions and their impact
	on performance. We consider, as an example, the estimation of game
	states in autonomous robot soccer. We show that many factors other
	than the choice of algorithms determine the performance of the estimation
	systems. We propose empirical investigations and learning as necessary
	tools for the development of successful state estimation systems.},
}

@inproceedings{schmitt_watch_2002,
 author = {T Schmitt and M Beetz and R Hanek and S Buck},
 title = {Watch their Moves: Applying Probabilistic Multiple Object Tracking
	to Autonomous Robot Soccer},
 booktitle = {The Eighteenth National Conference on Artificial Intelligence},
 year = {2002},
 address = {Edmonton, Canada},
 abstract = {In many autonomous robot applications robots must be capable of estimating
	the positions and motions of moving objects in their environments.
	In this paper, we apply probabilistic multiple object tracking to
	estimating the positions of opponent players in autonomous robot
	soccer. We extend an existing tracking algorithm to handle multiple
	mobile sensors with uncertain positions, discuss the specification
	of probabilistic models needed by the algorithm, and describe the
	required vision-interpretation algorithms. The multiple object tracking
	has been successfully applied throughout the {RoboCup} 2001 world
	championship.},
}

@inproceedings{schmitt_agilo_2001,
 author = {T Schmitt and S Buck and M Beetz},
 title = {{AGILO} {RoboCuppers} 2001: Utility- and Plan-based Action Selection
	based on Probabilistically Estimated Game Situations},
 booktitle = {5th International Workshop on {RoboCup} (Robot World Cup Soccer Games
	and Conferences)},
 year = {2001},
 editor = {Stone, P. and Balch, T. and Kraetzschmar, G.},
 series = {Lecture Notes in Computer Science},
 publisher = {Springer Verlag},
 abstract = {This paper describes the {AGILO} {RoboCuppers}, the {RoboCup} team
	of the image understanding group ({FG} {BV)} at the Technische Universität
	München. With a team of four Pioneer I robots, all equipped with
	{CCD} camera and a single board computer, we've participated in all
	international middle size league tournaments from 1998 until 2001.
	We use a modular approach of concurrent subprograms for image processing,
	self localization, object tracking, action selection, path planning
	and basic robot control. A fast feature extraction process provides
	the data necessary for the on-board scene interpretation. All robot
	observations are fused into a single environmental model, which forms
	the basis for action selection, path planning and low-level robot
	control.},
}

@inproceedings{schmitt_developing_2003,
 author = {T Schmitt and R Hanek and M Beetz},
 title = {Developing Comprehensive State Estimators for Robot Soccer},
 booktitle = {{RoboCup} International Symposium 2003},
 year = {2003},
 address = {Padova},
 abstract = {This paper sketches and discusses design options for complex probabilistic
	state estimators and investigates their interactions and their impact
	on performance. We consider, as an example, the estimation of game
	states in autonomous robot soccer. We show that many factors other
	than the choice of algorithms determine the performance of the estimation
	systems. We propose empirical investigations and learning as necessary
	tools for the development of successful state estimation systems.},
}

@article{schmitt_cooperative_2002,
 author = {T Schmitt and R Hanek and M Beetz and S Buck and B Radig},
 title = {Cooperative Probabilistic State Estimation for Vision-based Autonomous
	Mobile Robots},
 journal = {{IEEE} Transactions on Robotics and Automation},
 year = {2002},
 volume = {18},
 number = {5},
 month = {oct},
 abstract = {With the services that autonomous robots are to provide becoming more
	demanding, the states that the robots have to estimate become more
	complex. In this article, we develop and analyze a probabilistic,
	vision-based state estimation method for individual, autonomous robots.
	This method enables a team of mobile robots to estimate their joint
	positions in a known environment and track the positions of autonomously
	moving objects. The state estimators of different robots cooperate
	to increase the accuracy and reliability of the estimation process.
	This cooperation between the robots enables them to track temporarily
	occluded objects and to faster recover their position after they
	have lost track of it. The method is empirically validated based
	on experiments with a team of physical robots.},
}

@inproceedings{schmitt_cooperative_2001,
 author = {T Schmitt and R Hanek and S Buck and M Beetz},
 title = {Cooperative Probabilistic State Estimation for Vision-based Autonomous
	Mobile Robots},
 booktitle = {Proc. of the {IEEE/RSJ} International Conference on Intelligent Robots
	and Systems ({IROS)}},
 year = {2001},
 pages = {1630--1638},
 address = {Maui, Hawaii},
 abstract = {With the services that autonomous robots are to provide becoming more
	demanding, the states that the robots have to estimate become more
	complex. In this paper, we develop and analyze a probabilistic, vision-based
	state estimation method for individual, autonomous robots. This
	method enables a team of mobile robots to estimate their joint positions
	in a known environment and track the positions of autonomously moving
	objects. The state estimators of different robots cooperate to increase
	the accuracy and reliability of the estimation process. This cooperation
	between the robots enables them to track temporarily occluded objects
	and to faster recover their position after they have lost track of
	it. The method is empirically validated based on experiments with
	a team of physical robots.},
}

@inproceedings{schmitt_cooperative_2001-1,
 author = {T Schmitt and R Hanek and S Buck and M Beetz},
 title = {Cooperative Probabilistic State Estimation for Vision-based Autonomous
	Soccer Robots},
 booktitle = {{DAGM} Symposium},
 year = {2001},
 volume = {2191},
 series = {Lecture Notes in Computer Science},
 pages = {321--328},
 publisher = {Springer},
}

@inproceedings{schmitt_cooperative_2001-2,
 author = {T Schmitt and R Hanek and S Buck and M Beetz},
 title = {Cooperative Probabilistic State Estimation for Vision-based Autonomous
	Soccer Robots},
 booktitle = {{RoboCup} International Symposium 2001},
 year = {2001},
 address = {Seattle, {USA}},
 abstract = {With the services that autonomous robots are to provide becoming more
	demanding, the states that the robots have to estimate become more
	complex. In this paper, we develop and analyze a probabilistic, vision-based
	state estimation method for individual, autonomous robots. This
	method enables a team of mobile robots to estimate their joint positions
	in a known environment and track the positions of autonomously moving
	objects. The state estimators of different robots cooperate to increase
	the accuracy and reliability of the estimation process. This cooperation
	between the robots enables them to track temporarily occluded objects
	and to faster recover their position after they have lost track of
	it. The method is empirically validated based on experiments with
	a team of physical robots.},
}

@inproceedings{schroter_acquiring_2004,
 author = {D Schröter and M Beetz},
 title = {Acquiring Models of Rectangular Objects for Robot Maps},
 booktitle = {Proc. of {IEEE} International Conference on Robotics and Automation
	({ICRA)}, New {Orleans/USA}},
 year = {2004},
 abstract = {State-of-the-art robot mapping approaches are capable of acquiring
	impressively accurate {2D} and {3D} models of their environments.
	To the best of our knowledge few of them can acquire models of task-relevant
	objects. In this paper, we introduce a novel method for acquiring
	models of task-relevant objects from stereo images. The proposed
	algorithm applies methods from projective geometry and works for
	rectangular objects, which are, in office- and museum-like environments,
	the most commonly found subclass of geometric objects. The method
	is shown to work accurately and for a wide range of viewing angles
	and distances.},
}

@inproceedings{schroter_rg_2004,
 author = {D Schröter and M Beetz},
 title = {{RG} Mapping: Building Object-Oriented Representations of Structured
	Human Environments},
 booktitle = {6-th Open Russian-German Workshop on Pattern Recognition and Image
	Understanding ({OGRW)}, {Katun/Russia}},
 year = {2004},
 abstract = {We present a new approach to mapping of indoor environments, where
	the environment structure in terms of regions and gateways is automatically
	extracted, while the robot explores. Objects, both in {2D} and {3D},
	are modelled explicitly in those maps and allow for robust localization.
	We refer to those maps as object-oriented environment representations
	or Region \& Gateway Maps. Region \& Gateway Mapping is capable of
	acquiring very compact, structured, and semantically annotated maps.
	We show that those maps can be built online and that they are extremely
	useful in plan-based control of autonomous robots as well as for
	robot-human interaction.},
}

@inproceedings{schroter_rg_2002,
 author = {D Schröter and M Beetz and J.-S. Gutmann},
 title = {{RG} Mapping: Learning Compact and Structured {2D} Line Maps of Indoor
	Environments},
 booktitle = {11th {IEEE} International Workshop on Robot and Human Interactive
	Communication ({ROMAN)}, {Berlin/Germany}},
 year = {2002},
 abstract = {In this paper we present Region \& Gateway ({RG)} Mapping, a novel
	approach to laser-based {2D} line mapping of indoor environments.
	{RG} Mapping is capable of acquiring very compact, structured, and
	semantically annotated maps. We present and empirically analyze the
	method based on map acquisition experiments with autonomous mobile
	robots. The experiments show that {RG} mapping drastically compresses
	the data contained in line scan maps without substantial loss of
	accuracy.},
}

@inproceedings{schroter_detection_2004,
 author = {D Schröter and T. Weber and M Beetz and B Radig},
 title = {Detection and Classification of Gateways for the Acquisition of Structured
	Robot Maps},
 booktitle = {Proc. of 26th Pattern Recognition Symposium ({DAGM)}, {Tübingen/Germany}},
 year = {2004},
 abstract = {The automatic acquisition of structured object maps requires sophisticated
	perceptual mechanisms that enable the robot to recognize the objects
	that are to be stored in the robot map. This paper investigates a
	particular object recognition problem: the automatic detection and
	classification of gateways in office environments based on laser
	range data. We will propose, discuss, and empirically evaluate a
	sensor model for crossing gateways and different approaches to gateway
	classification including simple maximum classifiers and {HMM-based}
	classification of observation sequences.},
}

@inproceedings{schubo_subsequent_2008,
 author = {A Schubö and A Maldonado and S Stork and M Beetz},
 title = {Subsequent Actions Influence Motor Control Parameters of a Current
	Grasping Action},
 booktitle = {{IEEE} 17th International Symposium on Robot and Human Interactive
	Communication ({RO-MAN)}, Muenchen, Germany},
 year = {2008},
}

@inproceedings{schuller_detection_2008,
 author = {B Schuller and M Wimmer and D Arsic and T Moosmayr and G Rigoll},
 title = {Detection of Security Related Affect and Behaviour in Passenger Transport},
 booktitle = {Proc. of the 9th {INTERSPEECH}},
 year = {2008},
 address = {Brisbane, Australia},
 month = {sep},
 publisher = {{ISCA}, {ASSTA}},
}

@inproceedings{schuller_audiovisual_2007,
 author = {B Schuller and M Wimmer and D Arsic and G Rigoll and B Radig},
 title = {Audiovisual Behavior Modeling by Combined Feature Spaces},
 booktitle = {{IEEE} International Conference on Acoustics, Speech, and Signal
	Processing ({ICASSP)}},
 year = {2007},
 volume = {2},
 pages = {733--736},
 address = {Honolulu, Hawaii, {USA}},
 month = {apr},
 isbn = {1-4244-0728-1},
}

@inproceedings{schuller_brute-forcing_2008,
 author = {B Schuller and M Wimmer and L Mösenlechner and C Kern and G Rigoll},
 title = {Brute-Forcing Hierarchical Functionals for Paralinguistics: a Waste
	of Feature Space?},
 booktitle = {Proceedings of {ICASSP} 2008},
 year = {2008},
 address = {Las Vegas, Nevada, {USA}},
 month = {apr},
}

@inproceedings{schultz_emotionale_2007,
 author = {R. Schultz and K. Oertel and C Peter and M Wimmer and J Voskamp and B. Urban},
 title = {Emotionale Aspekte in Produktevaluationen},
 booktitle = {2. Kongress Multimediatechnik},
 year = {2007},
 address = {Wismar, Germany},
 month = {oct},
}

@inproceedings{schumacher_agentenbasiertes_2001,
 author = {J Schumacher and M Beetz},
 title = {Ein agentenbasiertes Verfahren zur effizienten Beantwortung von Lieferterminanfragen
	in einer Supply-Chain},
 booktitle = {Proceedings der Verbundtagung {VertIS} 2001},
 year = {2001},
}

@inproceedings{schuster_learning_2012,
 author = {M Schuster and D Jain and M Tenorth and M Beetz},
 title = {Learning Organizational Principles in Human Environments},
 booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
 year = {2012},
 pages = {3867--3874},
 address = {St. Paul, {MN}, {USA}},
 month = {may},
}

@phdthesis{siles_canales_automated_2014,
 author = {F Siles Canales},
 title = {Automated Semantic Annotation of Football Games from {TV} Broadcast},
 school = {Technische Universität München},
 year = {2014},
 address = {München},
 abstract = {The main objective of this thesis is to investigate mechanisms for
	the creation of a computational system, for the automated semantic
	annotation of football games from {TV} broadcast. An abstract model
	is used for the representation of football, and for storing and retrieving
	relevant information for answering football-related queries. The
	principal hypothesis is that the model can be populated, based on
	the trajectories of the targets on the field of play.},
 keywords = {soccer},
 url = {http://nbn-resolving.de/urn/resolver.pl?urn:nbn:de:bvb:91-diss-20140214-1169627-0-7},
}

@inproceedings{sosnowski_mirror_2010,
 author = {S Sosnowski and C Mayer and K Kühnlenz and B Radig},
 title = {Mirror my emotions! Combining facial expression analysis and synthesis
	on a robot},
 booktitle = {The Thirty Sixth Annual Convention of the Society for the Study of
	Artificial Intelligence and Simulation of Behaviour ({AISB2010)}},
 year = {2010},
 keywords = {facial expressions},
}

@phdthesis{stulp_tailoring_2007,
 author = {F Stulp},
 title = {Tailoring Robot Actions to Task Contexts using Action Models},
 school = {Technische Universität München},
 year = {2007},
 abstract = {In motor control, high-level goals must be expressed in terms of low-level
	motor commands. An effective approach to bridge this gap, widespread
	in both nature and robotics, is to acquire a set of temporally extended
	actions, each designed for specific goals and task contexts. An action
	selection module then selects the appropriate action in a given situation.
	In this approach, high-level goals are mapped to actions, and actions
	produce streams of motor commands. The first mapping is often ambiguous,
	as several actions or action parameterizations can achieve the same
	goal. Instead of choosing an arbitrary action or parameterization,
	the robot should select those that best fulfill some pre-specified
	requirement, such as minimal execution duration, successful execution,
	or coordination of actions with others. The key to being able to
	perform this selection lies in prediction. By predicting the performance
	of different actions and action parameterizations, the robot can
	also predict which of them best meets the requirement. Action models,
	which have many similarities with human forward models, enable robots
	to make such predictions. In this dissertation, we introduce a computational
	model for the acquisition and application of action models. Robots
	first learn action models from observed experience, and then use
	them to optimize their performance with the following methods: 1)
	Subgoal refinement, which enables robots to optimize actions in action
	sequences by predicting which action parameterization leads to the
	best performance. 2) Condition refinement and subgoal assertion,
	with which robots can adapt existing actions to novel task contexts
	and goals by predicting when action execution will fail. 3) Implicit
	coordination, in which multiple robots globally coordinate their
	actions, by locally making predictions about the performance of other
	robots. The acquisition and applications of action models have been
	realized and empirically evaluated in three robotic domains: the
	Pioneer robots of our {RoboCup} mid-size league team,
	a simulated B21 in a kitchen environment, and a {PowerCube} robot
	arm. The main principle behind this approach is that in robot controller
	design, knowledge that robots learn themselves from observed experience
	complements well the abstract knowledge that humans specify.},
 url = {http://mediatum2.ub.tum.de/node?id=617105},
}

@article{stulp_combining_2008,
 author = {F Stulp and M Beetz},
 title = {Combining Declarative, Procedural and Predictive Knowledge to Generate
	and Execute Robot Plans Efficiently and Robustly},
 journal = {Robotics and Autonomous Systems Journal (Special Issue on Semantic
	Knowledge)},
 year = {2008},
 abstract = {One of the main challenges in motor control is expressing high-level
	goals in terms of low-level actions. To do so effectively, motor
	control systems must reason about actions at different levels of
	abstraction. Grounding high-level plans in low-level actions is essential
	semantic knowledge for plan-based control of real robots. We present
	a robot control system that uses declarative, procedural and predictive
	knowledge to generate, execute and optimize plans. Declarative knowledge is
	represented in {PDDL}, durative actions constitute procedural knowledge,
	and predictive knowledge is learned by observing action executions.
	We demonstrate how learned predictive knowledge enables robots to
	autonomously optimize plan execution with respect to execution duration
	and robustness in real-time. The approach is evaluated in two different
	robotic domains.},
}

@inproceedings{stulp_learning_2008,
 author = {F Stulp and M Beetz},
 title = {Learning Predictive Knowledge to Optimize Robot Motor Control},
 booktitle = {International Conference on Cognitive Systems ({CogSys} 2008)},
 year = {2008},
}

@article{stulp_refining_2008,
 author = {F Stulp and M Beetz},
 title = {Refining the execution of abstract actions with learned action models},
 journal = {Journal of Artificial Intelligence Research ({JAIR)}},
 year = {2008},
 volume = {32},
 month = {jun},
}

@inproceedings{stulp_action_2006,
 author = {F Stulp and M Beetz},
 title = {Action Awareness – Enabling Agents to Optimize, Transform, and Coordinate
	Plans},
 booktitle = {Proceedings of the Fifth International Joint Conference on Autonomous
	Agents and Multiagent Systems ({AAMAS)}},
 year = {2006},
 abstract = {As agent systems are solving more and more complex tasks in increasingly
	challenging domains, the systems themselves are becoming more complex
	too, often compromising their adaptivity and robustness. A promising
	approach to solve this problem is to provide agents with reflective
	capabilities. Agents that can reflect on the effects and expected
	performance of their actions, are more aware and knowledgeable of
	their capabilities and shortcomings. In this paper, we introduce
	a computational model for what we call action
	awareness. To achieve this awareness, agents learn predictive action
	models from observed experience. This knowledge is then used to optimize,
	transform and coordinate plans. We apply this computational model
	to a number of typical scenarios from robotic soccer. Various experiments
	on real robots demonstrate that action awareness enables the robots
	to improve the performance of their plans substantially.},
}

@book{stulp_optimized_2005,
 title = {Optimized Execution of Action Chains through Subgoal Refinement},
 year = {2005},
 author = {F Stulp and M Beetz},
 abstract = {In this paper we propose a novel computation model for the execution
	of abstract action chains. In this computation model a robot first
	learns situation-specific performance models of abstract actions.
	It then uses these models to automatically specialize the abstract
	actions for their execution in a given action chain. This specialization
	results in refined chains that are optimized for performance. As
	a side effect this behavior optimization also appears to produce
	action chains with seamless transitions between actions.},
 url = {http://ic.arc.nasa.gov/people/sailesh/icaps2005wksp/},
}

@inproceedings{stulp_optimized_2005-1,
 author = {F Stulp and M Beetz},
 title = {Optimized Execution of Action Chains Using Learned Performance Models
	of Abstract Actions},
 booktitle = {Proceedings of the Nineteenth International Joint Conference on Artificial
	Intelligence ({IJCAI)}},
 year = {2005},
 abstract = {Many plan-based autonomous robot controllers generate chains of abstract
	actions in order to achieve complex, dynamically changing, and possibly
	interacting goals. The execution of these action chains often results
	in robot behavior that shows abrupt transitions between subsequent
	actions, causing suboptimal performance. The resulting motion patterns
	are so characteristic for robots that people imitating robotic behavior
	will do so by making abrupt movements between actions. In this paper
	we propose a novel computation model for the execution of abstract
	action chains. In this computation model a robot first learns situation-specific
	performance models of abstract actions. It then uses these models
	to automatically specialize the abstract actions for their execution
	in a given action chain. This specialization results in refined chains
	that are optimized for performance. As a side effect this behavior
	optimization also appears to produce action chains with seamless
	transitions between actions.},
}

@book{stulp_tailoring_2005,
 title = {Tailoring Action Parameterizations to Their Task Contexts},
 year = {2005},
 author = {F Stulp and M Beetz},
 abstract = {Solving complex tasks successfully and efficiently not only depends
	on what you do, but also how
	you do it. Different task contexts have different performance measures,
	and thus require different ways of executing an action to optimize
	performance. Simply adding new actions that are tailored to perform
	well within a specific task context makes planning or action selection
	programming more difficult, as generality and adaptivity is lost.
	Rather, existing actions should be parametrized such that they optimize
	the task-specific performance measure. In this paper we propose a
	novel computation model for the execution of abstract action chains.
	In this computation model, a robot first learns situation-specific
	performance models of abstract actions. It then uses these models
	to automatically specialize the abstract actions for their execution
	in a given action chain. This specialization results in refined chains
	that are optimized for performance. As a side effect this behavior
	optimization also appears to produce action chains with seamless
	transitions between actions.},
 url = {http://www.tzi.de/~visser/ijcai05/},
}

@inproceedings{stulp_action-related_2009,
 author = {F Stulp and A Fedrizzi and M Beetz},
 title = {Action-Related Place-Based Mobile Manipulation},
 booktitle = {Proceedings of the International Conference on Intelligent Robots
	and Systems ({IROS)}},
 year = {2009},
 pages = {3115--3120},
}

@inproceedings{stulp_learning_2009,
 author = {F Stulp and A Fedrizzi and M Beetz},
 title = {Learning and Performing Place-based Mobile Manipulation},
 booktitle = {Proceedings of the 8th International Conference on Development and
	Learning ({ICDL)}},
 year = {2009},
 pages = {1--7},
}

@article{stulp_learning_2012,
 author = {F Stulp and A Fedrizzi and L Mösenlechner and M Beetz},
 title = {Learning and Reasoning with Action-Related Places for Robust Mobile
	Manipulation},
 journal = {Journal of Artificial Intelligence Research ({JAIR)}},
 year = {2012},
 volume = {43},
 pages = {1--42},
}

@inproceedings{stulp_combining_2009,
 author = {F Stulp and A Fedrizzi and F Zacharias and M Tenorth and J Bandouch and M Beetz},
 title = {Combining Analysis, Imitation, and Experience-based Learning to Acquire
	a Concept of Reachability},
 booktitle = {9th {IEEE-RAS} International Conference on Humanoid Robots},
 year = {2009},
 pages = {161--167},
}

@inproceedings{stulp_evaluating_2004,
 author = {F Stulp and S Gedikli and M Beetz},
 title = {Evaluating Multi-Agent Robotic Systems Using Ground Truth},
 booktitle = {Proceedings of the Workshop on Methods and Technology for Empirical
	Evaluation of Multi-agent Systems and Multi-robot Teams ({MTEE)}},
 year = {2004},
 abstract = {A thorough empirical evaluation of multi-agent robotic systems is
	greatly facilitated if the true state of the world
	over time can be obtained. The accuracy of the beliefs as well as
	the overall performance can then be measured objectively and efficiently.
	In this paper we present a system for determining the
	ground truth state of the world, similar to the ceiling cameras used
	in {RoboCup} small-size league. We have used this ground truth data
	to evaluate the accuracy of the self- and object-localization of
	the robots in our {RoboCup} mid-size league team, the Agilo {RoboCuppers}.
	More complex models of the state estimation module have also been
	learned. These models provide insight into the workings and shortcomings
	of this module, and can be used to improve it.},
}

@inproceedings{stulp_implicit_2006,
 author = {F Stulp and M Isik and M Beetz},
 title = {Implicit Coordination in Robotic Teams using Learned Prediction Models},
 booktitle = {Proceedings of the {IEEE} International Conference on Robotics and
	Automation ({ICRA)}},
 year = {2006},
 pages = {1330--1335},
 abstract = {Many application tasks require the cooperation of two or more robots.
	Humans are good at cooperation in shared workspaces, because they
	anticipate and adapt to the intentions and actions of others. In
	contrast, multi-agent and multi-robot systems rely on communication
	to exchange their intentions. This causes problems in domains where
	perfect communication is not guaranteed, such as rescue robotics,
	autonomous vehicles participating in traffic, or robotic soccer.
	In this paper, we introduce a computational model for implicit coordination,
	and apply it to a typical coordination task from robotic soccer:
	regaining ball possession. The computational model specifies that
	performance prediction models are necessary for coordination, so
	we learn them off-line from observed experience. By taking the perspective
	of the team mates, these models are then used to predict utilities
	of others, and optimize a shared performance model for joint actions.
	In several experiments conducted with our robotic soccer team, we
	evaluate the performance of implicit coordination.},
}

@inproceedings{stulp_agilo_2004,
 author = {F Stulp and A Kirsch and S Gedikli and M Beetz},
 title = {{AGILO} {RoboCuppers} 2004},
 booktitle = {{RoboCup} International Symposium 2004},
 year = {2004},
 address = {Lisbon},
 month = {jul},
 abstract = {The Agilo {RoboCup} team is the primary platform for our research
	on the semi-automatic acquisition of visuo-motoric plans. It is realized
	using inexpensive, off the shelf, easily extendible hardware components
	and a standard software environment. The control system of an autonomous
	soccer robot consists of a probabilistic game state estimator and
	a situated action selection module. The game state estimator computes
	the robot's belief state with respect to the current game situation.
	The action selection module selects actions according to specified
	goals as well as learned experiences. Automatic learning techniques
	made it possible to develop fast and skillful routines for approaching
	the ball, assigning roles, and performing coordinated plays.},
}

@inproceedings{stulp_seamless_2007,
 author = {F Stulp and W Koska and A Maldonado and M Beetz},
 title = {Seamless Execution of Action Sequences},
 booktitle = {Proceedings of the {IEEE} International Conference on Robotics and
	Automation ({ICRA)}},
 year = {2007},
 pages = {3687--3692},
 abstract = {One of the most notable and recognizable features of robot motion
	is the abrupt transitions between actions in action sequences. In
	contrast, humans and animals perform sequences of actions efficiently,
	and with seamless transitions between subsequent actions. This smoothness
	is not a goal in itself, but a side-effect of the evolutionary optimization
	of other performance measures. In this paper, we argue that such
	jagged motion is an inevitable consequence of the way human designers
	and planners reason about abstract actions. We then present subgoal
	refinement, a procedure that optimizes action sequences. Subgoal
	refinement determines action parameters that are not relevant to
	why the action was selected, and optimizes these parameters with
	respect to expected execution performance. This performance is computed
	using action models, which are learned from observed experience.
	We integrate subgoal refinement in an existing planning system, and
	demonstrate how requiring optimal performance causes smooth motion
	in three robotic domains.},
}

@inproceedings{stulp_compact_2009-1,
 author = {F Stulp and I Kresse and A Maldonado and F Ruiz and A Fedrizzi and M Beetz},
 title = {Compact Models of Human Reaching Motions for Robotic Control in Everyday
	Manipulation Tasks},
 booktitle = {Proceedings of the 8th International Conference on Development and
	Learning ({ICDL)}},
 year = {2009},
}

@inproceedings{stulp_compact_2009,
 author = {F Stulp and E Oztop and P Pastor and M Beetz and S Schaal},
 title = {Compact Models of Motor Primitive Variations for Predictable Reaching
	and Obstacle Avoidance},
 booktitle = {9th {IEEE-RAS} International Conference on Humanoid Robots},
 year = {2009},
}

@inproceedings{stulp_feature_2006,
 author = {F Stulp and M Pflüger and M Beetz},
 title = {Feature Space Generation using Equation Discovery},
 booktitle = {Proceedings of the 29th German Conference on Artificial Intelligence
	({KI)}},
 year = {2006},
}

@article{stulp_implicit_2010,
 author = {F Stulp and H Utz and M Isik and G Mayer},
 title = {Implicit Coordination with Shared Belief: A Heterogeneous Robot Soccer
	Team Case Study},
 journal = {Advanced Robotics, the International Journal of the Robotics Society
	of Japan},
 year = {2010},
}

@inproceedings{sun_eyewatchme_2009,
 author = {L Sun and U Klank and M Beetz},
 title = {{EYEWATCHME} - {3D} Hand and object tracking for inside out activity
	analysis},
 booktitle = {{IEEE} Computer Society Conference on Computer Vision and Pattern
	Recognition ({CVPR} 2009)},
 year = {2009},
 pages = {9--16},
 month = {jun},
 abstract = {This paper investigates the inside-out recognition of everyday manipulation
	tasks using a gaze-directed camera, which is a camera that is actively
	directed at the visual attention focus of the person wearing the camera.
	We present {EYEWATCHME}, an integrated vision and state estimation
	system that at the same time tracks the positions and the poses of
	the acting hands, the pose of the manipulated object, and the pose
	of the observing camera. Taken together, {EYEWATCHME} provides comprehensive
	data for learning predictive models of vision-guided manipulation
	that include the objects people are attending to, the interaction of
	attention and reaching/grasping, and the segmentation of reaching
	and grasping using visual attention as evidence. Key technical contributions
	of this paper include an ego view hand tracking system that estimates
	27 {DOF} hand poses. The hand tracking system is capable of detecting
	hands and estimating their poses despite substantial self-occlusion
	caused by the hand and occlusions caused by the manipulated object.
	{EYEWATCHME} can also cope with blurred images that are caused by
	rapid eye movements. The second key contribution is the integrated
	activity recognition system that simultaneously tracks the attention
	of the person, the hand poses, and the poses of the manipulated objects
	in terms of global scene coordinates. We demonstrate the operation
	of {EYEWATCHME} in the context of kitchen tasks including filling
	a cup with water.},
 doi = {10.1109/CVPR.2009.5204358},
 keywords = {{3D} object tracking, blurred images, computer graphics, {EYEWATCHME},
	gaze-directed camera, grasping segmentation, human computer interaction,
	image restoration, image segmentation, image sensors, inside out
	activity analysis, integrated activity recognition system, object
	recognition, reaching segmentation, state estimation system, substantial
	self-occlusion, tracking, {3D} hand tracking, vision-guided manipulation},
}

@phdthesis{tenorth_knowledge_2011,
 author = {M Tenorth},
 title = {Knowledge Processing for Autonomous Robots},
 school = {Technische Universität München},
 year = {2011},
 url = {http://nbn-resolving.de/urn/resolver.pl?urn:nbn:de:bvb:91-diss-20111125-1079930-1-7},
}

@inproceedings{tenorth_tum_2009,
 author = {M Tenorth and J Bandouch and M Beetz},
 title = {The {TUM} Kitchen Data Set of Everyday Manipulation Activities for
	Motion Tracking and Action Recognition},
 booktitle = {{IEEE} International Workshop on Tracking Humans for the Evaluation
	of their Motion in Image Sequences ({THEMIS)}, in conjunction with
	{ICCV2009}},
 year = {2009},
 abstract = {We introduce the publicly available {TUM} Kitchen Data Set as a comprehensive
	collection of activity sequences recorded in a kitchen environment
	equipped with multiple complementary sensors. The recorded data consists
	of observations of naturally performed manipulation tasks as encountered
	in everyday activities of human life. Several instances of a table-setting
	task were performed by different subjects, involving the manipulation
	of objects and the environment. We provide the original video sequences,
	fullbody motion capture data recorded by a markerless motion tracker,
	{RFID} tag readings and magnetic sensor readings from objects and
	the environment, as well as corresponding action labels. In this
	paper, we both describe how the data was computed, in particular
	the motion tracker and the labeling, and give examples of what it can
	be used for. We present first results of an automatic method for
	segmenting the observed motions into semantic classes, and describe
	how the data can be integrated in a knowledge-based framework for
	reasoning about the observations.},
}

@article{tenorth_knowrob_2013,
 author = {M Tenorth and M Beetz},
 title = {{KnowRob} – A Knowledge Processing Infrastructure for Cognition-enabled
	Robots. Part 1: The {KnowRob} System},
 journal = {International Journal of Robotics Research ({IJRR)}},
 year = {2013},
}

@inproceedings{tenorth_exchange_2012,
 author = {M Tenorth and M Beetz},
 title = {Exchange of Action-related Information among Autonomous Robots},
 booktitle = {12th International Conference on Intelligent Autonomous Systems},
 year = {2012},
}

@inproceedings{tenorth_knowledge_2012,
 author = {M Tenorth and M Beetz},
 title = {Knowledge Processing for Autonomous Robot Control},
 booktitle = {{AAAI} Spring Symposium on Designing Intelligent Robots: Reintegrating
	{AI}},
 year = {2012},
 address = {Stanford, {CA}, {USA}},
 month = {mar},
}

@inproceedings{tenorth_unified_2012,
 author = {M Tenorth and M Beetz},
 title = {A Unified Representation for Reasoning about Robot Actions, Processes,
	and their Effects on Objects},
 booktitle = {2012 {IEEE/RSJ} International Conference on Intelligent Robots and
	Systems ({IROS)}},
 year = {2012},
 address = {Vilamoura, Portugal},
 month = {oct},
}

@techreport{tenorth_deliverable_2010,
 author = {M Tenorth and M Beetz},
 title = {Deliverable D5.2: The {RoboEarth} Language – Language Specification},
 institution = {{FP7-ICT-248942} {RoboEarth}},
 year = {2010},
 number = {D5.2},
 abstract = {This document describes the current state of implementation of the
	{RoboEarth} representation language. This language is designed for
	two main purposes. First, it should make it possible to represent all information
	a robot needs to perform a reasonably complex task. This includes
	information about (1) Plans, which consist of the actions a task
	is composed of, ordering constraints among them, monitoring and failure
	handling, as well as action parameters like objects, locations, grasp
	types; (2) Objects, especially types, dimensions, states, and other
	properties, but also locations of specific objects a robot has detected,
	and object models that can be used for recognition; and the (3) Environment,
	including maps for self-localization as well as poses of objects
	like pieces of furniture. The second main task of the {RoboEarth}
	language is to allow a robot to decide on its own if a certain piece
	of information is useful to it. That means a robot must be able
	to check if an action description contains a plan for the action
	it would like to do, if it meets all requirements to perform this
	action, and if it has the sensors needed to use an object recognition
	model. Using the semantic descriptions in the {RoboEarth} language,
	a robot can perform the checks using logical inference.},
}

@inproceedings{tenorth_priming_2010,
 author = {M Tenorth and M Beetz},
 title = {Priming Transformational Planning with Observations of Human Activities},
 booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
 year = {2010},
 pages = {1499–1504},
 address = {Anchorage, {AK}, {USA}},
 month = {may},
}

@inproceedings{tenorth_knowrob_2009,
 author = {M Tenorth and M Beetz},
 title = {{KnowRob} – Knowledge Processing for Autonomous Personal Robots},
 booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems},
 year = {2009},
 pages = {4261–4266},
 abstract = {Mobile household robots need much knowledge about objects, places
	and actions when performing more and more complex tasks. They must
	be able to recognize objects, know what they are and how they can
	be used. We present a practical approach to robot knowledge representation
	that combines description logics knowledge bases with a rich environment
	model, data mining and (self-) observation modules. The robot observes
	itself and humans while executing actions and uses the collected
	experiences to learn models of action-related concepts grounded in
	its perception and action system. We demonstrate our approach by
	learning places that are involved in mobile robot manipulation actions,
	by locating objects based on their function and by supplying knowledge
	required for understanding underspecified task descriptions as commonly
	given by humans.},
}

@inproceedings{tenorth_towards_2008,
 author = {M Tenorth and M Beetz},
 title = {Towards Practical and Grounded Knowledge Representation Systems for
	Autonomous Household Robots},
 booktitle = {Proceedings of the 1st International Workshop on Cognition for Technical
	Systems, München, Germany, 6-8 October},
 year = {2008},
 abstract = {Mobile household robots need much knowledge about objects, places
	and actions when performing more and more complex tasks. They must
	be able to recognize objects, know what they are and how they can
	be used. This knowledge can often be specified more easily in terms
	of action-related concepts than by giving declarative descriptions
	of the appearance of objects. Defining chairs as objects to sit on,
	for instance, is much more natural than describing what chairs in
	general look like. Having grounded symbolic models of its actions
	and related concepts allows the robot to reason about its activities
	and improve its problem solving performance. In order to use action-related
	concepts, the robot must be able to find them in its environment.
	We present a practical approach to robot knowledge representation
	that combines description logics knowledge bases with data mining
	and (self-) observation modules. The robot collects experiences while
	executing actions and uses them to learn models and aspects of action-related
	concepts grounded in its perception and action system. We demonstrate
	our approach by learning places that are involved in mobile robot
	manipulation actions.},
}

@article{tenorth_knowledge_2010,
 author = {M Tenorth and D Jain and M Beetz},
 title = {Knowledge Representation for Cognitive Robots},
 journal = {Künstliche Intelligenz},
 year = {2010},
 volume = {24},
 pages = {233–240},
 number = {3},
}

@inproceedings{tenorth_towards_2012,
 author = {M Tenorth and K Kamei and S Satake and T Miyashita and N Hagita},
 title = {Towards a Networked Robot Architecture for Distributed
	Task Execution and Knowledge Exchange},
 booktitle = {Third International Workshop on Standards and Common Platforms for
	Robotics ({SCPR} 2012), in conjunction with {SIMPAR} 2012},
 year = {2012},
 address = {Tsukuba, Japan},
 month = {nov},
}

@article{tenorth_web-enabled_2011,
 author = {M Tenorth and U Klank and D Pangercic and M Beetz},
 title = {Web-enabled Robots – Robots that Use the Web as an Information Resource},
 journal = {Robotics \& Automation Magazine},
 year = {2011},
 volume = {18},
 pages = {58–68},
 number = {2},
}

@inproceedings{tenorth_knowrob-map_2010,
 author = {M Tenorth and L Kunze and D Jain and M Beetz},
 title = {{KNOWROB-MAP} – Knowledge-Linked Semantic Object Maps},
 booktitle = {10th {IEEE-RAS} International Conference on Humanoid Robots},
 year = {2010},
 pages = {430--435},
 address = {Nashville, {TN}, {USA}},
 month = {dec},
}

@inproceedings{tenorth_understanding_2010,
 author = {M Tenorth and D Nyga and M Beetz},
 title = {Understanding and Executing Instructions for Everyday Manipulation
	Tasks from the World Wide Web},
 booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
 year = {2010},
 pages = {1486–1491},
 address = {Anchorage, {AK}, {USA}},
 month = {may},
}

@techreport{tenorth_understanding_2009,
 author = {M Tenorth and D Nyga and M Beetz},
 title = {Understanding and Executing Instructions for Everyday Manipulation
	Tasks from the World Wide Web},
 institution = {{IAS} group, Technische Universität München, Fakultät für Informatik},
 year = {2009},
}

@article{tenorth_representation_2013,
 author = {M Tenorth and AC Perzylo and R Lafrenz and M Beetz},
 title = {Representation and Exchange of Knowledge about Actions, Objects,
	and Environments in the {RoboEarth} Framework},
 journal = {{IEEE} Transactions on Automation Science and Engineering (T-{ASE)}},
 year = {2013},
}

@inproceedings{tenorth_roboearth_2012,
 author = {M Tenorth and AC Perzylo and R Lafrenz and M Beetz},
 title = {The {RoboEarth} language: Representing and Exchanging Knowledge about
	Actions, Objects, and Environments},
 booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
 year = {2012},
 address = {St. Paul, {MN}, {USA}},
 month = {may},
}

@inproceedings{tenorth_learning_2013,
 author = {M Tenorth and F De la Torre and M Beetz},
 title = {Learning Probability Distributions over Partially-Ordered Human Everyday
	Activities},
 booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
 year = {2013},
 address = {Karlsruhe, Germany},
 month = {may},
}

@article{thrun_probabilistic_2000,
 author = {S Thrun and M Beetz and M Bennewitz and A Cremers and F Dellaert and D Fox and D Hähnel and C Rosenberg and N Roy and J Schulte and D Schulz},
 title = {Probabilistic Algorithms and the Interactive Museum Tour-Guide Robot
	Minerva},
 journal = {International Journal of Robotics Research},
 year = {2000},
 abstract = {This paper describes Minerva, an interactive tour-guide robot that
	was successfully deployed in a Smithsonian museum. Minerva's software
	is pervasively probabilistic, relying on explicit representations
	of uncertainty in perception and control. This article describes
	Minerva's major software components, and provides a comparative analysis
	of the results obtained in the Smithsonian museum. During two weeks
	of highly successful operation, the robot interacted with thousands
	of people, both in the museum and through the Web, traversing more
	than 44km at speeds of up to 163 cm/sec in the unmodified museum.},
}

@inproceedings{tischler_application_2007,
 author = {MA. Tischler and C Peter and M Wimmer and J Voskamp},
 title = {Application of emotion recognition methods in automotive research},
 booktitle = {Proceedings of the 2nd Workshop on Emotion and Computing – Current
	Research and Future Impact},
 year = {2007},
 editor = {Reichardt, Dirk and Levi, Paul},
 pages = {50--55},
 address = {Oldenburg, Germany},
 month = {sep},
 abstract = {This paper reports on a pilot study applying emotion recognition technologies
	developed for Human-Machine-Interfaces in automobile research. The
	aim of the study was to evaluate technologies for quantifying driving
	pleasure in a close-to-reality scenario. Results show that car driving
	scenarios pose particular requirements on emotion recognition technologies
	which could be met by modifications of current systems.},
}

@inproceedings{usenko_furniture_2012,
 author = {V Usenko and F Seidel and ZC Marton and D Pangercic and M Beetz},
 title = {Furniture Classification using {WWW} {CAD} Models},
 booktitle = {{IROS’12} Workshop on Active Semantic Perception ({ASP’12)}},
 year = {2012},
 address = {Vilamoura, Portugal},
 month = {oct},
}

@inproceedings{utz_sharing_2004,
 author = {H Utz and F Stulp and A Mühlenfeld},
 title = {Sharing Belief in Teams of Heterogeneous Robots},
 booktitle = {{RoboCup-2004:} The Eighth {RoboCup} Competitions and Conferences},
 year = {2004},
 editor = {Nardi, Daniele and Riedmiller, Martin and Sammut, Claude},
 pages = {508--515},
 address = {Lisbon, Portugal},
 publisher = {Springer Verlag},
 abstract = {This paper describes the joint approach of three research groups to
	enable a heterogeneous team of robots to exchange belief. The communication
	framework presented imposes few restrictions on the design and
	implementation of the individual autonomous mobile systems. The three
	groups have individually taken part in the {RoboCup} F2000 league
	since 1998. Although recent rule changes allow for more robots per
	team, the cost of acquiring and maintaining autonomous mobile robots
	keeps teams from making use of this opportunity. A solution is to
	build mixed teams with robots from different labs. As almost all
	robots in this league are custom built research platforms with unique
	sensors, actuators, and software architectures, forming a heterogeneous
	team presents an exciting challenge.},
 url = {http://citeseer.ist.psu.edu/utz04sharing.html},
}

@article{waibel_roboearth_2011,
 author = {M Waibel and M Beetz and R D'Andrea and R Janssen and M Tenorth and J Civera and J Elfring and D Gálvez-López and K Häussermann and J. M. M. Montiel and A Perzylo and B Schießle and O Zweigle and R van de Molengraft},
 title = {{RoboEarth} - A World Wide Web for Robots},
 journal = {Robotics \& Automation Magazine},
 year = {2011},
 volume = {18},
 pages = {69–82},
 number = {2},
}

@inproceedings{wallhoff_real-time_2010,
 author = {F Wallhoff and T Rehrl and C Mayer and B Radig},
 title = {Real-Time Face and Gesture Analysis for Human-Robot Interaction},
 booktitle = {Real-Time Image and Video Processing 2010},
 year = {2010},
 series = {Proceedings of {SPIE}},
 keywords = {facial expressions},
}

@inproceedings{weikersdorfer_depth-adaptive_2012,
 author = {D Weikersdorfer and D Gossow and M Beetz},
 title = {Depth-Adaptive Superpixels},
 booktitle = {21st International Conference on Pattern Recognition},
 year = {2012},
}

@book{wimmer_future_2008,
 title = {Future User Interfaces Enhanced by Facial Expression Recognition
	– Interpreting Human Faces with Model-based Techniques},
 publisher = {{VDM}, Verlag Dr. Müller},
 year = {2008},
 author = {M Wimmer},
 month = {mar},
 keywords = {facial expressions},
}

@phdthesis{wimmer_model-based_2007,
 author = {M Wimmer},
 title = {Model-based Image Interpretation with Application to Facial Expression
	Recognition},
 school = {Technische Universität München, Institute for Informatics},
 year = {2007},
 month = {dec},
 keywords = {facial expressions},
 url = {http://nbn-resolving.de/urn/resolver.pl?urn:nbn:de:bvb:91-diss-20071220-618214-1-1},
}

@inproceedings{wimmer_asm_2008,
 author = {M Wimmer and S Fujie and F Stulp and T Kobayashi and B Radig},
 title = {An {ASM} Fitting Method Based on Machine Learning that Provides a
	Robust Parameter Initialization for {AAM} Fitting},
 booktitle = {Proc. of the International Conference on Automatic Face and Gesture
	Recognition ({FGR08)}},
 year = {2008},
 address = {Amsterdam, Netherlands},
 month = {sep},
 abstract = {Due to their use of information contained in texture, Active Appearance
	Models ({AAM)} generally outperform Active Shape Models ({ASM)} in
	terms of fitting accuracy. Although many extensions and improvements
	over the original {AAM} have been proposed, one of the main drawbacks
	of {AAMs} remains their dependence on good initial model parameters
	to achieve accurate fitting results. In this paper, we determine
	the initial model parameters for {AAM} fitting with {ASM} fitting,
	and use machine learning techniques to improve the scope and accuracy
	of {ASM} fitting. Combining the precision of {AAM} fitting with the
	large radius of convergence of learned {ASM} fitting improves the
	results by an order of magnitude, as our empirical evaluation on
	a database of publicly available benchmark images demonstrates.},
}

@article{wimmer_bitte_2006,
 author = {M Wimmer and S Hämmerle},
 title = {Bitte recht freundlich},
 journal = {Zukunft im Brennpunkt},
 year = {2006},
 volume = {5},
 pages = {35--38},
 month = {dec},
}

@inproceedings{wimmer_facial_2008,
 author = {M Wimmer and BA. MacDonald and D Jayamuni and A Yadav},
 title = {Facial Expression Recognition for Human-robot Interaction – A Prototype},
 booktitle = {2nd Workshop Robot Vision, Lecture Notes in Computer Science},
 year = {2008},
 editor = {Klette, Reinhard and Sommer, Gerald},
 volume = {4931/2008},
 pages = {139--152},
 address = {Auckland, New Zealand},
 month = {feb},
 publisher = {Springer},
 abstract = {To be effective in the human world, robots must respond to human emotional
	states. This paper focuses on the recognition of the six universal
	human facial expressions. In the last decade there has been successful
	research on facial expression recognition ({FER)} in controlled conditions
	suitable for human-computer interaction. However, the human-robot
	scenario presents additional challenges including a lack of control
	over lighting conditions and over the relative poses and separation
	of the robot and human, the inherent mobility of robots, and stricter
	real time computational requirements dictated by the need for robots
	to respond in a timely fashion. Our approach imposes lower computational
	requirements by specifically adapting model-based techniques to the
	{FER} scenario. It contains adaptive skin color extraction, localization
	of the entire face and facial components, and specifically learned
	objective functions for fitting a deformable face model. Experimental
	evaluation reports a recognition rate of 70\% on the Cohn-Kanade
	facial expression database, and 67\% in a robot scenario, which compare
	well to other {FER} systems.},
 keywords = {facial expressions},
}

@inproceedings{wimmer_are_2008,
 author = {M Wimmer and C Mayer and M Eggers and B Radig},
 title = {Are You Happy with Your First Name?},
 booktitle = {Proceedings of the 3rd Workshop on Emotion and Computing: Current
	Research and Future Impact},
 year = {2008},
 pages = {23--29},
 address = {Kaiserslautern, Germany},
 month = {sep},
}

@inproceedings{wimmer_tailoring_2008,
 author = {M Wimmer and C Mayer and S Pietzsch and B Radig},
 title = {Tailoring Model-based Techniques for Facial Expression Interpretation},
 booktitle = {The First International Conference on Advances in Computer-Human
	Interaction ({ACHI08)}},
 year = {2008},
 address = {Sainte Luce, Martinique},
 month = {feb},
 keywords = {facial expressions},
}

@inproceedings{wimmer_recognizing_2008,
 author = {M Wimmer and C Mayer and B Radig},
 title = {Recognizing Facial Expressions Using Model-based Image Interpretation},
 booktitle = {Verbal and Nonverbal Communication Behaviours, {COST} Action 2102
	International Workshop},
 year = {2008},
 address = {Vietri sul Mare, Italy},
 month = {apr},
 abstract = {Even though electronic devices widely occupy our daily lives, human-machine
	interaction still lacks intuition. Therefore, researchers intend
	to resolve these shortcomings by augmenting traditional systems with
	aspects of human-human interaction and consider human emotion, behavior,
	and intention. This publication focuses on one aspect of this challenge:
	recognizing facial expressions. Our approach achieves real-time performance
	and provides robustness for real-world applicability. This computer
	vision task comprises various phases for which it exploits model-based
	techniques that accurately localize facial features, seamlessly track
	them through image sequences, and finally infer the visible facial
	expressions. We specifically adapt state-of-the-art techniques to each
	of these challenging phases. Our system has been successfully presented
	to industrial, political, and scientific audiences at various events.},
 keywords = {facial expressions},
}

@inproceedings{wimmer_robustly_2008,
 author = {M Wimmer and C Mayer and B Radig},
 title = {Robustly Classifying Facial Components Using a Set of Adjusted Pixel
	Features},
 booktitle = {Proc. of the International Conference on Face and Gesture Recognition
	({FGR08)}},
 year = {2008},
 address = {Amsterdam, Netherlands},
 month = {sep},
 abstract = {Efficient and accurate localization of the components of human faces,
	such as skin, lips, eyes, and brows, provides benefit to various
	real-world applications. However, high intra-class and small inter-class
	variations in color prevent simple but quick pixel classifiers from
	yielding robust results. In contrast, more elaborate classifiers
	consider shape or region features but they do not achieve real-time
	performance. In this paper, we show that it definitely is possible
	to robustly determine the facial components and achieve far more
	than real-time performance. We also use quick pixel-level classifiers
	and provide them with a set of pixel features that are adapted to
	the image characteristics beforehand. We do not manually select the
	pixel features and specify the calculation rules. In contrast, our
	idea is to provide a multitude of features and let the Machine Learning
	algorithm decide which of them are important. The evaluation draws
	a comparison to fixed approaches that do not adapt the computation
	of the features to the image content in any way. The obtained accuracy
	is precise enough to be used for real-world applications such as
	for model-based interpretation of human faces.},
 keywords = {facial expressions},
}

@inproceedings{wimmer_face_2008,
 author = {M Wimmer and C Mayer and F Stulp and B Radig},
 title = {Face Model Fitting based on Machine Learning from Multi-band Images
	of Facial Components},
 booktitle = {Workshop on Non-Rigid Shape Analysis and Deformable Image Alignment,
	held in conjunction with {CVPR}},
 year = {2008},
 address = {Anchorage, {AK}, {USA}},
 month = {jun},
 abstract = {Geometric models make it possible to determine semantic information
	about real-world objects. Model fitting algorithms need to find the
	best match between a parameterized model and a given image. This task
	inherently requires an objective function to estimate the error between
	a model parameterization and an image. The accuracy of this function
	directly influences the accuracy of the entire process of model fitting.
	Unfortunately, building these functions is a non-trivial task. Dedicated
	to the application of face model fitting, this paper proposes to consider
	a multi-band image representation that indicates the facial components,
	from which a large set of image features is computed. Since it is
	not possible to manually formulate an objective function that considers
	this large amount of features, we apply a Machine Learning framework
	to construct them. This automatic approach is capable of considering
	the large amount of features provided and yields highly accurate objective
	functions for face model fitting. Since the Machine Learning framework
	rejects non-relevant image features, we obtain high performance runtime
	characteristics as well.},
 keywords = {facial expressions},
}

@inproceedings{wimmer_estimating_2007,
 author = {M Wimmer and C Mayer and F Stulp and B Radig},
 title = {Estimating Natural Activity by Fitting {3D} Models via Learned Objective
	Functions},
 booktitle = {Workshop on Vision, Modeling, and Visualization ({VMV)}},
 year = {2007},
 volume = {1},
 pages = {233--241},
 address = {Saarbrücken, Germany},
 month = {nov},
 abstract = {Model-based image interpretation has proven to robustly extract high-level
	scene descriptors from raw image data. Furthermore, geometric texture
	models represent a fundamental component for visualizing real-world
	scenarios. However, the motion of the model and the real-world object
	must be similar in order to portray natural activity. Again, this
	information can be determined by inspecting images via model-based
	image interpretation. This paper sketches the challenge of fitting
	models to images, describes the shortcomings of current approaches
	and proposes a technique based on machine learning techniques. We
	identify the objective function as a crucial component for fitting
	models to images. Furthermore, we state preferable properties of
	these functions and we propose to learn such a function from manually
	annotated example images.},
}

@inproceedings{wimmer_robustly_2008-1,
 author = {M Wimmer and S Pietzsch and C Mayer and B Radig},
 title = {Robustly Estimating the Color of Facial Components Using a Set of
	Adjusted Pixel Features},
 booktitle = {14. Workshop Farbbildverarbeitung},
 year = {2008},
 pages = {85--96},
 address = {Aachen, Germany},
 month = {oct},
 keywords = {facial expressions},
}

@inproceedings{wimmer_learning_2007,
 author = {M Wimmer and S Pietzsch and F Stulp and B Radig},
 title = {Learning Robust Objective Functions with Application to Face Model
	Fitting},
 booktitle = {Proceedings of the 29th {DAGM} Symposium},
 year = {2007},
 volume = {1},
 pages = {486--496},
 address = {Heidelberg, Germany},
 month = {sep},
 abstract = {Model-based image interpretation extracts high-level information from
	images using a priori knowledge about the object of interest. The
	computational challenge is to determine the model parameters that
	best match a given image by searching for the global optimum of the
	involved objective function. Unfortunately, this function is usually
	designed manually, based on implicit and domain-dependent knowledge,
	which prevents the fitting task from yielding accurate results. In
	this paper, we demonstrate how to improve model fitting by learning
	objective functions from annotated training images. Our approach
	automates many critical decisions and the remaining manual steps
	hardly require domain-dependent knowledge. This yields more robust
	objective functions that are able to achieve an accurate model fit.
	Our evaluation uses a publicly available image database and compares
	the obtained results to a recent state-of-the-art approach.},
 keywords = {facial expressions},
}

@inproceedings{wimmer_adaptive_2005,
 author = {M Wimmer and B Radig},
 title = {Adaptive Skin Color Classificator},
 booktitle = {Proceedings of the first International Conference on Graphics, Vision
	and Image Processing},
 year = {2005},
 editor = {al, Ashraf Aboshosha et},
 volume = {I},
 pages = {324--327},
 address = {Cairo, Egypt},
 month = {dec},
 publisher = {{ICGST}},
 abstract = {A lot of computer vision applications benefit from robust skin color
	classification. But this is a hard challenge due to the various image
	conditions like camera settings, illumination, light source, shadows
	and many more. Furthermore, people's tans and ethnic groups also extend
	those conditions. In this work we present a parametric skin color
	classifier that can be adapted to the conditions of each image or
	image sequence. This is done by evaluating some previously known skin
	color pixels which are acquired by applying a face detector. This
	approach can distinguish skin color from very similar colors such as
	lip color or eyebrow color. Its high speed and high accuracy make
	it appropriate for real-time applications such as face tracking and
	facial expression recognition.},
 isbn = {21970/2005},
}

@inproceedings{wimmer_initial_2007,
 author = {M Wimmer and B Radig},
 title = {Initial Pose Estimation for {3D} Models Using Learned Objective Functions},
 booktitle = {Proceedings of the 8th Asian Conference on Computer Vision ({ACCV07)}},
 year = {2007},
 editor = {Yagi, Yasushi and Kang, Sing Bing and Kweon, In So and Zha, Hongbin},
 volume = {4844},
 series = {{LNCS}},
 pages = {332--341},
 address = {Heidelberg},
 month = {nov},
 publisher = {Springer},
 abstract = {Tracking {3D} models in image sequences essentially requires determining
	their initial position and orientation. Our previous work identifies
	the objective function as a crucial component for fitting {2D} models
	to images. We state preferable properties of these functions and
	we propose to learn such a function from annotated example images.
	This paper extends this approach by making it appropriate to also
	fit {3D} models to images. The correctly fitted model represents
	the initial pose for model tracking. However, this extension induces
	nontrivial challenges such as out-of-plane rotations and self-occlusion,
	which cause large variations in the model's surface visible in the
	image. We solve this issue by connecting the input features of the
	objective function directly to the model. Furthermore, sequentially
	executing objective functions specifically learned for different
	displacements from the correct positions yields highly accurate objective
	values.},
 isbn = {978-3-540-76389-5},
}

@inproceedings{wimmer_automatically_2007,
 author = {M Wimmer and B Radig},
 title = {Automatically Learning the Objective Function for Model Fitting},
 booktitle = {Proceedings of the Meeting in Image Recognition and Understanding
	({MIRU)}},
 year = {2007},
 address = {Hiroshima, Japan},
 month = {jul},
 abstract = {Model-based image interpretation has proven to appropriately extract
	high-level information from images. A priori knowledge about the
	object of interest represents the basis of this task. Model fitting
	determines the model that best matches a given image by searching
	for the global optimum of an objective function. Unfortunately, the
	objective function is usually designed manually, based on implicit
	and domain-dependent knowledge. In contrast, this paper describes
	how to obtain highly accurate objective functions by learning them
	from annotated training images. It automates many critical decisions
	and the remaining manual steps hardly require domain-dependent knowledge
	at all. This approach yields highly accurate objective functions.
	Our evaluation fits a face model to a publicly available image database
	and compares the obtained results to a recent state-of-the-art approach.},
}

@article{wimmer_adaptive_2006,
 author = {M Wimmer and B Radig},
 title = {Adaptive Skin Color Classificator},
 journal = {{ICGST} International Journal on Graphics, Vision and Image Processing},
 year = {2006},
 volume = {Special Issue on Biometrics},
 abstract = {Skin color is an important feature of faces. Various applications
	benefit from robust skin color detection. Skin color may look quite
	different, depending on camera settings, illumination, shadows, people's
	tans, and ethnic groups. That variation is a challenging aspect of skin
	color classification. In this paper, we present an approach that
	uses a high level vision module to detect an image specific skin
	color model. This model is representative for the context conditions
	within the image and is used to adapt dynamic skin color classifiers
	to it. This approach distinguishes skin color from very similar color
	like lip color or eyebrow color. Its high speed and accuracy make
	it appropriate for real-time applications such as face model fitting,
	gaze estimation, and recognition of facial expressions.},
}

@inproceedings{wimmer_person_2006,
 author = {M Wimmer and B Radig and M Beetz},
 title = {A Person and Context Specific Approach for Skin Color Classification},
 booktitle = {Proceedings of the 18th International Conference on Pattern Recognition
	({ICPR} 2006)},
 year = {2006},
 volume = {2},
 pages = {39--42},
 address = {Los Alamitos, {CA}, {USA}},
 month = {aug},
 publisher = {{IEEE} Computer Society},
 abstract = {Skin color is an important feature of faces. Various applications
	benefit from robust skin color detection. Depending on camera settings,
	illumination, shadows, people's tans, and ethnic groups, skin color
	looks different, which is a challenging aspect for detecting it
	automatically. In this paper, we present an approach that uses a
	high level vision module to detect an image specific skin color model.
	This model is then used to adapt parametric skin color classifiers
	to the processed image. This approach is capable of distinguishing skin
	color from extremely similar colors, such as lip color or eyebrow
	color. Its high speed and high accuracy make it appropriate for real
	time applications such as face tracking and recognition of facial
	expressions.},
}

@inproceedings{wimmer_sipbild_2007,
 author = {M Wimmer and B Radig and C Mayer},
 title = {{SIPBILD} – Mimik- und Gestikerkennung in der Mensch-Maschine-Schnittstelle},
 booktitle = {Beiträge der 37. Jahrestagung der Gesellschaft für Informatik ({GI)}},
 year = {2007},
 volume = {1},
 pages = {271--274},
 address = {Bremen, Germany},
 month = {sep},
 abstract = {For natural human-machine interaction, the interpretation of visual
	information plays a central role. The lack of control over environmental
	conditions such as brightness and background color places high demands
	on the image recognition software. {SIPBILD} succeeds in recognizing
	human facial expressions and gestures using model-based image interpretation.
	In order to apply this technique in natural environments, however,
	it is necessary to substantially improve the techniques used so far.
	In particular, we present an approach that achieves robust model fitting
	without specialized image processing expertise, so that applying
	this technique no longer requires an expert.},
}

@article{wimmer_recognizing_2008-1,
 author = {M Wimmer and Z Riaz and C Mayer and B Radig},
 title = {Recognizing Facial Expressions Using Model-based Image Interpretation},
 journal = {Advances in Human-Computer Interaction},
 year = {2008},
 volume = {1},
 pages = {587--600},
 month = {oct},
 editor = {Pinder, Shane},
 keywords = {facial expressions},
}

@inproceedings{wimmer_low-level_2008,
 author = {M Wimmer and B Schuller and D Arsic and B Radig and G Rigoll},
 title = {Low-level Fusion of Audio and Video Feature for Multi-modal Emotion
	Recognition},
 booktitle = {3rd International Conference on Computer Vision Theory and Applications
	({VISAPP)}},
 year = {2008},
 volume = {2},
 pages = {145--151},
 address = {Madeira, Portugal},
 month = {jan},
 abstract = {Bimodal emotion recognition through audiovisual feature fusion has
	been shown superior over each individual modality in the past. Still,
	synchronization of the two streams is a challenge, as many vision
	approaches work on a frame basis opposing audio turn- or chunk-basis.
	Therefore, late fusion schemes such as simple logic or voting strategies
	are commonly used for the overall estimation of underlying affect.
	However, early fusion is known to be more effective in many other
	multimodal recognition tasks. We therefore suggest a combined analysis
	by descriptive statistics of audio and video Low-Level-Descriptors
	for subsequent static {SVM} Classification. This strategy also allows
	for a combined feature-space optimization which will be discussed
	herein. The high effectiveness of this approach is shown on a database
	of 11.5h containing six emotional situations in an airplane scenario.},
}

@article{wimmer_learning_2008,
 author = {M Wimmer and F Stulp and S Pietzsch and B Radig},
 title = {Learning Local Objective Functions for Robust Face Model Fitting},
 journal = {{IEEE} Transactions on Pattern Analysis and Machine Intelligence
	({PAMI)}},
 year = {2008},
 volume = {30},
 pages = {1357--1370},
 number = {8},
 doi = {http://doi.ieeecomputersociety.org/10.1109/TPAMI.2007.70793},
 issn = {0162-8828},
 keywords = {facial expressions},
}

@inproceedings{wimmer_enabling_2007,
 author = {M Wimmer and F Stulp and B Radig},
 title = {Enabling Users to Guide the Design of Robust Model Fitting Algorithms},
 booktitle = {Workshop on Interactive Computer Vision, held in conjunction with
	{ICCV} 2007},
 year = {2007},
 pages = {28},
 address = {Rio de Janeiro, Brazil},
 month = {oct},
 publisher = {Omnipress},
 abstract = {Model-based image interpretation extracts high-level information from
	images using a priori knowledge about the object of interest. The
	computational challenge in model fitting is to determine the model
	parameters that best match a given image, which corresponds to finding
	the global optimum of the objective function. When it comes to the
	robustness and accuracy of fitting models to specific images, humans
	still outperform state-of-the-art model fitting systems. Therefore,
	we propose a method in which non-experts can guide the process of
	designing model fitting algorithms. In particular, this paper demonstrates
	how to obtain robust objective functions for face model fitting applications,
	by learning their calculation rules from example images annotated
	by humans. We evaluate the obtained function using a publicly available
	image database and compare it to a recent state-of-the-art approach
	in terms of accuracy.},
 isbn = {978-1-4244-1631-8},
}

@inproceedings{wimmer_learning_2006,
 author = {M Wimmer and F Stulp and S Tschechne and B Radig},
 title = {Learning Robust Objective Functions for Model Fitting in Image Understanding
	Applications},
 booktitle = {Proceedings of the 17th British Machine Vision Conference ({BMVC)}},
 year = {2006},
 editor = {Chantler, Michael J. and Trucco, Emanuel and Fisher, Robert B.},
 volume = {3},
 pages = {1159--1168},
 address = {Edinburgh, {UK}},
 month = {sep},
 publisher = {{BMVA}},
 abstract = {Model-based methods in computer vision have proven to be a good approach
	for compressing the large amount of information in images. Fitting
	algorithms search for those parameters of the model that optimise
	the objective function given a certain image. Although fitting algorithms
	have been the subject of intensive research and evaluation, the objective
	function is usually designed ad hoc and heuristically with much implicit
	domain-dependent knowledge. This paper formulates a set of requirements
	that robust objective functions should satisfy. Furthermore, we propose
	a novel approach that learns the objective function from training
	images that have been annotated with the preferred model parameters.
	The requirements are automatically enforced during the learning phase,
	which yields generally applicable objective functions. We compare
	the performance of our approach to other approaches. For this purpose,
	we propose a set of indicators that evaluate how well an objective
	function meets the stated requirements.},
}

@inproceedings{wimmer_human_2007,
 author = {M Wimmer and U Zucker and B Radig},
 title = {Human Capabilities on Video-based Facial Expression Recognition},
 booktitle = {Proceedings of the 2nd Workshop on Emotion and Computing – Current
	Research and Future Impact},
 year = {2007},
 editor = {Reichardt, Dirk and Levi, Paul},
 pages = {7--10},
 address = {Osnabrück, Germany},
 month = {sep},
 abstract = {During the last decade, a lot of promising computer vision research
	has been conducted on automatically recognizing facial expressions.
	Some of these approaches achieve high accuracy; however, it has not
	yet been investigated how accurately humans accomplish this task,
	which would provide a comparable measure. Therefore, we conducted a
	survey on this issue, and this paper evaluates the gathered information
	regarding the recognition rate and the confusion of facial expressions.},
 keywords = {facial expressions},
}

@inproceedings{witzig_context_2013,
 author = {T Witzig and J. M Zöllner and D Pangercic and S Osentoski and P Roan and R Jäkel and R Dillmann},
 title = {Context Aware Shared Autonomy for Robotic Manipulation Tasks},
 booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and
	Systems ({IROS)}, Tokyo Big Sight, Japan},
 year = {2013},
}

@incollection{wykowska_how_2009,
 author = {A Wykowska and A Maldonado and M Beetz and A Schuboe},
 title = {How Humans Optimize Their Interaction with the Environment: The Impact
	of Action Context on Human Perception},
 booktitle = {Progress in Robotics},
 publisher = {Springer Berlin Heidelberg},
 year = {2009},
 editor = {Kim, Jong-Hwan and Ge, Shuzhi Sam and Vadakkepat, Prahlad and Jesse,
	Norbert and Al Manum, Abdullah and Puthusserypady K, Sadasivan and
	Rückert, Ulrich and Sitte, Joaquin and Witkowski, Ulf and Nakatsu,
	Ryohei and Braunl, Thomas and Baltes, Jacky and Anderson, John and
	Wong, Ching-Chang and Verner, Igor and Ahlgren, David},
 volume = {44},
 series = {Communications in Computer and Information Science},
 pages = {162--172},
 isbn = {978-3-642-03986-7},
 keywords = {Computer Science},
 url = {http://dx.doi.org/10.1007/978-3-642-03986-7_19},
}

@article{wykowska_how_2010,
 author = {A Wykowska and A Maldonado and M Beetz and A Schuboe},
 title = {How Humans Optimize Their Interaction with the Environment: The Impact
	of Action Context on Human Perception},
 journal = {International Journal of Social Robotics},
 year = {2010},
 pages = {1--9},
 issn = {1875-4791},
 keywords = {Engineering},
 url = {http://dx.doi.org/10.1007/s12369-010-0078-3},
}

@article{zaeh_artificial_2010,
 author = {M. F. Zaeh and W. Roesel and A. Bannat and T. Bautze and M. Beetz and J. Blume and K. Diepold and C. Ertelt and F. Geiger and T. Gmeiner and T. Gyger and A. Knoll and C. Lau and C. Lenz and M. Ostgathe and G. Reinhart and T. Ruehr and A. Schuboe and K. Shea and I. Stork genannt Wersborg and S. Stork and W. Tekouo and F. Wallhoff and M. Wiesbeck},
 title = {Artificial Cognition in Production Systems},
 journal = {{IEEE} Transactions on Automation Science and Engineering},
 year = {2010},
 volume = {7},
 pages = {1–27},
 number = {3},
}

@inproceedings{zhu_contracting_2011,
 author = {S Zhu and D Pangercic and M Beetz},
 title = {Contracting Curve Density Algorithm for Applications in Personal
	Robotics},
 booktitle = {11th {IEEE-RAS} International Conference on Humanoid Robots},
 year = {2011},
 address = {Bled, Slovenia},
 month = {oct},
}

@inproceedings{zia_acquisition_2009,
 author = {MZ Zia and U Klank and M Beetz},
 title = {Acquisition of a Dense {3D} Model Database for Robotic Vision},
 booktitle = {International Conference on Advanced Robotics ({ICAR)}},
 year = {2009},
 abstract = {Service Robots in real world environments need to have computer vision
	capability for detecting a large class of objects. We discuss how
	freely available {3D} model databases can be used to enable robots
	to know the appearance of a wide variety of objects in human environments
	with special application to our Assistive Kitchen. However, the open
	and free nature of such databases poses problems, for example the presence
	of incorrectly annotated {3D} models, or objects for which very few
	models exist online. We have previously proposed techniques to automatically
	select the useful models from the search result, and to utilize such
	models to perform simple manipulation tasks. Here, we build upon
	that work to describe a technique based on morphing to form new
	{3D} models if we only have a few models corresponding to a label.
	However, morphing in computer graphics requires a human operator
	and is computationally burdensome, which is why we present our own
	automatic morphing technique. We also present a simple technique
	to speed up the matching process of {3D} models against real scenes
	using visibility culling. This technique can potentially speed up
	the matching process by 2-3 times while using less memory, if we
	have some prior information about the model and world pose.},
}

@incollection{zah_cognitive_2009,
 author = {MF. Zäh and M Beetz and K Shea and G Reinhart and K. Bender and C Lau and M Ostgathe and W. Vogl and M Wiesbeck and M Engelhard and C Ertelt and T Ruehr and M. Friedrich and S. Herle},
 title = {The Cognitive Factory},
 booktitle = {Changeable and Reconfigurable Manufacturing Systems},
 publisher = {Springer},
 year = {2009},
 editor = {{ElMaraghy}, H. A.},
 pages = {355–371},
}

@inproceedings{zah_integrated_2008,
 author = {M. F. Zäh and M. Beetz and K. Shea and G. Reinhart and O. Stursberg and M. Ostgathe and C. Lau and C. Ertelt and D. Pangercic and T. Ruehr and H. Ding and T. Paschedag},
 title = {An Integrated Approach to Realize the Cognitive Machine Shop},
 booktitle = {Proceedings of the 1st International Workshop on Cognition for Technical
	Systems, München, Germany, 6-8 October},
 year = {2008},
}

@book{hertzberg_ki_2007,
 title = {{KI} 2007: Advances in Artificial Intelligence},
 publisher = {Springer-Verlag},
 year = {2007},
 editor = {Hertzberg, Joachim and Beetz, Michael and Englert, Roman},
 volume = {4667},
 series = {Lecture Notes in Artificial Intelligence},
 address = {Berlin Heidelberg},
 month = {aug},
}