Direkt zum Inhalt springen
Image Understanding and Knowledge-Based Systems
TUM School of Computation, Information and Technology
Technical University of Munich

Technical University of Munich

Menu

Links

Informatik IX

Image Understanding and Knowledge-Based Systems

Boltzmannstrasse 3
85748 Garching

info@iuks.in.tum.de




% This file was created with JabRef 2.7b. % Encoding: UTF-8

@inproceedings{albrecht_imitating_2011,
  author    = {Albrecht, Sebastian and Ramirez-Amaro, Karinne and Ruiz-Ugalde,
               Federico and Weikersdorfer, David and Leibold, Marion and
               Ulbrich, Michael and Beetz, Michael},
  title     = {Imitating human reaching motions using physically inspired
               optimization principles},
  booktitle = {11th {IEEE-RAS} International Conference on Humanoid Robots},
  year      = {2011},
  address   = {Bled, Slovenia},
  month     = oct,
}

@article{aldoma_tutorial:_2012,
  author  = {Aldoma, Aitor and Marton, Zoltan-Csaba and Tombari, Federico and
             Wohlkinger, Walter and Potthast, Christian and Zeisl, Bernhard and
             Rusu, Radu Bogdan and Gedikli, Suat and Vincze, Markus},
  title   = {Tutorial: Point Cloud Library: Three-Dimensional Object Recognition
             and 6 {DOF} Pose Estimation},
  journal = {Robotics \& Automation Magazine, {IEEE}},
  year    = {2012},
  volume  = {19},
  number  = {3},
  pages   = {80--91},
}

@inproceedings{aldoma_cad-model_2011,
  author    = {Aldoma, Aitor and Vincze, Markus and Blodow, Nico and
               Gossow, David and Gedikli, Suat and Rusu, Radu Bogdan and
               Bradski, Gary R.},
  title     = {{CAD-model} recognition and {6DOF} pose estimation using {3D} cues},
  booktitle = {{IEEE} International Conference on Computer Vision Workshops,
               {ICCV} 2011 Workshops, Barcelona, Spain, November 6-13, 2011},
  year      = {2011},
  pages     = {585--592},
}

@inproceedings{amin_multi-view_2013,
  author    = {Amin, Sikandar and Andriluka, Mykhaylo and Rohrbach, Marcus and
               Schiele, Bernt},
  title     = {Multi-view Pictorial Structures for {3D} Human Pose Estimation},
  booktitle = {British Machine Vision Conference ({BMVC)}},
  year      = {2013},
  address   = {Bristol, {UK}},
  note      = {Oral},
}

@inproceedings{andreakis_incremental_2009,
  author    = {Andreakis, Andreas and von Hoyningen-Huene, Nicolai and Beetz, Michael},
  title     = {Incremental Unsupervised Time Series Analysis Using Merge Growing
               Neural Gas},
  booktitle = {{WSOM}},
  year      = {2009},
  editor    = {Príncipe, José Carlos and Miikkulainen, Risto},
  volume    = {5629},
  series    = {Lecture Notes in Computer Science},
  pages     = {10--18},
  publisher = {Springer},
  abstract  = {We propose Merge Growing Neural Gas ({MGNG}) as a novel unsupervised
               growing neural network for time series analysis. {MGNG} combines
               the state-of-the-art recursive temporal context of Merge Neural Gas
               ({MNG}) with the incremental Growing Neural Gas ({GNG}) and enables
               thereby the analysis of unbounded and possibly infinite time series
               in an online manner. There is no need to define the number of neurons
               a priori and only constant parameters are used. In order to focus
               on frequent sequence patterns an entropy maximization strategy is
               utilized which controls the creation of new neurons. Experimental
               results demonstrate reduced time complexity compared to {MNG} while
               retaining similar accuracy in time series representation.},
  isbn      = {978-3-642-02396-5},
}

@inproceedings{arbuckle_controlling_1999,
  author    = {Arbuckle, Tom and Beetz, Michael},
  title     = {Controlling Image Processing: Providing Extensible, Run-time
               Configurable Functionality on Autonomous Robots},
  booktitle = {Proceedings of the 1999 {IEEE/RSJ} International Conference on
               Intelligent Robots and Systems},
  year      = {1999},
  volume    = {2},
  pages     = {787--792},
  abstract  = {The dynamic nature of autonomous robots' tasks requires that their
               image processing operations are tightly coupled to those actions
               within their control systems which require the visual information.
               While there are many image processing libraries that provide the
               raw image processing functionality required for autonomous robot
               applications, these libraries do not provide the additional functionality
               necessary for transparently binding image processing operations within
               a robot's control system. In particular such libraries lack facilities
               for process scheduling, sequencing, concurrent execution and resource
               management. The paper describes the design and implementation of
               an enabling extensible system -- {RECIPE} -- for providing image processing
               functionality in a form that is convenient for robot control together
               with concrete implementation examples},
}

@inproceedings{arbuckle_extensible_1999,
  author    = {Arbuckle, Tom and Beetz, Michael},
  title     = {Extensible, Runtime-configurable Image Processing on Robots — the
               {RECIPE} system},
  booktitle = {Proceedings of the 1999 {IEEE/RSJ} International Conference on
               Intelligent Robots and Systems},
  year      = {1999},
}

@inproceedings{arbuckle_recipe_1998,
  author    = {Arbuckle, Tom and Beetz, Michael},
  title     = {{RECIPE} - A System for Building Extensible, Run-time Configurable,
               Image Processing Systems},
  booktitle = {Proceedings of Computer Vision and Mobile Robotics ({CVMR}) Workshop},
  year      = {1998},
  pages     = {91--98},
  abstract  = {This paper describes the design, and implementation of {RECIPE}, an
               extensible, run-time configurable, image capture and processing system
               specifically designed for use with robotic systems and currently
               under active development here at Bonn. Robotic systems, particularly
               autonomous robotic systems, present both challenges and opportunities
               to the implementors of their vision systems. On the one hand, robotic
               systems constrain the vision systems in terms of their available
               resources and in the specific form of the hardware to be employed.
               On the other hand, intelligent processes can employ sensory input
               to modify the image capture and image processing to fit the current
               context of the robot. {RECIPE} meets these challenges while facilitating
               the modular development of efficient image processing operations.
               Implementing all of its functionality within a platform and compiler
               neutral framework as scriptable, active objects which are dynamically
               loaded at run-time, {RECIPE} provides a common basis for the development
               of image processing systems on robots. At the same time, it permits
               the image processing operations being employed by the robot system
               to be monitored and adjusted according to all of the sensory information
               available to the robot, encouraging the deployment of efficient,
               context specific, algorithms. Finally, it has been designed to encourage
               robust, fault-tolerant approaches to the action of image processing.},
}

@inproceedings{balint-benczedi_efficient_2012,
  author    = {Balint-Benczedi, Ferenc and Marton, Zoltan-Csaba and Beetz, Michael},
  title     = {Efficient Part-Graph Hashes for Object Categorization},
  booktitle = {5th International Conference on Cognitive Systems ({CogSys)}},
  year      = {2012},
}

@inproceedings{bandlow_agilo_1999,
  author    = {Bandlow, Thorsten and Klupsch, Michael and Hanek, Robert and
               Schmitt, Thorsten},
  title     = {Agilo {RoboCuppers:} {RoboCup} Team Description},
  booktitle = {3. {RoboCup} Workshop, {IJCAI} 99},
  year      = {1999},
  pages     = {691--694},
  abstract  = {This paper describes the robot soccer team the Munich Agilo {RoboCuppers}
               the {RoboCup} team of the image understanding group ({FG} {BV}) at
               the Technische Universität München. The name is derived from the
               Agilolfinger, which were the first Bavarian ruling dynasty in the
               8th century, with Tassilo as its most famous representative. With
               a team of five Pioneer 1 robots, equipped with {CCD} camera and a
               single board computer each and coordinated by a master {PC} outside
               the field we participate in the Middle Robot League of the Third
               International Workshop on {RoboCup} in Stockholm 1999. We use a multi-agent
               based approach to represent different robots and to encapsulate concurrent
               tasks within the robots. A fast feature extraction based on the image
               processing library {HALCON} provides the data necessary for the onboard
               scene interpretation. In addition, these features as well as the
               odometric data of the robots are sent over the net to the master
               {PC}, where they are verified with regard to consistency and plausibility
               and fusioned to one global view of the scene. The results are distributed
               to all robots supporting their local planning modules. This data
               is also used by the global planning module coordinating the team's
               behaviour.},
}

@inproceedings{bandlow_fast_1999,
  author    = {Bandlow, Thorsten and Klupsch, Michael and Hanek, Robert and
               Schmitt, Thorsten},
  title     = {Fast Image Segmentation, Object Recognition and Localization in a
               {RoboCup} Scenario},
  booktitle = {3. {RoboCup} Workshop, {IJCAI} 99},
  year      = {1999},
  pages     = {174--185},
  abstract  = {This paper presents the vision system of the robot soccer team Agilo
               {RoboCuppers} the {RoboCup} team of the image understanding group
               ({FG} {BV}) at the Technische Universität München. The name is derived
               from the Agilolfinger, which were the first Bavarian ruling dynasty
               in the 8th century, with Tassilo as its most famous representative.
               We present a fast and robust color classification method yielding
               significant regions in the image. The boundaries between adjacent
               regions are used to localize objects like the ball or other robots
               on the field. Furthermore for each player the free motion space is
               determined and its position and orientation on the field is estimated.
               All this is done completely vision based, without any additional
               sensors.},
}

@phdthesis{bandouch_observing_2011,
  author = {Bandouch, Jan},
  title  = {Observing and Interpreting Complex Human Activities in Everyday Environments},
  school = {Technische Universität München},
  year   = {2011},
  url    = {http://nbn-resolving.de/urn/resolver.pl?urn:nbn:de:bvb:91-diss-20101028-973175-1-4},
}

@inproceedings{bandouch_tracking_2009,
  author    = {Bandouch, Jan and Beetz, Michael},
  title     = {Tracking Humans Interacting with the Environment Using Efficient
               Hierarchical Sampling and Layered Observation Models},
  booktitle = {{IEEE} Int. Workshop on Human-Computer Interaction ({HCI).} In conjunction
               with {ICCV2009}},
  year      = {2009},
  abstract  = {We present a markerless tracking system for unconstrained human motions
               which are typical for everyday manipulation tasks. Our system is
               capable of tracking a high-dimensional human model (51 {DOF)} without
               constricting the type of motion and the need for training sequences.
               The system reliably tracks humans that frequently interact with the
               environment, that manipulate objects, and that can be partially occluded
               by the environment. We describe and discuss two key components that
               substantially contribute to the accuracy and reliability of the system.
               First, a sophisticated hierarchical sampling strategy for recursive
               Bayesian estimation that combines partitioning with annealing strategies
               to enable efficient search in the presence of many local maxima.
               Second, a simple yet effective appearance model that allows for the
               combination of shape and appearance masks to implicitly deal with
               two cases of environmental occlusions by (1) subtracting dynamic
               non-human objects from the region of interest and (2) modeling objects
               (e.g. tables) that both occlude and can be occluded by human subjects.
               The appearance model is based on bit representations that makes our
               algorithm well suited for implementation on highly parallel hardware
               such as commodity {GPUs.} Extensive evaluations on the {HumanEva2}
               benchmarks show the potential of our method when compared to state-of-the-art
               Bayesian techniques. Besides the {HumanEva2} benchmarks, we present
               results on more challenging sequences, including table setting tasks
               in a kitchen environment and persons getting into and out of a car
               mock-up.},
}

@inproceedings{bandouch_accurate_2008,
  author    = {Bandouch, Jan and Engstler, Florian and Beetz, Michael},
  title     = {Accurate Human Motion Capture Using an Ergonomics-Based Anthropometric
               Human Model},
  booktitle = {Proceedings of the Fifth International Conference on Articulated
               Motion and Deformable Objects ({AMDO)}},
  year      = {2008},
  abstract  = {In this paper we present our work on markerless model-based {3D} human
               motion capture using multiple cameras. We use an industry proven
               anthropometric human model that was modeled taking ergonomic considerations
               into account. The outer surface consists of a precise yet compact
               {3D} surface mesh that is mostly rigid on body part level apart from
               some small but important torsion deformations. Benefits are the ability
               to capture a great amount of possible human appearances with high
               accuracy while still having a simple to use and computationally efficient
               model. We have introduced special optimizations such as caching into
               the model to improve its performance in tracking applications. Available
               force and comfort measures within the model provide further opportunities
               for future research. {3D} articulated pose estimation is performed
               in a Bayesian framework, using a set of hierarchically coupled local
               particle filters for tracking. This makes it possible to sample efficiently
               from the high dimensional space of articulated human poses without
               constraining the allowed movements. Sequences of tracked upper-body
               as well as full-body motions captured by three cameras show promising
               results. Despite the high dimensionality of our model (51 {DOF)}
               we succeed at tracking using only silhouette overlap as weighting
               function due to the precise outer appearance of our model and the
               hierarchical decomposition.},
}

@inproceedings{bandouch_evaluation_2008,
  author    = {Bandouch, Jan and Engstler, Florian and Beetz, Michael},
  title     = {Evaluation of Hierarchical Sampling Strategies in {3D} Human Pose
               Estimation},
  booktitle = {Proceedings of the 19th British Machine Vision Conference ({BMVC)}},
  year      = {2008},
  abstract  = {A common approach to the problem of {3D} human pose estimation from
               video is to recursively estimate the most likely pose via particle
               filtering. However, standard particle filtering methods fail the
               task due to the high dimensionality of the {3D} articulated human
               pose space. In this paper we present a thorough evaluation of two
               variants of particle filtering, namely Annealed Particle Filtering
               and Partitioned Sampling Particle Filtering, that have been proposed
               to make the problem feasible by exploiting the hierarchical structures
               inside the pose space. We evaluate both methods in the context of
               markerless model-based {3D} motion capture using silhouette shapes
               from multiple cameras. For that we created a simulation from ground
               truth sequences of human motions, which enables us to focus our evaluation
               on the sampling capabilities of the approaches, i.e. on how efficient
               particles are spread towards the modes of the distribution. We show
               the behaviour with respect to the amount of cameras used, the amount
               of particles used, as well as the dimensionality of the search space.
               Especially the performance when using more complex human models (40
               {DOF} and above) that are able to capture human movements with higher
               precision compared to previous approaches is of interest in this
               work. In summary, we show that both methods have complementary strengths,
               and propose a combined method that is able to perform the tracking
               task with higher robustness despite reduced computational effort.},
}

@article{bandouch_self-training_2012,
  author  = {Bandouch, Jan and Jenkins, Odest Chadwicke and Beetz, Michael},
  title   = {A Self-Training Approach for Visual Tracking and Recognition of Complex
             Human Activity Patterns},
  journal = {International Journal of Computer Vision},
  year    = {2012},
  volume  = {99},
  number  = {2},
  pages   = {166--189},
}

@inproceedings{beetz_runtime_2001,
  author        = {Beetz, Michael},
  title         = {Runtime Plan Adaptation in Structured Reactive Controllers},
  booktitle     = {Proceedings of the Fourth International Conference on Autonomous
                   Agents},
  year          = {2001},
  editor        = {Andre, E. and Sen, S.},
  internal-note = {Possible duplicate of beetz_runtime_2000: identical title and
                   booktitle ("Fourth International Conference") but different
                   year and editors -- verify which year/editors are correct},
}

@incollection{beetz_towards_2005,
  author    = {Beetz, Michael},
  title     = {Towards Comprehensive Computational Models for Plan-Based Control
               of Autonomous Robots},
  booktitle = {Mechanizing Mathematical Reasoning: Essays in Honor of Jörg H. Siekmann
               on the Occasion of His 60th Birthday},
  publisher = {Springer},
  series    = {Lecture Notes in Computer Science},
  volume    = {2605},
  year      = {2005},
  editor    = {Hutter, Dieter and Stephan, Werner},
  pages     = {514--527},
}

@inproceedings{beetz_structured_1999,
  author    = {Beetz, Michael},
  title     = {Structured Reactive Controllers — A computational Model of Everyday
               Activity},
  booktitle = {Proceedings of the Third International Conference on Autonomous Agents},
  year      = {1999},
  editor    = {Etzioni, O. and Müller, J. and Bradshaw, J.},
  pages     = {228--235},
}

@inproceedings{beetz_runtime_2000,
  author    = {Beetz, Michael},
  title     = {Runtime Plan Adaptation in Structured Reactive Controllers},
  booktitle = {Proceedings of the Fourth International Conference on Autonomous
               Agents},
  year      = {2000},
  editor    = {Gini, M. and Rosenschein, J.},
}

@book{beetz_plan-based_2002-1,
  author    = {Beetz, Michael},
  title     = {Plan-based Control of Robotic Agents},
  publisher = {Springer Publishers},
  year      = {2002},
  volume    = {2554},
  series    = {Lecture Notes in Artificial Intelligence},
}

@inproceedings{beetz_plan_2002,
  author    = {Beetz, Michael},
  title     = {Plan Representation for Robotic Agents},
  booktitle = {Proceedings of the Sixth International Conference on {AI} Planning
               and Scheduling},
  year      = {2002},
  pages     = {223--232},
  address   = {Menlo Park, {CA}},
  publisher = {{AAAI} Press},
}

@incollection{beetz_towards_2002,
  author    = {Beetz, Michael},
  title     = {Towards integrated computational models for the plan-based control
               of robotic agents.},
  booktitle = {Festschrift zum 60. Geburtstag von Prof. J. Siekmann},
  publisher = {Springer Publishers},
  year      = {2002},
  series    = {Lecture Notes in Artificial Intelligence},
}

@article{beetz_plan_2001,
  author   = {Beetz, Michael},
  title    = {Plan Management for Robotic Agents},
  journal  = {{KI} - Künstliche Intelligenz; Special Issue on Planning and Scheduling},
  year     = {2001},
  volume   = {15},
  number   = {2},
  pages    = {12--17},
  abstract = {Autonomous robots that perform complex jobs in changing environments
              must be capable of managing their plans as the environmental conditions
              or their tasks change. This raises the problem of deciding whether,
              when, where, and how to revise the plans as the robots' beliefs change.
              This article investigates an approach to execution time plan management
              in which the plans themselves specify the plan adaptation processes.
              In this approach the robot makes strategical (farsighted) adaptations
              while it executes a plan using tactical (immediate) decisions and
              overwrites tactical adaptations after strategical decisions have
              been reached (if necessary). We present experiments in which the
              plan adaptation technique is used for the control of two autonomous
              mobile robots. In one of them it controlled the course of action
              of a museums tourguide robot that has operated for thirteen days
              and performed about 3200 plan adaptations reliably.},
}

@article{beetz_structured_2001,
  author  = {Beetz, Michael},
  title   = {Structured Reactive Controllers},
  journal = {Journal of Autonomous Agents and Multi-Agent Systems. Special Issue:
             Best Papers of the International Conference on Autonomous Agents
             '99},
  year    = {2001},
  volume  = {4},
  pages   = {25--55},
  month   = jun,
}

@book{beetz_concurrent_2000,
  author    = {Beetz, Michael},
  title     = {Concurrent Reactive Plans: Anticipating and Forestalling Execution
               Failures},
  publisher = {Springer Publishers},
  year      = {2000},
  volume    = {1772},
  series    = {Lecture Notes in Artificial Intelligence},
}

@phdthesis{beetz_plan-based_2000,
  author = {Beetz, Michael},
  title  = {Plan-based Control of Robotic Agents},
  school = {University of Bonn},
  year   = {2000},
  note   = {Habilitationsschrift, eingereicht im Oktober 2000.},
}

@phdthesis{beetz_anticipating_1996,
  author = {Beetz, Michael},
  title  = {Anticipating and Forestalling Execution Failures in Structured Reactive
            Plans},
  school = {Yale University},
  year   = {1996},
  type   = {Technical Report, {YALE/DCS/RR1097}},
}

@article{beetz_enabling_2000,
  author   = {Beetz, Michael and Arbuckle, Tom and Belker, Thorsten and
              Bennewitz, Maren and Cremers, Armin and Hähnel, Dirk and Schulz, Dirk},
  title    = {Enabling Autonomous Robots to Perform Complex Tasks},
  journal  = {{KI} - Künstliche Intelligenz; Special Issue on Autonomous Robots},
  year     = {2000},
  abstract = {Recent extensions of the {RHINO} control system, a system for controlling
              autonomous mobile robots, have further enhanced its ability to perform
              complex, dynamically changing, tasks. We present an overview of the
              extended {RHINO} system, sketching the functionality of its main
              components and their inter-relationships as well as long-term experiments
              demonstrating the practicality of its approach. Pointers are also
              provided to the detailed technical references.},
}

@article{beetz_integrated_2001,
  author   = {Beetz, Michael and Arbuckle, Tom and Bennewitz, Maren and Burgard,
              Wolfram and Cremers, Armin and Fox, Dieter and Grosskreutz, Henrik
              and Hähnel, Dirk and Schulz, Dirk},
  title    = {Integrated Plan-based Control of Autonomous Service Robots in Human
              Environments},
  journal  = {{IEEE} Intelligent Systems},
  year     = {2001},
  volume   = {16},
  number   = {5},
  pages    = {56--65},
  abstract = {The authors extend the Rhino robot by adding the means for plan-based
              high-level control and plan transformation, further enhancing its
              probabilistic reasoning capabilities. The result: an autonomous robot
              capable of accomplishing prolonged, complex, and dynamically changing
              tasks in the real world.},
}

@inproceedings{beetz_transparent_1998,
  author    = {Beetz, Michael and Arbuckle, Tom and Cremers, Armin and Mann, Markus},
  title     = {Transparent, Flexible, and Resource-adaptive Image Processing for
               Autonomous Service Robots},
  booktitle = {Procs. of the 13th European Conference on Artificial Intelligence
               ({ECAI-98})},
  year      = {1998},
  editor    = {Prade, H.},
  pages     = {632--636},
  abstract  = {We present the design of a programming system for {IP} routines which
               satisfies the requirements above. Our solution consists of {RECIPE},
               a dynamically loadable, modular architecture in a distributed robot
               control system that provides the basic {IP} functionality and manages
               images and other {IP} data structures. It provides a variety of standard
               {IP} routines such as edge detectors, convolutions, noise reduction,
               segmentation, etc. {RPLIP}, an extension of the abstract machine
               provided by the robot control/plan language {RPL.} {RPLIP} provides
               suitable abstractions for images, regions of interest, etc, and supports
               a tight integration of the vision routines into the robot control
               system. Image Processing Plans that provide various methods for combining
               {IP} methods into {IP} pipelines. {IP} plans support the implementation
               of robust vision routines and the integration of other sensors such
               as laser range finders and sonars for object recognition tasks and
               scene analysis. Since vision routines are {RPL} programs, they can
               be constructed, revised, and reasoned about while the robot control
               program is being executed.},
}

@inproceedings{beetz_camera-based_2006,
  author    = {Beetz, Michael and Bandouch, Jan and Gedikli, Suat and
               von Hoyningen-Huene, Nico and Kirchlechner, Bernhard and
               Maldonado, Alexis},
  title     = {Camera-based Observation of Football Games for Analyzing Multi-agent
               Activities},
  booktitle = {Proceedings of the Fifth International Joint Conference on Autonomous
               Agents and Multiagent Systems ({AAMAS})},
  year      = {2006},
  abstract  = {This paper describes a camera-based observation system for football
               games that is used for the automatic analysis of football games and
               reasoning about multi-agent activity. The observation system runs
               on video streams produced by cameras set up for {TV} broadcasting.
               The observation system achieves reliability and accuracy through
               various mechanisms for adaptation, probabilistic estimation, and
               exploiting domain constraints. It represents motions compactly and
               segments them into classified ball actions.},
  keywords  = {soccer},
}

@inproceedings{beetz_towards_2009,
  author    = {Beetz, Michael and Bandouch, Jan and Jain, Dominik and Tenorth, Moritz},
  title     = {Towards Automated Models of Activities of Daily Life},
  booktitle = {First International Symposium on Quality of Life Technology – Intelligent
               Systems for Better Living},
  year      = {2009},
  address   = {Pittsburgh, Pennsylvania {USA}},
  abstract  = {We propose automated probabilistic models of everyday activities ({AM-EvA)}
               as a novel technical means for the perception, interpretation, and
               analysis of everyday manipulation tasks and activities of daily life.
               {AM-EvAs} are based on action-related concepts in everyday activities
               such as action-related places (the place where cups are taken from
               the cupboard), capabilities (the objects that can be picked up single-handedly),
               etc. These concepts are probabilistically derived from a set of previous
               activities that are fully and automatically observed by computer
               vision and additional sensor systems. {AM-EvA} models enable robots
               and technical systems to analyze activities in the complete situation
               and activity context. They render the classification and the assessment
               of actions and situations objective and can justify the probabilistic
               interpretation with respect to the activities the concepts have been
               learned from. In this paper, we describe the current state of implementation
               of the system that realizes this idea of automated models of everyday
               activities and show example results from the observation and analysis
               of table setting episodes.},
}

@inproceedings{beetz_assistive_2007,
  author    = {Beetz, Michael and Bandouch, Jan and Kirsch, Alexandra and Maldonado,
               Alexis and Müller, Armin and Rusu, Radu Bogdan},
  title     = {The Assistive Kitchen — A Demonstration Scenario for Cognitive Technical
               Systems},
  booktitle = {Proceedings of the 4th {COE} Workshop on Human Adaptive Mechatronics
               ({HAM)}},
  year      = {2007},
  abstract  = {This paper introduces the Assistive Kitchen as a comprehensive demonstration
               and challenge scenario for technical cognitive systems. We describe
               its hardware and software infrastructure. Within the Assistive Kitchen
               application, we select particular domain activities as research subjects
               and identify the cognitive capabilities needed for perceiving, interpreting,
               analyzing, and executing these activities as research foci. We conclude
               by outlining open research issues that need to be solved to realize
               the scenarios successfully.},
}

@inproceedings{beetz_learning_2000,
  author    = {Beetz, Michael and Belker, Thorsten},
  title     = {Learning Structured Reactive Navigation Plans from Executing {MDP}
               Navigation Policies},
  booktitle = {8th International Symposium on Intelligent Robotic Systems, {SIRS}
               2000},
  year      = {2000},
  editor    = {Ferryman},
  abstract  = {Autonomous robots, such as robot office couriers, need navigation
               routines that support flexible task execution and effective action
               planning. This paper describes {XfrmLearn}, a system that learns
               structured symbolic navigation plans. Given a navigation task, {XfrmLearn}
               learns to structure continuous navigation behavior and represents
               the learned structure as compact and transparent plans. The structured
               plans are obtained by starting with monolithic default plans that
               are optimized for average performance and adding subplans to improve
               the navigation performance for the given task. Compactness is achieved
               by incorporating only subplans that achieve significant performance
               gains. The resulting plans support action planning and opportunistic
               task execution. {XfrmLearn} is implemented and extensively evaluated
               on an autonomous mobile robot.},
}

@inproceedings{beetz_environment_2000,
  author    = {Beetz, Michael and Belker, Thorsten},
  title     = {Environment and Task Adaptation for Robotic Agents},
  booktitle = {Procs. of the 14th European Conference on Artificial Intelligence
               ({ECAI-2000})},
  year      = {2000},
  editor    = {Horn, W.},
  pages     = {648--652},
  abstract  = {This paper investigates the problem of improving the performance of
               general state-of-the-art robot control systems by autonomously adapting
               them to specific tasks and environments. We propose model- and test-based
               transformational learning ({MTTL}) as a computational model for performing
               this task. {MTTL} uses abstract models of control systems and environments
               in order to propose promising adaptations. To account for model deficiencies
               resulting from abstraction, hypotheses are statistically tested based
               on experimentation in the physical world. We describe {XfrmLearn},
               an implementation of {MTTL}, and apply it to the problem of indoor
               navigation. We present experiments in which {XfrmLearn} improves
               the navigation performance of a state-of-the-art high-speed navigation
               system for a given set of navigation tasks by up to 44 percent.},
}

@inproceedings{beetz_learning_2001,
  author    = {Beetz, Michael and Belker, Thorsten},
  title     = {Learning Structured Reactive Navigation Plans from Executing {MDP}
               policies},
  booktitle = {Proceedings of the 5th International Conference on Autonomous Agents},
  year      = {2001},
  pages     = {19--20},
  abstract  = {Autonomous robots, such as robot office couriers, need navigation
               routines that support flexible task execution and effective action
               planning. This paper describes {XfrmLearn}, a system that learns
               structured symbolic navigation plans. Given a navigation task, {XfrmLearn}
               learns to structure continuous navigation behavior and represents
               the learned structure as compact and transparent plans. The structured
               plans are obtained by starting with monolithic default plans that
               are optimized for average performance and adding subplans to improve
               the navigation performance for the given task. Compactness is achieved
               by incorporating only subplans that achieve significant performance
               gains. The resulting plans support action planning and opportunistic
               task execution. {XfrmLearn} is implemented and extensively evaluated
               on an autonomous mobile robot.},
}

@inproceedings{beetz_experience-_1999,
  author    = {Beetz, Michael and Belker, Thorsten},
  title     = {Experience- and Model-based Transformational Learning of Symbolic
               Behavior Specifications},
  booktitle = {Proceedings of the {IJCAI} Workshop on Robot Action Planning},
  year      = {1999},
  note      = {{IJCAI} Workshop on Robot Action Planning},
  abstract  = {The paper describes Xfrml, a system that learns symbolic behavior
               specifications to control and improve the continuous sensor-driven
               navigation behavior of an autonomous mobile robot. The robot is to
               navigate between a set of predefined locations in an office environment
               and employs a navigation system consisting of a path planner and
               a reactive collision avoidance system. {XfrmLearn} rationally reconstructs
               the continuous sensor-driven navigation behavior in terms of task
               hierarchies by identifying significant structures and commonalities
               in behaviors. It also constructs a statistical behavior model for
               typical navigation tasks. The behavior model together with a model
               of how the collision avoidance module should ``perceive'' the environment
               is used to detect behavior ``flaws'', diagnose them, and revise the
               plans to improve their performance. The learning method is implemented
               on an autonomous mobile robot.},
}

@inproceedings{beetz_planning_1998,
  author    = {Beetz, Michael and Bennewitz, Maren},
  title     = {Planning, Scheduling, and Plan Execution for Autonomous Robot Office
               Couriers},
  booktitle = {Proceedings of the workshop ``{Integrating} Planning, Scheduling and
               Execution in Dynamic and Uncertain Environments'' at the Fourth International
               Conference on {AI} in Planning Systems ({AIPS})},
  year      = {1998},
  editor    = {Bergmann, R. and Kott, A.},
  volume    = {Workshop Notes 98-02},
  publisher = {{AAAI} Press},
  abstract  = {Scheduling the tasks of an autonomous robot office courier and carrying
               out the scheduled tasks reliably and efficiently pose challenging
               problems for autonomous robot control. The controller has to accomplish
               longterm efficiency rather than optimize problem-solving episodes.
               It also has to exploit opportunities and avoid problems flexibly
               because often the robot is forced to generate schedules based on
               partial information. We propose to implement the controller for scheduled
               activity by employing concurrent reactive plans that reschedule the
               course of action whenever necessary and while performing their actions.
               The plans are represented modularly and transparently to allow for
               easy transformation. Scheduling and schedule repair methods are implemented
               as plan transformation rules.},
}

@inproceedings{beetz_probabilistic_1999,
  author    = {Beetz, Michael and Bennewitz, Maren and Grosskreutz, Henrik},
  title     = {Probabilistic, Prediction-based Schedule Debugging for Autonomous
    Robot Office Couriers},
  booktitle = {Proceedings of the 23rd German Conference on Artificial Intelligence
    ({KI} 99)},
  year      = {1999},
  address   = {Bonn, Germany},
  publisher = {Springer Verlag},
  abstract  = {Acting efficiently and meeting deadlines requires autonomous robots
    to schedule their activities. It also requires them to act flexibly:
    to exploit opportunities and avoid problems as they occur. Scheduling
    activities to meet these requirements is an important research problem
    in its own right. In addition, it provides us with a problem domain
    where modern symbolic {AI} planning techniques can enable robots
    to exhibit better performance than they possibly could without planning.
    This paper describes {PPSD}, a novel planning technique that enables
    autonomous robots to impose order constraints on concurrent percept-driven
    plans to increase the plans' efficiency. The basic idea is to generate
    a schedule under simplified conditions and then to iteratively detect,
    diagnose, and eliminate behavior flaws caused by the schedule based
    on a small number of randomly sampled symbolic execution scenarios.
    The paper discusses the integration of {PPSD} into the controller
    of an autonomous robot office courier and gives an example of its
    use.},
}

@INPROCEEDINGS{beetz_cop-man_2009,

author = {Beetz, Michael and Blodow, Nico and Klank, Ulrich and Marton, Zoltan
Csaba and Pangercic, Dejan and Rusu, Radu Bogdan},
title = {{CoP-Man} – Perception for Mobile Pick-and-Place in Human Living
Environments},
booktitle = {Proceedings of the 22nd {IEEE/RSJ} International Conference on Intelligent
Robots and Systems ({IROS}) Workshop on Semantic Perception for Mobile
Manipulation},
year = {2009},
address = {St. Louis, {MO}, {USA}},
month = oct,
note = {Invited paper.}

}

@INPROCEEDINGS{beetz_agilo_2002-1,

author = {Beetz, Michael and Buck, Sebastian and Hanek, Robert and Hofhauser,
Andreas and Schmitt, Thorsten},
title = {{AGILO} {RoboCuppers} 2002: Applying Cooperative Game State Estimation,
Experience-based Learning, and Plan-based Control to Autonomous Robot
Soccer},
booktitle = {{RoboCup} International Symposium 2002},
year = {2002},
series = {Lecture Notes in Computer Science},
abstract = {This paper describes the computational model underlying the {AGILO}
autonomous robot soccer team and its implementation. The most salient
aspects of the {AGILO} control software are that it includes (1)
a cooperative probabilistic game state estimator working with a simple
off-the-shelf camera system; (2) a situated action selection module
that makes ample use of experience-based learning and produces coherent
team behavior even if inter-robot communication is perturbed; and
(3) a playbook executor that can perform preprogrammed complex soccer
plays in appropriate situations by employing plan-based control techniques.
The use of such sophisticated state estimation and control techniques
characterizes the {AGILO} software. The paper discusses the computational
techniques and necessary extensions based on experimental data from
the 2001 robot soccer world championship.}

}

@INPROCEEDINGS{beetz_agilo_2002,

author = {Beetz, Michael and Buck, Sebastian and Hanek, Robert and Schmitt,
Thorsten and Radig, Bernd},
title = {The {AGILO} Autonomous Robot Soccer Team: Computational Principles,
Experiences, and Perspectives},
booktitle = {International Joint Conference on Autonomous Agents and Multi Agent
Systems ({AAMAS}) 2002},
year = {2002},
pages = {805--812},
address = {Bologna, Italy},
abstract = {This paper describes the computational model underlying the {AGILO}
autonomous robot soccer team, its implementation, and our experiences
with it. The most salient aspects of the {AGILO} control software
are that it includes (1) a cooperative probabilistic game state estimator
working with a simple off-the-shelf camera system; (2) a situated
action selection module that makes ample use of experience-based
learning and produces coherent team behavior even if inter-robot
communication is perturbed; and (3) a playbook executor that can
perform preprogrammed complex soccer plays in appropriate situations
by employing plan-based control techniques. The use of such sophisticated
state estimation and control techniques distinguishes the {AGILO}
software from many others applied to mid-size autonomous robot soccer.
The paper discusses the computational techniques and necessary extensions
based on experimental data from the 2001 robot soccer world championship.}

}

@ARTICLE{beetz_integrating_1998,

author = {Beetz, Michael and Burgard, Wolfram and Fox, Dieter and Cremers,
Armin},
title = {Integrating Active Localization into High-level Control Systems},
journal = {Robotics and Autonomous Systems},
year = {1998},
volume = {23},
pages = {205--220}

}

@article{beetz_learning_2010,
  author  = {Beetz, Michael and Buss, Martin and Radig, Bernd},
  title   = {Learning from Humans – Cognition-enabled Computational Models of
    Everyday Activity},
  journal = {Künstliche Intelligenz},
  year    = {2010},
}

@INPROCEEDINGS{beetz_cognitive_2007,

author = {Beetz, Michael and Buss, Martin and Wollherr, Dirk},
title = {Cognitive Technical Systems — What Is the Role of Artificial Intelligence?},
booktitle = {Proceedings of the 30th German Conference on Artificial Intelligence
({KI-2007})},
year = {2007},
editor = {Hertzberg, J. and Beetz, M. and Englert, R.},
pages = {19--42},
note = {Invited paper},
abstract = {The newly established cluster of excellence {COTESYS} investigates
the realization of cognitive capabilities such as perception, learning,
reasoning, planning, and execution for technical systems including
humanoid robots, flexible manufacturing systems, and autonomous vehicles.
In this paper we describe cognitive technical systems using a sensor-equipped
kitchen with a robotic assistant as an example. We will particularly
consider the role of Artificial Intelligence in the research enterprise.
Key research foci of Artificial Intelligence research in {COTESYS}
include (*) symbolic representations grounded in perception and action,
(*) first-order probabilistic representations of actions, objects,
and situations, (*) reasoning about objects and situations in the
context of everyday manipulation tasks, and (*) the representation
and revision of robot plans for everyday activity.}

}

@inproceedings{beetz_watching_2004,
  author    = {Beetz, M. and Fischer, F. and Flossmann, S. and Kirchlechner, B.
    and Unseld, A. and Holzer, C.},
  title     = {Watching Football with the Eyes of Experts: Integrated Intelligent
    Systems for the Automatic Analysis of (Simulated) Football Games},
  booktitle = {5th Annual Conference dvs-Section Computer Science in Sport},
  year      = {2004},
  keywords  = {soccer},
}

@INPROCEEDINGS{beetz_motion_2004,

author = {Beetz, Michael and Flossmann, Sven and Stammeier, Thomas},
title = {Motion and Episode Models for (Simulated) Football Games: Acquisition,
Representation, and Use},
booktitle = {3rd International Joint Conference on Autonomous Agents \& Multi
Agent Systems ({AAMAS})},
year = {2004},
keywords = {soccer}

}

@INPROCEEDINGS{beetz_visually_2007,

author = {Beetz, Michael and Gedikli, Suat and Bandouch, Jan and Kirchlechner,
Bernhard and von Hoyningen-Huene, Nico and Perzylo, Alexander},
title = {Visually Tracking Football Games Based on {TV} Broadcasts},
booktitle = {Proceedings of the Twentieth International Joint Conference on Artificial
Intelligence ({IJCAI})},
year = {2007},
abstract = {This paper describes {ASPOGAMO}, a visual tracking system that determines
the coordinates and trajectories of football players in camera view
based on {TV} broadcasts. To do so, {ASPOGAMO} solves a complex probabilistic
estimation problem that consists of three subproblems that interact
in subtle ways: the estimation of the camera direction and zoom factor,
the tracking and smoothing of player routes, and the disambiguation
of tracked players after occlusions. The paper concentrates on system
aspects that make it suitable for operating under unconstrained conditions
and in (almost) realtime. We report on results obtained in a public
demonstration at {RoboCup} 2006 where we conducted extensive experiments
with real data from live coverage of World Cup 2006 games in Germany.},
keywords = {soccer}

}

@INPROCEEDINGS{beetz_agilo_2003,

author = {Beetz, Michael and Gedikli, Suat and Hanek, Robert and Schmitt, Thorsten
and Stulp, Freek},
title = {{AGILO} {RoboCuppers} 2003: Computational Principles and Research
Directions},
booktitle = {{RoboCup} International Symposium 2003},
year = {2003},
address = {Padova, Italy},
abstract = {This paper gives an overview about the approaches chosen by the middle
size robot soccer team of the Munich University of Technology, the
{AGILO} {RoboCuppers.} First a brief system overview will be given.
Then the computational principles are described. Finally the directions
for further research are outlined.}

}

@INPROCEEDINGS{beetz_semi-automatic_1999,

author = {Beetz, Michael and Giesenschlag, Markus and Englert, Roman and Gülch,
Eberhard and Cremers, Armin},
title = {Semi-automatic Acquisition of Symbolically-annotated {3D} Models
of Office Environments},
booktitle = {International Conference on Robotics and Automation ({ICRA-99})},
year = {1999}

}

@INPROCEEDINGS{beetz_causal_1998,

author = {Beetz, M. and Grosskreutz, H.},
title = {Causal Models of Mobile Service Robot Behavior},
booktitle = {Fourth International Conference on {AI} Planning Systems},
year = {1998},
editor = {Simmons, R. and Veloso, M. and Smith, S.},
pages = {163--170},
publisher = {Morgan Kaufmann}

}

@ARTICLE{beetz_probabilistic_2005,

author = {Beetz, Michael and Grosskreutz, Henrik},
title = {Probabilistic Hybrid Action Models for Predicting Concurrent Percept-driven
Robot Behavior},
journal = {Journal of Artificial Intelligence Research},
year = {2005},
volume = {24},
pages = {799--849},
abstract = {This article develops Probabilistic Hybrid Action Models ({PHAMs}),
a realistic causal model for predicting the behavior generated by
modern percept-driven robot plans. {PHAMs} represent aspects of robot
behavior that cannot be represented by most action models used in
{AI} planning: the temporal structure of continuous control processes,
their non-deterministic effects, several modes of their interferences,
and the achievement of triggering conditions in closed-loop robot
plans. The main contributions of this article are: (1) {PHAMs}, a
model of concurrent percept-driven behavior, its formalization, and
proofs that the model generates probably, qualitatively accurate
predictions; and (2) a resource-efficient inference method for {PHAMs}
based on sampling projections from probabilistic action models and
state descriptions. We show how {PHAMs} can be applied to planning
the course of action of an autonomous robot office courier based
on analytical and experimental results.}

}

@INPROCEEDINGS{beetz_probabilistic_2000,

author = {Beetz, Michael and Grosskreutz, Henrik},
title = {Probabilistic Hybrid Action Models for Predicting Concurrent Percept-driven
Robot Behavior},
booktitle = {Proceedings of the Sixth International Conference on {AI} Planning
Systems},
year = {2000},
publisher = {{AAAI} Press},
abstract = {This paper develops Probabilistic Hybrid Action Models ({PHAMs}),
a realistic causal model for predicting the behavior generated by
modern concurrent percept-driven robot plans. {PHAMs} represent aspects
of robot behavior that cannot be represented by most action models
used in {AI} planning: the temporal structure of continuous control
processes, their non-deterministic effects, and several modes of
their interferences. The main contributions of the paper are: (1)
{PHAMs}, a model of concurrent percept-driven behavior, its formalization,
and proofs that the model generates probably, qualitatively accurate
predictions; and (2) a resource-efficient inference method for {PHAMs}
based on sampling projections from probabilistic action models and
state descriptions. We discuss how {PHAMs} can be applied to planning
the course of action of an autonomous robot office courier based
on analytical and experimental results.}

}

@BOOK{beetz_advances_2002,

title = {Advances in Plan-based Control of Robotic Agents},
publisher = {Springer Publishers},
year = {2002},
editor = {Beetz, Michael and Hertzberg, Joachim and Ghallab, Malik and Pollack,
Martha},
volume = {2554},
series = {Lecture Notes in Artificial Intelligence}

}

@INPROCEEDINGS{beetz_plan-based_2002,

author = {Beetz, Michael and Hofhauser, Andreas},
title = {Plan-based control for autonomous robot soccer},
booktitle = {Advances in Plan-based Control of Autonomous Robots. Selected Contributions
of the Dagstuhl Seminar Plan-based Control of Robotic Agents, Lecture
Notes in Artificial Intelligence ({LNAI})},
year = {2002},
publisher = {Springer-Verlag}

}

@article{beetz_aspogamo:_2009,
  author   = {Beetz, Michael and Hoyningen-Huene, Nicolai von and Kirchlechner,
    Bernhard and Gedikli, Suat and Siles, Francisco and Durus, Murat
    and Lames, Martin},
  title    = {{ASpoGAMo:} Automated Sports Game Analysis Models},
  journal  = {International Journal of Computer Science in Sport},
  year     = {2009},
  volume   = {8},
  number   = {1},
  abstract = {We propose automated sport game models as a novel technical means
    for the analysis of team sport games. The basic idea is that automated
    sport game models are based on a conceptualization of key notions
    in such games and probabilistically derived from a set of previous
    games. In contrast to existing approaches, automated sport game models
    provide an analysis that is sensitive to their context and go beyond
    simple statistical aggregations allowing objective, transparent and
    meaningful concept definitions. Based on automatically gathered spatio-temporal
    data by a computer vision system, a model hierarchy is built bottom
    up, where context-sensitive concepts are instantiated by the application
    of machine learning techniques. We describe the current state of
    implementation of the {ASpoGaMo} system including its computer vision
    subsystem that realizes the idea of automated sport game models.
    Their usage is exemplified with an analysis of the final of the soccer
    World Cup 2006.},
  keywords = {soccer},
}

@ARTICLE{beetz_towards_2010-1,

author = {Beetz, Michael and Jain, Dominik and Mösenlechner, Lorenz and Tenorth,
Moritz},
title = {Towards Performing Everyday Manipulation Activities},
journal = {Robotics and Autonomous Systems},
year = {2010},
volume = {58},
pages = {1085--1095},
number = {9}

}

@ARTICLE{beetz_cognition-enabled_2012,

author = {Beetz, Michael and Jain, Dominik and Mösenlechner, Lorenz and Tenorth,
Moritz and Kunze, Lars and Blodow, Nico and Pangercic, Dejan},
title = {Cognition-Enabled Autonomous Robot Control for the Realization of
Home Chore Task Intelligence},
journal = {Proceedings of the {IEEE}, Special Issue on Quality of Life Technology},
year = {2012},
volume = {100},
pages = {2454--2471},
number = {8}

}

@inproceedings{beetz_interpretation_2004,
  author    = {Beetz, M. and Kirchlechner, B. and Fischer, F.},
  title     = {Interpretation and Processing of Position Data for the Empirical
    Study of the Behavior of Simulation League Robocup Teams},
  booktitle = {{KI} 2004 Workshop},
  year      = {2004},
}

@article{beetz_computerized_2005,
  author   = {Beetz, Michael and Kirchlechner, Bernhard and Lames, Martin},
  title    = {Computerized Real-Time Analysis of Football Games},
  journal  = {{IEEE} Pervasive Computing},
  year     = {2005},
  volume   = {4},
  number   = {3},
  pages    = {33--39},
  abstract = {The research reported in this article is part of an ambitious, mid-term
    project that studies the automated analysis of football games. The
    input for game analysis is position data provided by tiny microwave
    senders that are placed into the ball and the shin guards of football
    players. The main objectives of the project are (1) the investigation
    of novel computational mechanisms that enable computer systems to
    recognize intentional activities based on position data, (2) the
    development of an integrated software system to automate game interpretation
    and analysis, and (3) the demonstration of the impact of automatic
    game analysis on sport science, football coaching, and sports entertainment.
    The results are to be showcased in the form of an intelligent information
    system for the matches at the Football World Championship 2006 in
    Germany.},
  keywords = {soccer},
}

@article{beetz_special_2010,
  author  = {Beetz, Michael and Kirsch, Alexandra},
  title   = {Special Issue on Cognition for Technical Systems},
  journal = {Künstliche Intelligenz},
  year    = {2010},
  volume  = {24},
}

@INPROCEEDINGS{beetz_rpl-learn:_2004,

author = {Beetz, Michael and Kirsch, Alexandra and Müller, Armin},
title = {{RPL-LEARN:} Extending an Autonomous Robot Control Language to Perform
Experience-based Learning},
booktitle = {3rd International Joint Conference on Autonomous Agents \& Multi
Agent Systems ({AAMAS})},
year = {2004},
abstract = {In this paper, we extend the autonomous robot control and plan language
{RPL} with constructs for specifying experiences, control tasks,
learning systems and their parameterization, and exploration strategies.
Using these constructs, the learning problems can be represented
explicitly and transparently and become executable. With the extended
language we rationally reconstruct parts of the {AGILO} autonomous
robot soccer controllers and show the feasibility and advantages
of our approach.}

}

@inproceedings{beetz_robotic_2011-1,
  author    = {Beetz, Michael and Klank, Ulrich and Kresse, Ingo and Maldonado,
    Alexis and Mösenlechner, Lorenz and Pangercic, Dejan and Rühr, Thomas
    and Tenorth, Moritz},
  title     = {Robotic Roommates Making Pancakes},
  booktitle = {11th {IEEE-RAS} International Conference on Humanoid Robots},
  year      = {2011},
  address   = {Bled, Slovenia},
  month     = oct,
}

@INPROCEEDINGS{beetz_robotic_2011,

author = {Beetz, Michael and Klank, Ulrich and Maldonado, Alexis and Pangercic,
Dejan and Rühr, Thomas},
title = {Robotic Roommates Making Pancakes - Look Into Perception-Manipulation
Loop},
booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA}),
Workshop on Mobile Manipulation: Integrating Perception and Manipulation},
year = {2011},
pages = {529--536},
month = may

}

@INPROCEEDINGS{beetz_local_1996,

author = {Beetz, M. and {McDermott}, D.},
title = {Local Planning of Ongoing Activities},
booktitle = {Third International Conference on {AI} Planning Systems},
year = {1996},
editor = {Drabble, Brian},
pages = {19--26},
publisher = {Morgan Kaufmann}

}

@INPROCEEDINGS{beetz_improving_1994,

author = {Beetz, M. and {McDermott}, D.},
title = {Improving Robot Plans During Their Execution},
booktitle = {Second International Conference on {AI} Planning Systems},
year = {1994},
editor = {Hammond, K.},
pages = {3--12},
publisher = {Morgan Kaufmann}

}

@INPROCEEDINGS{beetz_declarative_1992,

author = {Beetz, M. and {McDermott}, D.},
title = {Declarative Goals in Reactive Plans},
booktitle = {First International Conference on {AI} Planning Systems},
year = {1992},
editor = {Hendler, J.},
pages = {3--12},
publisher = {Morgan Kaufmann}

}

@inproceedings{beetz_executing_1996,
  author    = {Beetz, M. and {McDermott}, D.},
  title     = {Executing Structured Reactive Plans},
  booktitle = {{AAAI} Fall Symposium: Issues in Plan Execution},
  year      = {1996},
  editor    = {Pryor, L. and Steel, S.},
}

@inproceedings{beetz_expressing_1997,
  author    = {Beetz, M. and {McDermott}, D.},
  title     = {Expressing Transformations of Structured Reactive Plans},
  booktitle = {Recent Advances in {AI} Planning. Proceedings of the 1997 European
    Conference on Planning},
  year      = {1997},
  pages     = {64--76},
  publisher = {Springer Publishers},
}

@inproceedings{beetz_fast_1997,
  author    = {Beetz, M. and {McDermott}, D.},
  title     = {Fast Probabilistic Plan Debugging},
  booktitle = {Recent Advances in {AI} Planning. Proceedings of the 1997 European
    Conference on Planning},
  year      = {1997},
  pages     = {77--90},
  publisher = {Springer Publishers},
}

@inproceedings{beetz_cram_2010,
  author    = {Beetz, Michael and Mösenlechner, Lorenz and Tenorth, Moritz},
  title     = {{CRAM} – A Cognitive Robot Abstract Machine for Everyday Manipulation
    in Human Environments},
  booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems},
  year      = {2010},
  pages     = {1012--1017},
  address   = {Taipei, Taiwan},
  month     = oct,
}

@inproceedings{beetz_cram_2012,
  author    = {Beetz, Michael and Mösenlechner, Lorenz and Tenorth, Moritz and Rühr,
    Thomas},
  title     = {{CRAM} – a Cognitive Robot Abstract Machine},
  booktitle = {5th International Conference on Cognitive Systems ({CogSys} 2012)},
  year      = {2012},
}

@inproceedings{beetz_structured_1998,
  author    = {Beetz, Michael and Peters, Hanno},
  title     = {Structured Reactive Communication Plans — Integrating Conversational
    Actions into High-level Robot Control Systems},
  booktitle = {Proceedings of the 22nd German Conference on Artificial Intelligence
    ({KI} 98), Bremen, Germany},
  year      = {1998},
  publisher = {Springer Verlag},
}

@ARTICLE{beetz_agilo_2004,

author = {Beetz, Michael and Schmitt, Thorsten and Hanek, Robert and Buck,
Sebastian and Stulp, Freek and Schröter, Derik and Radig, Bernd},
title = {The {AGILO} Robot Soccer Team – Experience-based Learning and Probabilistic
Reasoning in Autonomous Robot Control},
journal = {Autonomous Robots},
year = {2004},
volume = {17},
pages = {55--77},
number = {1},
abstract = {This article describes the computational model underlying the {AGILO}
autonomous robot soccer team, its implementation, and our experiences
with it. According to our model the control system of an autonomous
soccer robot consists of a probabilistic game state estimator and
a situated action selection module. The game state estimator computes
the robot's belief state with respect to the current game situation
using a simple off-the-shelf camera system. The estimated game state
comprises the positions and dynamic states of the robot itself and
its teammates as well as the positions of the ball and the opponent
players. Employing sophisticated probabilistic reasoning techniques
and exploiting the cooperation between team mates, the robot can
estimate complex game states reliably and accurately despite incomplete
and inaccurate state information. The action selection module selects
actions according to specified selection criteria as well as learned
experiences. Automatic learning techniques made it possible to develop
fast and skillful routines for approaching the ball, assigning roles,
and performing coordinated plays. The paper discusses the computational
techniques based on experimental data from the 2001 robot soccer
world championship.}

}

@inproceedings{beetz_perspectives_2000,
  author    = {Beetz, Michael and Schumacher, Jürgen and Cremers, Armin and Hellingrath,
    Bernd and Mazzocco, Christian},
  title     = {Perspectives on Plan-based Multiagent Systems for Distributed Supply
    Chain Management in the Steel Industry},
  booktitle = {Proceedings of the {ECAI2000} Workshop on Agent Technologies and
    Their Application Scenarios in Logistics},
  year      = {2000},
  editor    = {Timm, I.},
}

@ARTICLE{beetz_generality_2010,

author = {Beetz, Michael and Stulp, Freek and Esden-Tempski, Piotr and Fedrizzi,
Andreas and Klank, Ulrich and Kresse, Ingo and Maldonado, Alexis
and Ruiz, Federico},
title = {Generality and Legibility in Mobile Manipulation},
journal = {Autonomous Robots Journal (Special Issue on Mobile Manipulation)},
year = {2010},
volume = {28},
pages = {21--44},
number = {1}

}

@INPROCEEDINGS{beetz_autonomous_2003,

author = {Beetz, Michael and Stulp, Freek and Kirsch, Alexandra and Müller,
Armin and Buck, Sebastian},
title = {Autonomous Robot Controllers Capable of Acquiring Repertoires of
Complex Skills},
booktitle = {{RoboCup} International Symposium 2003},
year = {2003},
address = {Padova, Italy},
month = jul,
abstract = {Due to the complexity and sophistication of the skills needed in real
world tasks, the development of autonomous robot controllers requires
an ever increasing application of learning techniques. To date, however,
learning steps are mainly executed in isolation and only the learned
code pieces become part of the controller. This approach has several
drawbacks: the learning steps themselves are undocumented and not
executable. In this paper, we extend an existing control language
with constructs for specifying control tasks, process models, learning
problems, exploration strategies, etc. Using these constructs, the
learning problems can be represented explicitly and transparently
and, as they are part of the overall program implementation, become
executable. With the extended language we rationally reconstruct
large parts of the action selection module of the {AGILO2001} autonomous
soccer robots.}

}

@INPROCEEDINGS{beetz_assistive_2008,

author = {Beetz, Michael and Stulp, Freek and Radig, Bernd and Bandouch, Jan
and Blodow, Nico and Dolha, Mihai and Fedrizzi, Andreas and Jain,
Dominik and Klank, Uli and Kresse, Ingo and Maldonado, Alexis and
Marton, Zoltan and Mösenlechner, Lorenz and Ruiz, Federico and Rusu,
Radu Bogdan and Tenorth, Moritz},
title = {The Assistive Kitchen – A Demonstration Scenario for Cognitive Technical
Systems},
booktitle = {{IEEE} 17th International Symposium on Robot and Human Interactive
Communication ({RO-MAN}), Muenchen, Germany},
year = {2008},
pages = {1--8},
note = {Invited paper.}

}

@ARTICLE{beetz_towards_2010,

author = {Beetz, Michael and Tenorth, Moritz and Jain, Dominik and Bandouch,
Jan},
title = {Towards Automated Models of Activities of Daily Life},
journal = {Technology and Disability},
year = {2010},
volume = {22},
pages = {27--40},
number = {1-2}

}

@inproceedings{beetz_semantic_2012,
  author    = {Beetz, Michael and Tenorth, Moritz and Pangercic, Dejan and Pitzer,
    Benjamin},
  title     = {Semantic Object Maps for Household Tasks},
  booktitle = {5th International Conference on Cognitive Systems ({CogSys} 2012)},
  year      = {2012},
}

@INPROCEEDINGS{belker_learning_2001,

author = {Belker, Thorsten and Beetz, Michael},
title = {Learning to Execute Robot Navigation Plans},
booktitle = {Proceedings of the 25th German Conference on Artificial Intelligence
({KI} 01)},
year = {2001},
address = {Wien, Austria},
publisher = {Springer Verlag},
abstract = {Most state-of-the-art navigation systems for autonomous service robots
decompose navigation into global navigation planning and local reactive
navigation. While the methods for navigation planning and local navigation
are well understood, the plan execution problem, the problem of how
to generate and parameterize local navigation tasks from a given
navigation plan, is largely unsolved. This article describes how
a robot can autonomously learn to execute navigation plans. We formalize
the problem as a Markov Decision Problem ({MDP}), discuss how it
can be simplified to make its solution feasible, and describe how
the robot can acquire the necessary action models. We show, both
in simulation and on a {RWI} B21 mobile robot, that the learned models
are able to produce competent navigation behavior.}

}

@ARTICLE{belker_learning_2002,

author = {Belker, Thorsten and Beetz, Michael and Cremers, Armin},
title = {Learning Action Models for the Improved Execution of Navigation Plans},
journal = {Robotics and Autonomous Systems},
year = {2002},
volume = {38},
pages = {137--148},
number = {3--4},
month = mar,
abstract = {Most state-of-the-art navigation systems for autonomous service robots
decompose navigation into global navigation planning and local reactive
navigation. While the methods for navigation planning and local navigation
themselves are well understood, the plan execution problem, the problem
of how to generate and parameterize local navigation tasks from a
given navigation plan, is largely unsolved. This article describes
how a robot can autonomously learn to execute navigation plans. We
formalize the problem as a Markov Decision Process ({MDP}) and derive
a decision theoretic action selection function from it. The action
selection function employs models of the robot's navigation actions,
which are autonomously acquired from experience using neural network
or regression tree learning algorithms. We show, both in simulation
and on a {RWI} B21 mobile robot, that the learned models together
with the derived action selection function achieve competent navigation
behavior.}

}

@inproceedings{bersch_segmentation_2012,
  author    = {Bersch, Christian and Pangercic, Dejan and Osentoski, Sarah and Hausman,
    Karol and Marton, Zoltan-Csaba and Ueda, Ryohei and Okada, Kei and
    Beetz, Michael},
  title     = {Segmentation of Textured and Textureless Objects through Interactive
    Perception},
  booktitle = {{RSS} Workshop on Robots in Clutter: Manipulation, Perception and
    Navigation in Human Environments},
  year      = {2012},
  address   = {Sydney, Australia},
  month     = jul,
}

@inproceedings{bertelsmeier_kontextunterstutzte_1977,
  author    = {Bertelsmeier, R. and Radig, Bernd},
  title     = {Kontextunterstützte Analyse von Szenen mit bewegten Objekten.},
  booktitle = {Digital Bildverarbeitung - Digital Image Processing, {GI/NTG} Fachtagung,
    München, 28.-30. März 1977},
  year      = {1977},
  editor    = {Nagel, Hans-Hellmut},
  pages     = {101--128},
  publisher = {Springer},
  isbn      = {3-540-08169-0},
}

@INPROCEEDINGS{bigontina_pose_OGRW_2014,

author = {Andreas Bigontina and Michael Herrmann and Martin Hoernig and Bernd
Radig},
title = {Human Body Part Classification in Monocular Soccer Images},
booktitle = {9-th Open German-Russian Workshop on Pattern Recognition and Image
Understanding},
year = {2014},
address = {Koblenz},
month = dec,
keywords = {Articulated Pose Estimation, Human Body Pose Estimation, Pixel-based
Classification, Random Forests, soccer},
owner = {herrmmic},
timestamp = {2014.12.04}

}

@INPROCEEDINGS{blas_fault-tolerant_2009,

author = {Blas, Morten Rufus and Rusu, Radu Bogdan and Blanke, Mogens and Beetz,
Michael},
title = {Fault-tolerant {3D} Mapping with Application to an Orchard Robot},
booktitle = {Proceedings of the 7th {IFAC} International Symposium on Fault Detection,
Supervision and Safety of Technical Processes ({SAFEPROCESS'09}),
Barcelona, Spain, June 30 - July 3},
year = {2009}

}

@INPROCEEDINGS{blodow_autonomous_2011,

author = {Blodow, Nico and Goron, Lucian Cosmin and Marton, Zoltan-Csaba and
Pangercic, Dejan and Rühr, Thomas and Tenorth, Moritz and Beetz,
Michael},
title = {Autonomous Semantic Mapping for Robots Performing Everyday Manipulation
Tasks in Kitchen Environments},
booktitle = {Proceedings of the {IEEE/RSJ} International Conference on Intelligent
Robots and Systems ({IROS})},
year = {2011},
address = {San Francisco, {CA}, {USA}},
month = sep

}

@inproceedings{blodow_perception_2010,
  author    = {Blodow, Nico and Jain, Dominik and Marton, Zoltan-Csaba and Beetz,
    Michael},
  title     = {Perception and Probabilistic Anchoring for Dynamic World State Logging},
  booktitle = {10th {IEEE-RAS} International Conference on Humanoid Robots},
  year      = {2010},
  pages     = {160--166},
  address   = {Nashville, {TN}, {USA}},
  month     = dec,
}

@INPROCEEDINGS{blodow_making_2010,

author = {Blodow, Nico and Marton, Zoltan-Csaba and Pangercic, Dejan and Beetz,
Michael},
title = {Making Sense of {3D} Data},
booktitle = {Robotics: Science and Systems Conference ({RSS}), Workshop on Strategies
and Evaluation for Mobile Manipulation in Household Environments},
year = {2010}

}

@INPROCEEDINGS{blodow_inferring_2011,

author = {Blodow, Nico and Marton, Zoltan-Csaba and Pangercic, Dejan and Rühr,
Thomas and Tenorth, Moritz and Beetz, Michael},
title = {Inferring Generalized Pick-and-Place Tasks from Pointing Gestures},
booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA}),
Workshop on Semantic Perception, Mapping and Exploration},
year = {2011},
month = may

}

@INPROCEEDINGS{blodow_partial_2009,

author = {Blodow, Nico and Rusu, Radu Bogdan and Marton, Zoltan Csaba and Beetz,
Michael},
title = {Partial View Modeling and Validation in {3D} Laser Scans for Grasping},
booktitle = {9th {IEEE-RAS} International Conference on Humanoid Robots (Humanoids)},
year = {2009},
address = {Paris, France},
month = dec

}

@TECHREPORT{brscic_multi_2010,

author = {Brščić, D. and Eggers, Martin and Rohrmüller, F. and Kourakos, O.
and Sosnowski, S. and Althoff, D. and Lawitzky, M. and Mörtl, A.
and Rambow, M. and Koropouli, V. and Hernández, J. R. Medina and
Zang, X. and Wang, W. and Wollherr, D. and Kühnlenz, K. and Mayer,
Christoph and Kruse, T. and Kirsch, A. and Blume, J. and Bannat,
A. and Rehrl, T. and Wallhoff, F. and Lorenz, T. and Basili, P. and
Lenz, C. and Röder, T. and Panin, G. and Maier, W. and Hirche, S.
and Buss, M. and Beetz, M. and Radig, Bernd and Schubö, A. and Glasauer,
S. and Knoll, A. and Steinbach, E.},
title = {Multi Joint Action in {CoTeSys} — Setup and Challenges},
institution = {{CoTeSys} Cluster of Excellence: Technische Universität München \&
Ludwig-Maximilians-Universität München},
year = {2010},
number = {{CoTeSys-TR-10-01}},
address = {Munich, Germany},
month = jun

}

@PHDTHESIS{buck_experience-based_2003,

author = {Buck, Sebastian},
title = {Experience-Based Control and Coordination of Autonomous Mobile Systems
in Dynamic Environments},
school = {Department of Informatics, Technische Universität München},
year = {2003},
url = {http://tumb1.biblio.tu-muenchen.de/publ/diss/in/2003/buck.html}

}

@INPROCEEDINGS{buck_m-rose:_2002,

author = {Buck, Sebastian and Beetz, Michael and Schmitt, Thorsten},
title = {M-{ROSE:} A Multi Robot Simulation Environment for Learning Cooperative
Behavior},
booktitle = {Distributed Autonomous Robotic Systems 5, Lecture Notes in Artificial
Intelligence},
year = {2002},
editor = {Asama, H. and Arai, T. and Fukuda, T. and Hasegawa, T.},
series = {{LNAI}},
publisher = {Springer-Verlag}

}

@INPROCEEDINGS{buck_reliable_2002,

author = {Buck, Sebastian and Beetz, Michael and Schmitt, Thorsten},
title = {Reliable Multi Robot Coordination Using Minimal Communication and
Neural Prediction},
booktitle = {Advances in Plan-based Control of Autonomous Robots. Selected Contributions
of the Dagstuhl Seminar {“Plan-based} Control of Robotic Agents”},
year = {2002},
editor = {Beetz, M. and Hertzberg, J. and Ghallab, M. and Pollack, M.},
series = {Lecture Notes in Artificial Intelligence},
publisher = {Springer}

}

@INPROCEEDINGS{buck_approximating_2002,

author = {Buck, Sebastian and Beetz, Michael and Schmitt, Thorsten},
title = {Approximating the Value Function for Continuous Space Reinforcement
Learning in Robot Control},
booktitle = {Proc. of the {IEEE} Intl. Conf. on Intelligent Robots and Systems},
year = {2002},
abstract = {Many robot learning tasks are very difficult to solve: their state
spaces are high dimensional, variables and command parameters are
continuously valued, and system states are only partly observable.
In this paper, we propose to learn a continuous space value function
for reinforcement learning using neural networks trained from data
of exploration runs. The learned function is guaranteed to be a lower
bound for, and reproduces the characteristic shape of, the accurate
value function. We apply our approach to two robot navigation tasks,
discuss how to deal with possible problems occurring in practice,
and assess its performance.}

}

@INPROCEEDINGS{buck_planning_2001,

author = {Buck, Sebastian and Beetz, Michael and Schmitt, Thorsten},
title = {Planning and Executing Joint Navigation Tasks in Autonomous Robot
Soccer},
booktitle = {5th International Workshop on {RoboCup} (Robot World Cup Soccer Games
and Conferences)},
year = {2001}

}

@INPROCEEDINGS{buck_agilo_2000,

author = {Buck, Sebastian and Hanek, Robert and Klupsch, Michael and Schmitt,
Thorsten},
title = {Agilo {RoboCuppers:} {RoboCup} Team Description},
booktitle = {The Fourth Robot World Cup Soccer Games and Conferences},
year = {2000},
series = {{RoboCup-2000} Melbourne},
abstract = {This paper describes the Agilo {RoboCuppers}, team of the image understanding
group ({FG} {BV)} at the Technische Universität München. With a team
of four Pioneer 1 robots, equipped with {CCD} camera and a single
board computer each and coordinated by a master {PC} outside the
field we participate in the Middle Size League of the fourth international
{RoboCup} Tournament in Melbourne 2000. We use a multi-agent based
approach to represent different robots and to encapsulate concurrent
tasks within the robots. A fast feature extraction based on the image
processing library {HALCON} provides the data necessary for the on-board
scene interpretation. All robot observations are fused to one single
consistent view. Decision making is done on this fused data.}

}

@INPROCEEDINGS{buck_learning_2000,

author = {Buck, Sebastian and Riedmiller, Martin},
title = {Learning Situation Dependent Success Rates Of Actions In A {RoboCup}
Scenario},
booktitle = {Pacific Rim International Conference on Artificial Intelligence},
year = {2000},
pages = {809},
abstract = {A quickly changing, not predictable environment complicates autonomous
decision making in a system of mobile robots. To simplify action
selection we suggest a suitable reduction of decision space by restricting
the number of executable actions the agent can choose from. We use
supervised neural learning to automatically learn success rates of
actions to facilitate decision making. To determine probabilities
of success each agent relies on its sensory data. We show that using
our approach it is possible to compute probabilities of success close
to the real success rates of actions and further we give a few results
of games of a {RoboCup} simulation team based on this approach.}

}

@INPROCEEDINGS{buck_machine_2002,

author = {Buck, Sebastian and Stulp, Freek and Beetz, Michael and Schmitt,
Thorsten},
title = {Machine Control Using Radial Basis Value Functions and Inverse State
Projection},
booktitle = {Proc. of the {IEEE} Intl. Conf. on Automation, Robotics, Control,
and Vision},
year = {2002},
abstract = {Typical real world machine control tasks have some characteristics
which makes them difficult to solve: Their state spaces are high-dimensional
and continuous, and it may be impossible to reach a satisfying target
state by exploration or human control. To overcome these problems,
in this paper, we propose (1) to use radial basis functions for value
function approximation in continuous space reinforcement learning
and (2) the use of learned inverse projection functions for state
space exploration. We apply our approach to path planning in dynamic
environments and to an aircraft autolanding simulation, and evaluate
its performance.}

}

@INPROCEEDINGS{buck_multi_2001,

author = {Buck, Sebastian and Weber, U. and Beetz, Michael and Schmitt, Thorsten},
title = {Multi Robot Path Planning for Dynamic Environments: A case study},
booktitle = {Proc. of the {IEEE} Intl. Conf. on Intelligent Robots and Systems},
year = {2001}

}

@ARTICLE{buss_cotesys_2010,

author = {Buss, Martin and Beetz, Michael},
title = {{CoTeSys} – Cognition for Technical Systems},
journal = {Künstliche Intelligenz},
year = {2010}

}

@ARTICLE{buss_cotesys_2007,

author = {Buss, Martin and Beetz, Michael and Wollherr, Dirk},
title = {{CoTeSys} — Cognition for Technical Systems},
journal = {International Journal of Assistive Robotics and Mechatronics},
year = {2007},
volume = {8},
pages = {25--36},
number = {4},
abstract = {The {CoTeSys} cluster of excellence investigates cognition for technical
systems such as vehicles, robots, and factories. Cognitive technical
systems ({CTS)} are information processing systems equipped with
artificial sensors and actuators, integrated and embedded into physical
systems, and acting in a physical world. They differ from other technical
systems as they perform cognitive control and have cognitive capabilities.
Cognitive control orchestrates reflexive and habitual behavior in
accord with longterm intentions. Cognitive capabilities such as perception,
reasoning, learning, and planning turn technical systems into systems
that “know what they are doing”. The cognitive capabilities will
result in systems of higher reliability, flexibility, adaptivity,
and better performance. They will be easier to interact and cooperate
with.}

}

@INPROCEEDINGS{buss_cotesys_2007-1,

author = {Buss, Martin and Beetz, Michael and Wollherr, Dirk},
title = {{CoTeSys} — Cognition for Technical Systems},
booktitle = {Proceedings of the 4th {COE} Workshop on Human Adaptive Mechatronics
({HAM)}},
year = {2007},
abstract = {The {CoTeSys} cluster of excellence investigates cognition for technical
systems such as vehicles, robots, and factories. Cognitive technical
systems ({CTS)} are information processing systems equipped with
artificial sensors and actuators, integrated and embedded into physical
systems, and acting in a physical world. They differ from other technical
systems as they perform cognitive control and have cognitive capabilities.
Cognitive control orchestrates reflexive and habitual behavior in
accord with longterm intentions. Cognitive capabilities such as perception,
reasoning, learning, and planning turn technical systems into systems
that “know what they are doing”. The cognitive capabilities will
result in systems of higher reliability, flexibility, adaptivity,
and better performance. They will be easier to interact and cooperate
with.}

}

@PHDTHESIS{durus_ball_tracking_2014,

author = {Durus, Murat},
title = {Ball Tracking and Action Recognition of Soccer Players in TV Broadcast
Videos},
school = {Technische Universität München},
year = {2014},
address = {München},
keywords = {soccer},
url = {http://nbn-resolving.de/urn/resolver.pl?urn:nbn:de:bvb:91-diss-20140414-1145077-0-1}

}

@PHDTHESIS{eggers_perspective-adjusting_2014,

author = {Eggers, Martin},
title = {Perspective-Adjusting Appearance Model for Distributed Multi-View
Person Tracking},
school = {Technische Universität München},
year = {2014},
owner = {herrmmic},
timestamp = {2015.04.16},
url = {http://nbn-resolving.de/urn/resolver.pl?urn:nbn:de:bvb:91-diss-20141104-1219467-0-9}

}

@ARTICLE{eggers_setup_2013,

author = {Eggers, Martin and Dikov, Veselin and Mayer, Christoph and Steger,
Carsten and Radig, Bernd},
title = {Setup and calibration of a distributed camera system for surveillance
of laboratory space},
journal = {Pattern Recognition and Image Analysis},
year = {2013},
volume = {23},
pages = {481--487},
number = {4},
month = oct,
doi = {10.1134/S1054661813040032},
issn = {1054-6618, 1555-6212},
language = {en},
url = {http://link.springer.com/10.1134/S1054661813040032},
urldate = {2014-05-15}

}

@INPROCEEDINGS{engstler_memoman_2009,

author = {Engstler, Florian and Bandouch, Jan and Bubb, Heiner},
title = {{MeMoMan} - Model Based Markerless Capturing of Human Motion},
booktitle = {The 17th World Congress on Ergonomics (International Ergonomics Association,
{IEA)}},
year = {2009},
address = {Beijing, China}

}

@INPROCEEDINGS{ertelt_integration_2009,

author = {Ertelt, Christoph and Rühr, Thomas and Pangercic, Dejan and Shea,
Kristina and Beetz, Michael},
title = {Integration of Perception, Global Planning and Local Planning in
the Manufacturing Domain},
booktitle = {Proceedings of Emerging Technologies and Factory Automation ({ETFA).}},
year = {2009}

}

@INPROCEEDINGS{fedrizzi_transformational_2009,

author = {Fedrizzi, Andreas and Moesenlechner, Lorenz and Stulp, Freek and
Beetz, Michael},
title = {Transformational Planning for Mobile Manipulation based on Action-related
Places},
booktitle = {Proceedings of the International Conference on Advanced Robotics
({ICAR).}},
year = {2009},
pages = {1--8}

}

@INPROCEEDINGS{fischer_experiences_2004,

author = {Fischer, Stefan and Döring, Sven and Wimmer, Matthias and Krummheuer,
Antonia},
title = {Experiences with an Emotional Sales Agent},
booktitle = {Affective Dialogue Systems},
year = {2004},
editor = {André, Elisabeth and Dybkj{\ae}r, Laila and Minker, Wolfgang
and Heisterkamp, Paul},
volume = {3068},
series = {Lecture Notes in Computer Science},
pages = {309--312},
address = {Kloster Irsee, Germany},
month = jun,
publisher = {Springer},
abstract = {With {COSIMAB2B} we demonstrate a prototype of a complex and visionary
e-procurement application. The embodied character agent named {COSIMA}
is able to respect a customer's preferences and deals with him or
her via natural speech. She expresses various emotions via mimic,
gesture, combined with speech output, and {COSIMA} is even able to
consider the customer's emotions via mimic recognition. As first
observations show, this is a very promising approach to improve the
bargaining with the customer or the recommendation of products.},
isbn = {3-540-22143-3}

}

@INPROCEEDINGS{friesdorf_mutually_2009,

author = {Friesdorf, Florian and Pangercic, Dejan and Bubb, Heiner and Beetz,
Michael},
title = {Mutually Augmented Cognition},
booktitle = {Proceedings of the International Conference on Social Robotics ({ICSR).}},
year = {2009}

}

@INPROCEEDINGS{gast_did_2009,

author = {Gast, Jürgen and Bannat, Alexander and Rehrl, Tobias and Mayer, Christoph
and Wallhoff, Frank and Rigoll, Gerhard and Radig, Bernd},
title = {Did I Get it Right: Head Gesture Analysis for Human-Machine Interaction},
booktitle = {Human-Computer Interaction. Novel Interaction Methods and Techniques},
year = {2009},
series = {Lecture Notes in Computer Science},
publisher = {Springer}

}

@PHDTHESIS{gedikli_continual_2009,

author = {Gedikli, Suat},
title = {Continual and Robust Estimation of Camera Parameters in Broadcasted
Sports Games},
school = {Technische Universität München},
year = {2009},
keywords = {soccer}

}

@INPROCEEDINGS{gedikli_adaptive_2007,

author = {Gedikli, Suat and Bandouch, Jan and von Hoyningen-Huene, Nico and
Kirchlechner, Bernhard and Beetz, Michael},
title = {An Adaptive Vision System for Tracking Soccer Players from Variable
Camera Settings},
booktitle = {Proceedings of the 5th International Conference on Computer Vision
Systems ({ICVS)}},
year = {2007},
abstract = {In this paper we present {ASpoGAMo}, a vision system capable of estimating
motion trajectories of soccer players taped on video. The system
performs well in a multitude of application scenarios because of
its adaptivity to various camera setups, such as single or multiple
camera settings, static or dynamic ones. Furthermore, {ASpoGAMo}
can directly process image streams taken from {TV} broadcast, and
extract all valuable information despite scene interruptions and
cuts between different cameras. The system achieves a high level
of robustness through the use of modelbased vision algorithms for
camera estimation and player recognition and a probabilistic multi-player
tracking framework capable of dealing with occlusion situations typical
in team-sports. The continuous interplay between these submodules
is adding to both the reliability and the efficiency of the overall
system.},
keywords = {soccer}

}

@INPROCEEDINGS{geipel_learning_2006,

author = {Geipel, Markus and Beetz, Michael},
title = {Learning to shoot goals, Analysing the Learning Process and the Resulting
Policies},
booktitle = {{RoboCup-2006:} Robot Soccer World Cup X},
year = {2006},
editor = {Lakemeyer, Gerhard and Sklar, Elizabeth and Sorrenti, Domenico G. and
Takahashi, Tomoichi},
publisher = {Springer Verlag, Berlin},
note = {to be published},
abstract = {Reinforcement learning is a very general unsupervised learning mechanism.
Due to its generality reinforcement learning does not scale very
well for tasks that involve inferring subtasks. In particular when
the subtasks are dynamically changing and the environment is adversarial.
One of the most challenging reinforcement learning tasks so far has
been the 3 to 2 keepaway task in the {RoboCup} simulation league.
In this paper we apply reinforcement learning to a even more challenging
task: attacking the opponents goal. The main contribution of this
paper is the empirical analysis of a portfolio of mechanisms for
scaling reinforcement learning towards learning attack policies in
simulated robot soccer.}

}

@INPROCEEDINGS{gonsior_improving_2011,

author = {Gonsior, Barbara and Sosnowski, Stefan and Mayer, Christoph and Blume,
Jürgen and Radig, Bernd and Wollherr, Dirk and Kühnlenz, Kolja},
title = {Improving Aspects of Empathy and Subjective Performance for {HRI}
through Mirroring Facial Expressions},
booktitle = {Proceedings of the 19th {IEEE} International Symposium on Robot and
Human Interactive Communication},
year = {2011},
keywords = {facial expressions}

}

@INPROCEEDINGS{goron_segmenting_2012,

author = {Goron, Lucian Cosmin and Marton, Zoltan Csaba and Lazea, Gheorghe
and Beetz, Michael},
title = {Segmenting Cylindrical and Box-like Objects in Cluttered {3D} Scenes},
booktitle = {7th German Conference on Robotics ({ROBOTIK)}},
year = {2012},
address = {Munich, Germany},
month = may

}

@INPROCEEDINGS{goron_automatic_2010,

author = {Goron, Lucian Cosmin and Marton, Zoltan Csaba and Lazea, Gheorghe
and Beetz, Michael},
title = {Automatic Layered {3D} Reconstruction of Simplified Object Models
for Grasping},
booktitle = {Joint 41st International Symposium on Robotics ({ISR)} and 6th German
Conference on Robotics ({ROBOTIK)}},
year = {2010},
address = {Munich, Germany}

}

@INPROCEEDINGS{gossow_distinctive_2012,

author = {Gossow, David and Weikersdorfer, David and Beetz, Michael},
title = {Distinctive Texture Features from Perspective-Invariant Keypoints},
booktitle = {21st International Conference on Pattern Recognition},
year = {2012},
note = {Accepted for publication.}

}

@BOOK{grotzinger_learning_2011,

title = {Learning Probabilistic Models of Robot Behaviour from Logged Execution
Traces},
year = {2011},
author = {Grötzinger, Simon},
note = {Published: Bachelor's Thesis, Department of Informatics, Technische
Universität München. Michael Beetz, supervisor, Dominik Jain, advisor}

}

@PHDTHESIS{hanek_fitting_2004,

author = {Hanek, Robert},
title = {Fitting Parametric Curve Models to Images Using Local Self-adapting
Separation Criteria},
school = {Department of Informatics, Technische Universität München},
year = {2004},
url = {http://tumb1.biblio.tu-muenchen.de/publ/diss/in/2004/hanek.html}

}

@INPROCEEDINGS{hanek_contracting_2001,

author = {Hanek, Robert},
title = {The Contracting Curve Density Algorithm and its Application to Model-based
Image Segmentation},
booktitle = {{IEEE} Conf. Computer Vision and Pattern Recognition},
year = {2001},
address = {Kauai, Hawaii, {USA}},
pages = {I:797--804},
url = {http://www9.in.tum.de/papers/2001/CVPR-2001-Hanek.abstract.html}

}

@ARTICLE{hanek_contracting_2004,

author = {Hanek, Robert and Beetz, Michael},
title = {The Contracting Curve Density Algorithm: Fitting Parametric Curve
Models to Images Using Local Self-adapting Separation Criteria},
journal = {International Journal of Computer Vision},
year = {2004},
volume = {59},
pages = {233--258},
number = {3},
abstract = {The task of fitting parametric curve models to the boundaries of perceptually
meaningful image regions is a key problem in computer vision with
numerous applications, such as image segmentation, pose estimation,
object tracking, and 3-D reconstruction. In this article, we propose
the Contracting Curve Density ({CCD)} algorithm as a solution to
the curve-fitting problem. The {CCD} algorithm extends the state-of-the-art
in two important ways. First, it applies a novel likelihood function
for the assessment of a fit between the curve model and the image
data. This likelihood function can cope with highly inhomogeneous
image regions, because it is formulated in terms of local image statistics.
The local image statistics are learned on the fly from the vicinity
of the expected curve. They provide therefore locally adapted criteria
for separating the adjacent image regions. These local criteria replace
often used predefined fixed criteria that rely on homogeneous image
regions or specific edge properties. The second contribution is the
use of blurred curve models as efficient means for iteratively optimizing
the posterior density over possible model parameters. These blurred
curve models enable the algorithm to trade-off two conflicting objectives,
namely heaving a large area of convergence and achieving high accuracy.
We apply the {CCD} algorithm to several challenging image segmentation
and 3-D pose estimation problems. Our experiments with {RGB} images
show that the {CCD} algorithm achieves a high level of robustness
and subpixel accuracy even in the presence of severe texture, shading,
clutter, partial occlusion, and strong changes of illumination.}

}

@INPROCEEDINGS{hanek_vision-based_2000,

author = {Hanek, Robert and Schmitt, Thorsten},
title = {Vision-Based Localization and Data Fusion in a System of Cooperating
Mobile Robots},
booktitle = {Proc. of the {IEEE} Intl. Conf. on Intelligent Robots and Systems},
year = {2000},
pages = {1199--1204},
publisher = {{IEEE/RSJ}},
abstract = {The approach presented in this paper allows a team of mobile robots
to estimate cooperatively their poses, i.e. positions and orientations,
and the poses of other observed objects from images. The images are
obtained by calibrated color cameras mounted on the robots. Model
knowledge of the robots' environment, the geometry of observed objects,
and the characteristics of the cameras are represented in curve functions
which describe the relation between model curves in the image and
the sought pose parameters. The pose parameters are estimated by
minimizing the distance between model curves and actual image curves.
Observations from possibly different view points obtained at different
times are fused by a method similar to the extended Kalman filter.
In contrast to the extended Kalman filter, which is based on a linear
approximation of the measurement equations, we use an iterative optimization
technique which takes non-linearities into account. The approach
has been successfully used in robot soccer, where it reliably maintained
a joint pose estimate for the players and the ball.}

}

@ARTICLE{hanek_towards_2003,

author = {Hanek, Robert and Schmitt, Thorsten and Buck, Sebastian and Beetz,
Michael},
title = {Towards {RoboCup} without color labeling},
journal = {{AI} Magazine},
year = {2003},
volume = {24},
pages = {37--40},
number = {2}

}

@INPROCEEDINGS{hanek_fast_2002,

author = {Hanek, Robert and Schmitt, Thorsten and Buck, Sebastian and Beetz,
Michael},
title = {Fast Image-based Object Localization in Natural Scenes},
booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems
({IROS)} 2002},
year = {2002},
address = {Lausanne, Switzerland},
pages = {116--122},
abstract = {In many robot applications, autonomous robots must be capable of localizing
the objects they are to manipulate. In this paper we address the
object localization problem by fitting a parametric curve model to
the object contour in the image. The initial prior of the object
pose is iteratively refined to the posterior distribution by optimizing
the separation of the object and the background. The local separation
criteria are based on local statistics which are iteratively computed
from the object and the background region. No prior knowledge on
color distributions is needed. Experiments show that the method is
capable of localizing objects in a cluttered and textured scene even
under strong variations of illumination. The method is able to localize
a soccer ball within frame rate.}

}

@INPROCEEDINGS{hanek_towards_2002,

author = {Hanek, Robert and Schmitt, Thorsten and Buck, Sebastian and Beetz,
Michael},
title = {Towards {RoboCup} without Color Labeling},
booktitle = {{RoboCup} International Symposium 2002},
year = {2002},
series = {Lecture Notes in Artificial Intelligence ({LNAI)}},
address = {Fukuoka, Japan},
publisher = {Springer Publishers}

}

@INPROCEEDINGS{hanek_multiple_2000,

author = {Hanek, Robert and Schmitt, Thorsten and Klupsch, Michael and Buck,
Sebastian},
title = {From Multiple Images to a Consistent View},
booktitle = {The Fourth Robot World Cup Soccer Games and Conferences, {RoboCup-2000}
Melbourne},
year = {2000},
pages = {288--296},
publisher = {Springer},
abstract = {The approach presented in this paper allows a team of mobile robots
to estimate cooperatively their poses, i.e. positions and orientations,
and the poses of other observed objects from images. The images are
obtained by calibrated color cameras mounted on the robots. Model
knowledge of the robots' environment, the geometry of observed objects,
and the characteristics of the cameras are represented in curve functions
which describe the relation between model curves in the image and
the sought pose parameters. The pose parameters are estimated by
minimizing the distance between model curves and actual image curves.
Observations from possibly different view points obtained at different
times are fused by a method similar to the extended Kalman filter.
In contrast to the extended Kalman filter, which is based on a linear
approximation of the measurement equations, we use an iterative optimization
technique which takes non-linearities into account. The approach
has been successfully used in robot soccer, where it reliably maintained
a joint pose estimate for the players and the ball.}

}

@PHDTHESIS{hansen_modellgetriebene_2002,

author = {Hansen, Christoph},
title = {Modellgetriebene Verfolgung formvariabler Objekte in Videobildfolgen},
school = {Department of Informatics, Technische Universität München},
year = {2002}

}

@INPROCEEDINGS{hausman_tracking-based_2013,

author = {Hausman, Karol and Balint-Benczedi, Ferenc and Pangercic, Dejan and
Marton, Zoltan-Csaba and Ueda, Ryohei and Okada, Kei and Beetz, Michael},
title = {Tracking-based Interactive Segmentation of Textureless Objects},
booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
year = {2013},
address = {Karlsruhe, Germany},
month = may,
note = {Best Service Robotics Paper Award Finalist}

}

@INPROCEEDINGS{hausman_segmentation_2012,

author = {Hausman, Karol and Bersch, Christian and Pangercic, Dejan and Osentoski,
Sarah and Marton, Zoltan-Csaba and Beetz, Michael},
title = {Segmentation of Cluttered Scenes through Interactive Perception},
booktitle = {{ICRA} 2012 Workshop on Semantic Perception and Mapping for Knowledge-enabled
Service Robotics},
year = {2012},
address = {St. Paul, {MN}, {USA}},
month = may

}

@TECHREPORT{heinz_using_2008,

author = {Heinz, Stefan and Sachenbacher, Martin},
title = {Using Model Counting to Find Optimal Distinguishing Tests},
institution = {Zuse Institute Berlin},
year = {2008},
number = {08-32}

}

@INPROCEEDINGS{heinz_using_2008-1,

author = {Heinz, Stefan and Sachenbacher, Martin},
title = {Using Model Counting to Find Optimal Distinguishing Tests},
booktitle = {Proc. First International Workshop on Counting Problems in {CSP}
and {SAT}, and other neighbouring problems (Counting'08)},
year = {2008},
note = {Accepted for publication}

}

@ARTICLE{herrmmic_tracking_2014,

author = {Michael Herrmann and Martin Hoernig and Bernd Radig},
title = {Online Multi-player Tracking in Monocular Soccer Videos},
journal = {AASRI Procedia},
year = {2014},
volume = {8},
pages = {30--37},
number = {0},
note = {2014 AASRI Conference on Sports Engineering and Computer Science
(SECS 2014) },
issn = {2212-6716},
keywords = {computer vision; soccer},
url = {http://www.sciencedirect.com/science/article/pii/S2212671614000730}

}

@ARTICLE{herrmann_automatic_2014,

author = {Herrmann, Michael and Mayer, Christoph and Radig, Bernd},
title = {Automatic Generation of Image Analysis Programs},
journal = {Pattern Recognition and Image Analysis},
year = {2014},
volume = {24},
pages = {400--408},
number = {3},
doi = {10.1134/S1054661814030079},
issn = {1054-6618},
keywords = {automatic programming; inductive programming; generate-and-search;
machine learning; computer vision; image analysis; object detection},
language = {English},
publisher = {Pleiades Publishing},
url = {http://dx.doi.org/10.1134/S1054661814030079}

}

@INPROCEEDINGS{herrmann_automatic_2013,

author = {Herrmann, Michael and Mayer, Christoph and Radig, Bernd},
title = {Automatic Generation of Image Analysis Programs},
booktitle = {11th International Conference on Pattern Recognition and Image Analysis
({PRIA-11-2013)}},
year = {2013},
volume = {1},
pages = {36--39},
address = {Samara},
month = sep,
publisher = {The Russian Academy of Sciences},
keywords = {automatic programming; inductive programming; generate-and-search;
machine learning; computer vision; image analysis; object detection}

}

@ARTICLE{OJWT-v1i2n01_Hoernig,

author = {Martin Hoernig and Andreas Bigontina and Bernd Radig},
title = {A Comparative Evaluation of Current HTML5 Web Video Implementations},
journal = {Open Journal of Web Technologies (OJWT)},
year = {2014},
volume = {1},
pages = {1--9},
number = {2},
bibsource = {RonPub UG (haftungsbeschr{\"a}nkt)},
issn = {2199-188X},
publisher = {RonPub UG (haftungsbeschr{\"a}nkt)},
url = {http://www.ronpub.com/publications/OJWT-v1i2n01_Hoernig.pdf}

}

@ARTICLE{hoernig_real-time_2014,

author = {Hoernig, Martin and Herrmann, Michael and Radig, Bernd},
title = {Real-Time Segmentation Methods for Monocular Soccer Videos},
journal = {Pattern Recognition and Image Analysis},
note = {To appear},
year = {2015},
keywords = {soccer}

}

@INPROCEEDINGS{hoernig_shot_detection_2014,

author = {Martin Hoernig and Michael Herrmann and Bernd Radig},
title = {Multi Temporal Distance Images for Shot Detection in Soccer Games},
booktitle = {EUSIPCO 2014 (22nd European Signal Processing Conference 2014) (EUSIPCO
2014)},
year = {2014},
address = {Lisbon, Portugal},
month = sep,
abstract = {We present a new approach for video shot detection and introduce multi
temporal distance images (MTDIs), formed by chi-square based similarity
measures that are calculated pairwise within a floating window of
video frames. By using MTDI-based boundary detectors, various cuts
and transitions in various shapes (dissolves, overlayed effects,
fades, and others) can be determined. The algorithm has been developed
within the special context of soccer game TV broadcasts, where a
particular interest in long view shots is intrinsic. With a correct
shot detection rate in camera 1 shots of 98.2\% within our representative
test data set, our system outperforms competing state-of-the-art
systems.},
days = {1},
keywords = {soccer video analysis; video indexing; multi temporal distance image
(MTDI); video segmentation; video shot boundary detection; soccer}

}

@INPROCEEDINGS{hoernig_real_2013,

author = {Hoernig, Martin and Herrmann, Michael and Radig, Bernd},
title = {Real Time Soccer Field Analysis from Monocular {TV} Video Data},
booktitle = {11th International Conference on Pattern Recognition and Image Analysis
({PRIA-11-2013)}},
year = {2013},
volume = {2},
pages = {567--570},
address = {Samara},
month = sep,
publisher = {The Russian Academy of Sciences},
keywords = {soccer}

}

@PHDTHESIS{von_hoyningen-huene_real-time_2011,

author = {von Hoyningen-Huene, Nicolai},
title = {Real-time Tracking of Player Identities in Team Sports},
school = {Technische Universität München},
year = {2011},
keywords = {soccer}

}

@INCOLLECTION{hoyningen-huene_importance_2010,

author = {von Hoyningen-Huene, Nicolai and Beetz, Michael},
title = {Importance Sampling as One Solution to the Data Association Problem
in Multi-target Tracking},
booktitle = {{VISIGRAPP} 2009},
publisher = {Springer-Verlag Berlin Heidelberg},
year = {2010},
editor = {Ranchordas, {AlpeshKumar} and Araujo, Helder},
number = {68},
series = {Communications in Computer and Information Science ({CCIS)}},
pages = {309--325},
abstract = {Tracking multiple targets with similar appearance is a common task
in many computer vision applications as surveillance or sports analysis.
We propose a Rao-Blackwellized Resampling Particle Filter ({RBRPF)}
as a real-time multi-target tracking method that solves the data
association problem by a Monte Carlo approach. Each particle containing
the whole target configuration is predicted by using a process model
and resampled by sampling associations and fusing of the predicted
state with the assigned measurement(s) instead of the common dispersion.
As each target state is modeled as a Gaussian, Rao-Blackwellization
can be used to solve some of these steps analytically. The sampling
of associations splits the multi-target tracking problem in multiple
single target tracking problems, which can be handled by Kalman filters
in an optimal way. The method is independent of the order of measurements
which is mostly predetermined by the measuring process in contrast
to other state-of-the-art approaches. Smart resampling and memoization
is introduced to equip the tracking method with real-time capabilities
in the first place exploiting the discreteness of the associations.
The probabilistic framework allows for consideration of appearance
models and the fusion of different sensors. A way to constrain the
multiplicity of measurements associated with a single target is proposed
and – along with the ability to cope with a high number of targets
in clutter – evaluated in a simulation experiment. We demonstrate
the applicability of the proposed method to real world applications
by tracking soccer players captured by multiple cameras through occlusions
in real-time.}

}

@INPROCEEDINGS{hoyningen-huene_rao-blackwellized_2009,

author = {von Hoyningen-Huene, Nicolai and Beetz, Michael},
title = {Rao-Blackwellized Resampling Particle Filter for Real-Time Player
Tracking in Sports},
booktitle = {Fourth International Conference on Computer Vision Theory and Applications
({VISAPP)}},
year = {2009},
editor = {Ranchordas, {AlpeshKumar} and Araujo, Helder},
volume = {1},
pages = {464--470},
address = {Lisboa, Portugal},
month = feb,
publisher = {{INSTICC} press},
abstract = {Tracking multiple targets with similar appearance is a common task
in computer vision applications, especially in sports games. We propose
a Rao-Blackwellized Resampling Particle Filter ({RBRPF)} as an implementable
real-time continuation of a state-of-the-art multi-target tracking
method. Target configurations are tracked by sampling associations
and solving single-target tracking problems by Kalman filters. As
an advantage of the new method the independence assumption between
data associations is relaxed to increase the robustness in the sports
domain. Smart resampling and memoization is introduced to equip the
tracking method with real-time capabilities in the first place. The
probabilistic framework allows for consideration of appearance models
and the fusion of different sensors. We demonstrate its applicability
to real world applications by tracking soccer players captured by
multiple cameras through occlusions in real-time.},
keywords = {soccer}

}

@INPROCEEDINGS{hoyningen-huene_robust_2009,

author = {von Hoyningen-Huene, Nicolai and Beetz, Michael},
title = {Robust real-time multiple target tracking},
booktitle = {Ninth Asian Conference on Computer Vision ({ACCV)}},
year = {2009},
address = {Xi'an, China},
month = sep,
abstract = {We propose a novel efficient algorithm for robust tracking of a fixed
number of targets in real-time with low failure rate. The method
is an instance of Sequential Importance Resampling filters approximating
the posterior of complete target configurations as a mixture of Gaussians.
Using predicted target positions by Kalman filters, data associations
are sampled for each measurement sweep according to their likelihood
allowing to constrain the number of associations per target. Updated
target configurations are weighted for resampling pursuant to their
explanatory power for former positions and measurements. Fixed-lag
of the resulting positions increases the tracking quality while smart
resampling and memoization decrease the computational demand. A negative
information handling exploits missing measurements for a target outside
the monitored area. We present both, qualitative and quantitative
experimental results on two demanding real-world applications with
occluded and highly confusable targets, demonstrating the robustness
and real-time performance of our approach outperforming current state-of-the-art
{MCMC} methods.}

}

@INPROCEEDINGS{hoyningen-huene_gram:_2007,

author = {von Hoyningen-Huene, Nicolai and Kirchlechner, Bernhard and Beetz, Michael},
title = {{GrAM:} Reasoning with Grounded Action Models by Combining Knowledge
Representation and Data Mining},
booktitle = {Towards Affordance-based Robot Control},
year = {2007},
abstract = {This paper proposes {GrAM} (Grounded Action Models), a novel integration
of actions and action models into the knowledge representation and
inference mechanisms of agents. In {GrAM} action models accord to
agent behavior and can be specified explicitly and implicitly. The
explicit representation is an action class specific set of Markov
logic rules that predict action properties. Stated implicitly an
action model defines a data mining problem that, when executed, computes
the model's explicit representation. When inferred from an implicit
representation the prediction rules predict typical behavior and
are learned from a set of training examples, or, in other words,
grounded in the respective experience of the agents. Therefore, {GrAM}
allows for the functional and thus adaptive specification of concepts
such as the class of situations in which a special action is typically
executed successfully or the concept of agents that tend to execute
certain kinds of actions. {GrAM} represents actions and their models
using an upgrading of the representation language {OWL} and equips
the Java Theorem Prover ({JTP)}, a hybrid reasoner for {OWL}, with
additional mechanisms that allow for the automatic acquisition of
action models and solving a variety of inference tasks for actions,
action models and functional descriptions.}

}

@INPROCEEDINGS{hughes_action_2013,

author = {Hughes, Charmayne Mary Lee and Tenorth, Moritz and Bienkiewicz, Marta
and Hermsdörfer, Joachim},
title = {Action sequencing and error production in stroke patients with apraxia
– Behavioral modeling using Bayesian Logic Networks},
booktitle = {6th International Conference on Health Informatics ({HEALTHINF} 2013)},
year = {2013},
address = {Barcelona, Spain},
month = feb,
note = {Accepted for publication.}

}

@INPROCEEDINGS{hammerle_sensor-based_2005,

author = {Hämmerle, Simone and Wimmer, Matthias and Radig, Bernd and Beetz,
Michael},
title = {Sensor-based Situated, Individualized, and Personalized Interaction
in Smart Environments},
booktitle = {{INFORMATIK} 2005 - Informatik {LIVE!} Band 1, Beiträge der 35. Jahrestagung
der Gesellschaft für Informatik ({GI)}},
year = {2005},
editor = {Cremers, Armin B. and Manthey, Rainer and Martini, Peter and Steinhage,
Volker},
volume = {67},
series = {{LNI}},
pages = {261--265},
address = {Bonn, Germany},
month = sep,
publisher = {{GI}},
abstract = {Smart environments are sensor equipped areas that know about their
environment thus being able to adapt to the user. We present {sHOME},
a multiagent based platform for integrating situated, individualized,
and personalized information. {sHOME} acquires sensor data to determine
the user's identity, his location, his gesture, and natural language
commands and stores it in a central knowledge base.},
isbn = {3-88579-396-2}

}

@BOOK{intelligent_autonomous_systems_group_tum-ros_2012,

title = {{TUM-ROS} code repository},
year = {2012},
author = {{Intelligent Autonomous Systems Group, Technische Universität München}},
url = {http://www.ros.org/wiki/tum-ros-pkg}

}

@INPROCEEDINGS{isik_coordination_2006,

author = {Isik, Michael and Stulp, Freek and Mayer, Gerd and Utz, Hans},
title = {Coordination without Negotiation in Teams of Heterogeneous Robots},
booktitle = {Proceedings of the {RoboCup} Symposium},
year = {2006},
pages = {355--362},
address = {Bremen, Germany}

}

@PHDTHESIS{jain_probabilistic_2012,

author = {Jain, Dominik},
title = {Probabilistic Cognition for Technical Systems: Statistical Relational
Models for High-Level Knowledge Representation, Learning and Reasoning},
school = {Technische Universität München},
year = {2012},
url = {http://mediatum.ub.tum.de/node?id=1096684&change_language=en}

}

@INPROCEEDINGS{jain_knowledge_2011,

author = {Jain, Dominik},
title = {Knowledge Engineering with Markov Logic Networks: A Review},
booktitle = {{DKB} 2011: Proceedings of the Third Workshop on Dynamics of Knowledge
and Belief},
year = {2011}

}

@INPROCEEDINGS{jain_adaptive_2010,

author = {Jain, Dominik and Barthels, Andreas and Beetz, Michael},
title = {Adaptive Markov Logic Networks: Learning Statistical Relational Models
with Dynamic Parameters},
booktitle = {19th European Conference on Artificial Intelligence ({ECAI)}},
year = {2010},
pages = {937--942}

}

@INPROCEEDINGS{jain_soft_2010,

author = {Jain, Dominik and Beetz, Michael},
title = {Soft Evidential Update via Markov Chain Monte Carlo Inference},
booktitle = {{KI} 2010: Advances in Artificial Intelligence, 33rd Annual German
Conference on {AI}},
year = {2010},
volume = {6359},
series = {Lecture Notes in Computer Science},
pages = {280--290},
address = {Karlsruhe, Germany},
publisher = {Springer},
isbn = {978-3-642-16110-0}

}

@INPROCEEDINGS{jain_bayesian_2011,

author = {Jain, Dominik and Gleissenthall, Klaus von and Beetz, Michael},
title = {Bayesian Logic Networks and the Search for Samples with Backward
Simulation and Abstract Constraint Learning},
booktitle = {{KI} 2011: Advances in Artificial Intelligence, 34th Annual German
Conference on {AI}},
year = {2011},
volume = {7006},
series = {Lecture Notes in Computer Science},
pages = {144--156},
address = {Berlin, Germany},
month = oct,
publisher = {Springer},
isbn = {978-3-642-24454-4}

}

@INPROCEEDINGS{jain_extending_2007,

author = {Jain, Dominik and Kirchlechner, Bernhard and Beetz, Michael},
title = {Extending Markov Logic to Model Probability Distributions in Relational
Domains},
booktitle = {{KI} 2007: Advances in Artificial Intelligence, 30th Annual German
Conference on {AI}},
year = {2007},
volume = {4667},
series = {Lecture Notes in Computer Science},
pages = {129--143},
publisher = {Springer},
isbn = {978-3-540-74564-8}

}

@INPROCEEDINGS{jain_markov_2009,

author = {Jain, Dominik and Maier, Paul and Wylezich, Gregor},
title = {Markov Logic as a Modelling Language for Weighted Constraint Satisfaction
Problems},
booktitle = {Eighth International Workshop on Constraint Modelling and Reformulation,
in conjunction with {CP2009}},
year = {2009},
abstract = {Many real-world problems, for example resource allocation, can be
formalized as soft constraint optimization problems. A fundamental
issue is the compact and precise declaration of such problems. We
propose Markov logic networks ({MLNs)}, a representation formalism
well-known from statistical relational learning, as a simple yet
highly expressive modelling framework, for {MLNs} enable the representation
of general principles that abstract away from concrete entities in
order to achieve a separation between the model and the data to which
it is applied. {MLNs} provide the full power of first-order logic
and combine it with probabilistic semantics, thus allowing a flexible
representation of soft constraints. We introduce an automatic conversion
of maximum a posteriori ({MAP)} inference problems in {MLNs} to weighted
constraint satisfaction problems to leverage a large body of available
solving methods, and we make our software suite available to the
public. We demonstrate the soundness of our approach on a real-world
room allocation problem, providing experimental results.}

}

@INPROCEEDINGS{jain_equipping_2009,

author = {Jain, Dominik and Mösenlechner, Lorenz and Beetz, Michael},
title = {Equipping Robot Control Programs with First-Order Probabilistic Reasoning
Capabilities},
booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
year = {2009},
pages = {3626--3631}

}

@INPROCEEDINGS{jain_equipping_2008,

author = {Jain, Dominik and Mösenlechner, Lorenz and Beetz, Michael},
title = {Equipping Robot Control Programs with First-Order Probabilistic Reasoning
Capabilities},
booktitle = {Proceedings of the 1st International Workshop on Cognition for Technical
Systems},
year = {2008},
address = {München, Germany},
month = oct

}

@TECHREPORT{jain_bayesian_2009,

author = {Jain, Dominik and Waldherr, Stefan and Beetz, Michael},
title = {Bayesian Logic Networks},
institution = {{IAS} Group, Fakultät für Informatik, Technische Universität München},
year = {2009}

}

@INPROCEEDINGS{kammerl_real-time_2012,

author = {Kammerl, Julius and Blodow, Nico and Rusu, Radu Bogdan and Gedikli,
Suat and Beetz, Michael and Steinbach, Eckehard},
title = {Real-time Compression of Point Cloud Streams},
booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
year = {2012},
address = {Minnesota, {USA}},
month = may

}

@INPROCEEDINGS{kanezaki_voxelized_2011,

author = {Kanezaki, Asako and Marton, Zoltan-Csaba and Pangercic, Dejan and
Harada, Tatsuya and Kuniyoshi, Yasuo and Beetz, Michael},
title = {Voxelized Shape and Color Histograms for {RGB-D}},
booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems
({IROS)}, Workshop on Active Semantic Perception and Object Search
in the Real World},
year = {2011},
address = {San Francisco, {CA}, {USA}},
month = sep

}

@INPROCEEDINGS{kidson_elaborative_2012,

author = {Kidson, Ross and Stanimirovic, Darko and Pangercic, Dejan and Beetz,
Michael},
title = {Elaborative Evaluation of {RGB-D} based Point Cloud Registration
for Personal Robots},
booktitle = {{ICRA} 2012 Workshop on Semantic Perception and Mapping for Knowledge-enabled
Service Robotics},
year = {2012},
address = {St. Paul, {MN}, {USA}},
month = may

}

@TECHREPORT{kirsch_be_2010,

author = {Kirsch, Alexandra},
title = {Be a Robot — A Study on Everyday Activities Performed in Real and
Virtual Worlds},
institution = {Technische Universität München},
year = {2010},
number = {{TUM-I1006}},
abstract = {This report presents a user study, in which we compare the behaviour
for setting and clearing the table in reality and in a simulated,
computer-game-like environment. The aim was to examine the potential
of using a computer-game-like simulation for user studies on cognition,
in particular for robot-centred studies on human-robot interaction,
but also other areas such as studies about context-specific and context-independent
behaviour. A simulation allows the creation of a large number of
environments at low cost and enables comparisons of behaviour in
reality and simulation. In the present pilot study we have considered
three points of interest: 1) the differences in user skills with
the used simulation, 2) comparison of human behaviour in simulation
and reality performing everyday activities, and 3) comparison of
behaviour in different simulated environments.}

}

@ARTICLE{kirsch_robot_2009,

author = {Kirsch, Alexandra},
title = {Robot Learning Language – Integrating Programming and Learning for
Cognitive Systems},
journal = {Robotics and Autonomous Systems Journal},
year = {2009},
volume = {57},
pages = {943--954},
number = {9},
url = {http://dx.doi.org/10.1016/j.robot.2009.05.001}

}

@PHDTHESIS{kirsch_integration_2008,

author = {Kirsch, Alexandra},
title = {Integration of Programming and Learning in a Control Language for
Autonomous Robots Performing Everyday Activities},
school = {Technische Universität München},
year = {2008},
abstract = {Robots performing complex tasks in changing, everyday environments
and required to improve with experience must continually monitor
the way they execute their routines and revise them if necessary.
Existing approaches, which use either monolithic or isolated, nonrecurring
learning processes, cannot sufficiently focus their learning processes
to satisfy these requirements. To meet this challenge we propose
to make learning an integral part of the control program by providing
a control language that includes constructs for specifying and executing
learning problems. Our Robot Learning Language ({RoLL)} makes learning
tasks executable within the control program. It allows for the specification
of complete learning processes including the acquisition of experience,
the execution of learning algorithms and the integration of learning
results into the program. {RoLL} is built upon the concept of experience,
which is a learning task specific symbolic summary of a problem solving
episode. This means that experiences do not only record the observed
data, but also include the robot's intentions and the perceived execution
context. The experience acquisition in {RoLL} is designed in a way
that experiences can be defined outside the primary control program,
using hybrid automata as a tool for declaratively specifying experience
and anchoring it to the program. The rich experience concept enables
convenient abstraction and an economic use of experiences. {RoLL's}
design allows the inclusion of arbitrary experience-based learning
algorithms. Upon the completion of the learning process {RoLL} automatically
integrates the learned function into the control program without
interrupting program execution. {RoLL} enables the plug-and-play
addition of new learning problems and keeps the control program modular
and transparent. {RoLL's} control structures make learning an integral
part of the control program and can serve as a powerful implementational
platform for comprehensive learning approaches such as developmental,
life-long and imitation learning.},
url = {http://mediatum2.ub.tum.de/node?id=625553}

}

@INPROCEEDINGS{kirsch_towards_2005,

author = {Kirsch, Alexandra},
title = {Towards High-performance Robot Plans with Grounded Action Models:
Integrating Learning Mechanisms into Robot Control Languages},
booktitle = {{ICAPS} Doctoral Consortium},
year = {2005},
abstract = {For planning in the domain of autonomous robots, abstraction of state
and actions is indispensable. This abstraction however comes at the
cost of suboptimal execution, as relevant information is ignored.
A solution is to maintain abstractions for planning, but to fill
in precise information on the level of execution. To do so, the control
program needs models of its own behavior, which could be learned
by the robot automatically. In my dissertation I develop a robot
control and plan language, which provides mechanisms for the representation
of state variables, goals and actions, and integrates learning into
the language.}

}

@INPROCEEDINGS{kirsch_training_2007,

author = {Kirsch, Alexandra and Beetz, Michael},
title = {Training on the Job — Collecting Experience with Hierarchical Hybrid
Automata},
booktitle = {Proceedings of the 30th German Conference on Artificial Intelligence
({KI-2007)}},
year = {2007},
editor = {Hertzberg, J. and Beetz, M. and Englert, R.},
pages = {473--476},
abstract = {We propose a novel approach to experience collection for autonomous
service robots performing complex activities. This approach enables
robots to collect data for many learning problems at a time, abstract
it and transform it into information specific to the learning tasks
and thereby speeding up the learning process. The approach is based
on the concept of hierarchical hybrid automata, which are used as
transparent and expressive representational mechanisms that allow
for the specification of these experience related capabilities independent
of the program itself. The suitability of the approach is demonstrated
through experiments in which a robot doing household chore performs
experience-based learning.}

}

@INPROCEEDINGS{kirsch_combining_2005,

author = {Kirsch, Alexandra and Beetz, Michael},
title = {Combining Learning and Programming for High-Performance Robot Controllers},
booktitle = {Tagungsband Autonome Mobile Systeme 2005},
year = {2005},
series = {Reihe Informatik aktuell},
publisher = {Springer Verlag},
abstract = {The implementation of high-performance robot controllers for complex
control tasks such as playing autonomous robot soccer is tedious,
error-prone, and a never ending programming task. In this paper we
propose programmers to write autonomous controllers that optimize
and automatically adapt themselves to changing circumstances of task
execution using explicit perception, dynamics and action models.
To this end we develop {ROLL} (Robot Learning Language), a control
language allowing for model-based robot programming. {ROLL} provides
language constructs for specifying executable code pieces of how
to learn and update these models. We are currently using {ROLL's}
mechanisms for implementing a rational reconstruction of our soccer
robot controllers.}

}

@INPROCEEDINGS{kirsch_testbed_2010,

author = {Kirsch, Alexandra and Chen, Yuxiang},
title = {A Testbed for Adaptive Human-Robot Collaboration},
booktitle = {33rd Annual German Conference on Artificial Intelligence ({KI} 2010)},
year = {2010},
abstract = {This paper presents a novel method for developing and evaluating intelligent
robot behavior for joint human-robot activities. We extended a physical
simulation of an autonomous robot to interact with a second, human-controlled
agent as in a computer game. We have conducted a user study to demonstrate
the viability of the approach for adaptive human-aware planning for
collaborative everyday activities. The paper presents the details
of our simulation and its control for human subjects as well as results
of the user study.}

}

@INPROCEEDINGS{kirsch_learning_2010,

author = {Kirsch, Alexandra and Cheng, Fan},
title = {Learning Ability Models for Human-Robot Collaboration},
booktitle = {Robotics: Science and Systems ({RSS)} — Workshop on Learning for
Human-Robot Interaction Modeling},
year = {2010},
abstract = {Our vision is a pro-active robot that assists elderly or disabled
people in everyday activities. Such a robot needs knowledge in the
form of prediction models about a person's abilities, preferences
and expectations in order to decide on the best way to assist. We
are interested in learning such models from observation. We report
on a first approach to learn ability models for manipulation tasks
and identify some general challenges for the acquisition of human
models.}

}

@INPROCEEDINGS{kirsch_integrated_2009,

author = {Kirsch, Alexandra and Kruse, Thibault and Mösenlechner, Lorenz},
title = {An Integrated Planning and Learning Framework for Human-Robot Interaction},
booktitle = {4th Workshop on Planning and Plan Execution for Real-World Systems
(held in conjuction with {ICAPS} 09)},
year = {2009}

}

@ARTICLE{kirsch_plan-based_2010,

author = {Kirsch, Alexandra and Kruse, Thibault and Sisbot, E. Akin and Alami,
Rachid and Lawitzky, Martin and Brščić, Dražen and Hirche, Sandra
and Basili, Patrizia and Glasauer, Stefan},
title = {Plan-based Control of Joint Human-Robot Activities},
journal = {Künstliche Intelligenz},
year = {2010},
volume = {24},
pages = {223--231},
number = {3},
abstract = {Cognition in technical systems is especially relevant for the interaction
with humans. We present a newly emerging application for autonomous
robots: companion robots that are not merely machines performing
tasks for humans, but assistants that achieve joint goals with humans.
This collaborative aspect entails specific challenges for {AI} and
robotics. In this article, we describe several planning and action-related
problems for human-robot collaboration and point out the challenges
to implement cognitive robot assistants.}

}

@INPROCEEDINGS{kirsch_making_2005,

author = {Kirsch, Alexandra and Schweitzer, Michael and Beetz, Michael},
title = {Making Robot Learning Controllable: A Case Study in Robot Navigation},
booktitle = {Proceedings of the {ICAPS} Workshop on Plan Execution: A Reality
Check},
year = {2005},
abstract = {In many applications the performance of learned robot controllers
drags behind those of the respective hand-coded ones. In our view,
this situation is caused not mainly by deficiencies of the learning
algorithms but rather by an insufficient embedding of learning in
robot control programs. This paper presents a case study in which
{RoLL}, a robot control language that allows for explicit representations
of learning problems, is applied to learning robot navigation tasks.
The case study shows that {RoLL's} constructs for specifying learning
problems (1) make aspects of autonomous robot learning explicit and
controllable; (2) have an enormous impact on the performance of the
learned controllers and therefore encourage the engineering of high
performance learners; (3) make the learning processes repeatable
and allow for writing bootstrapping robot controllers. Taken together
the approach constitutes an important step towards engineering controllers
of autonomous learning robots.}

}

@PHDTHESIS{klank_everyday_2012,

author = {Klank, Ulrich},
title = {Everyday Perception for Mobile Manipulation in Human Environments},
school = {Technische Universität München},
year = {2012},
url = {http://nbn-resolving.de/urn:nbn:de:bvb:91-diss-20120412-1080039-1-7}

}

@INPROCEEDINGS{klank_transparent_2011,

author = {Klank, Ulrich and Carton, Daniel and Beetz, Michael},
title = {Transparent Object Detection and Reconstruction on a Mobile Platform},
booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
year = {2011},
address = {Shanghai, China},
month = may

}

@INPROCEEDINGS{klank_robots_2012,

author = {Klank, Ulrich and Mösenlechner, Lorenz and Maldonado, Alexis and
Beetz, Michael},
title = {Robots that Validate Learned Perceptual Models},
booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
year = {2012},
address = {St. Paul, {MN}, {USA}},
month = may

}

@ARTICLE{klank_automatic_2008,

author = {Klank, Ulrich and Padoy, N. and Feussner, H. and Navab, N.},
title = {Automatic feature generation in endoscopic images},
journal = {International Journal of Computer Assisted Radiology and Surgery},
year = {2008},
volume = {3},
pages = {331--339},
number = {3}

}

@INPROCEEDINGS{klank_real-time_2009,

author = {Klank, Ulrich and Pangercic, Dejan and Rusu, Radu Bogdan and Beetz,
Michael},
title = {Real-time {CAD} Model Matching for Mobile Manipulation and Grasping},
booktitle = {9th {IEEE-RAS} International Conference on Humanoid Robots},
year = {2009},
pages = {290--296},
address = {Paris, France},
month = dec

}

@INPROCEEDINGS{klank_3d_2009,

author = {Klank, Ulrich and Zia, Muhammad Zeeshan and Beetz, Michael},
title = {{3D} Model Selection from an Internet Database for Robotic Vision},
booktitle = {International Conference on Robotics and Automation ({ICRA)}},
year = {2009},
pages = {2406--2411},
abstract = {We propose a new method for automatically accessing an internet database
of {3D} models that are searchable only by their user-annotated labels,
for using them for vision and robotic manipulation purposes. Instead
of having only a local database containing already seen objects,
we want to use shared databases available over the internet. This
approach while having the potential to dramatically increase the
visual recognition capability of robots, also poses certain problems,
like wrong annotation due to the open nature of the database, or
overwhelming amounts of data (many {3D} models) or the lack of relevant
data (no models matching a specified label). To solve those problems
we propose the following: First, we present an outlier/inlier classification
method for reducing the number of results and discarding invalid
{3D} models that do not match our query. Second, we utilize an approach
from computer graphics, the so called 'morphing', to this application
to specialize the models, in order to describe more objects. Third,
we search for {3D} models using a restricted search space, as obtained
from our knowledge of the environment. We show our classification
and matching results and finally show how we can recover the correct
scaling with the stereo setup of our robot.}

}

@INPROCEEDINGS{klapfer_pouring_2012,

author = {Klapfer, Reinhard and Kunze, Lars and Beetz, Michael},
title = {Pouring and Mixing Liquids — Understanding the Physical Effects of
Everyday Robot Manipulation Actions},
booktitle = {35th German Conference on Artificial Intelligence ({KI-2012)}, Workshop
on Human Reasoning and Automated Deduction},
year = {2012},
address = {Saarbrücken, Germany},
month = sep

}

@INPROCEEDINGS{klupsch_object-oriented_1998,

author = {Klupsch, Michael},
title = {Object-Oriented Representation of Time-Varying Data Sequences in
Multiagent Systems},
booktitle = {World Multiconference on Systemics, Cybernetics and Informatics ({SCI}
'98) - 4th International Conference on Information Systems, Analysis
and Synthesis ({ISAS} '98)},
year = {1998},
editor = {Callaos, Nagib C.},
volume = {2},
pages = {33--40},
address = {Orlando, {FL}},
month = jul,
publisher = {International Institute of Informatics and Systemics ({IIIS)}}

}

@PHDTHESIS{klupsch_objektorientierte_2000,

author = {Klupsch, Michael},
title = {Objektorientierte Daten- und Zeitmodelle für die Echtzeit-Bildfolgenauswertung},
school = {Fakultät für Informatik, Technische Universität München},
year = {2000},
abstract = {This work describes new concepts for the object-oriented modeling
and representation of time-varying image and sensor data sequences
as well as the functions which process these data sequences. Different
frameworks for developing sensor data modules from function and data
objects are presented. These allow to develop, configure and control
these modules easily and to integrate them into complex real-time
program systems transparently as logical sensors. The aim of this
work is to provide a software system which supports the design and
implementation process of efficient and scalable program components
and applications for real-time processing of image sequences and
distributed sensor data analysis on standard computer systems. One
of the fundamentals of this work is a consistent, explicit modeling
of time. This concerns the sensor based data capturing and modeling
of the outer process as well as the description of the data processing
system itself. The first aspect allows to relate the data to the
course of events in the real world and to model the dynamic aspects
of the scene, the latter provides mechanisms for analysing the performance
of the data processing methods. Data sequences are modelled as autonomous
objects ({'Sequence')} collecting the individual measurements of
a specific scene state like images or other sensor data, and the
features derived from these. In addition, they represent general
properties and methods, which are common for all kinds of data sequences,
such as data initialization, access to current and old values, access
to their temporal properties, and methods for updating the data sequence
or interpolating values. Sensors and operators are modelled as {'Functor'}
objects, which on an abstract level provide the functionality for
continuously capturing, transforming, or analysing the dynamic data
sequences. They encapsulate concrete sensor integrations and operator
sequences including their static parameters. In addition, they represent
general, application independent operator properties, e.g., connections
to the input and output data sequences, attributes and methods for
analysing the time consumption, or a general interface for the cyclic
operator execution. With the help of these Sequence and Functor objects
the data flow representation of a sensor data module is easy to implement
without the need for an explicit program control specificaton. Instead,
the program components are locally executed by new input data or
by access to the output data. That behavior can be modified according
to topical requirements. It can be controlled by software agents.
So, it is easy to adapt the program control and the level of concurrency.
The presented concepts were prototyped as C++ class library, which
provides a framework for the representation of data Sequences, Functors,
software agents, and temporal expressions. Based on this library
an extensive distributed robotic application - a team of soccer playing
robots - was developed and successfully employed and tested at different
international {RoboCup} competitions.}

}

@INPROCEEDINGS{klupsch_agilo_1998,

author = {Klupsch, Michael and Lückenhaus, Maximilian and Zierl, Christoph
and Laptev, Ivan and Bandlow, Thorsten and Grimme, Marc and Kellerer,
Ignaz and Schwarzer, Fabian},
title = {Agilo {RoboCuppers:} {RoboCup} Team Description},
booktitle = {Proceedings of the Second {RoboCup} Workshop, {RoboCup-98}},
year = {1998},
editor = {Asada, Minoru},
pages = {431--438},
address = {Paris},
month = jul,
abstract = {This paper describes the Agilo {RoboCuppers} - the {RoboCup} team
of the image understanding group ({FG} {BV)} at the Technische Universität
München. With a team of five Pioneer 1 robots, equipped with a {CCD}
camera and single board computer each and coordinated by a master
{PC} outside the field we participated in the medium size {RoboCup}
league in Paris 1998. We use a multi-agent based approach to represent
different robots and to encapsulate concurrent tasks within the robots.
A fast feature extraction based on the image processing library {HALCON}
provides the necessary data for the onboard scene interpretation.
These features as well as the odometric data are checked on the master
{PC} with regard to consistency and plausibility. The results are
distributed to all robots as base for their local planning modules
and also used by a coordinating global planning module.}

}

@INPROCEEDINGS{kranz_knife_2007,

author = {Kranz, Matthias and Maldonado, Alexis and Hoernler, Benedikt and
Rusu, Radu Bogdan and Beetz, Michael and Rigoll, Gerhard and Schmidt,
Albrecht},
title = {A Knife and a Cutting Board as Implicit User Interface - Towards
Context-Aware Kitchen Utilities},
booktitle = {Proceedings of First International Conference on Tangible and Embedded
Interaction 2007, {TEI} 2007, February 15-17 Baton Rouge, Louisiana,
{USA}},
year = {2007}

}

@INPROCEEDINGS{kranz_sensing_2007,

author = {Kranz, Matthias and Maldonado, Alexis and Rusu, Radu Bogdan and Hoernler,
Benedikt and Rigoll, Gerhard and Beetz, Michael and Schmidt, Albrecht},
title = {Sensing Technologies and the Player-Middleware for Context-Awareness
in Kitchen Environments},
booktitle = {Proceedings of Fourth International Conference on Networked Sensing
Systems, June 6 - 8, 2007, Braunschweig, Germany},
year = {2007}

}

@INPROCEEDINGS{kranz_player/stage_2006,

author = {Kranz, Matthias and Rusu, Radu Bogdan and Maldonado, Alexis and Beetz,
Michael and Schmidt, Albrecht},
title = {A {Player/Stage} System for Context-Aware Intelligent Environments},
booktitle = {Proceedings of {UbiSys'06}, System Support for Ubiquitous Computing
Workshop, at the 8th Annual Conference on Ubiquitous Computing (Ubicomp
2006), Orange County California, September 17-21, 2006},
year = {2006},
abstract = {We propose {Player/Stage}, a well-known platform widely used in robotics,
as middleware for ubiquitous computing. {Player/Stage} provides uniform
interfaces to sensors and actuators and allows the computational
matching of input and output. {Player/Stage} exactly addresses the
issues of dealing with heterogeneous hardware but currently only
with a focus towards robotics. We show how to integrate ubiquitous
computing platforms into {Player/Stage} and propose {Player/Stage}
as middleware for ubiquitous computing projects.}

}

@INPROCEEDINGS{kresse_movement-aware_2012,

author = {Kresse, Ingo and Beetz, Michael},
title = {Movement-aware Action Control – Integrating Symbolic and Control-theoretic
Action Execution},
booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
year = {2012},
address = {St. Paul, {MN}, {USA}},
month = may

}

@INPROCEEDINGS{kresse_multimodal_2011,

author = {Kresse, Ingo and Klank, Ulrich and Beetz, Michael},
title = {Multimodal Autonomous Tool Analyses and Appropriate Application},
booktitle = {11th {IEEE-RAS} International Conference on Humanoid Robots},
year = {2011},
address = {Bled, Slovenia},
month = oct

}

@INPROCEEDINGS{kriegel_combining_2013,

author = {Kriegel, Simon and Brucker, Manuel and Marton, Zoltan-Csaba and Bodenmuller,
Tim and Suppa, Michael},
title = {Combining object modeling and recognition for active scene exploration},
booktitle = {Intelligent Robots and Systems ({IROS)}, 2013 {IEEE/RSJ} International
Conference on},
year = {2013},
pages = {2384--2391},
publisher = {{IEEE}}

}

@INPROCEEDINGS{kruse_towards_2010,

author = {Kruse, Thibault and Kirsch, Alexandra},
title = {Towards Opportunistic Action Selection in Human-Robot Cooperation},
booktitle = {33rd Annual German Conference on Artificial Intelligence ({KI} 2010)},
year = {2010}

}

@INPROCEEDINGS{kruse_dynamic_2010,

author = {Kruse, Thibault and Kirsch, Alexandra and Sisbot, E. Akin and Alami,
Rachid},
title = {Dynamic Generation and Execution of Human Aware Navigation Plans},
booktitle = {Proceedings of the Ninth International Conference on Autonomous Agents
and Multiagent Systems ({AAMAS)}},
year = {2010}

}

@INPROCEEDINGS{kruse_exploiting_2010,

author = {Kruse, Thibault and Kirsch, Alexandra and Sisbot, E. Akin and Alami,
Rachid},
title = {Exploiting Human Cooperation in Human-Centered Robot Navigation},
booktitle = {{IEEE} International Symposium in Robot and Human Interactive Communication
(Ro-Man)},
year = {2010}

}

@PHDTHESIS{kunze_robot_2014,

author = {Kunze, Lars},
title = {Naïve Physics and Commonsense Reasoning for Everyday Robot Manipulation},
school = {Technische Universität München},
year = {2014},
address = {München},
url = {http://nbn-resolving.de/urn/resolver.pl?urn:nbn:de:bvb:91-diss-20140214-1138034-0-5}

}

@INPROCEEDINGS{kunze_searching_2012,

author = {Kunze, Lars and Beetz, Michael and Saito, Manabu and Azuma, Haseru
and Okada, Kei and Inaba, Masayuki},
title = {Searching Objects in Large-scale Indoor Environments: A Decision-theoretic
Approach},
booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
year = {2012},
address = {St. Paul, {MN}, {USA}},
month = may

}

@INPROCEEDINGS{kunze_logic_2011,

author = {Kunze, Lars and Dolha, Mihai Emanuel and Beetz, Michael},
title = {Logic Programming with Simulation-based Temporal Projection for Everyday
Robot Object Manipulation},
booktitle = {2011 {IEEE/RSJ} International Conference on Intelligent Robots and
Systems ({IROS)}},
year = {2011},
address = {San Francisco, {CA}, {USA}},
month = sep,
note = {Best Student Paper Finalist.}

}

@INPROCEEDINGS{kunze_simulation-based_2011,

author = {Kunze, Lars and Dolha, Mihai Emanuel and Guzman, Emitza and Beetz,
Michael},
title = {Simulation-based Temporal Projection of Everyday Robot Object Manipulation},
booktitle = {Proc. of the 10th Int. Conf. on Autonomous Agents and Multiagent
Systems ({AAMAS} 2011)},
year = {2011},
editor = {Yolum and Tumer and Stone and Sonenberg},
address = {Taipei, Taiwan},
month = may,
publisher = {{IFAAMAS}}

}

@INPROCEEDINGS{kunze_making_2012,

author = {Kunze, Lars and Haidu, Andrei and Beetz, Michael},
title = {Making Virtual Pancakes — Acquiring and Analyzing Data of Everyday
Manipulation Tasks through Interactive Physics-based Simulations},
booktitle = {Poster and Demo Track of the 35th German Conference on Artificial
Intelligence ({KI-2012)}},
year = {2012},
address = {Saarbrücken, Germany},
month = sep

}

@INPROCEEDINGS{kunze_salient_2007,

author = {Kunze, Lars and Lingemann, Kai and Nüchter, Andreas and Hertzberg,
Joachim},
title = {Salient Visual Features to Help Close the Loop in {6D} {SLAM}},
booktitle = {The 5th International Conference on Computer Vision Systems, 2007},
year = {2007}

}

@INPROCEEDINGS{kunze_towards_2011,

author = {Kunze, Lars and Roehm, Tobias and Beetz, Michael},
title = {Towards Semantic Robot Description Languages},
booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
year = {2011},
pages = {5589--5595},
address = {Shanghai, China},
month = may

}

@INPROCEEDINGS{kunze_putting_2010,

author = {Kunze, Lars and Tenorth, Moritz and Beetz, Michael},
title = {Putting People's Common Sense into Knowledge Bases of Household Robots},
booktitle = {33rd Annual German Conference on Artificial Intelligence ({KI} 2010)},
year = {2010},
pages = {151--159},
address = {Karlsruhe, Germany},
month = sep,
publisher = {Springer}

}

@PHDTHESIS{lanser_modellbasierte_1997,

author = {Lanser, Stefan},
title = {Modellbasierte Lokalisation gestützt auf monokulare Videobilder},
school = {Technische Universität München},
year = {1997}

}

@INPROCEEDINGS{leha_optimization_2009,

author = {Leha, Andreas and Pangercic, Dejan and Rühr, Thomas and Beetz, Michael},
title = {Optimization of Simulated Production Process Performance using Machine
Learning},
booktitle = {Proceedings of Emerging Technologies and Factory Automation ({ETFA).}},
year = {2009}

}

@INPROCEEDINGS{lemaignan_oro_2010,

author = {Lemaignan, Séverin and Ros, Raquel and Mösenlechner, Lorenz and Alami,
Rachid and Beetz, Michael},
title = {{ORO}, a knowledge management module for cognitive architectures
in robotics},
booktitle = {Proceedings of the 2010 {IEEE/RSJ} International Conference on Intelligent
Robots and Systems},
year = {2010},
pages = {3548--3553},
address = {Taipei, Taiwan},
month = oct

}

@ARTICLE{lemaignan_grounding_2011,

author = {Lemaignan, Séverin and Ros, Raquel and Sisbot, E. Akin and Alami,
Rachid and Beetz, Michael},
title = {Grounding the Interaction: Anchoring Situated Discourse in Everyday
Human-Robot Interaction},
journal = {International Journal of Social Robotics},
year = {2011},
pages = {1--19},
issn = {1875-4791},
url = {http://dx.doi.org/10.1007/s12369-011-0123-x}

}

@INPROCEEDINGS{lenz_distributed_2010,

author = {Lenz, C. and Röder, T. and Eggers, Martin and Amin, S. and Kisler,
T. and Radig, Bernd and Panin, G. and Knoll, A.},
title = {A Distributed Many-Camera System for Multi-Person Tracking},
booktitle = {Proceedings of the First International Joint Conference on Ambient
Intelligence ({AmI} 2010)},
year = {2010},
editor = {Wichert, R. and Ruyter, B. de},
month = nov,
publisher = {Springer Lecture Notes in Computer Science}

}

@INPROCEEDINGS{li_obstacle_2009,

author = {Li, Jun and Maldonado, Alexis and Beetz, Michael and Schuboe, Anna},
title = {Obstacle avoidance in a pick-and-place task},
booktitle = {Proceedings of the 2009 {IEEE} Conference on Robotics and Biomimetics},
year = {2009},
address = {Guilin, Guangxi, China},
month = dec

}

@INPROCEEDINGS{maier_self-diagnosis_2009,

author = {Maier, Paul},
title = {Self-Diagnosis and Self-Planning with Constraint-based Hybrid Models},
booktitle = {Proc. First International Conference on Prognostics and Health Management
({PHM'09)/Doctoral} Consortium},
year = {2009},
address = {San Diego, {CA}, {USA}},
month = sep

}

@INPROCEEDINGS{maier_adaptive_2008,

author = {Maier, Paul},
title = {Adaptive Abstraction of Constraint-Based Models for Self-Diagnosis
and Planning},
booktitle = {Proc. {AAAI/SIGART} Doctoral Consortium},
year = {2008},
pages = {1859--1860},
address = {Menlo Park, California},
month = jul,
publisher = {The {AAAI} Press}

}

@INPROCEEDINGS{maier_compiling_2011,

author = {Maier, Paul and Jain, Dominik and Sachenbacher, Martin},
title = {Compiling {AI} Engineering Models for Probabilistic Inference},
booktitle = {{KI} 2011: Advances in Artificial Intelligence, 34th Annual German
Conference on {AI}},
year = {2011},
volume = {7006},
series = {Lecture Notes in Computer Science},
pages = {191--203},
address = {Berlin, Germany},
month = oct,
publisher = {Springer},
isbn = {978-3-642-24454-4}

}

@INPROCEEDINGS{maier_diagnostic_2011,

author = {Maier, Paul and Jain, Dominik and Sachenbacher, Martin},
title = {Diagnostic Hypothesis Enumeration vs. Probabilistic Inference for
Hierarchical Automata Models},
booktitle = {Proceedings of the 22nd International Workshop on Principles of Diagnosis
({DX-2011)}},
year = {2011},
address = {Murnau, Germany}

}

@INPROCEEDINGS{maier_plan_2010,

author = {Maier, Paul and Jain, Dominik and Waldherr, Stefan and Sachenbacher,
Martin},
title = {Plan Assessment for Autonomous Manufacturing as Bayesian Inference},
booktitle = {{KI} 2010: Advances in Artificial Intelligence, 33rd Annual German
Conference on {AI}},
year = {2010},
volume = {6359},
series = {Lecture Notes in Computer Science},
pages = {263--271},
address = {Karlsruhe, Germany},
publisher = {Springer},
isbn = {978-3-642-16110-0}

}

@INPROCEEDINGS{maier_diagnosis_2009,

author = {Maier, Paul and Sachenbacher, Martin},
title = {Diagnosis and Fault-adaptive Control for Mechatronic Systems using
Hybrid Constraint Automata},
booktitle = {Proc. First International Conference on Prognostics and Health Management
({PHM'09)}},
year = {2009},
address = {San Diego, {CA}, {USA}},
month = sep,
note = {accepted for publication}

}

@INPROCEEDINGS{maier_factory_2009,

author = {Maier, Paul and Sachenbacher, Martin},
title = {Factory Monitoring and Control with Mixed {Hardware/Software}, {Discrete/Continuous}
Models},
booktitle = {Proc. of 14th {IEEE} International Conference on Emerging Technologies
and Factory Automation ({ETFA-2009)}},
year = {2009}

}

@INPROCEEDINGS{maier_self-monitoring_2009,

author = {Maier, Paul and Sachenbacher, Martin},
title = {Self-Monitoring and Control for Embedded Systems using Hybrid Constraint
Automata},
booktitle = {Proc. Workshop on Self-X in Mechatronics and other Engineering Applications},
year = {2009},
address = {Paderborn, Germany},
month = sep

}

@INPROCEEDINGS{maier_adaptive_2008-1,

author = {Maier, Paul and Sachenbacher, Martin},
title = {Adaptive Domain Abstraction in a Soft-Constraint Message-Passing
Algorithm},
booktitle = {Proc. Ninth International Workshop on Preferences and Soft Constraints
(Soft'08)},
year = {2008},
note = {Accepted for publication}

}

@INPROCEEDINGS{maier_constraint_2008,

author = {Maier, Paul and Sachenbacher, Martin},
title = {Constraint Optimization and Abstraction for Embedded Intelligent
Systems},
booktitle = {Proc. Fifth International Conference on Integration of {AI} and {OR}
Techniques in Constraint Programming for Combinatorial Optimization
Problems ({CPAIOR'08)}},
year = {2008},
pages = {338--342},
address = {Paris, France}

}

@INPROCEEDINGS{maier_constraint-based_????,

author = {Maier, Paul and Sachenbacher, Martin and Rühr, Thomas
and Kuhn, Lukas},
title = {Constraint-Based Integration of Plan Tracking and Prognosis for Autonomous
Production},
booktitle = {32nd Annual German Conference on Artificial Intelligence},
pages = {403--410}

}

@INPROCEEDINGS{maier_integrated_2009-2,

author = {Maier, Paul and Sachenbacher, Martin and Rühr, Thomas
and Kuhn, Lukas},
title = {Integrated Diagnosis and Plan Assessment for Autonomous Production
Processes},
booktitle = {Workshop Proc. {SAS@} {IJCAI}},
year = {2009}

}

@INPROCEEDINGS{maier_constraint-based_2009,

author = {Maier, Paul and Sachenbacher, Martin and Rühr, Thomas and Kuhn, Lukas},
title = {Constraint-Based Integration of Plan Tracking and Prognosis for Autonomous
Production},
booktitle = {{KI} 2009: Advances in Artificial Intelligence, 32nd Annual German
Conference on {AI}},
year = {2009},
volume = {5803},
series = {Lecture Notes in Computer Science},
pages = {403--410},
address = {Paderborn, Germany},
month = sep,
publisher = {Springer}

}

@INPROCEEDINGS{maier_integrated_2009,

author = {Maier, Paul and Sachenbacher, Martin and Rühr, Thomas and Kuhn, Lukas},
title = {Integrated Plan Tracking and Prognosis for Autonomous Production
Processes},
booktitle = {Proc. of 14th {IEEE} International Conference on Emerging Technologies
and Factory Automation ({ETFA-2009)}},
year = {2009},
month = sep

}

@INPROCEEDINGS{maier_integrated_2009-1,

author = {Maier, Paul and Sachenbacher, Martin and Rühr, Thomas and Kuhn, Lukas},
title = {Integrated Diagnosis and Plan Assessment for Autonomous Production
Processes},
booktitle = {Proc. of The {IJCAI-09} Workshop on Self-* and Autonomous Systems
({SAS-2009)}},
year = {2009},
month = jul

}

@INPROCEEDINGS{maier_integrating_2009,

author = {Maier, Paul and Sachenbacher, Martin and Rühr, Thomas and Kuhn, Lukas},
title = {Integrating Model-based Diagnosis and Prognosis in Autonomous Production},
booktitle = {Proc. First International Conference on Prognostics and Health Management
({PHM'09)}},
year = {2009},
address = {San Diego, {CA}, {USA}},
month = sep,
note = {accepted for publication}

}

@INPROCEEDINGS{malaka_solving_2000,

author = {Malaka, Rainer and Buck, Sebastian},
title = {Solving Nonlinear Optimization Problems Using Networks Of Spiking
Neurons},
booktitle = {{IEEE} International Joint Conference on Neural Networks},
year = {2000},
volume = {6},
pages = {486--491},
abstract = {Most artificial neural networks used in practical applications are
based on simple neuron types in a multi-layer architecture. Here,
we propose to solve optimization problems using a fully recurrent
network of spiking neurons mimicking the response behavior of biological
neurons. Such networks can compute a series of different solutions
for a given problem and converge into a periodical sequence of such
solutions. The goal of this paper is to prove that neural networks
like the {SRM} (Spike Response Model) are able to solve nonlinear
optimization problems. We demonstrate this for the traveling salesman
problem. Our network model is able to compute multiple solutions
and can use its dynamics to leave local minima in which classical
models would be stuck. For adapting the model, we introduce a suitable
network architecture and show how to encode the problem directly
into the network weights.}

}

@INPROCEEDINGS{maldonado_improving_2012,

author = {Maldonado, Alexis and Alvarez-Heredia, Humberto and Beetz, Michael},
title = {Improving robot manipulation through fingertip perception},
booktitle = {{IEEE} International Conference on Intelligent Robots and Systems
({IROS)}},
year = {2012},
address = {Vilamoura, Algarve, Portugal},
month = oct

}

@INPROCEEDINGS{maldonado_robotic_2010,

author = {Maldonado, Alexis and Klank, Ulrich and Beetz, Michael},
title = {Robotic grasping of unmodeled objects using time-of-flight range
data and finger torque information},
booktitle = {2010 {IEEE/RSJ} International Conference on Intelligent Robots and
Systems ({IROS)}},
year = {2010},
pages = {2586--2591},
address = {Taipei, Taiwan},
month = oct

}

@BOOK{marco_creating_2012,

title = {Creating and using {RoboEarth} object models},
year = {2012},
author = {Marco, Daniel Di and Koch, Andreas and Zweigle, Oliver and Häussermann,
Kai and Schießle, Björn and Levi, Paul and Lopez, Dorian Galvez
and Riazuelo, Luis and Civera, Javier and Montiel, J. M. M. and Tenorth,
Moritz and Perzylo, Alexander Clifford and Waibel, Markus and Molengraft,
Marinus Jacobus Gerardus van de},
address = {St. Paul, {MN}, {USA}},
month = may,
note = {Published: {IEEE} International Conference on Robotics and Automation
({ICRA)}}

}

@INPROCEEDINGS{marco_roboearth_2012,

author = {Marco, Daniel di and Tenorth, Moritz and Häussermann, Kai and Zweigle,
Oliver and Levi, Paul},
title = {{RoboEarth} Action Recipe Execution},
booktitle = {12th International Conference on Intelligent Autonomous Systems},
year = {2012}

}

@INPROCEEDINGS{marconi_sherpa_2012,

author = {Marconi, L. and Melchiorri, C. and Beetz, M. and Pangercic, D. and
Siegwart, R. and Leutenegger, S. and Carloni, R. and Stramigioli,
S. and Bruyninckx, H. and Doherty, P. and Kleiner, A. and Lippiello,
V. and Finzi, A. and Siciliano, B. and Sala, A. and Tomatis, N.},
title = {The {SHERPA} project: smart collaboration between humans and ground-aerial
robots for improving rescuing activities in alpine environments},
booktitle = {{IEEE} International Symposium on Safety, Security, and Rescue Robotics
({SSRR)}},
year = {2012},
address = {College Station, Texas, {USA}},
month = nov

}

@INPROCEEDINGS{marton_autonomous_2008,

author = {Marton, Zoltan Csaba and Blodow, Nico and Dolha, Mihai and Tenorth,
Moritz and Rusu, Radu Bogdan and Beetz, Michael},
title = {Autonomous Mapping of Kitchen Environments and Applications},
booktitle = {Proceedings of the 1st International Workshop on Cognition for Technical
Systems, Munich, Germany, 6-8 October},
year = {2008}

}

@INPROCEEDINGS{marton_reconstruction_2009,

author = {Marton, Zoltan Csaba and Goron, Lucian Cosmin and Rusu, Radu Bogdan
and Beetz, Michael},
title = {Reconstruction and Verification of {3D} Object Models for Grasping},
booktitle = {Proceedings of the 14th International Symposium on Robotics Research
({ISRR09)}},
year = {2009},
address = {Lucerne, Switzerland},
month = sep

}

@ARTICLE{marton_combined_2011,

author = {Marton, Zoltan Csaba and Pangercic, Dejan and Blodow, Nico and Beetz,
Michael},
title = {Combined {2D-3D} Categorization and Classification for Multimodal
Perception Systems},
journal = {The International Journal of Robotics Research},
year = {2011},
volume = {30},
pages = {1378--1402},
number = {11},
month = sep

}

@INPROCEEDINGS{marton_fast_2009,

author = {Marton, Zoltan Csaba and Rusu, Radu Bogdan and Beetz, Michael},
title = {On Fast Surface Reconstruction Methods for Large and Noisy Datasets},
booktitle = {Proceedings of the {IEEE} International Conference on Robotics and
Automation ({ICRA)}},
year = {2009},
address = {Kobe, Japan},
month = may

}

@INPROCEEDINGS{marton_probabilistic_2009,

author = {Marton, Zoltan Csaba and Rusu, Radu Bogdan and Jain, Dominik and
Klank, Ulrich and Beetz, Michael},
title = {Probabilistic Categorization of Kitchen Objects in Table Settings
with a Composite Sensor},
booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems
({IROS)}},
year = {2009},
pages = {4777--4784},
address = {St. Louis, {MO}, {USA}},
month = oct

}

@INPROCEEDINGS{marton_object_2012,

author = {Marton, Zoltan-Csaba and Balint-Benczedi, Ferenc and Blodow, Nico
and Goron, Lucian Cosmin and Beetz, Michael},
title = {Object Categorization in Clutter using Additive Features and Hashing
of Part-graph Descriptors},
booktitle = {Proceedings of Spatial Cognition ({SC)}},
year = {2012},
address = {Abbey Kloster Seeon, Germany}

}

@INPROCEEDINGS{marton_advantages_2011,

author = {Marton, Zoltan-Csaba and Blodow, Nico and Beetz, Michael},
title = {Advantages of Spatial-temporal Object Maps for Service Robotics},
booktitle = {{IEEE} Workshop on Advanced Robotics and its Social Impacts ({ARSO)}},
year = {2011},
address = {Half-Moon Bay, {CA}, {USA}},
month = oct

}

@INPROCEEDINGS{marton_efficient_2011,

author = {Marton, Zoltan-Csaba and Pangercic, Dejan and Beetz, Michael},
title = {Efficient Surface and Feature Estimation in {RGBD}},
booktitle = {{RGB-D} Workshop on {3D} Perception in Robotics at the European Robotics
({euRobotics)} Forum},
year = {2011},
address = {Västerås, Sweden},
month = apr

}

@INPROCEEDINGS{marton_general_2010,

author = {Marton, Zoltan-Csaba and Pangercic, Dejan and Blodow, Nico and Kleinehellefort,
Jonathan and Beetz, Michael},
title = {General {3D} Modelling of Novel Objects from a Single View},
booktitle = {Proceedings of the {IEEE/RSJ} International Conference on Intelligent
Robots and Systems ({IROS)}},
year = {2010},
address = {Taipei, Taiwan},
month = oct

}

@INPROCEEDINGS{marton_hierarchical_2010,

author = {Marton, Zoltan-Csaba and Pangercic, Dejan and Rusu, Radu Bogdan and
Holzbach, Andreas and Beetz, Michael},
title = {Hierarchical Object Geometric Categorization and Appearance Classification
for Mobile Manipulation},
booktitle = {Proceedings of the {IEEE-RAS} International Conference on Humanoid
Robots},
year = {2010},
address = {Nashville, {TN}, {USA}},
month = dec

}

@ARTICLE{marton_ensembles_2012,

author = {Marton, Zoltan-Csaba and Seidel, Florian and Balint-Benczedi, Ferenc
and Beetz, Michael},
title = {Ensembles of Strong Learners for Multi-cue Classification},
journal = {Pattern Recognition Letters ({PRL)}, Special Issue on Scene Understandings
and Behaviours Analysis},
year = {2012},
note = {In press.}

}

@INPROCEEDINGS{marton_towards_2012,

author = {Marton, Zoltan-Csaba and Seidel, Florian and Beetz, Michael},
title = {Towards Modular Spatio-temporal Perception for Task-adapting Robots},
booktitle = {Postgraduate Conference on Robotics and Development of Cognition
({RobotDoC-PhD)}, a satellite event of the 22nd International Conference
on Artificial Neural Networks ({ICANN)}},
year = {2012},
address = {Lausanne, Switzerland}

}

@PHDTHESIS{mayer_facial_2012,

author = {Mayer, Christoph},
title = {Facial Expression Recognition With A Three-Dimensional Face Model},
school = {Technische Universität München},
year = {2012},
address = {München},
keywords = {facial expressions},
url = {http://nbn-resolving.de/urn/resolver.pl?urn:nbn:de:bvb:91-diss-20120110-1080232-1-5}

}

@ARTICLE{mayer_cross-database_2014,

author = {Mayer, Christoph and Eggers, Martin and Radig, Bernd},
title = {Cross-database evaluation for facial expression recognition},
journal = {Pattern Recognition and Image Analysis},
year = {2014},
volume = {24},
pages = {124--132},
number = {1},
month = jan,
doi = {10.1134/S1054661814010106},
issn = {1054-6618, 1555-6212},
keywords = {facial expressions},
language = {en},
url = {http://link.springer.com/10.1134/S1054661814010106},
urldate = {2014-05-15}

}

@ARTICLE{mayer_face_2013,

author = {Mayer, Christoph and Radig, Bernd},
title = {Face model fitting with learned displacement experts and multi-band
images},
journal = {Pattern Recognition and Image Analysis},
year = {2013},
volume = {23},
pages = {287--295},
number = {2},
month = apr,
doi = {10.1134/S1054661813020119},
issn = {1054-6618, 1555-6212},
keywords = {facial expressions},
language = {en},
url = {http://link.springer.com/10.1134/S1054661813020119},
urldate = {2014-05-15}

}

@ARTICLE{mayer_face_2011,

author = {Mayer, Christoph and Radig, Bernd},
title = {Face model fitting with learned displacement experts and multi-band
images},
journal = {Pattern Recognition and Image Analysis},
year = {2011},
volume = {21},
pages = {526--529},
number = {3},
month = sep,
doi = {10.1134/S1054661811020738},
issn = {1054-6618, 1555-6212},
keywords = {facial expressions},
language = {en},
url = {http://link.springer.com/10.1134/S1054661811020738},
urldate = {2014-05-15}

}

@INPROCEEDINGS{mayer_learning_2011,

author = {Mayer, Christoph and Radig, Bernd},
title = {Learning Displacement Experts from Multi-band Images for Face Model
Fitting},
booktitle = {International Conference on Advances in Computer-Human Interaction},
year = {2011},
month = feb,
keywords = {facial expressions}

}

@INPROCEEDINGS{mayer_towards_2010,

author = {Mayer, Christoph and Sosnowski, Stefan and Kühnlenz, Kolja and Radig,
Bernd},
title = {Towards robotic facial mimicry: system development and evaluation},
booktitle = {Proceedings of the 19th {IEEE} International Symposium on Robot and
Human Interactive Communication},
year = {2010},
keywords = {facial expressions}

}

@INPROCEEDINGS{mayer_facial_2009,

author = {Mayer, Christoph and Wimmer, Matthias and Eggers, Martin and Radig,
Bernd},
title = {Facial Expression Recognition with {3D} Deformable Models},
booktitle = {Proceedings of the 2nd International Conference on Advances in Computer-Human
Interaction ({ACHI)}},
year = {2009},
publisher = {Springer},
note = {Best Paper Award},
keywords = {facial expressions}

}

@ARTICLE{mayer_adjusted_2009,

author = {Mayer, Christoph and Wimmer, Matthias and Radig, Bernd},
title = {Adjusted Pixel Features for Facial Component Classification},
journal = {Image and Vision Computing Journal},
year = {2009},
keywords = {facial expressions}

}

@INPROCEEDINGS{mayer_interpreting_2008,

author = {Mayer, Christoph and Wimmer, Matthias and Stulp, Freek and Riaz,
Zahid and Roth, Anton and Eggers, Martin and Radig, Bernd},
title = {Interpreting the Dynamics of Facial Expressions in Real Time Using
Model-based Techniques},
booktitle = {Proceedings of the 3rd Workshop on Emotion and Computing: Current
Research and Future Impact},
year = {2008},
pages = {45--46},
address = {Kaiserslautern, Germany},
month = sep,
keywords = {facial expressions}

}

@INPROCEEDINGS{mayer_real_2008,

author = {Mayer, Christoph and Wimmer, Matthias and Stulp, Freek and Riaz,
Zahid and Roth, Anton and Eggers, Martin and Radig, Bernd},
title = {A Real Time System for Model-based Interpretation of the Dynamics
of Facial Expressions},
booktitle = {Proc. of the International Conference on Automatic Face and Gesture
Recognition ({FGR08)}},
year = {2008},
address = {Amsterdam, Netherlands},
month = sep,
keywords = {facial expressions}

}

@ARTICLE{meyer_discrete_2002,

author = {Meyer, M. and Desbrun, M. and Schröder, P. and Barr, A. H.},
title = {Discrete differential-geometry operators for triangulated 2-manifolds},
journal = {Visualization and mathematics},
year = {2002},
volume = {3},
pages = {34--57},
number = {7}

}

@INPROCEEDINGS{morisset_leaving_2009,

author = {Morisset, Benoit and Rusu, Radu Bogdan and Sundaresan, Aravind and
Hauser, Kris and Agrawal, Motilal and Latombe, Jean-Claude and Beetz,
Michael},
title = {Leaving Flatland: Toward Real-Time {3D} Navigation},
booktitle = {Proceedings of the {IEEE} International Conference on Robotics and
Automation ({ICRA)}, Kobe, Japan, May 12-17},
year = {2009}

}

@ARTICLE{mozos_furniture_2011,

author = {Mozos, Oscar Martinez and Marton, Zoltan Csaba and Beetz, Michael},
title = {Furniture Models Learned from the {WWW} – Using Web Catalogs to Locate
and Categorize Unknown Furniture Pieces in {3D} Laser Scans},
journal = {Robotics \& Automation Magazine},
year = {2011},
volume = {18},
pages = {22--32},
number = {2},
month = jun

}

@INPROCEEDINGS{murray_modeling_2011,

author = {Murray, William R. and Jain, Dominik},
title = {Modeling Cognitive Frames for Situations with Markov Logic Networks},
booktitle = {Proceedings of the 8th International {NLPCS} Workshop: Human-Machine
Interaction in Translation, Copenhagen Studies in Language 41},
year = {2011},
pages = {167--178},
month = aug,
publisher = {Samfundslitteratur}

}

@INPROCEEDINGS{mosenlechner_fast_2013,

author = {Mösenlechner, Lorenz and Beetz, Michael},
title = {Fast Temporal Projection Using Accurate Physics-Based Geometric Reasoning},
booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
year = {2013},
address = {Karlsruhe, Germany},
month = may,
note = {Accepted for publication.}

}

@INPROCEEDINGS{mosenlechner_parameterizing_2011,

author = {Mösenlechner, Lorenz and Beetz, Michael},
title = {Parameterizing Actions to have the Appropriate Effects},
booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems
({IROS)}},
year = {2011},
address = {San Francisco, {CA}, {USA}},
month = sep

}

@inproceedings{mosenlechner_using_2009,
  author    = {Mösenlechner, Lorenz and Beetz, Michael},
  title     = {Using Physics- and Sensor-based Simulation for High-fidelity Temporal
               Projection of Realistic Robot Behavior},
  booktitle = {19th International Conference on Automated Planning and Scheduling
               ({ICAPS'09})},
  year      = {2009},
  abstract  = {Planning means deciding on the future course of action based on predictions
               of what will happen when an activity is carried out in one way or
               the other. As we apply action planning to autonomous, sensor-guided
               mobile robots with manipulators or even to humanoid robots we need
               very realistic and detailed predictions of the behavior generated
               by a plan in order to improve the robot's performance substantially.
               In this paper we investigate the high-fidelity temporal projection
               of realistic robot behavior based on physics- and sensor-based simulation
               systems. We equip a simulator and interpreter with means to log simulated
               plan executions into a database. A logic-based query and inference
               mechanism then retrieves and reconstructs the necessary information
               from the database and translates the information into a first-order
               representation of robot plans and the behavior they generate. The
               query language enables the robot planning system to infer the intentions,
               the beliefs, and the world state at any projected time. It also allows
               the planning system to recognize, diagnose, and analyze various plan
               failures typical for performing everyday manipulation tasks.}
}

@inproceedings{mosenlechner_becoming_2010,
  author    = {Mösenlechner, Lorenz and Demmel, Nikolaus and Beetz, Michael},
  title     = {Becoming Action-aware through Reasoning about Logged Plan Execution
               Traces},
  booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems
               ({IROS})},
  year      = {2010},
  pages     = {2231--2236},
  address   = {Taipei, Taiwan},
  month     = oct
}

@inproceedings{mosenlechner_high_2008,
  author    = {Mösenlechner, Lorenz and Müller, Armin and Beetz, Michael},
  title     = {High Performance Execution of Everyday Pick-and-Place Tasks by Integrating
               Transformation Planning and Reactive Execution},
  booktitle = {Proceedings of the 1st International Workshop on Cognition for Technical
               Systems, München, Germany, 6-8 October},
  year      = {2008},
  abstract  = {We investigate the plan-based control of physically and sensorically
               realistic simulated autonomous mobile robots performing everyday
               pick-and-place tasks in human environments, such as table setting.
               Our approach applies {AI} planning techniques to transform default
               plans that can be inferred from instructions for activities of daily
               life into flexible, high-performance robot plans. To find high performance
               plans the planning system applies transformations such as carrying
               plates to the table by stacking them or leaving cabinet doors open
               while setting the table, which require substantial changes of the
               control structure of the intended activities. We argue and demonstrate
               that applying {AI} planning techniques directly to concurrent reactive
               plan languages, instead of using layered software architectures with
               different languages, enables the robot action planner to achieve
               substantial performance improvements (23\% - 45\% depending on the
               tasks). We also argue that the transformation of concurrent reactive
               plans is necessary to obtain the results. Our claims are supported
               by extensive empirical investigations in realistic simulations.}
}

@phdthesis{muller_transformational_2008,
  author   = {Müller, Armin},
  title    = {Transformational Planning for Autonomous Household Robots using Libraries
              of Robust and Flexible Plans},
  school   = {Technische Universität München},
  year     = {2008},
  abstract = {One of the oldest dreams of Artificial Intelligence is the realization
              of autonomous robots that achieve a level of problem-solving competency
              comparable to humans. Human problem-solving capabilities are particularly
              impressive in the context of everyday activities such as performing
              household chores: people are able to deal with ambiguous and incomplete
              information, they adapt their plans to different environments and
              specific situations achieving intuitively almost optimal behavior,
              they cope with interruptions and failures and manage multiple interfering
              jobs. The investigations presented in this work make substantial
              progress in the direction of building robots that show similar behavior.
              This thesis addresses the problem of competently accomplishing everyday
              manipulation activities, such as setting the table and preparing
              meals, as a plan-based control problem. In plan-based control, robots
              do not only execute their programs but also reason about and modify
              them. We propose {TRANER} (Transformational Planner) as a suitable
              planning system for the optimization of everyday manipulation activities.
              {TRANER} realizes planning through a generate-test cycle in which
              plan revision rules propose alternative plans and new plans are simulated
              in order to test and evaluate them. The unique features of {TRANER}
              are that it can realize very general and abstract plan revisions
              such as "stack objects before carrying them instead of handling them
              one by one" and that it successfully operates on plans in a way that
              they generate reliable, flexible, and efficient robot behavior in
              realistic simulations. The key contributions of this dissertation
              are threefold. First, it extends the plan representation to support
              the specification of robust and transformable plans. Second, it proposes
              a library of general and flexible plans for a household robot,
              using the extended plan representation. Third, it establishes a powerful,
              yet intuitive syntax for transformation rules together with a set
              of general transformation rules for optimizing pick-and-place tasks
              in an everyday setting using the rule language. The viability and
              strength of the approach is empirically demonstrated in comprehensive
              and extensive experiments in a simulation environment with realistically
              simulated action and sensing mechanisms. The experiments show that
              transformational planning is necessary to tailor the robot's activities
              and that it is capable of substantially improving the robot's performance.},
  url      = {http://mediatum2.ub.tum.de/node?id=645588}
}

@inproceedings{muller_designing_2006,
  author    = {Müller, Armin and Beetz, Michael},
  title     = {Designing and Implementing a Plan Library for a Simulated Household
               Robot},
  booktitle = {Cognitive Robotics: Papers from the {AAAI} Workshop},
  year      = {2006},
  editor    = {Beetz, Michael and Rajan, Kanna and Thielscher, Michael and Rusu,
               Radu Bogdan},
  series    = {Technical Report {WS-06-03}},
  pages     = {119--128},
  address   = {Menlo Park, California},
  publisher = {American Association for Artificial Intelligence},
  abstract  = {As we are deploying planning mechanisms in real-world applications,
               such as the control of autonomous robots, it becomes apparent that
               the performance of plan-based controllers critically depends on the
               design and implementation of plan libraries. Despite its importance
               the investigation of designs of plan libraries and plans has been
               largely ignored. In this paper we describe parts of a plan library
               that we are currently developing and applying to the control of a
               simulated household robot. The salient features of our plans are
               that they are designed for reliable, flexible, and optimized execution,
               and are grounded into sensor data and action routines. We provide
               empirical evidence that design criteria that we are proposing have
               considerable impact on the performance level of robots.},
  isbn      = {978-1-57735-285-3}
}

@inproceedings{muller_towards_2007,
  author    = {Müller, Armin and Beetz, Michael},
  title     = {Towards a Plan Library for Household Robots},
  booktitle = {Proceedings of the {ICAPS'07} Workshop on Planning and Plan Execution
               for Real-World Systems: Principles and Practices for Planning in
               Execution},
  year      = {2007},
  address   = {Providence, {USA}},
  month     = sep,
  abstract  = {This paper describes the structure for a plan library of a service
               robot intended to perform household chores. The plans in the library
               are particularly designed to enable reliable, flexible, and efficient
               robot control, to learn control heuristics, to generalize the plans
               to cope with new objects and situations. We believe that plans with
               these characteristics are required for competent autonomous robots
               performing skilled manipulation tasks in human environments.}
}

@inproceedings{muller_transformational_2007,
  author    = {Müller, Armin and Kirsch, Alexandra and Beetz, Michael},
  title     = {Transformational Planning for Everyday Activity},
  booktitle = {Proceedings of the 17th International Conference on Automated Planning
               and Scheduling ({ICAPS'07})},
  year      = {2007},
  pages     = {248--255},
  address   = {Providence, {USA}},
  month     = sep,
  abstract  = {We propose an approach to transformational planning and learning of
               everyday activity. This approach is targeted at autonomous robots
               that are to perform complex activities such as household chore. Our
               approach operates on flexible and reliable plans suited for long-term
               activity and applies plan transformations that generate competent
               and high-performance robot behavior. We show as a proof of concept
               that general transformation rules can be formulated that achieve
               substantially and significantly improved performance using table
               setting as an example.}
}

@inproceedings{muller_object-oriented_2004,
  author    = {Müller, Armin and Kirsch, Alexandra and Beetz, Michael},
  title     = {Object-oriented Model-based Extensions of Robot Control Languages},
  booktitle = {27th German Conference on Artificial Intelligence},
  year      = {2004},
  abstract  = {More than a decade after mobile robots arrived in many research labs
               it is still difficult to find plan-based autonomous robot controllers
               that perform, beyond doubt, better than they possibly could without
               applying {AI} methods. One of the main reason for this situation
               is abstraction. {AI} based control techniques typically abstract
               away from the mechanisms that generate the physical behavior and
               refuse the use of control structures that have proven to be necessary
               for producing flexible and reliable robot behavior. The consequence
               is: {AI-based} control mechanisms can neither explain and diagnose
               how a certain behavior resulted from a given plan nor can they revise
               the plans to improve its physical performance. In our view, a substantial
               improvement on this situation is not possible without having a new
               generation of robot control languages. These languages must, on the
               one hand, be expressive enough for specifying and producing high
               performance robot behavior and, on the other hand, be transparent
               and explicit enough to enable execution time inference mechanisms
               to reason about, and manipulate these control programs. This paper
               reports on aspects of the design of {RPL-II}, which we propose as
               such a next generation control language. We describe the nuts and
               bolts of extending our existing language {RPL} to support explicit
               models of physical systems, and object-oriented modeling of control
               tasks and programs. We show the application of these concepts in
               the context of autonomous robot soccer.}
}

@inproceedings{nissler_sample_2013,
  author    = {Nissler, Christian and Marton, Zoltan-Csaba and Suppa, Michael},
  title     = {Sample consensus fitting of bivariate polynomials for initializing
               {EM-based} modeling of smooth {3D} surfaces},
  booktitle = {Intelligent Robots and Systems ({IROS}), 2013 {IEEE/RSJ} International
               Conference on},
  year      = {2013},
  pages     = {4228--4234},
  publisher = {{IEEE}}
}

@inproceedings{nyga_everything_2012,
  author    = {Nyga, Daniel and Beetz, Michael},
  title     = {Everything Robots Always Wanted to Know about Housework (But were
               afraid to ask)},
  booktitle = {2012 {IEEE/RSJ} International Conference on Intelligent Robots and
               Systems ({IROS})},
  year      = {2012},
  address   = {Vilamoura, Portugal},
  month     = oct
}

@inproceedings{nyga_how-models_2011,
  author    = {Nyga, Daniel and Tenorth, Moritz and Beetz, Michael},
  title     = {How-Models of Human Reaching Movements in the Context of Everyday
               Manipulation Activities},
  booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA})},
  year      = {2011},
  address   = {Shanghai, China},
  month     = may
}

@inproceedings{pangercic_fast_2011,
  author    = {Pangercic, Dejan and Haltakov, Vladimir and Beetz, Michael},
  title     = {Fast and Robust Object Detection in Household Environments Using
               Vocabulary Trees with {SIFT} Descriptors},
  booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems
               ({IROS}), Workshop on Active Semantic Perception and Object Search
               in the Real World},
  year      = {2011},
  address   = {San Francisco, {CA}, {USA}},
  month     = sep
}

@misc{pangercic_robot_2011,
  author       = {Pangercic, Dejan and Mathe, Koppany and Marton, Zoltan-Csaba and
                  Goron, Lucian Cosmin and Opris, Monica-Simona and Schuster, Martin
                  and Tenorth, Moritz and Jain, Dominik and Ruehr, Thomas and Beetz,
                  Michael},
  title        = {A Robot that Shops for and Stores Groceries},
  year         = {2011},
  address      = {San Francisco, {CA}, {USA}},
  month        = aug,
  howpublished = {{AAAI} Video Competition ({AIVC} 2011)},
  url          = {http://youtu.be/x0Ybod_6ADA}
}

@inproceedings{pangercic_3d-based_2008,
  author    = {Pangercic, Dejan and Rusu, Radu Bogdan and Beetz, Michael},
  title     = {{3D-Based} Monocular {SLAM} for Mobile Agents Navigating in Indoor
               Environments},
  booktitle = {Proceedings of the 13th {IEEE} International Conference on Emerging
               Technologies and Factory Automation ({ETFA)}, Hamburg, Germany, September
               15-18},
  year      = {2008}
}

@inproceedings{pangercic_visual_2009,
  author    = {Pangercic, Dejan and Tavcar, Rok and Tenorth, Moritz and Beetz, Michael},
  title     = {Visual Scene Detection and Interpretation using Encyclopedic Knowledge
               and Formal Description Logic},
  booktitle = {Proceedings of the International Conference on Advanced Robotics
               ({ICAR})},
  year      = {2009},
  address   = {Munich, Germany},
  month     = jun
}

@inproceedings{pangercic_combining_2010,
  author    = {Pangercic, Dejan and Tenorth, Moritz and Jain, Dominik and Beetz,
               Michael},
  title     = {Combining Perception and Knowledge Processing for Everyday Manipulation},
  booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems
               ({IROS})},
  year      = {2010},
  pages     = {1065--1071},
  address   = {Taipei, Taiwan},
  month     = oct
}

@inproceedings{pangercic_semantic_2012,
  author    = {Pangercic, Dejan and Tenorth, Moritz and Pitzer, Benjamin and Beetz,
               Michael},
  title     = {Semantic Object Maps for Robotic Housework - Representation, Acquisition
               and Use},
  booktitle = {2012 {IEEE/RSJ} International Conference on Intelligent Robots and
               Systems ({IROS})},
  year      = {2012},
  address   = {Vilamoura, Portugal},
  month     = oct
}

@inproceedings{pietzsch_face_2008,
  author    = {Pietzsch, Sylvia and Wimmer, Matthias and Stulp, Freek and Radig,
               Bernd},
  title     = {Face Model Fitting with Generic, Group-specific, and Person-specific
               Objective Functions},
  booktitle = {3rd International Conference on Computer Vision Theory and Applications
               ({VISAPP})},
  year      = {2008},
  volume    = {2},
  pages     = {5--12},
  address   = {Madeira, Portugal},
  month     = jan,
  abstract  = {In model-based fitting, the model parameters that best fit the image
               are determined by searching for the optimum of an objective function.
               Often, this function is designed manually, based on implicit and
               domain-dependent knowledge. We acquire more robust objective functions
               by learning them from annotated images, in which many critical decisions
               are automated, and the remaining manual steps do not require domain
               knowledge. Still, the trade-off between generality and accuracy remains.
               General functions can be applied to a large range of objects, whereas
               specific functions describe a subset of objects more accurately.
               Gross et al. have demonstrated this principle by comparing generic
               to person-specific Active Appearance Models. As it is impossible
               to learn a person-specific objective function for the entire human
               population, we automatically partition the training images and then
               learn partition-specific functions. The number of groups influences
               the specificity of the learned functions. We automatically determine
               the optimal partitioning given the number of groups, by minimizing
               the expected fitting error. Our empirical evaluation demonstrates
               that the group-specific objective functions more accurately describe
               the images of the corresponding group. The results of this paper
               are especially relevant to face model tracking, as individual faces
               will not change throughout an image sequence.},
  keywords  = {facial expressions}
}

@article{radig_perception_2011,
  author   = {Radig, Bernd and Mayer, Christoph},
  title    = {Perception as a key component for cognitive technical systems},
  journal  = {Pattern Recognition and Image Analysis},
  year     = {2011},
  volume   = {21},
  pages    = {160--163},
  number   = {2},
  month    = jun,
  doi      = {10.1134/S1054661811020921},
  issn     = {1054-6618, 1555-6212},
  language = {en},
  url      = {http://link.springer.com/10.1134/S1054661811020921},
  urldate  = {2014-05-15}
}

@inproceedings{riaz_image_2009,
  author    = {Riaz, Zahid and Beetz, Michael and Radig, Bernd},
  title     = {Image Normalization for Face Recognition using {3D} Model},
  booktitle = {International Conference of Information and Communication Technologies,
               Karachi, Pakistan},
  year      = {2009},
  publisher = {{IEEE}},
  keywords  = {facial expressions}
}

@inproceedings{riaz_shape_2008,
  author    = {Riaz, Zahid and Beetz, Michael and Radig, Bernd},
  title     = {Shape Invariant Recognition of Segmented Human Faces using Eigenfaces},
  booktitle = {Proceedings of the 12th International Multitopic Conference},
  year      = {2008},
  publisher = {{IEEE}},
  keywords  = {facial expressions}
}

@inproceedings{riaz_unified_2009,
  author    = {Riaz, Zahid and Gedikli, Suat and Beetz, Michael and Radig, Bernd},
  title     = {A Unified Features Approach to Human Face Image Analysis and Interpretation},
  booktitle = {Affective Computing and Intelligent Interaction, Amsterdam, Netherlands},
  year      = {2009},
  publisher = {{IEEE}},
  note      = {Doctoral Consortium Paper},
  keywords  = {facial expressions}
}

@inproceedings{riaz_3d_2009,
  author    = {Riaz, Zahid and Mayer, Christoph and Beetz, Michael and Radig, Bernd},
  title     = {{3D} Model for Face Recognition across Facial Expressions},
  booktitle = {Biometric {ID} Management and Multimodal Communication, Madrid, Spain},
  year      = {2009},
  publisher = {Springer},
  keywords  = {facial expressions}
}

@inproceedings{riaz_facial_2009,
  author    = {Riaz, Zahid and Mayer, Christoph and Beetz, Michael and Radig, Bernd},
  title     = {Facial Expressions Recognition from Image Sequences},
  booktitle = {2nd International Conference on Cross-Modal Analysis of Speech, Gestures,
               Gaze and Facial Expressions, Prague, Czech Republic},
  year      = {2009},
  publisher = {Springer},
  keywords  = {facial expressions}
}

@inproceedings{riaz_model_2009-1,
  author    = {Riaz, Zahid and Mayer, Christoph and Beetz, Michael and Radig, Bernd},
  title     = {Model Based Analysis of Face Images for Facial Feature Extraction},
  booktitle = {Computer Analysis of Images and Patterns, Münster, Germany},
  year      = {2009},
  publisher = {Springer},
  keywords  = {facial expressions}
}

@inproceedings{riaz_multi-feature_2009,
  author    = {Riaz, Zahid and Mayer, Christoph and Sarfraz, Saquib and Beetz, Michael
               and Radig, Bernd},
  title     = {Multi-Feature Fusion in Advanced Robotics Applications},
  booktitle = {International Conference on Frontier of Information Technology},
  year      = {2009},
  publisher = {{ACM}}
}

@inproceedings{riaz_model_2009,
  author    = {Riaz, Zahid and Mayer, Christoph and Wimmer, Matthias and Beetz,
               Michael and Radig, Bernd},
  title     = {A Model Based approach for Expression Invariant Face Recognition},
  booktitle = {3rd International Conference on Biometrics, Alghero, Italy},
  year      = {2009},
  publisher = {Springer},
  keywords  = {facial expressions}
}

@article{riaz_model_2008,
  author   = {Riaz, Zahid and Mayer, Christoph and Wimmer, Matthias and Radig,
              Bernd},
  title    = {Model Based Face Recognition Across Facial Expressions},
  journal  = {Journal of Information and Communication Technology},
  year     = {2008},
  month    = dec,
  keywords = {facial expressions}
}

@phdthesis{ridder_interpretation_2000,
  author = {Ridder, Christof},
  title  = {Interpretation von Videobildfolgen zur Beobachtung artikularer Bewegung
            von Personen anhand eines generischen {3D} Objektmodells},
  school = {Technische Universität München, Fachbereich Informatik},
  year   = {2000}
}

@inproceedings{rink_feature_2013,
  author    = {Rink, Christian and Marton, Zoltan-Csaba and Seth, Daniel and Bodenmüller,
               Tim and Suppa, Michael},
  title     = {Feature based particle filter registration of {3D} surface models
               and its application in robotics},
  booktitle = {Intelligent Robots and Systems ({IROS}), 2013 {IEEE/RSJ} International
               Conference on},
  year      = {2013},
  pages     = {3187--3194},
  publisher = {{IEEE}}
}

@inproceedings{rohrbach_database_2012,
  author    = {Rohrbach, Marcus and Amin, Sikandar and Andriluka, Mykhaylo and Schiele,
               Bernt},
  title     = {A Database for Fine Grained Activity Detection of Cooking Activities},
  booktitle = {2012 {IEEE} Conference on Computer Vision and Pattern Recognition
               ({CVPR})},
  year      = {2012},
  address   = {Providence, United States},
  month     = jun,
  note      = {The dataset and relevant code is available at http://www.d2.mpi-inf.mpg.de/mpii-cooking}
}

@inproceedings{rohrbach_script_2012,
  author    = {Rohrbach, Marcus and Regneri, Michaela and Andriluka, Mykhaylo and Amin,
               Sikandar and Pinkal, Manfred and Schiele, Bernt},
  title     = {Script Data for Attribute-based Recognition of Composite Activities},
  booktitle = {Computer Vision - {ECCV} 2012 : 12th European Conference on Computer
               Vision},
  year      = {2012},
  volume    = {2012},
  series    = {Lecture Notes in Computer Science},
  address   = {Firenze, Italy},
  month     = oct,
  publisher = {Springer}
}

@inproceedings{ruiz-ugalde_fast_2011,
  author    = {Ruiz-Ugalde, Federico and Cheng, Gordon and Beetz, Michael},
  title     = {Fast adaptation for effect-aware pushing},
  booktitle = {11th {IEEE-RAS} International Conference on Humanoid Robots},
  year      = {2011},
  address   = {Bled, Slovenia},
  month     = oct
}

@inproceedings{ruiz-ugalde_prediction_2010,
  author    = {Ruiz-Ugalde, Federico and Cheng, Gordon and Beetz, Michael},
  title     = {Prediction of action outcomes using an object model},
  booktitle = {2010 {IEEE/RSJ} International Conference on Intelligent Robots and
               Systems ({IROS})},
  year      = {2010},
  address   = {Taipei, Taiwan},
  month     = oct
}

@article{rusu_acquiring_2006,
  author   = {Rusu, Radu Bogdan},
  title    = {Acquiring Models of Everyday Activities for Robotic Control in {'Current}
              {PhD} Research in Pervasive Computing'},
  journal  = {Technical Reports - University of Munich, Department of Computer
              Science, Media Informatics Group},
  year     = {2006},
  volume   = {{LMU-MI-2005-3}},
  month    = mar,
  abstract = {Intelligent sensor equipped environments can be of much greater help
              if they are capable of recognizing the actions and activities of
              their users, and inferring their intentions. An intelligent kitchen
              that recognizes what a person is looking for can highlight the target
              object. An oven noticing that the cook is on the phone can reduce
              the heating temperature, in order to avoid the meal getting burnt.
              In my dissertation research, I investigate the representation of
              models of everyday activities and study how such models can be learned
              from sensory data.},
  editor   = {Ferscha, A. and Langheinrich, M. and Schmidt, A.},
  issn     = {1862-5207}
}

@phdthesis{rusu_semantic_2009,
  author = {Rusu, Radu Bogdan},
  title  = {Semantic {3D} Object Maps for Everyday Manipulation in Human Living
            Environments},
  school = {Technische Universität München},
  year   = {2009}
}

@inproceedings{rusu_action_2008,
  author    = {Rusu, Radu Bogdan and Bandouch, Jan and Marton, Zoltan Csaba and
               Blodow, Nico and Beetz, Michael},
  title     = {Action Recognition in Intelligent Environments using Point Cloud
               Features Extracted from Silhouette Sequences},
  booktitle = {{IEEE} 17th International Symposium on Robot and Human Interactive
               Communication ({RO-MAN}), Muenchen, Germany},
  year      = {2008},
  abstract  = {In this paper we present our work on human action recognition in intelligent
               environments. We classify actions by looking at a time-sequence of
               silhouettes extracted from various camera images. By treating time
               as the third spatial dimension we generate so-called space-time shapes
               that contain rich information about the actions. We propose a novel
               approach for recognizing actions, by representing the shapes as {3D}
               point clouds and estimating feature histograms for them. Preliminary
               results show that our method robustly derives different classes of
               actions, even in the presence of large variability in the data, coming
               from different persons at different time intervals.}
}

@article{rusu_human_2009,
  author   = {Rusu, Radu Bogdan and Bandouch, Jan and Meier, Franziska and Essa,
              Irfan and Beetz, Michael},
  title    = {Human Action Recognition using Global Point Feature Histograms and
              Action Shapes},
  journal  = {Advanced Robotics journal, Robotics Society of Japan ({RSJ})},
  year     = {2009},
  abstract = {This article investigates the recognition of human actions from {3D}
              point clouds that encode the motions of people acting in sensor-distributed
              indoor environments. Data streams are time-sequences of silhouettes
              extracted from cameras in the environment. From the {2D} silhouette
              contours we generate space-time streams by continuously aligning
              and stacking the contours along the time axis as third spatial dimension.
              The space-time stream of an observation sequence is segmented into
              parts corresponding to subactions using a pattern matching technique
              based on suffix trees and interval scheduling. Then, the segmented
              space-time shapes are processed by treating the shapes as {3D} point
              clouds and estimating global point feature histograms for them. The
              resultant models are clustered using statistical analysis, and our
              experimental results indicate that the presented methods robustly
              derive different action classes. This holds despite large intra-class
              variance in the recorded datasets due to performances from different
              persons at different time intervals.}
}

@inproceedings{rusu_fast_2009-1,
  author    = {Rusu, Radu Bogdan and Blodow, Nico and Beetz, Michael},
  title     = {Fast Point Feature Histograms ({FPFH}) for {3D} Registration},
  booktitle = {Proceedings of the {IEEE} International Conference on Robotics and
               Automation ({ICRA}), Kobe, Japan, May 12-17},
  year      = {2009}
}

@inproceedings{rusu_close-range_2009,
  author    = {Rusu, Radu Bogdan and Blodow, Nico and Marton, Zoltan Csaba and Beetz,
               Michael},
  title     = {Close-range Scene Segmentation and Reconstruction of {3D} Point Cloud
               Maps for Mobile Manipulation in Human Environments},
  booktitle = {Proceedings of the {IEEE/RSJ} International Conference on Intelligent
               Robots and Systems ({IROS})},
  year      = {2009},
  address   = {St. Louis, {MO}, {USA}},
  month     = oct
}

@inproceedings{rusu_aligning_2008,
  author    = {Rusu, Radu Bogdan and Blodow, Nico and Marton, Zoltan Csaba and Beetz,
               Michael},
  title     = {Aligning Point Cloud Views using Persistent Feature Histograms},
  booktitle = {Proceedings of the 21st {IEEE/RSJ} International Conference on Intelligent
               Robots and Systems ({IROS}), Nice, France, September 22-26},
  year      = {2008}
}

@inproceedings{rusu_towards_2007,
  author    = {Rusu, Radu Bogdan and Blodow, Nico and Marton, Zoltan-Csaba and Soos,
               Alina and Beetz, Michael},
  title     = {Towards {3D} Object Maps for Autonomous Household Robots},
  booktitle = {Proceedings of the 20th {IEEE} International Conference on Intelligent
               Robots and Systems ({IROS})},
  year      = {2007},
  address   = {San Diego, {CA}, {USA}},
  abstract  = {This paper describes a mapping system that acquires {3D} object models
               of man-made indoor environments such as kitchens. The system segments
               and geometrically reconstructs cabinets with doors, tables, drawers,
               and shelves, objects that are important for robots retrieving and
               manipulating objects in these environments. The system also acquires
               models of objects of daily use such as glasses, plates, and ingredients.
               The models enable the recognition of the objects in cluttered scenes
               and the classification of newly encountered objects. Key technical
               contributions include (1) a robust, accurate, and efficient algorithm
               for constructing complete object models from {3D} point clouds constituting
               partial object views, (2) feature-based recognition procedures for
               cabinets, tables, and other task-relevant furniture objects, and
               (3) automatic inference of object instance and class signatures for
               objects of daily use that enable robots to reliably recognize the
               objects in cluttered and real task contexts. We present results from
               the sensor-based mapping of a real kitchen.}
}

@article{rusu_robots_2008,
  author   = {Rusu, Radu Bogdan and Gerkey, Brian and Beetz, Michael},
  title    = {Robots in the kitchen: Exploiting ubiquitous sensing and actuation},
  journal  = {Robotics and Autonomous Systems Journal (Special Issue on Network
              Robot Systems)},
  year     = {2008},
  abstract = {Our goal is to develop intelligent service robots that operate in
              standard human environments, automating common tasks. In pursuit
              of this goal, we follow the ubiquitous robotics paradigm, in which
              intelligent perception and control are combined with ubiquitous computing.
              By exploiting sensors and effectors in its environment, a robot can
              perform more complex tasks without becoming overly complex itself.
              Following this insight, we have developed a service robot that operates
              autonomously in a sensor-equipped kitchen. The robot learns from
              demonstration and performs sophisticated tasks in concert with the
              network of devices in its environment. We report on the design, implementation,
              and usage of this system, which is freely available for use and improvement
              by others in the research community.}
}

@inproceedings{rusu_fast_2009,
  author    = {Rusu, Radu Bogdan and Holzbach, Andreas and Blodow, Nico and Beetz,
               Michael},
  title     = {Fast Geometric Point Labeling using Conditional Random Fields},
  booktitle = {Proceedings of the {IEEE/RSJ} International Conference on Intelligent
               Robots and Systems ({IROS})},
  year      = {2009},
  address   = {St. Louis, {MO}, {USA}},
  month     = oct
}

@inproceedings{rusu_detecting_2009,
  author    = {Rusu, Radu Bogdan and Holzbach, Andreas and Bradski, Gary and Beetz,
               Michael},
  title     = {Detecting and Segmenting Objects for Mobile Manipulation},
  booktitle = {Proceedings of {IEEE} Workshop on Search in {3D} and Video ({S3DV}),
               held in conjunction with the 12th {IEEE} International Conference
               on Computer Vision ({ICCV})},
  year      = {2009},
  address   = {Kyoto, Japan},
  month     = sep
}

@inproceedings{rusu_perception_2009,
  author    = {Rusu, Radu Bogdan and Holzbach, Andreas and Diankov, Rosen and Bradski,
               Gary and Beetz, Michael},
  title     = {Perception for Mobile Manipulation and Grasping using Active Stereo},
  booktitle = {9th {IEEE-RAS} International Conference on Humanoid Robots (Humanoids)},
  year      = {2009},
  address   = {Paris, France},
  month     = dec
}

@inproceedings{rusu_extending_2007,
  author    = {Rusu, Radu Bogdan and Maldonado, Alexis and Beetz, Michael and Gerkey,
               Brian},
  title     = {Extending {Player/Stage/Gazebo} towards Cognitive Robots Acting in
               Ubiquitous Sensor-equipped Environments},
  booktitle = {Proceedings of the {IEEE} International Conference on Robotics and
               Automation ({ICRA}) Workshop for Network Robot Systems, 2007, April
               14, Rome, Italy},
  year      = {2007},
  abstract  = {Standardized middleware for autonomous robot control has proven itself
               to enable faster deployment of robots, to make robot control code
               more interchangeable, and experiments easier to replicate. Unfortunately,
               the support provided by current middleware is in most cases limited
               to what current robots do: navigation. However, as we tackle more
               ambitious service robot applications, more comprehensive middleware
               support is needed. We increasingly need the middleware to support
               ubiquitous sensing infrastructures, robot manipulation tasks, and
               cognitive capabilities. In this paper we describe and discuss current
               extensions of the {Player/Stage/Gazebo} ({P/S/G}) middleware, one
               of the most widespread used robot middlewares, of which we are active
               developers, that satisfy these requirements.}
}

@INPROCEEDINGS{rusu_player/stage_2006,

author = {Rusu, Radu Bogdan and Maldonado, Alexis and Beetz, Michael and Kranz,
Matthias and Mösenlechner, Lorenz and Holleis, Paul and Schmidt,
Albrecht},
title = {{Player/Stage} as Middleware for Ubiquitous Computing},
booktitle = {Proceedings of the 8th Annual Conference on Ubiquitous Computing
(Ubicomp 2006), Orange County California, September 17-21},
year = {2006},
abstract = {The effective development and deployment of comprehensive and heterogeneous
ubiquitous computing applications is hindered by the lack of a comprehensive
middleware infrastructure: interfaces to sensors are company specific
and sometimes even product specific. Typically, these interfaces
also do not sustain the development of robust systems that make use
of sensor data fusion. In this paper, we propose the use of {Player/Stage},
a middleware commonly used as a defacto standard by the robotics
community, as the backbone of a heterogeneous ubiquitous system.
{Player/Stage} offers many features needed in ubicomp, mostly because
dealing with uncertainty and many different sensor and actuator systems
has been a long term problem in robotics as well. We emphasize they
key features of the {Player/Stage} project, and show how ubicomp
devices can be integrated into the system, as well as how existing
devices can be used. On top of that, we present our sensor-enabled
{AwareKitchen} environment which makes use of automatic data analysis
algorithms integrated as drivers in the {Player/Stage} platform.
All our work is released as open source software under the {Player/Stage}
package, of which we are active developers.}

}

@INPROCEEDINGS{rusu_interpretation_2008,

author = {Rusu, Radu Bogdan and Marton, Zoltan Csaba and Blodow, Nico and Beetz,
Michael},
title = {Interpretation of Urban Scenes based on Geometric Features},
booktitle = {Proceedings of the 21st {IEEE/RSJ} International Conference on Intelligent
Robots and Systems ({IROS)} Workshop on {3D} Mapping, Nice, France,
September 26},
year = {2008},
note = {Invited paper}

}

@INPROCEEDINGS{rusu_learning_2008,

author = {Rusu, Radu Bogdan and Marton, Zoltan Csaba and Blodow, Nico and Beetz,
Michael},
title = {Learning Informative Point Classes for the Acquisition of Object
Model Maps},
booktitle = {Proceedings of the 10th International Conference on Control, Automation,
Robotics and Vision ({ICARCV)}, Hanoi, Vietnam, December 17-20},
year = {2008}

}

@INPROCEEDINGS{rusu_persistent_2008,

author = {Rusu, Radu Bogdan and Marton, Zoltan Csaba and Blodow, Nico and Beetz,
Michael},
title = {Persistent Point Feature Histograms for {3D} Point Clouds},
booktitle = {Proceedings of the 10th International Conference on Intelligent Autonomous
Systems ({IAS-10)}, Baden-Baden, Germany},
year = {2008},
abstract = {This paper proposes a novel way of characterizing the local geometry
of {3D} points, using persistent feature histograms. The relationships
between the neighbors of a point are analyzed and the resulted values
are stored in a 16-bin histogram. The histograms are pose and point
cloud density invariant and cope well with noisy datasets. We show
that geometric primitives have unique signatures in this feature
space, preserved even in the presence of additive noise. To extract
a compact subset of points which characterizes a point cloud dataset,
we perform an in-depth analysis of all point feature histograms using
different distance metrics. Preliminary results show that point clouds
can be roughly segmented based on the uniqueness of geometric primitives
feature histograms. We validate our approach on datasets acquired
from laser sensors in indoor (kitchen) environments.}

}

@ARTICLE{rusu_towards_2008,

author = {Rusu, Radu Bogdan and Marton, Zoltan Csaba and Blodow, Nico and Dolha,
Mihai and Beetz, Michael},
title = {Towards {3D} Point Cloud Based Object Maps for Household Environments},
journal = {Robotics and Autonomous Systems Journal (Special Issue on Semantic
Knowledge in Robotics)},
year = {2008},
volume = {56},
pages = {927--941},
number = {11},
month = nov

}

@INPROCEEDINGS{rusu_functional_2008,

author = {Rusu, Radu Bogdan and Marton, Zoltan Csaba and Blodow, Nico and Dolha,
Mihai Emanuel and Beetz, Michael},
title = {Functional Object Mapping of Kitchen Environments},
booktitle = {Proceedings of the 21st {IEEE/RSJ} International Conference on Intelligent
Robots and Systems ({IROS)}, Nice, France, September 22-26},
year = {2008}

}

@INPROCEEDINGS{rusu_model-based_2009,

author = {Rusu, Radu Bogdan and Marton, Zoltan Csaba and Blodow, Nico and Holzbach,
Andreas and Beetz, Michael},
title = {Model-based and Learned Semantic Object Labeling in {3D} Point Cloud
Maps of Kitchen Environments},
booktitle = {Proceedings of the {IEEE/RSJ} International Conference on Intelligent
Robots and Systems ({IROS)}},
year = {2009},
address = {St. Louis, {MO}, {USA}},
month = oct

}

@INPROCEEDINGS{rusu_laser-based_2009,

author = {Rusu, Radu Bogdan and Meeussen, Wim and Chitta, Sachin and Beetz,
Michael},
title = {Laser-based Perception for Door and Handle Identification},
booktitle = {Proceedings of the International Conference on Advanced Robotics
({ICAR)}},
year = {2009},
address = {Munich},
month = jun,
note = {Best Paper Award}

}

@INPROCEEDINGS{rusu_real-time_2009,

author = {Rusu, Radu Bogdan and Sucan, Ioan Alexandru and Gerkey, Brian and
Chitta, Sachin and Beetz, Michael and Kavraki, Lydia E.},
title = {Real-time Perception-Guided Motion Planning for a Personal Robot},
booktitle = {Proceedings of the {IEEE/RSJ} International Conference on Intelligent
Robots and Systems ({IROS)}},
year = {2009},
pages = {4245--4252},
address = {St. Louis, {MO}, {USA}},
month = oct

}

@INPROCEEDINGS{rusu_leaving_2008,

author = {Rusu, Radu Bogdan and Sundaresan, Aravind and Morisset, Benoit and
Agrawal, Motilal and Beetz, Michael},
title = {Leaving Flatland: Realtime {3D} Stereo Semantic Reconstruction},
booktitle = {Proceedings of the International Conference on Intelligent Robotics
and Applications ({ICIRA)} 2008, October 15-17, Wuhan, China},
year = {2008}

}

@INPROCEEDINGS{rusu_realtime_2008,

author = {Rusu, Radu Bogdan and Sundaresan, Aravind and Morisset, Benoit and
Agrawal, Motilal and Beetz, Michael and Konolige, Kurt},
title = {Realtime Extended {3D} Reconstruction from Stereo for Navigation},
booktitle = {Proceedings of the 21st {IEEE/RSJ} International Conference on Intelligent
Robots and Systems ({IROS)} Workshop on {3D} Mapping, Nice, France,
September 26},
year = {2008},
note = {Invited paper}

}

@ARTICLE{rusu_leaving_2009,

author = {Rusu, Radu Bogdan and Sundaresan, Aravind and Morisset, Benoit and
Hauser, Kris and Agrawal, Motilal and Latombe, Jean-Claude and Beetz,
Michael},
title = {Leaving Flatland: Efficient Real-Time {3D} Navigation},
journal = {Journal of Field Robotics ({JFR)}},
year = {2009}

}

@INPROCEEDINGS{ruhr_structured_2008,

author = {Rühr, Thomas and Pangercic, Dejan and Beetz, Michael},
title = {Structured Reactive Controllers and Transformational Planning for
Manufacturing},
booktitle = {Proceedings of the 13th {IEEE} International Conference on Emerging
Technologies and Factory Automation ({ETFA)}, Hamburg, Germany, September
15-18},
year = {2008}

}

@INPROCEEDINGS{ruhr_generalized_2012,

author = {Rühr, Thomas and Sturm, Jürgen and Pangercic, Dejan and Beetz, Michael
and Cremers, Daniel},
title = {A Generalized Framework for Opening Doors and Drawers in Kitchen
Environments},
booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
year = {2012},
address = {St. Paul, {MN}, {USA}},
month = may

}

@INPROCEEDINGS{sachenbacher_test_2008,

author = {Sachenbacher, Martin and Maier, Paul},
title = {Test Strategy Generation using Quantified {CSPs}},
booktitle = {Proc. International Conference on Principles and Practice of Constraint
Programming ({CP'08)}},
year = {2008},
note = {Accepted for publication}

}

@INPROCEEDINGS{sachenbacher_model-based_2008,

author = {Sachenbacher, Martin and Schwoon, Stefan},
title = {Model-based Testing Using Quantified {CSPs:} A Map},
booktitle = {Proc. Workshop on Model-based Systems ({MBS-2008)}},
year = {2008},
pages = {37--41},
address = {Patras, Greece}

}

@INPROCEEDINGS{sachenbacher_model-based_2008-1,

author = {Sachenbacher, Martin and Schwoon, Stefan},
title = {Model-based Test Generation Using Quantified {CSPs}},
booktitle = {Proc. International Workshop on Principles of Diagnosis ({DX'08)}},
year = {2008},
note = {Accepted for publication}

}

@INPROCEEDINGS{saito_semantic_2011,

author = {Saito, Manabu and Chen, Haseru and Okada, Kei and Inaba, Masayuki
and Kunze, Lars and Beetz, Michael},
title = {Semantic Object Search in Large-scale Indoor Environments},
booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems
({IROS)}, Workshop on Active Semantic Perception and Object Search
in the Real World},
year = {2011},
address = {San Francisco, {CA}, {USA}},
month = sep

}

@INPROCEEDINGS{sarfraz_bayesian_2009,

author = {Sarfraz, M. S. and Saeed, A. and Khan, M. H. and Riaz, Zahid},
title = {Bayesian Prior Models for Vehicle Make and Model Recognition},
booktitle = {International Conference on Frontier of Information Technology},
year = {2009},
publisher = {{ACM}}

}

@PHDTHESIS{schmitt_vision-based_2004,

author = {Schmitt, Thorsten},
title = {Vision-based Probabilistic State Estimation for Cooperating autonomous
Robots},
school = {Department of Informatics, Technische Universität München},
year = {2004},
url = {http://tumb1.biblio.tu-muenchen.de/publ/diss/in/2004/schmitt.html}

}

@INPROCEEDINGS{schmitt_designing_2003,

author = {Schmitt, Thorsten and Beetz, Michael},
title = {Designing Probabilistic State Estimators for Autonomous Robot Control},
booktitle = {{IEEE/RSJ} Intl. Conf. on Intelligent Robots and Systems ({IROS)}},
year = {2003},
abstract = {This paper sketches and discusses design options for complex probabilistic
state estimators and investigates their interactions and their impact
on performance. We consider, as an example, the estimation of game
states in autonomous robot soccer. We show that many factors other
than the choice of algorithms determine the performance of the estimation
systems. We propose empirical investigations and learning as necessary
tools for the development of successful state estimation systems.}

}

@INPROCEEDINGS{schmitt_watch_2002,

author = {Schmitt, Thorsten and Beetz, Michael and Hanek, Robert and Buck,
Sebastian},
title = {Watch their Moves: Applying Probabilistic Multiple Object Tracking
to Autonomous Robot Soccer},
booktitle = {The Eighteenth National Conference on Artificial Intelligence},
year = {2002},
address = {Edmonton, Canada},
abstract = {In many autonomous robot applications robots must be capable of estimating
the positions and motions of moving objects in their environments.
In this paper, we apply probabilistic multiple object tracking to
estimating the positions of opponent players in autonomous robot
soccer. We extend an existing tracking algorithm to handle multiple
mobile sensors with uncertain positions, discuss the specification
of probabilistic models needed by the algorithm, and describe the
required vision-interpretation algorithms. The multiple object tracking
has been successfully applied throughout the {RoboCup} 2001 world
championship.}

}

@INPROCEEDINGS{schmitt_agilo_2001,

author = {Schmitt, Thorsten and Buck, Sebastian and Beetz, Michael},
title = {{AGILO} {RoboCuppers} 2001: Utility- and Plan-based Action Selection
based on Probabilistically Estimated Game Situations},
booktitle = {5th International Workshop on {RoboCup} (Robot World Cup Soccer Games
and Conferences)},
year = {2001},
editor = {Stone, P. and Balch, T. and Kraetzschmar, G.},
series = {Lecture Notes in Computer Science},
publisher = {Springer Verlag},
abstract = {This paper describes the {AGILO} {RoboCuppers}, the {RoboCup} team
of the image understanding group ({FG} {BV)} at the Technische Universität
München. With a team of four Pioneer I robots, all equipped with
{CCD} camera and a single board computer, we've participated in all
international middle size league tournaments from 1998 until 2001.
We use a modular approach of concurrent subprograms for image processing,
self localization, object tracking, action selection, path planning
and basic robot control. A fast feature extraction process provides
the data necessary for the on-board scene interpretation. All robot
observations are fused into a single environmental model, which forms
the basis for action selection, path planning and low-level robot
control.}

}

@INPROCEEDINGS{schmitt_developing_2003,

author = {Schmitt, Thorsten and Hanek, Robert and Beetz, Michael},
title = {Developing Comprehensive State Estimators for Robot Soccer},
booktitle = {{RoboCup} International Symposium 2003},
year = {2003},
series = {Padova},
abstract = {This paper sketches and discusses design options for complex probabilistic
state estimators and investigates their interactions and their impact
on performance. We consider, as an example, the estimation of game
states in autonomous robot soccer. We show that many factors other
than the choice of algorithms determine the performance of the estimation
systems. We propose empirical investigations and learning as necessary
tools for the development of successful state estimation systems.}

}

@ARTICLE{schmitt_cooperative_2002,

author = {Schmitt, Thorsten and Hanek, Robert and Beetz, Michael and Buck,
Sebastian and Radig, Bernd},
title = {Cooperative Probabilistic State Estimation for Vision-based Autonomous
Mobile Robots},
journal = {{IEEE} Transactions on Robotics and Automation},
year = {2002},
volume = {18},
number = {5},
month = oct,
abstract = {With the services that autonomous robots are to provide becoming more
demanding, the states that the robots have to estimate become more
complex. In this article, we develop and analyze a probabilistic,
vision-based state estimation method for individual, autonomous robots.
This method enables a team of mobile robots to estimate their joint
positions in a known environment and track the positions of autonomously
moving objects. The state estimators of different robots cooperate
to increase the accuracy and reliability of the estimation process.
This cooperation between the robots enables them to track temporarily
occluded objects and to faster recover their position after they
have lost track of it. The method is empirically validated based
on experiments with a team of physical robots.}

}

@INPROCEEDINGS{schmitt_cooperative_2001,

author = {Schmitt, Thorsten and Hanek, Robert and Buck, Sebastian and Beetz,
Michael},
title = {Cooperative Probabilistic State Estimation for Vision-based Autonomous
Mobile Robots},
booktitle = {Proc. of the {IEEE/RSJ} International Conference on Intelligent Robots
and Systems ({IROS)}},
year = {2001},
pages = {1630--1638},
address = {Maui, Hawaii},
abstract = {With the services that autonomous robots are to provide becoming more
demanding, the states that the robots have to estimate become more
complex. In this paper, we develop and analyze a probabilistic, vision-based
state estimation method for individual, autonomous robots. This
method enables a team of mobile robots to estimate their joint positions
in a known environment and track the positions of autonomously moving
objects. The state estimators of different robots cooperate to increase
the accuracy and reliability of the estimation process. This cooperation
between the robots enables them to track temporarily occluded objects
and to faster recover their position after they have lost track of
it. The method is empirically validated based on experiments with
a team of physical robots.}

}

@INPROCEEDINGS{schmitt_cooperative_2001-1,

author = {Schmitt, Thorsten and Hanek, Robert and Buck, Sebastian and Beetz,
Michael},
title = {Cooperative Probabilistic State Estimation for Vision-based Autonomous
Soccer Robots},
booktitle = {{DAGM} Symposium},
year = {2001},
volume = {2191},
series = {Lecture Notes in Computer Science},
pages = {321--328},
publisher = {Springer}

}

@INPROCEEDINGS{schmitt_cooperative_2001-2,

author = {Schmitt, Thorsten and Hanek, Robert and Buck, Sebastian and Beetz,
Michael},
title = {Cooperative Probabilistic State Estimation for Vision-based Autonomous
Soccer Robots},
booktitle = {{RoboCup} International Symposium 2001},
year = {2001},
address = {Seattle, {USA}},
abstract = {With the services that autonomous robots are to provide becoming more
demanding, the states that the robots have to estimate become more
complex. In this paper, we develop and analyze a probabilistic, vision-based
state estimation method for individual, autonomous robots. This
method enables a team of mobile robots to estimate their joint positions
in a known environment and track the positions of autonomously moving
objects. The state estimators of different robots cooperate to increase
the accuracy and reliability of the estimation process. This cooperation
between the robots enables them to track temporarily occluded objects
and to faster recover their position after they have lost track of
it. The method is empirically validated based on experiments with
a team of physical robots.}

}

@INPROCEEDINGS{schroter_acquiring_2004,

author = {Schröter, Derik and Beetz, Michael},
title = {Acquiring Models of Rectangular Objects for Robot Maps},
booktitle = {Proc. of {IEEE} International Conference on Robotics and Automation
({ICRA)}, New {Orleans/USA}},
year = {2004},
abstract = {State-of-the-art robot mapping approaches are capable of acquiring
impressively accurate {2D} and {3D} models of their environments.
To the best of our knowledge few of them can acquire models of task-relevant
objects. In this paper, we introduce a novel method for acquiring
models of task-relevant objects from stereo images. The proposed
algorithm applies methods from projective geometry and works for
rectangular objects, which are, in office- and museum-like environments,
the most commonly found subclass of geometric objects. The method
is shown to work accurately and for a wide range of viewing angles
and distances.}

}

@INPROCEEDINGS{schroter_rg_2004,

author = {Schröter, Derik and Beetz, Michael},
title = {{RG} Mapping: Building Object-Oriented Representations of Structured
Human Environments},
booktitle = {6-th Open Russian-German Workshop on Pattern Recognition and Image
Understanding ({OGRW)}, {Katun/Russia}},
year = {2004},
note = {Best Paper Award},
abstract = {We present a new approach to mapping of indoor environments, where
the environment structure in terms of regions and gateways is automatically
extracted, while the robot explores. Objects, both in {2D} and {3D},
are modelled explicitly in those maps and allow for robust localization.
We refer to those maps as object-oriented environment representations
or Region \& Gateway Maps. Region \& Gateway Mapping is capable of
acquiring very compact, structured, and semantically annotated maps.
We show that those maps can be built online and that they are extremely
useful in plan-based control of autonomous robots as well as for
robot-human interaction.}

}

@INPROCEEDINGS{schroter_rg_2002,

author = {Schröter, Derik and Beetz, Michael and Gutmann, J.-S.},
title = {{RG} Mapping: Learning Compact and Structured {2D} Line Maps of Indoor
Environments},
booktitle = {11th {IEEE} International Workshop on Robot and Human Interactive
Communication ({ROMAN)}, {Berlin/Germany}},
year = {2002},
abstract = {In this paper we present Region \& Gateway ({RG)} Mapping, a novel
approach to laser-based {2D} line mapping of indoor environments.
{RG} Mapping is capable of acquiring very compact, structured, and
semantically annotated maps. We present and empirically analyze the
method based on map acquisition experiments with autonomous mobile
robots. The experiments show that {RG} mapping drastically compresses
the data contained in line scan maps without substantial loss of
accuracy.}

}

@INPROCEEDINGS{schroter_detection_2004,

author = {Schröter, Derik and Weber, T. and Beetz, Michael and Radig, Bernd},
title = {Detection and Classification of Gateways for the Acquisition of Structured
Robot Maps},
booktitle = {Proc. of 26th Pattern Recognition Symposium ({DAGM)}, {Tübingen/Germany}},
year = {2004},
abstract = {The automatic acquisition of structured object maps requires sophisticated
perceptual mechanisms that enable the robot to recognize the objects
that are to be stored in the robot map. This paper investigates a
particular object recognition problem: the automatic detection and
classification of gateways in office environments based on laser
range data. We will propose, discuss, and empirically evaluate a
sensor model for crossing gateways and different approaches to gateway
classification including simple maximum classifiers and {HMM-based}
classification of observation sequences.}

}

@INPROCEEDINGS{schubo_subsequent_2008,

author = {Schubö, Anna and Maldonado, Alexis and Stork, Sonja and Beetz, Michael},
title = {Subsequent Actions Influence Motor Control Parameters of a Current
Grasping Action},
booktitle = {{IEEE} 17th International Symposium on Robot and Human Interactive
Communication ({RO-MAN)}, Muenchen, Germany},
year = {2008}

}

@INPROCEEDINGS{schuller_detection_2008,

author = {Schuller, Björn and Wimmer, Matthias and Arsic, Dejan and Moosmayr,
Tobias and Rigoll, Gerhard},
title = {Detection of Security Related Affect and Behaviour in Passenger Transport},
booktitle = {Proc. of the 9th {INTERSPEECH}},
year = {2008},
address = {Brisbane, Australia},
month = sep,
publisher = {{ISCA}, {ASSTA}}

}

@INPROCEEDINGS{schuller_audiovisual_2007,

author = {Schuller, Björn and Wimmer, Matthias and Arsic, Dejan and Rigoll,
Gerhard and Radig, Bernd},
title = {Audiovisual Behavior Modeling by Combined Feature Spaces},
booktitle = {{IEEE} International Conference on Acoustics, Speech, and Signal
Processing ({ICASSP)}},
year = {2007},
volume = {2},
pages = {733--736},
address = {Honolulu, Hawaii, {USA}},
month = apr,
isbn = {1-4244-0728-1}

}

@INPROCEEDINGS{schuller_brute-forcing_2008,

author = {Schuller, Björn and Wimmer, Matthias and Mösenlechner, Lorenz and
Kern, Christian and Rigoll, Gerhard},
title = {Brute-Forcing Hierarchical Functionals for Paralinguistics: a Waste
of Feature Space?},
booktitle = {Proceedings of {ICASSP} 2008},
year = {2008},
address = {Las Vegas, Nevada, {USA}},
month = apr

}

@INPROCEEDINGS{schultz_emotionale_2007,

author = {Schultz, R. and Oertel, K. and Peter, Christian and Wimmer, Matthias
and Voskamp, Jörg and Urban, B.},
title = {Emotionale Aspekte in Produktevaluationen},
booktitle = {2. Kongress Multimediatechnik},
year = {2007},
address = {Wismar, Germany},
month = oct

}

@INPROCEEDINGS{schumacher_agentenbasiertes_2001,

author = {Schumacher, Jürgen and Beetz, Michael},
title = {Ein agentenbasiertes Verfahren zur effizienten Beantwortung von Lieferterminanfragen
in einer Supply-Chain},
booktitle = {Proceedings der Verbundtagung {VertIS} 2001},
year = {2001}

}

@INPROCEEDINGS{schuster_learning_2012,

author = {Schuster, Martin and Jain, Dominik and Tenorth, Moritz and Beetz,
Michael},
title = {Learning Organizational Principles in Human Environments},
booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA)}},
year = {2012},
pages = {3867--3874},
address = {St. Paul, {MN}, {USA}},
month = may

}

@PHDTHESIS{siles_canales_automated_2014,

author = {Siles Canales, Francisco},
title = {Automated Semantic Annotation of Football Games from {TV} Broadcast},
school = {Technische Universität München},
year = {2014},
address = {München},
abstract = {The main objective of this thesis is to investigate mechanisms for
the creation of a computational system, for the automated semantic
annotation of football games from {TV} broadcast. An abstract model
is used for the representation of football, and for storing and retrieving
relevant information for answering football-related queries. The
principal hypothesis is that the model can be populated, based on
the trajectories of the targets on the field of play.},
keywords = {soccer},
url = {http://nbn-resolving.de/urn/resolver.pl?urn:nbn:de:bvb:91-diss-20140214-1169627-0-7}

}

@INPROCEEDINGS{sosnowski_mirror_2010,

author = {Sosnowski, Stefan and Mayer, Christoph and Kühnlenz, Kolja and Radig,
Bernd},
title = {Mirror my emotions! Combining facial expression analysis and synthesis
on a robot},
booktitle = {The Thirty Sixth Annual Convention of the Society for the Study of
Artificial Intelligence and Simulation of Behaviour ({AISB2010)}},
year = {2010},
keywords = {facial expressions}

}

@PHDTHESIS{stulp_tailoring_2007,

author = {Stulp, Freek},
title = {Tailoring Robot Actions to Task Contexts using Action Models},
school = {Technische Universität München},
year = {2007},
abstract = {In motor control, high-level goals must be expressed in terms of low-level
motor commands. An effective approach to bridge this gap, widespread
in both nature and robotics, is to acquire a set of temporally extended
actions, each designed for specific goals and task contexts. An action
selection module then selects the appropriate action in a given situation.
In this approach, high-level goals are mapped to actions, and actions
produce streams of motor commands. The first mapping is often ambiguous,
as several actions or action parameterizations can achieve the same
goal. Instead of choosing an arbitrary action or parameterization,
the robot should select those that best fulfill some pre-specified
requirement, such as minimal execution duration, successful execution,
or coordination of actions with others. The key to being able to
perform this selection lies in prediction. By predicting the performance
of different actions and action parameterizations, the robot can
also predict which of them best meets the requirement. Action models,
which have many similarities with human forward models, enable robots
to make such predictions. In this dissertation, we introduce a computational
model for the acquisition and application of action models. Robots
first learn action models from observed experience, and then use
them to optimize their performance with the following methods: 1)
{\textbackslash}{emphSubgoal} refinement, which enables robots to
optimize actions in action sequences by predicting which action parameterization
leads to the best performance. 2) {\textbackslash}{emphCondition}
refinement and {\textbackslash}emphsubgoal assertion, with which
robots can adapt existing actions to novel task contexts and goals
by predicting when action execution will fail. 3) {\textbackslash}{emphImplicit}
coordination, in which multiple robots globally coordinate their
actions, by locally making predictions about the performance of other
robots. The acquisition and applications of action models have been
realized and empirically evaluated in three robotic domains: the
{\textbackslash}pioneer robots of our {RoboCup} mid-size league team,
a simulated B21 in a kitchen environment, and a {PowerCube} robot
arm. The main principle behind this approach is that in robot controller
design, knowledge that robots learn themselves from observed experience
complements well the abstract knowledge that humans specify.},
url = {http://mediatum2.ub.tum.de/node?id=617105}

}

@ARTICLE{stulp_combining_2008,

author = {Stulp, Freek and Beetz, Michael},
title = {Combining Declarative, Procedural and Predictive Knowledge to Generate
and Execute Robot Plans Efficiently and Robustly},
journal = {Robotics and Autonomous Systems Journal (Special Issue on Semantic
Knowledge)},
year = {2008},
abstract = {One of the main challenges in motor control is expressing high-level
goals in terms of low-level actions. To do so effectively, motor
control systems must reason about actions at different levels of
abstraction. Grounding high-level plans in low-level actions is essential
semantic knowledge for plan-based control of real robots. We present
a robot control system that uses declarative, procedural and predictive
knowledge to generate, execute and optimize plans. Declarative knowledge is
represented in {PDDL}, durative actions constitute procedural knowledge,
and predictive knowledge is learned by observing action executions.
We demonstrate how learned predictive knowledge enables robots to
autonomously optimize plan execution with respect to execution duration
and robustness in real-time. The approach is evaluated in two different
robotic domains.}

}

@INPROCEEDINGS{stulp_learning_2008,

author = {Stulp, Freek and Beetz, Michael},
title = {Learning Predictive Knowledge to Optimize Robot Motor Control},
booktitle = {International Conference on Cognitive Systems ({CogSys} 2008)},
year = {2008}

}

@ARTICLE{stulp_refining_2008,

author = {Stulp, Freek and Beetz, Michael},
title = {Refining the execution of abstract actions with learned action models},
journal = {Journal of Artificial Intelligence Research ({JAIR)}},
year = {2008},
volume = {32},
month = jun

}

@INPROCEEDINGS{stulp_action_2006,

author = {Stulp, Freek and Beetz, Michael},
title = {Action Awareness – Enabling Agents to Optimize, Transform, and Coordinate
Plans},
booktitle = {Proceedings of the Fifth International Joint Conference on Autonomous
Agents and Multiagent Systems ({AAMAS)}},
year = {2006},
abstract = {As agent systems are solving more and more complex tasks in increasingly
challenging domains, the systems themselves are becoming more complex
too, often compromising their adaptivity and robustness. A promising
approach to solve this problem is to provide agents with reflective
capabilities. Agents that can reflect on the effects and expected
performance of their actions, are more aware and knowledgeable of
their capabilities and shortcomings. In this paper, we introduce
a computational model for what we call {\textbackslash}emphaction
awareness. To achieve this awareness, agents learn predictive action
models from observed experience. This knowledge is then used to optimize,
transform and coordinate plans. We apply this computational model
to a number of typical scenarios from robotic soccer. Various experiments
on real robots demonstrate that action awareness enables the robots
to improve the performance of their plans substantially.}

}

@BOOK{stulp_optimized_2005,

title = {Optimized Execution of Action Chains through Subgoal Refinement},
year = {2005},
author = {Stulp, Freek and Beetz, Michael},
note = {{ICAPS} Workshop {“Plan} Execution: A Reality Check”},
abstract = {In this paper we propose a novel computation model for the execution
of abstract action chains. In this computation model a robot first
learns situation-specific performance models of abstract actions.
It then uses these models to automatically specialize the abstract
actions for their execution in a given action chain. This specialization
results in refined chains that are optimized for performance. As
a side effect this behavior optimization also appears to produce
action chains with seamless transitions between actions.},
url = {http://ic.arc.nasa.gov/people/sailesh/icaps2005wksp/}

}

@INPROCEEDINGS{stulp_optimized_2005-1,

author = {Stulp, Freek and Beetz, Michael},
title = {Optimized Execution of Action Chains Using Learned Performance Models
of Abstract Actions},
booktitle = {Proceedings of the Nineteenth International Joint Conference on Artificial
Intelligence ({IJCAI)}},
year = {2005},
abstract = {Many plan-based autonomous robot controllers generate chains of abstract
actions in order to achieve complex, dynamically changing, and possibly
interacting goals. The execution of these action chains often results
in robot behavior that shows abrupt transitions between subsequent
actions, causing suboptimal performance. The resulting motion patterns
are so characteristic for robots that people imitating robotic behavior
will do so by making abrupt movements between actions. In this paper
we propose a novel computation model for the execution of abstract
action chains. In this computation model a robot first learns situation-specific
performance models of abstract actions. It then uses these models
to automatically specialize the abstract actions for their execution
in a given action chain. This specialization results in refined chains
that are optimized for performance. As a side effect this behavior
optimization also appears to produce action chains with seamless
transitions between actions.}

}

@BOOK{stulp_tailoring_2005,

title = {Tailoring Action Parameterizations to Their Task Contexts},
year = {2005},
author = {Stulp, Freek and Beetz, Michael},
note = {{IJCAI} Workshop ``Agents in Real-Time and Dynamic Environments''},
abstract = {Solving complex tasks successfully and efficiently not only depends
on \emph{what} you do, but also \emph{how}
you do it. Different task contexts have different performance measures,
and thus require different ways of executing an action to optimize
performance. Simply adding new actions that are tailored to perform
well within a specific task context makes planning or action selection
programming more difficult, as generality and adaptivity is lost.
Rather, existing actions should be parametrized such that they optimize
the task-specific performance measure. In this paper we propose a
novel computation model for the execution of abstract action chains.
In this computation model, a robot first learns situation-specific
performance models of abstract actions. It then uses these models
to automatically specialize the abstract actions for their execution
in a given action chain. This specialization results in refined chains
that are optimized for performance. As a side effect this behavior
optimization also appears to produce action chains with seamless
transitions between actions.},
url = {http://www.tzi.de/~visser/ijcai05/}

}

@INPROCEEDINGS{stulp_action-related_2009,

author = {Stulp, Freek and Fedrizzi, Andreas and Beetz, Michael},
title = {Action-Related Place-Based Mobile Manipulation},
booktitle = {Proceedings of the International Conference on Intelligent Robots
and Systems ({IROS})},
year = {2009},
pages = {3115--3120}

}

@INPROCEEDINGS{stulp_learning_2009,

author = {Stulp, Freek and Fedrizzi, Andreas and Beetz, Michael},
title = {Learning and Performing Place-based Mobile Manipulation},
booktitle = {Proceedings of the 8th International Conference on Development and
Learning ({ICDL})},
year = {2009},
pages = {1--7}

}

@ARTICLE{stulp_learning_2012,

author = {Stulp, Freek and Fedrizzi, Andreas and Mösenlechner, Lorenz and Beetz,
Michael},
title = {Learning and Reasoning with Action-Related Places for Robust Mobile
Manipulation},
journal = {Journal of Artificial Intelligence Research ({JAIR})},
year = {2012},
volume = {43},
pages = {1--42}

}

@INPROCEEDINGS{stulp_combining_2009,

author = {Stulp, Freek and Fedrizzi, Andreas and Zacharias, Franziska and Tenorth,
Moritz and Bandouch, Jan and Beetz, Michael},
title = {Combining Analysis, Imitation, and Experience-based Learning to Acquire
a Concept of Reachability},
booktitle = {9th {IEEE-RAS} International Conference on Humanoid Robots},
year = {2009},
pages = {161--167}

}

@INPROCEEDINGS{stulp_evaluating_2004,

author = {Stulp, Freek and Gedikli, Suat and Beetz, Michael},
title = {Evaluating Multi-Agent Robotic Systems Using Ground Truth},
booktitle = {Proceedings of the Workshop on Methods and Technology for Empirical
Evaluation of Multi-agent Systems and Multi-robot Teams ({MTEE})},
year = {2004},
abstract = {A thorough empirical evaluation of multi-agent robotic systems is
greatly facilitated if the \emph{true} state of the world
over time can be obtained. The accuracy of the beliefs as well as
the overall performance can then be measured objectively and efficiently.
In this paper we present a system for determining the \emph{ground
truth} state of the world, similar to the ceiling cameras used
in {RoboCup} small-size league. We have used this ground truth data
to evaluate the accuracy of the self- and object-localization of
the robots in our {RoboCup} mid-size league team, the Agilo {RoboCuppers}.
More complex models of the state estimation module have also been
learned. These models provide insight into the workings and shortcomings
of this module, and can be used to improve it.}

}

@INPROCEEDINGS{stulp_implicit_2006,

author = {Stulp, Freek and Isik, Michael and Beetz, Michael},
title = {Implicit Coordination in Robotic Teams using Learned Prediction Models},
booktitle = {Proceedings of the {IEEE} International Conference on Robotics and
Automation ({ICRA})},
year = {2006},
pages = {1330--1335},
abstract = {Many application tasks require the cooperation of two or more robots.
Humans are good at cooperation in shared workspaces, because they
anticipate and adapt to the intentions and actions of others. In
contrast, multi-agent and multi-robot systems rely on communication
to exchange their intentions. This causes problems in domains where
perfect communication is not guaranteed, such as rescue robotics,
autonomous vehicles participating in traffic, or robotic soccer.
In this paper, we introduce a computational model for implicit coordination,
and apply it to a typical coordination task from robotic soccer:
regaining ball possession. The computational model specifies that
performance prediction models are necessary for coordination, so
we learn them off-line from observed experience. By taking the perspective
of the team mates, these models are then used to predict utilities
of others, and optimize a shared performance model for joint actions.
In several experiments conducted with our robotic soccer team, we
evaluate the performance of implicit coordination.}

}

@INPROCEEDINGS{stulp_agilo_2004,

author = {Stulp, Freek and Kirsch, Alexandra and Gedikli, Suat and Beetz, Michael},
title = {{AGILO} {RoboCuppers} 2004},
booktitle = {{RoboCup} International Symposium 2004},
year = {2004},
address = {Lisbon, Portugal},
month = jul,
abstract = {The Agilo {RoboCup} team is the primary platform for our research
on the semi-automatic acquisition of visuo-motoric plans. It is realized
using inexpensive, off the shelf, easily extendible hardware components
and a standard software environment. The control system of an autonomous
soccer robot consists of a probabilistic game state estimator and
a situated action selection module. The game state estimator computes
the robot's belief state with respect to the current game situation.
The action selection module selects actions according to specified
goals as well as learned experiences. Automatic learning techniques
made it possible to develop fast and skillful routines for approaching
the ball, assigning roles, and performing coordinated plays.}

}

@INPROCEEDINGS{stulp_seamless_2007,

author = {Stulp, Freek and Koska, Wolfram and Maldonado, Alexis and Beetz,
Michael},
title = {Seamless Execution of Action Sequences},
booktitle = {Proceedings of the {IEEE} International Conference on Robotics and
Automation ({ICRA})},
year = {2007},
pages = {3687--3692},
abstract = {One of the most notable and recognizable features of robot motion
is the abrupt transitions between actions in action sequences. In
contrast, humans and animals perform sequences of actions efficiently,
and with seamless transitions between subsequent actions. This smoothness
is not a goal in itself, but a side-effect of the evolutionary optimization
of other performance measures. In this paper, we argue that such
jagged motion is an inevitable consequence of the way human designers
and planners reason about abstract actions. We then present subgoal
refinement, a procedure that optimizes action sequences. Subgoal
refinement determines action parameters that are not relevant to
why the action was selected, and optimizes these parameters with
respect to expected execution performance. This performance is computed
using action models, which are learned from observed experience.
We integrate subgoal refinement in an existing planning system, and
demonstrate how requiring optimal performance causes smooth motion
in three robotic domains.}

}

@INPROCEEDINGS{stulp_compact_2009-1,

author = {Stulp, Freek and Kresse, Ingo and Maldonado, Alexis and Ruiz, Federico
and Fedrizzi, Andreas and Beetz, Michael},
title = {Compact Models of Human Reaching Motions for Robotic Control in Everyday
Manipulation Tasks},
booktitle = {Proceedings of the 8th International Conference on Development and
Learning ({ICDL})},
year = {2009}

}

@INPROCEEDINGS{stulp_compact_2009,

author = {Stulp, Freek and Oztop, Erhan and Pastor, Peter and Beetz, Michael
and Schaal, Stefan},
title = {Compact Models of Motor Primitive Variations for Predictable Reaching
and Obstacle Avoidance},
booktitle = {9th {IEEE-RAS} International Conference on Humanoid Robots},
year = {2009}

}

@INPROCEEDINGS{stulp_feature_2006,

author = {Stulp, Freek and Pflüger, Mark and Beetz, Michael},
title = {Feature Space Generation using Equation Discovery},
booktitle = {Proceedings of the 29th German Conference on Artificial Intelligence
({KI})},
year = {2006}

}

@ARTICLE{stulp_implicit_2010,

author = {Stulp, Freek and Utz, Hans and Isik, Michael and Mayer, Gerd},
title = {Implicit Coordination with Shared Belief: A Heterogeneous Robot Soccer
Team Case Study},
journal = {Advanced Robotics, the International Journal of the Robotics Society
of Japan},
year = {2010}

}

@INPROCEEDINGS{sun_eyewatchme_2009,

author = {Sun, Li and Klank, Ulrich and Beetz, Michael},
title = {{EYEWATCHME} - {3D} Hand and object tracking for inside out activity
analysis},
booktitle = {{IEEE} Computer Society Conference on Computer Vision and Pattern
Recognition, 2009. {CVPR} 2009.},
year = {2009},
pages = {9--16},
month = jun,
abstract = {This paper investigates the inside-out recognition of everyday manipulation
tasks using a gaze-directed camera, which is a camera that actively
directs at the visual attention focus of the person wearing the camera.
We present {EYEWATCHME}, an integrated vision and state estimation
system that at the same time tracks the positions and the poses of
the acting hands, the pose that the manipulated object, and the pose
of the observing camera. Taken together, {EYEWATCHME} provides comprehensive
data for learning predictive models of vision-guided manipulation
that include the objects people are attending, the interaction of
attention and reaching/grasping, and the segmentation of reaching
and grasping using visual attention as evidence. Key technical contributions
of this paper include an ego view hand tracking system that estimates
27 {DOF} hand poses. The hand tracking system is capable of detecting
hands and estimating their poses despite substantial self-occlusion
caused by the hand and occlusions caused by the manipulated object.
{EYEWATCHME} can also cope with blurred images that are caused by
rapid eye movements. The second key contribution is the of the integrated
activity recognition system that simultaneously tracks the attention
of the person, the hand poses, and the poses of the manipulated objects
in terms of a global scene coordinates. We demonstrate the operation
of {EYEWATCHME} in the context of kitchen tasks including filling
a cup with water.},
doi = {10.1109/CVPR.2009.5204358},
keywords = {{3D} object tracking, blurred images, computer graphics, {EYEWATCHME},
gaze-directed camera, grasping segmentation, human computer interaction,
image restoration, image segmentation, image sensors, inside out
activity analysis, integrated activity recognition system, object
recognition, reaching segmentation, state estimation system, substantial
self-occlusion, {tracking3D} hand tracking, vision-guided manipulation}

}

@PHDTHESIS{tenorth_knowledge_2011,

author = {Tenorth, Moritz},
title = {Knowledge Processing for Autonomous Robots},
school = {Technische Universität München},
year = {2011},
url = {http://nbn-resolving.de/urn/resolver.pl?urn:nbn:de:bvb:91-diss-20111125-1079930-1-7}

}

@INPROCEEDINGS{tenorth_tum_2009,

author = {Tenorth, Moritz and Bandouch, Jan and Beetz, Michael},
title = {The {TUM} Kitchen Data Set of Everyday Manipulation Activities for
Motion Tracking and Action Recognition},
booktitle = {{IEEE} International Workshop on Tracking Humans for the Evaluation
of their Motion in Image Sequences ({THEMIS}), in conjunction with
{ICCV2009}},
year = {2009},
abstract = {We introduce the publicly available {TUM} Kitchen Data Set as a comprehensive
collection of activity sequences recorded in a kitchen environment
equipped with multiple complementary sensors. The recorded data consists
of observations of naturally performed manipulation tasks as encountered
in everyday activities of human life. Several instances of a table-setting
task were performed by different subjects, involving the manipulation
of objects and the environment. We provide the original video sequences,
fullbody motion capture data recorded by a markerless motion tracker,
{RFID} tag readings and magnetic sensor readings from objects and
the environment, as well as corresponding action labels. In this
paper, we both describe how the data was computed, in particular
the motion tracker and the labeling, and give examples what it can
be used for. We present first results of an automatic method for
segmenting the observed motions into semantic classes, and describe
how the data can be integrated in a knowledge-based framework for
reasoning about the observations.}

}

@ARTICLE{tenorth_knowrob_2013,

author = {Tenorth, Moritz and Beetz, Michael},
title = {{KnowRob} – A Knowledge Processing Infrastructure for Cognition-enabled
Robots. Part 1: The {KnowRob} System},
journal = {International Journal of Robotics Research ({IJRR})},
year = {2013},
note = {Accepted for publication.}

}

@INPROCEEDINGS{tenorth_exchange_2012,

author = {Tenorth, Moritz and Beetz, Michael},
title = {Exchange of Action-related Information among Autonomous Robots},
booktitle = {12th International Conference on Intelligent Autonomous Systems},
year = {2012}

}

@INPROCEEDINGS{tenorth_knowledge_2012,

author = {Tenorth, Moritz and Beetz, Michael},
title = {Knowledge Processing for Autonomous Robot Control},
booktitle = {{AAAI} Spring Symposium on Designing Intelligent Robots: Reintegrating
{AI}},
year = {2012},
address = {Stanford, {CA}, {USA}},
month = mar

}

@INPROCEEDINGS{tenorth_unified_2012,

author = {Tenorth, Moritz and Beetz, Michael},
title = {A Unified Representation for Reasoning about Robot Actions, Processes,
and their Effects on Objects},
booktitle = {2012 {IEEE/RSJ} International Conference on Intelligent Robots and
Systems ({IROS})},
year = {2012},
address = {Vilamoura, Portugal},
month = oct

}

@TECHREPORT{tenorth_deliverable_2010,

author = {Tenorth, Moritz and Beetz, Michael},
title = {Deliverable D5.2: The {RoboEarth} Language – Language Specification},
institution = {{FP7-ICT-248942} {RoboEarth}},
year = {2010},
number = {D5.2},
abstract = {This document describes the current state of implementation of the
{RoboEarth} representation language. This language is designed for
two main purposes. First, it should allow to represent all information
a robot needs to perform a reasonably complex task. This includes
information about (1) Plans, which consist of the actions a task
is composed of, ordering constraints among them, monitoring and failure
handling, as well as action parameters like objects, locations, grasp
types; (2) Objects, especially types, dimensions, states, and other
properties, but also locations of specific objects a robot has detected,
and object models that can be used for recognition; and the (3) Environment,
including maps for self-localization as well as poses of objects
like pieces of furniture. The second main task of the {RoboEarth}
language is to allow a robot to decide on its own if a certain piece
of information is useful to it. That means, a robot must be able
to check if an action description contains a plan for the action
it would like to do, if it meets all requirements to perform this
action, and if it has the sensors needed to use an object recognition
model. Using the semantic descriptions in the {RoboEarth} language,
a robot can perform the checks using logical inference.}

}

@INPROCEEDINGS{tenorth_priming_2010,

author = {Tenorth, Moritz and Beetz, Michael},
title = {Priming Transformational Planning with Observations of Human Activities},
booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA})},
year = {2010},
pages = {1499--1504},
address = {Anchorage, {AK}, {USA}},
month = may

}

@INPROCEEDINGS{tenorth_knowrob_2009,

author = {Tenorth, Moritz and Beetz, Michael},
title = {{KnowRob} – Knowledge Processing for Autonomous Personal Robots},
booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and Systems},
year = {2009},
pages = {4261--4266},
abstract = {Mobile household robots need much knowledge about objects, places
and actions when performing more and more complex tasks. They must
be able to recognize objects, know what they are and how they can
be used. We present a practical approach to robot knowledge representation
that combines description logics knowledge bases with a rich environment
model, data mining and (self-) observation modules. The robot observes
itself and humans while executing actions and uses the collected
experiences to learn models of action-related concepts grounded in
its perception and action system. We demonstrate our approach by
learning places that are involved in mobile robot manipulation actions,
by locating objects based on their function and by supplying knowledge
required for understanding underspecified task descriptions as commonly
given by humans.}

}

@INPROCEEDINGS{tenorth_towards_2008,

author = {Tenorth, Moritz and Beetz, Michael},
title = {Towards Practical and Grounded Knowledge Representation Systems for
Autonomous Household Robots},
booktitle = {Proceedings of the 1st International Workshop on Cognition for Technical
Systems, München, Germany, 6-8 October},
year = {2008},
abstract = {Mobile household robots need much knowledge about objects, places
and actions when performing more and more complex tasks. They must
be able to recognize objects, know what they are and how they can
be used. This knowledge can often be specified more easily in terms
of action-related concepts than by giving declarative descriptions
of the appearance of objects. Defining chairs as objects to sit on,
for instance, is much more natural than describing how chairs in
general look like. Having grounded symbolic models of its actions
and related concepts allows the robot to reason about its activities
and improve its problem solving performance. In order to use action-related
concepts, the robot must be able to find them in its environment.
We present a practical approach to robot knowledge representation
that combines description logics knowledge bases with data mining
and (self-) observation modules. The robot collects experiences while
executing actions and uses them to learn models and aspects of action-related
concepts grounded in its perception and action system. We demonstrate
our approach by learning places that are involved in mobile robot
manipulation actions.}

}

@ARTICLE{tenorth_knowledge_2010,

author = {Tenorth, Moritz and Jain, Dominik and Beetz, Michael},
title = {Knowledge Representation for Cognitive Robots},
journal = {Künstliche Intelligenz},
year = {2010},
volume = {24},
pages = {233--240},
number = {3}

}

@INPROCEEDINGS{tenorth_towards_2012,

author = {Tenorth, Moritz and Kamei, Koji and Satake, Satoru and Miyashita,
Takahiro and Hagita, Norihiro},
title = {Towards a Networked Robot Architecture for Distributed
Task Execution and Knowledge Exchange},
booktitle = {Third International Workshop on Standards and Common Platforms for
Robotics ({SCPR} 2012), in conjunction with {SIMPAR} 2012},
year = {2012},
address = {Tsukuba, Japan},
month = nov

}

@ARTICLE{tenorth_web-enabled_2011,

author = {Tenorth, Moritz and Klank, Ulrich and Pangercic, Dejan and Beetz,
Michael},
title = {Web-enabled Robots – Robots that Use the Web as an Information Resource},
journal = {Robotics \& Automation Magazine},
year = {2011},
volume = {18},
pages = {58--68},
number = {2}

}

@INPROCEEDINGS{tenorth_knowrob-map_2010,

author = {Tenorth, Moritz and Kunze, Lars and Jain, Dominik and Beetz, Michael},
title = {{KNOWROB-MAP} – Knowledge-Linked Semantic Object Maps},
booktitle = {10th {IEEE-RAS} International Conference on Humanoid Robots},
year = {2010},
pages = {430--435},
address = {Nashville, {TN}, {USA}},
month = dec

}

@INPROCEEDINGS{tenorth_understanding_2010,

author = {Tenorth, Moritz and Nyga, Daniel and Beetz, Michael},
title = {Understanding and Executing Instructions for Everyday Manipulation
Tasks from the World Wide Web},
booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA})},
year = {2010},
pages = {1486--1491},
address = {Anchorage, {AK}, {USA}},
month = may

}

@TECHREPORT{tenorth_understanding_2009,

author = {Tenorth, Moritz and Nyga, Daniel and Beetz, Michael},
title = {Understanding and Executing Instructions for Everyday Manipulation
Tasks from the World Wide Web},
institution = {{IAS} group, Technische Universität München, Fakultät für Informatik},
year = {2009}

}

@ARTICLE{tenorth_representation_2013,

author = {Tenorth, Moritz and Perzylo, Alexander Clifford and Lafrenz, Reinhard
and Beetz, Michael},
title = {Representation and Exchange of Knowledge about Actions, Objects,
and Environments in the {RoboEarth} Framework},
journal = {{IEEE} Transactions on Automation Science and Engineering ({T-ASE})},
year = {2013},
note = {Accepted for publication.}

}

@INPROCEEDINGS{tenorth_roboearth_2012,

author = {Tenorth, Moritz and Perzylo, Alexander Clifford and Lafrenz, Reinhard
and Beetz, Michael},
title = {The {RoboEarth} language: Representing and Exchanging Knowledge about
Actions, Objects, and Environments},
booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA})},
year = {2012},
address = {St. Paul, {MN}, {USA}},
month = may,
note = {Best Cognitive Robotics Paper Award.}

}

@INPROCEEDINGS{tenorth_learning_2013,

author = {Tenorth, Moritz and Torre, Fernando De la and Beetz, Michael},
title = {Learning Probability Distributions over Partially-Ordered Human Everyday
Activities},
booktitle = {{IEEE} International Conference on Robotics and Automation ({ICRA})},
year = {2013},
address = {Karlsruhe, Germany},
month = may,
note = {Accepted for publication.}

}

@ARTICLE{thrun_probabilistic_2000,

author = {Thrun, Sebastian and Beetz, Michael and Bennewitz, Maren and Cremers,
Armin and Dellaert, Frank and Fox, Dieter and Hähnel, Dirk and Rosenberg,
Charles and Roy, Nicholas and Schulte, Jamieson and Schulz, Dirk},
title = {Probabilistic Algorithms and the Interactive Museum Tour-Guide Robot
Minerva},
journal = {International Journal of Robotics Research},
year = {2000},
abstract = {This paper describes Minerva, an interactive tour-guide robot that
was successfully deployed in a Smithsonian museum. Minerva's software
is pervasively probabilistic, relying on explicit representations
of uncertainty in perception and control. This article describes
Minerva's major software components, and provides a comparative analysis
of the results obtained in the Smithsonian museum. During two weeks
of highly successful operation, the robot interacted with thousands
of people, both in the museum and through the Web, traversing more
than 44km at speeds of up to 163 cm/sec in the unmodified museum.}

}

@INPROCEEDINGS{tischler_application_2007,

author = {Tischler, Martin A. and Peter, Christian and Wimmer, Matthias and
Voskamp, Jörg},
title = {Application of emotion recognition methods in automotive research},
booktitle = {Proceedings of the 2nd Workshop on Emotion and Computing – Current
Research and Future Impact},
year = {2007},
editor = {Reichardt, Dirk and Levi, Paul},
pages = {50--55},
address = {Oldenburg, Germany},
month = sep,
abstract = {This paper reports on a pilot study applying emotion recognition technologies
developed for Human-Machine-Interfaces in automobile research. The
aim of the study was to evaluate technologies for quantifying driving
pleasure in a close-to-reality scenario. Results show that car driving
scenarios pose particular requirements on emotion recognition technologies
which could be met by modifications of current systems.}

}

@INPROCEEDINGS{usenko_furniture_2012,

author = {Usenko, Vladyslav and Seidel, Florian and Marton, Zoltan-Csaba and
Pangercic, Dejan and Beetz, Michael},
title = {Furniture Classification using {WWW} {CAD} Models},
booktitle = {{IROS'12} Workshop on Active Semantic Perception ({ASP'12})},
year = {2012},
address = {Vilamoura, Portugal},
month = oct

}

@INPROCEEDINGS{utz_sharing_2004,

author = {Utz, Hans and Stulp, Freek and Mühlenfeld, Arndt},
title = {Sharing Belief in Teams of Heterogeneous Robots},
booktitle = {{RoboCup-2004:} The Eighth {RoboCup} Competitions and Conferences},
year = {2004},
editor = {Nardi, Daniele and Riedmiller, Martin and Sammut, Claude},
pages = {508--515},
address = {Lisbon, Portugal},
publisher = {Springer Verlag},
abstract = {This paper describes the joint approach of three research groups to
enable a heterogeneous team of robots to exchange belief. The communication
framework presented imposes little restrictions on the design and
implementation of the individual autonomous mobile systems. The three
groups have individually taken part in the {RoboCup} F2000 league
since 1998. Although recent rule changes allow for more robots per
team, the cost of acquiring and maintaining autonomous mobile robots
keeps teams from making use of this opportunity. A solution is to
build mixed teams with robots from different labs. As almost all
robots in this league are custom built research platforms with unique
sensors, actuators, and software architectures, forming a heterogeneous
team presents an exciting challenge.},
url = {http://citeseer.ist.psu.edu/utz04sharing.html}

}

@ARTICLE{waibel_roboearth_2011,

author = {Waibel, Markus and Beetz, Michael and {D'Andrea}, Raffaello and Janssen,
Rob and Tenorth, Moritz and Civera, Javier and Elfring, Jos and Gálvez-López,
Dorian and Häussermann, Kai and Montiel, J. M. M. and Perzylo, Alexander
and Schießle, Björn and Zweigle, Oliver and Molengraft,
René van de},
title = {{RoboEarth} - A World Wide Web for Robots},
journal = {Robotics \& Automation Magazine},
year = {2011},
volume = {18},
pages = {69--82},
number = {2}

}

@INPROCEEDINGS{wallhoff_real-time_2010,

author = {Wallhoff, Frank and Rehrl, Tobias and Mayer, Christoph and Radig,
Bernd},
title = {Real-Time Face and Gesture Analysis for Human-Robot Interaction},
booktitle = {Real-Time Image and Video Processing 2010},
year = {2010},
series = {Proceedings of {SPIE}},
note = {invited paper},
keywords = {facial expressions}

}

@INPROCEEDINGS{weikersdorfer_depth-adaptive_2012,

author = {Weikersdorfer, David and Gossow, David and Beetz, Michael},
title = {Depth-Adaptive Superpixels},
booktitle = {21st International Conference on Pattern Recognition},
year = {2012},
note = {Accepted for publication.}

}

@BOOK{wimmer_future_2008,

title = {Future User Interfaces Enhanced by Facial Expression Recognition
– Interpreting Human Faces with Model-based Techniques},
publisher = {{VDM}, Verlag Dr. Müller},
year = {2008},
author = {Wimmer, Matthias},
month = mar,
note = {{ISBN} 978-3-8364-6928-9},
keywords = {facial expressions}

}

@PHDTHESIS{wimmer_model-based_2007,

author = {Wimmer, Matthias},
title = {Model-based Image Interpretation with Application to Facial Expression
Recognition},
school = {Technische Universität München, Institute for Informatics},
year = {2007},
month = dec,
keywords = {facial expressions},
url = {http://nbn-resolving.de/urn/resolver.pl?urn:nbn:de:bvb:91-diss-20071220-618214-1-1}

}

@INPROCEEDINGS{wimmer_asm_2008,

author = {Wimmer, Matthias and Fujie, Shinya and Stulp, Freek and Kobayashi,
Tetsunori and Radig, Bernd},
title = {An {ASM} Fitting Method Based on Machine Learning that Provides a
Robust Parameter Initialization for {AAM} Fitting},
booktitle = {Proc. of the International Conference on Automatic Face and Gesture
Recognition ({FGR08})},
year = {2008},
address = {Amsterdam, Netherlands},
month = sep,
abstract = {Due to their use of information contained in texture, Active Appearance
Models ({AAM}) generally outperform Active Shape Models ({ASM}) in
terms of fitting accuracy. Although many extensions and improvements
over the original {AAM} have been proposed, one of the main drawbacks
of {AAMs} remains its dependence on good initial model parameters
to achieve accurate fitting results. In this paper, we determine
the initial model parameters for {AAM} fitting with {ASM} fitting,
and use machine learning techniques to improve the scope and accuracy
of {ASM} fitting. Combining the precision of {AAM} fitting with the
large radius of convergence of learned {ASM} fitting improves the
results by an order of magnitude, as our empirical evaluation on
a database of publicly available benchmark images demonstrates.}

}

@ARTICLE{wimmer_bitte_2006,

author = {Wimmer, Matthias and Hämmerle, Simone},
title = {Bitte recht freundlich},
journal = {Journal: Zukunft im Brennpunkt},
year = {2006},
volume = {5},
pages = {35--38},
month = dec

}

@INPROCEEDINGS{wimmer_facial_2008,

author = {Wimmer, Matthias and {MacDonald}, Bruce A. and Jayamuni, Dinuka and
Yadav, Arpit},
title = {Facial Expression Recognition for Human-robot Interaction – A Prototype},
booktitle = {2nd Workshop Robot Vision. Lecture
Notes in Computer Science.},
year = {2008},
editor = {Klette, Reinhard and Sommer, Gerald},
volume = {4931/2008},
pages = {139--152},
address = {Auckland, New Zealand},
month = feb,
publisher = {Springer},
abstract = {To be effective in the human world robots must respond to human emotional
states. This paper focuses on the recognition of the six universal
human facial expressions. In the last decade there has been successful
research on facial expression recognition ({FER}) in controlled conditions
suitable for human-computer interaction. However the human-robot
scenario presents additional challenges including a lack of control
over lighting conditions and over the relative poses and separation
of the robot and human, the inherent mobility of robots, and stricter
real time computational requirements dictated by the need for robots
to respond in a timely fashion. Our approach imposes lower computational
requirements by specifically adapting model-based techniques to the
{FER} scenario. It contains adaptive skin color extraction, localization
of the entire face and facial components, and specifically learned
objective functions for fitting a deformable face model. Experimental
evaluation reports a recognition rate of 70\% on the Cohn-Kanade
facial expression database, and 67\% in a robot scenario, which compare
well to other {FER} systems.},
keywords = {facial expressions}

}

@INPROCEEDINGS{wimmer_are_2008,

author = {Wimmer, Matthias and Mayer, Christoph and Eggers, Martin and Radig,
Bernd},
title = {Are You Happy with Your First Name?},
booktitle = {Proceedings of the 3rd Workshop on Emotion and Computing: Current
Research and Future Impact},
year = {2008},
pages = {23--29},
address = {Kaiserslautern, Germany},
month = sep

}

@INPROCEEDINGS{wimmer_tailoring_2008,

author = {Wimmer, Matthias and Mayer, Christoph and Pietzsch, Sylvia and Radig,
Bernd},
title = {Tailoring Model-based Techniques for Facial Expression Interpretation},
booktitle = {The First International Conference on Advances in Computer-Human
Interaction ({ACHI08})},
year = {2008},
address = {Sainte Luce, Martinique},
month = feb,
keywords = {facial expressions}

}

@INPROCEEDINGS{wimmer_recognizing_2008,

author = {Wimmer, Matthias and Mayer, Christoph and Radig, Bernd},
title = {Recognizing Facial Expressions Using Model-based Image Interpretation},
booktitle = {Verbal and Nonverbal Communication Behaviours, {COST} Action 2102
International Workshop},
year = {2008},
address = {Vietri sul Mare, Italy},
month = apr,
note = {Invited Paper},
abstract = {Even if electronic devices widely occupy our daily lives, human-machine
interaction still lacks intuition. Therefore, researchers intend
to resolve these shortcomings by augmenting traditional systems with
aspects of human-human interaction and consider human emotion, behavior,
and intention. This publication focusses on one aspect of this challenge:
recognizing facial expressions. Our approach achieves real-time performance
and provides robustness for real-world applicability. This computer
vision task comprises of various phases for which it exploits model-based
techniques that accurately localize facial features, seamlessly track
them through image sequences, and finally infer facial expressions
visible. We specifically adapt state-of-the-art techniques to each
of these challenging phases. Our system has been successfully presented
to industrial, political, and scientific audience in various events.},
keywords = {facial expressions}

}

@INPROCEEDINGS{wimmer_robustly_2008,

author = {Wimmer, Matthias and Mayer, Christoph and Radig, Bernd},
title = {Robustly Classifying Facial Components Using a Set of Adjusted Pixel
Features},
booktitle = {Proc. of the International Conference on Face and Gesture Recognition
({FGR08})},
year = {2008},
address = {Amsterdam, Netherlands},
month = sep,
abstract = {Efficient and accurate localization of the components of human faces,
such as skin, lips, eyes, and brows, provides benefit to various
real-world applications. However, high intra-class and small inter-class
variations in color prevent simple but quick pixel classifiers from
yielding robust results. In contrast, more elaborate classifiers
consider shape or region features but they do not achieve real-time
performance. In this paper, we show that it definitely is possible
to robustly determine the facial components and achieve far more
than real-time performance. We also use quick pixel-level classifiers
and provide them with a set of pixel features that are adapted to
the image characteristics beforehand. We do not manually select the
pixel features and specify the calculation rules. In contrast, our
idea is to provide a multitude of features and let the Machine Learning
algorithm decide which of them are important. The evaluation draws
a comparison to fixed approaches that do not adapt the computation
of the features to the image content in any way. The obtained accuracy
is precise enough to be used for real-world applications such as
for model-based interpretation of human faces.},
keywords = {facial expressions}

}

@INPROCEEDINGS{wimmer_face_2008,

author = {Wimmer, Matthias and Mayer, Christoph and Stulp, Freek and Radig,
Bernd},
title = {Face Model Fitting based on Machine Learning from Multi-band Images
of Facial Components},
booktitle = {Workshop on Non-Rigid Shape Analysis and Deformable Image Alignment,
held in conjunction with {CVPR}},
year = {2008},
address = {Anchorage, {AK}, {USA}},
month = jun,
abstract = {Geometric models allow to determine semantic information about real-world
objects. Model fitting algorithms need to find the best match between
a parameterized model and a given image. This task inherently requires
an objective function to estimate the error between a model parameterization
and an image. The accuracy of this function directly influences
the accuracy of the entire process of model fitting. Unfortunately,
building these functions is a non-trivial task. Dedicated to the
application of face model fitting, this paper proposes to consider
a multi-band image representation that indicates the facial components,
from which a large set of image features is computed. Since it is
not possible to manually formulate an objective function that considers
this large amount of features, we apply a Machine Learning framework
to construct them. This automatic approach is capable of considering
the large amount of features provided and yield highly accurate objective
functions for face model fitting. Since the Machine Learning framework
rejects non-relevant image features, we obtain high performance runtime
characteristics as well.},
keywords = {facial expressions}

}

@INPROCEEDINGS{wimmer_estimating_2007,

author = {Wimmer, Matthias and Mayer, Christoph and Stulp, Freek and Radig,
Bernd},
title = {Estimating Natural Activity by Fitting {3D} Models via Learned Objective
Functions},
booktitle = {Workshop on Vision, Modeling, and Visualization ({VMV})},
year = {2007},
volume = {1},
pages = {233--241},
address = {Saarbrücken, Germany},
month = nov,
abstract = {Model-based image interpretation has proven to robustly extract high-level
scene descriptors from raw image data. Furthermore, geometric texture
models represent a fundamental component for visualizing real-world
scenarios. However, the motion of the model and the real-world object
must be similar in order to portray natural activity. Again, this
information can be determined by inspecting images via model-based
image interpretation. This paper sketches the challenge of fitting
models to images, describes the shortcomings of current approaches
and proposes a technique based on machine learning techniques. We
identify the objective function as a crucial component for fitting
models to images. Furthermore, we state preferable properties of
these functions and we propose to learn such a function from manually
annotated example images.}

}

@INPROCEEDINGS{wimmer_robustly_2008-1,

author = {Wimmer, Matthias and Pietzsch, Sylvia and Mayer, Christoph and Radig,
Bernd},
title = {Robustly Estimating the Color of Facial Components Using a Set of
Adjusted Pixel Features},
booktitle = {14. Workshop Farbbildverarbeitung},
year = {2008},
pages = {85--96},
address = {Aachen, Germany},
month = oct,
keywords = {facial expressions}

}

@INPROCEEDINGS{wimmer_learning_2007,

author = {Wimmer, Matthias and Pietzsch, Sylvia and Stulp, Freek and Radig,
Bernd},
title = {Learning Robust Objective Functions with Application to Face Model
Fitting},
booktitle = {Proceedings of the 29th {DAGM} Symposium},
year = {2007},
volume = {1},
pages = {486--496},
address = {Heidelberg, Germany},
month = sep,
abstract = {Model-based image interpretation extracts high-level information from
images using a priori knowledge about the object of interest. The
computational challenge is to determine the model parameters that
best match a given image by searching for the global optimum of the
involved objective function. Unfortunately, this function is usually
designed manually, based on implicit and domain-dependent knowledge,
which prevents the fitting task from yielding accurate results. In
this paper, we demonstrate how to improve model fitting by learning
objective functions from annotated training images. Our approach
automates many critical decisions and the remaining manual steps
hardly require domain-dependent knowledge. This yields more robust
objective functions that are able to achieve the accurate model fit.
Our evaluation uses a publicly available image database and compares
the obtained results to a recent state-of-the-art approach.},
keywords = {facial expressions}

}

@INPROCEEDINGS{wimmer_adaptive_2005,

author = {Wimmer, Matthias and Radig, Bernd},
title = {Adaptive Skin Color Classificator},
booktitle = {Proceedings of the first International Conference on Graphics, Vision
and Image Processing},
year = {2005},
editor = {Aboshosha, Ashraf and others},
volume = {I},
pages = {324--327},
address = {Cairo, Egypt},
month = dec,
publisher = {{ICGST}},
abstract = {A lot of computer vision applications benefit from robust skin color
classification. But this is a hard challenge due to the various image
conditions like camera settings, illumination, light source, shadows
and many more. Furthermore people's tans and ethnic groups also extend
those conditions. In this work we present a parametric skin color
classifier that can be adapted to the conditions of each image or
image sequence. This is done by evaluating some previously known skin
color pixels which are acquired by applying a face detector. This
approach can distinguish skin color from very similar color like
lip color or eye brow color. Its high speed and high accuracy makes
it appropriate for real time applications such as face tracking and
mimic recognition.},
isbn = {21970/2005}

}

@INPROCEEDINGS{wimmer_initial_2007,

author = {Wimmer, Matthias and Radig, Bernd},
title = {Initial Pose Estimation for {3D} Models Using Learned Objective Functions},
booktitle = {Proceedings of the 8th Asian Conference on Computer Vision ({ACCV07})},
year = {2007},
editor = {Yagi, Yasushi and Kang, Sing Bing and Kweon, In So and Zha, Hongbin},
volume = {4844},
series = {{LNCS}},
pages = {332--341},
address = {Heidelberg},
month = nov,
publisher = {Springer},
abstract = {Tracking {3D} models in image sequences essentially requires determining
their initial position and orientation. Our previous work identifies
the objective function as a crucial component for fitting {2D} models
to images. We state preferable properties of these functions and
we propose to learn such a function from annotated example images.
This paper extends this approach by making it appropriate to also
fit {3D} models to images. The correctly fitted model represents
the initial pose for model tracking. However, this extension induces
nontrivial challenges such as out-of-plane rotations and self occlusion,
which cause large variation to the models surface visible in the
image. We solve this issue by connecting the input features of the
objective function directly to the model. Furthermore, sequentially
executing objective functions specifically learned for different
displacements from the correct positions yields highly accurate objective
values.},
isbn = {978-3-540-76389-5}

}

@INPROCEEDINGS{wimmer_automatically_2007,

author = {Wimmer, Matthias and Radig, Bernd},
title = {Automatically Learning the Objective Function for Model Fitting},
booktitle = {Proceedings of the Meeting in Image Recognition and Understanding
({MIRU})},
year = {2007},
address = {Hiroshima, Japan},
month = jul,
abstract = {Model-based image interpretation has proven to appropriately extract
high-level information from images. A priori knowledge about the
object of interest represents the basis of this task. Model fitting
determines the model that best matches a given image by searching
for the global optimum of an objective function. Unfortunately, the
objective function is usually designed manually, based on implicit
and domain-dependent knowledge. In contrast, this paper describes
how to obtain highly accurate objective functions by learning them
from annotated training images. It automates many critical decisions
and the remaining manual steps hardly require domain-dependent knowledge
at all. This approach yields highly accurate objective functions.
Our evaluation fits a face model to a publicly available image database
and compares the obtained results to a recent state-of-the-art approach.}

}

@ARTICLE{wimmer_adaptive_2006,

author = {Wimmer, Matthias and Radig, Bernd},
title = {Adaptive Skin Color Classificator},
journal = {{ICGST} International Journal on Graphics, Vision and Image Processing},
year = {2006},
volume = {Special Issue on Biometrics},
abstract = {Skin color is an important feature of faces. Various applications
benefit from robust skin color detection. Skin color may look quite
different, depending on camera settings, illumination, shadows, people's
tans, ethnic groups. That variation is a challenging aspect of skin
color classification. In this paper, we present an approach that
uses a high level vision module to detect an image specific skin
color model. This model is representative for the context conditions
within the image and is used to adapt dynamic skin color classifiers
to it. This approach distinguishes skin color from very similar color
like lip color or eyebrow color. Its high speed and accuracy makes
it appropriate for real time applications such as face model fitting,
gaze estimation, and recognition of facial expressions.}

}

@INPROCEEDINGS{wimmer_person_2006,

author = {Wimmer, Matthias and Radig, Bernd and Beetz, Michael},
title = {A Person and Context Specific Approach for Skin Color Classification},
booktitle = {Proceedings of the 18th International Conference on Pattern Recognition
({ICPR} 2006)},
year = {2006},
volume = {2},
pages = {39--42},
address = {Los Alamitos, {CA}, {USA}},
month = aug,
publisher = {{IEEE} Computer Society},
abstract = {Skin color is an important feature of faces. Various applications
benefit from robust skin color detection. Depending on camera settings,
illumination, shadows, people's tans, and ethnic groups skin color
looks differently, which is a challenging aspect for detecting it
automatically. In this paper, we present an approach that uses a
high level vision module to detect an image specific skin color model.
This model is then used to adapt parametric skin color classifiers
to the processed image. This approach is capable to distinguish skin
color from extremely similar colors, such as lip color or eyebrow
color. Its high speed and high accuracy make it appropriate for real
time applications such as face tracking and recognition of facial
expressions.}

}

@INPROCEEDINGS{wimmer_sipbild_2007,

author = {Wimmer, Matthias and Radig, Bernd and Mayer, Christoph},
title = {{SIPBILD} – Mimik- und Gestikerkennung in der Mensch-Maschine-Schnittstelle},
booktitle = {Beiträge der 37. Jahrestagung der Gesellschaft für Informatik ({GI})},
year = {2007},
volume = {1},
pages = {271--274},
address = {Bremen, Germany},
month = sep,
abstract = {Für eine natürliche Mensch-Maschine Interaktion spielt die Interpretation
visueller Informationen eine zentrale Rolle. Fehlende Kontrolle der
Umgebungsbedingungen wie Helligkeit und Hintergrundfarbe stellt hohe
Anforderungen an die Bilderkennungssoftware. {SIPBILD} schafft es,
mit modellbasierter Bildinterpretation die menschliche Mimik und
Gestik zu erkennen. Um diese Technik in natürlichen Umgebungen einzusetzen,
ist es allerdings notwendig, die bisherigen Techniken entscheidend
zu verbessern. Insbesondere stellen wir eine Vorgehensweise vor,
die robustes Model-Fitting ohne spezielles Fachwissen in der Bildverarbeitung
erreicht und der Einsatz dieser Technik somit keinen Experten mehr
verlangt.}

}

@ARTICLE{wimmer_recognizing_2008-1,

author = {Wimmer, Matthias and Riaz, Zahid and Mayer, Christoph and Radig,
Bernd},
title = {Recognizing Facial Expressions Using Model-based Image Interpretation},
journal = {Advances in Human-Computer Interaction},
year = {2008},
volume = {1},
pages = {587--600},
month = oct,
editor = {Pinder, Shane},
keywords = {facial expressions}

}

@INPROCEEDINGS{wimmer_low-level_2008,

author = {Wimmer, Matthias and Schuller, Björn and Arsic, Dejan and Radig,
Bernd and Rigoll, Gerhard},
title = {Low-level Fusion of Audio and Video Feature for Multi-modal Emotion
Recognition},
booktitle = {3rd International Conference on Computer Vision Theory and Applications
({VISAPP})},
year = {2008},
volume = {2},
pages = {145--151},
address = {Madeira, Portugal},
month = jan,
abstract = {Bimodal emotion recognition through audiovisual feature fusion has
been shown superior over each individual modality in the past. Still,
synchronization of the two streams is a challenge, as many vision
approaches work on a frame basis opposing audio turn- or chunk-basis.
Therefore, late fusion schemes such as simple logic or voting strategies
are commonly used for the overall estimation of underlying affect.
However, early fusion is known to be more effective in many other
multimodal recognition tasks. We therefore suggest a combined analysis
by descriptive statistics of audio and video Low-Level-Descriptors
for subsequent static {SVM} Classification. This strategy also allows
for a combined feature-space optimization which will be discussed
herein. The high effectiveness of this approach is shown on a database
of 11.5h containing six emotional situations in an airplane scenario.}

}

@ARTICLE{wimmer_learning_2008,

author = {Wimmer, Matthias and Stulp, Freek and Pietzsch, Sylvia and Radig,
Bernd},
title = {Learning Local Objective Functions for Robust Face Model Fitting},
journal = {{IEEE} Transactions on Pattern Analysis and Machine Intelligence
({PAMI})},
year = {2008},
volume = {30},
pages = {1357--1370},
number = {8},
doi = {10.1109/TPAMI.2007.70793},
issn = {0162-8828},
keywords = {facial expressions}

}

@INPROCEEDINGS{wimmer_enabling_2007,

author = {Wimmer, Matthias and Stulp, Freek and Radig, Bernd},
title = {Enabling Users to Guide the Design of Robust Model Fitting Algorithms},
booktitle = {Workshop on Interactive Computer Vision, held in conjunction with
{ICCV} 2007},
year = {2007},
pages = {28},
address = {Rio de Janeiro, Brazil},
month = oct,
publisher = {Omnipress},
abstract = {Model-based image interpretation extracts high-level information from
images using a priori knowledge about the object of interest. The
computational challenge in model fitting is to determine the model
parameters that best match a given image, which corresponds to finding
the global optimum of the objective function. When it comes to the
robustness and accuracy of fitting models to specific images, humans
still outperform state-of-the-art model fitting systems. Therefore,
we propose a method in which non-experts can guide the process of
designing model fitting algorithms. In particular, this paper demonstrates
how to obtain robust objective functions for face model fitting applications,
by learning their calculation rules from example images annotated
by humans. We evaluate the obtained function using a publicly available
image database and compare it to a recent state-of-the-art approach
in terms of accuracy.},
isbn = {978-1-4244-1631-8}

}

@INPROCEEDINGS{wimmer_learning_2006,

author = {Wimmer, Matthias and Stulp, Freek and Tschechne, Stephan and Radig,
Bernd},
title = {Learning Robust Objective Functions for Model Fitting in Image Understanding
Applications},
booktitle = {Proceedings of the 17th British Machine Vision Conference ({BMVC})},
year = {2006},
editor = {Chantler, Michael J. and Trucco, Emanuel and Fisher, Robert B.},
volume = {3},
pages = {1159--1168},
address = {Edinburgh, {UK}},
month = sep,
publisher = {{BMVA}},
abstract = {Model-based methods in computer vision have proven to be a good approach
for compressing the large amount of information in images. Fitting
algorithms search for those parameters of the model that optimise
the objective function given a certain image. Although fitting algorithms
have been the subject of intensive research and evaluation, the objective
function is usually designed ad hoc and heuristically with much implicit
domain-dependent knowledge. This paper formulates a set of requirements
that robust objective functions should satisfy. Furthermore, we propose
a novel approach that learns the objective function from training
images that have been annotated with the preferred model parameters.
The requirements are automatically enforced during the learning phase,
which yields generally applicable objective functions. We compare
the performance of our approach to other approaches. For this purpose,
we propose a set of indicators that evaluate how well an objective
function meets the stated requirements.}

}

@INPROCEEDINGS{wimmer_human_2007,

author = {Wimmer, Matthias and Zucker, Ursula and Radig, Bernd},
title = {Human Capabilities on Video-based Facial Expression Recognition},
booktitle = {Proceedings of the 2nd Workshop on Emotion and Computing – Current
Research and Future Impact},
year = {2007},
editor = {Reichardt, Dirk and Levi, Paul},
pages = {7--10},
address = {Osnabrück, Germany},
month = sep,
abstract = {A lot of promising computer vision research has been conducted in
order to automatically recognize facial expressions during the last
decade. Some of them achieve high accuracy, however, it has not yet
been investigated how accurately humans accomplish this task, which
will introduce a comparable measure. Therefore, we conducted a survey
on this issue and this paper evaluates the gathered information regarding
the recognition rate and the confusion of facial expressions.},
keywords = {facial expressions}

}

@INPROCEEDINGS{witzig_context_2013,

author = {Witzig, Thomas and Zöllner, J. Marius and Pangercic, Dejan and Osentoski,
Sarah and Roan, Philip and Jäkel, Rainer and Dillmann, Rüdiger},
title = {Context Aware Shared Autonomy for Robotic Manipulation Tasks},
booktitle = {{IEEE/RSJ} International Conference on Intelligent Robots and
Systems ({IROS}), Tokyo Big Sight, Japan},
year = {2013}

}

@INCOLLECTION{wykowska_how_2009,

author = {Wykowska, Agnieszka and Maldonado, Alexis and Beetz, Michael and
Schuboe, Anna},
title = {How Humans Optimize Their Interaction with the Environment: The Impact
of Action Context on Human Perception},
booktitle = {Progress in Robotics},
publisher = {Springer Berlin Heidelberg},
year = {2009},
editor = {Kim, Jong-Hwan and Ge, Shuzhi Sam and Vadakkepat, Prahlad and Jesse,
Norbert and Al Manum, Abdullah and Puthusserypady K, Sadasivan and
Rückert, Ulrich and Sitte, Joaquin and Witkowski, Ulf and Nakatsu,
Ryohei and Braunl, Thomas and Baltes, Jacky and Anderson, John and
Wong, Ching-Chang and Verner, Igor and Ahlgren, David},
volume = {44},
series = {Communications in Computer and Information Science},
pages = {162--172},
doi = {10.1007/978-3-642-03986-7_19},
isbn = {978-3-642-03986-7},
keywords = {Computer Science},
url = {http://dx.doi.org/10.1007/978-3-642-03986-7_19}

}

@ARTICLE{wykowska_how_2010,

author = {Wykowska, Agnieszka and Maldonado, Alexis and Beetz, Michael and
Schuboe, Anna},
title = {How Humans Optimize Their Interaction with the Environment: The Impact
of Action Context on Human Perception},
journal = {International Journal of Social Robotics},
year = {2010},
pages = {1--9},
doi = {10.1007/s12369-010-0078-3},
issn = {1875-4791},
keywords = {Engineering},
url = {http://dx.doi.org/10.1007/s12369-010-0078-3}

}

@INPROCEEDINGS{wykowska_how_2009-1,

author = {Wykowska, Agnieszka and Maldonado, Alexis and Beetz, Michael and
Schuboe, Anna},
title = {How humans optimize their interaction with the environment: The impact
of action context on human perception.},
booktitle = {Progress in Robotics. Proceedings of the {FIRA} {RoboWorld} Congress},
year = {2009},
address = {Incheon, Korea},
month = aug

}

@ARTICLE{zaeh_artificial_2010,

author = {Zaeh, M. F. and Roesel, W. and Bannat, A. and Bautze, T. and Beetz,
M. and Blume, J. and Diepold, K. and Ertelt, C. and Geiger, F. and
Gmeiner, T. and Gyger, T. and Knoll, A. and Lau, C. and Lenz, C.
and Ostgathe, M. and Reinhart, G. and Ruehr, T. and Schuboe, A. and
Shea, K. and Wersborg, I. Stork genannt and Stork, S. and Tekouo,
W. and Wallhoff, F. and Wiesbeck, M.},
title = {Artificial Cognition in Production Systems},
journal = {{IEEE} Transactions on Automation Science and Engineering},
year = {2010},
volume = {7},
pages = {1--27},
number = {3}

}

@INPROCEEDINGS{zhu_contracting_2011,

author = {Zhu, Shulei and Pangercic, Dejan and Beetz, Michael},
title = {Contracting Curve Density Algorithm for Applications in Personal
Robotics},
booktitle = {11th {IEEE-RAS} International Conference on Humanoid Robots},
year = {2011},
address = {Bled, Slovenia},
month = oct

}

@INPROCEEDINGS{zia_acquisition_2009,

author = {Zia, Muhammad Zeeshan and Klank, Ulrich and Beetz, Michael},
title = {Acquisition of a Dense {3D} Model Database for Robotic Vision},
booktitle = {International Conference on Advanced Robotics ({ICAR})},
year = {2009},
abstract = {Service Robots in real world environments need to have computer vision
capability for detecting a large class of objects. We discuss how
freely available {3D} model databases can be used to enable robots
to know the appearance of a wide variety of objects in human environments
with special application to our Assistive Kitchen. However, the open
and free nature of such databases pose problems for example the presence
of incorrectly annotated {3D} models, or objects for which very few
models exist online. We have previously proposed techniques to automatically
select the useful models from the search result, and utilizing such
models to perform simple manipulation tasks. Here, we build upon
that work, to describe a technique based on Morphing to form new
{3D} models if we only have a few models corresponding to a label.
However, morphing in computer graphics requires a human operator
and is computationally burdensome, due to which we present our own
automatic morphing technique. We also present a simple technique
to speed the matching process of {3D} models against real scenes
using Visibility culling. This technique can potentially speed-up
the matching process by 2-3 times while using less memory, if we
have some prior information model and world pose.}

}

@INCOLLECTION{zah_cognitive_2009,

author = {Zäh, Michael F. and Beetz, Michael and Shea, Kristina and Reinhart,
Gunther and Bender, K. and Lau, Christian and Ostgathe, Martin and
Vogl, W. and Wiesbeck, Mathey and Engelhard, Marco and Ertelt, Christoph
and Rühr, Thomas and Friedrich, M. and Herle, S.},
title = {The Cognitive Factory},
booktitle = {Changeable and Reconfigurable Manufacturing Systems},
publisher = {Springer},
year = {2009},
editor = {{ElMaraghy}, H. A.},
pages = {355--371}

}

@INPROCEEDINGS{zah_integrated_2008,

author = {Zäh, M. F. and Beetz, M. and Shea, K. and Reinhart, G. and Stursberg,
O. and Ostgathe, M. and Lau, C. and Ertelt, C. and Pangercic, D.
and Rühr, Thomas and Ding, H. and Paschedag, T.},
title = {An Integrated Approach to Realize the Cognitive Machine Shop},
booktitle = {Proceedings of the 1st International Workshop on Cognition for Technical
Systems, München, Germany, 6-8 October},
year = {2008}

}

@BOOK{hertzberg_ki_2007,

title = {{KI} 2007: Advances in Artificial Intelligence},
publisher = {Springer-Verlag},
year = {2007},
editor = {Hertzberg, Joachim and Beetz, Michael and Englert, Roman},
volume = {4667},
series = {Lecture Notes in Artificial Intelligence},
address = {Berlin Heidelberg},
month = aug

}

Rechte Seite

Informatik IX

Image Understanding and Knowledge-Based Systems

Boltzmannstrasse 3
85748 Garching

info@iuks.in.tum.de