% generated by bibtexbrowser <http://www.monperrus.net/martin/bibtexbrowser/>
% 
% Encoding: UTF-8
@inproceedings{gonsior_improving_2011,
 author = {B Gonsior and S Sosnowski and C Mayer and J Blume and B Radig and D Wollherr and K Kühnlenz},
 title = {Improving Aspects of Empathy and Subjective Performance for {HRI}
	through Mirroring Facial Expressions},
 booktitle = {Proceedings of the 20th {IEEE} International Symposium on Robot and
	Human Interactive Communication},
 year = {2011},
 keywords = {facial expressions},
}

@phdthesis{mayer_facial_2012,
 author = {C Mayer},
 title = {Facial Expression Recognition With A Three-Dimensional Face Model},
 school = {Technische Universität München},
 year = {2012},
 address = {München},
 keywords = {facial expressions},
 url = {http://nbn-resolving.de/urn/resolver.pl?urn:nbn:de:bvb:91-diss-20120110-1080232-1-5},
}

@article{mayer_cross-database_2014,
 author = {C Mayer and M Eggers and B Radig},
 title = {Cross-database evaluation for facial expression recognition},
 journal = {Pattern Recognition and Image Analysis},
 year = {2014},
 volume = {24},
 pages = {124--132},
 number = {1},
 month = {jan},
 doi = {10.1134/S1054661814010106},
 issn = {1054-6618, 1555-6212},
 keywords = {facial expressions},
 language = {en},
 url = {http://link.springer.com/10.1134/S1054661814010106},
 urldate = {2014-05-15},
}

@article{mayer_face_2013,
 author = {C Mayer and B Radig},
 title = {Face model fitting with learned displacement experts and multi-band
	images},
 journal = {Pattern Recognition and Image Analysis},
 year = {2013},
 volume = {23},
 pages = {287--295},
 number = {2},
 month = {apr},
 doi = {10.1134/S1054661813020119},
 issn = {1054-6618, 1555-6212},
 keywords = {facial expressions},
 language = {en},
 url = {http://link.springer.com/10.1134/S1054661813020119},
 urldate = {2014-05-15},
}

@article{mayer_face_2011,
 author = {C Mayer and B Radig},
 title = {Face model fitting with learned displacement experts and multi-band
	images},
 journal = {Pattern Recognition and Image Analysis},
 year = {2011},
 volume = {21},
 pages = {526--529},
 number = {3},
 month = {sep},
 doi = {10.1134/S1054661811020738},
 issn = {1054-6618, 1555-6212},
 keywords = {facial expressions},
 language = {en},
 url = {http://link.springer.com/10.1134/S1054661811020738},
 urldate = {2014-05-15},
}

@inproceedings{mayer_learning_2011,
 author = {C Mayer and B Radig},
 title = {Learning Displacement Experts from Multi-band Images for Face Model
	Fitting},
 booktitle = {International Conference on Advances in Computer-Human Interaction},
 year = {2011},
 month = {feb},
 keywords = {facial expressions},
}

@inproceedings{mayer_towards_2010,
 author = {C Mayer and S Sosnowski and K Kühnlenz and B Radig},
 title = {Towards robotic facial mimicry: system development and evaluation},
 booktitle = {Proceedings of the 19th {IEEE} International Symposium on Robot and
	Human Interactive Communication},
 year = {2010},
 keywords = {facial expressions},
}

@inproceedings{mayer_facial_2009,
 author = {C Mayer and M Wimmer and M Eggers and B Radig},
 title = {Facial Expression Recognition with {3D} Deformable Models},
 booktitle = {Proceedings of the 2nd International Conference on Advances in
	Computer-Human Interaction ({ACHI})},
 year = {2009},
 publisher = {{IEEE}},
 keywords = {facial expressions},
}

@article{mayer_adjusted_2009,
 author = {C Mayer and M Wimmer and B Radig},
 title = {Adjusted Pixel Features for Facial Component Classification},
 journal = {Image and Vision Computing},
 year = {2009},
 keywords = {facial expressions},
}

@inproceedings{mayer_interpreting_2008,
 author = {C Mayer and M Wimmer and F Stulp and Z Riaz and A Roth and M Eggers and B Radig},
 title = {Interpreting the Dynamics of Facial Expressions in Real Time Using
	Model-based Techniques},
 booktitle = {Proceedings of the 3rd Workshop on Emotion and Computing: Current
	Research and Future Impact},
 year = {2008},
 pages = {45--46},
 address = {Kaiserslautern, Germany},
 month = {sep},
 keywords = {facial expressions},
}

@inproceedings{mayer_real_2008,
 author = {C Mayer and M Wimmer and F Stulp and Z Riaz and A Roth and M Eggers and B Radig},
 title = {A Real Time System for Model-based Interpretation of the Dynamics
	of Facial Expressions},
 booktitle = {Proceedings of the International Conference on Automatic Face and
	Gesture Recognition ({FGR08})},
 year = {2008},
 address = {Amsterdam, Netherlands},
 month = {sep},
 keywords = {facial expressions},
}

@inproceedings{pietzsch_face_2008,
 author = {S Pietzsch and M Wimmer and F Stulp and B Radig},
 title = {Face Model Fitting with Generic, Group-specific, and Person-specific
	Objective Functions},
 booktitle = {3rd International Conference on Computer Vision Theory and Applications
	({VISAPP})},
 year = {2008},
 volume = {2},
 pages = {5--12},
 address = {Madeira, Portugal},
 month = {jan},
 abstract = {In model-based fitting, the model parameters that best fit the image
	are determined by searching for the optimum of an objective function.
	Often, this function is designed manually, based on implicit and
	domain-dependent knowledge. We acquire more robust objective functions
	by learning them from annotated images, in which many critical decisions
	are automated, and the remaining manual steps do not require domain
	knowledge. Still, the trade-off between generality and accuracy remains.
	General functions can be applied to a large range of objects, whereas
	specific functions describe a subset of objects more accurately.
	Gross et al. have demonstrated this principle by comparing generic
	to person-specific Active Appearance Models. As it is impossible
	to learn a person-specific objective function for the entire human
	population, we automatically partition the training images and then
	learn partition-specific functions. The number of groups influences
	the specificity of the learned functions. We automatically determine
	the optimal partitioning for a given number of groups by minimizing
	the expected fitting error. Our empirical evaluation demonstrates
	that the group-specific objective functions more accurately describe
	the images of the corresponding group. The results of this paper
	are especially relevant to face model tracking, as individual faces
	will not change throughout an image sequence.},
 keywords = {facial expressions},
}

@inproceedings{riaz_image_2009,
 author = {Z Riaz and M Beetz and B Radig},
 title = {Image Normalization for Face Recognition using {3D} Model},
 booktitle = {International Conference on Information and Communication Technologies,
	Karachi, Pakistan},
 year = {2009},
 publisher = {{IEEE}},
 keywords = {facial expressions},
}

@inproceedings{riaz_shape_2008,
 author = {Z Riaz and M Beetz and B Radig},
 title = {Shape Invariant Recognition of Segmented Human Faces using Eigenfaces},
 booktitle = {Proceedings of the 12th International Multitopic Conference},
 year = {2008},
 publisher = {{IEEE}},
 keywords = {facial expressions},
}

@inproceedings{riaz_unified_2009,
 author = {Z Riaz and S Gedikli and M Beetz and B Radig},
 title = {A Unified Features Approach to Human Face Image Analysis and Interpretation},
 booktitle = {Affective Computing and Intelligent Interaction, Amsterdam, Netherlands},
 year = {2009},
 publisher = {{IEEE}},
 keywords = {facial expressions},
}

@inproceedings{riaz_3d_2009,
 author = {Z Riaz and C Mayer and M Beetz and B Radig},
 title = {{3D} Model for Face Recognition across Facial Expressions},
 booktitle = {Biometric {ID} Management and Multimodal Communication, Madrid, Spain},
 year = {2009},
 publisher = {Springer},
 keywords = {facial expressions},
}

@inproceedings{riaz_facial_2009,
 author = {Z Riaz and C Mayer and M Beetz and B Radig},
 title = {Facial Expressions Recognition from Image Sequences},
 booktitle = {2nd International Conference on Cross-Modal Analysis of Speech, Gestures,
	Gaze and Facial Expressions, Prague, Czech Republic},
 year = {2009},
 publisher = {Springer},
 keywords = {facial expressions},
}

@inproceedings{riaz_model_2009-1,
 author = {Z Riaz and C Mayer and M Beetz and B Radig},
 title = {Model Based Analysis of Face Images for Facial Feature Extraction},
 booktitle = {Computer Analysis of Images and Patterns, Münster, Germany},
 year = {2009},
 publisher = {Springer},
 keywords = {facial expressions},
}

@inproceedings{riaz_model_2009,
 author = {Z Riaz and C Mayer and M Wimmer and M Beetz and B Radig},
 title = {A Model Based approach for Expression Invariant Face Recognition},
 booktitle = {3rd International Conference on Biometrics, Alghero, Italy},
 year = {2009},
 publisher = {Springer},
 keywords = {facial expressions},
}

@article{riaz_model_2008,
 author = {Z Riaz and C Mayer and M Wimmer and B Radig},
 title = {Model Based Face Recognition Across Facial Expressions},
 journal = {Journal of Information and Communication Technology},
 year = {2008},
 month = {dec},
 keywords = {facial expressions},
}

@inproceedings{sosnowski_mirror_2010,
 author = {S Sosnowski and C Mayer and K Kühnlenz and B Radig},
 title = {Mirror my emotions! Combining facial expression analysis and synthesis
	on a robot},
 booktitle = {The Thirty-Sixth Annual Convention of the Society for the Study of
	Artificial Intelligence and Simulation of Behaviour ({AISB2010})},
 year = {2010},
 keywords = {facial expressions},
}

@inproceedings{wallhoff_real-time_2010,
 author = {F Wallhoff and T Rehrl and C Mayer and B Radig},
 title = {Real-Time Face and Gesture Analysis for Human-Robot Interaction},
 booktitle = {Real-Time Image and Video Processing 2010},
 year = {2010},
 series = {Proceedings of {SPIE}},
 keywords = {facial expressions},
}

@book{wimmer_future_2008,
 title = {Future User Interfaces Enhanced by Facial Expression Recognition
	– Interpreting Human Faces with Model-based Techniques},
 publisher = {{VDM} Verlag Dr. Müller},
 year = {2008},
 author = {M Wimmer},
 month = {mar},
 keywords = {facial expressions},
}

@phdthesis{wimmer_model-based_2007,
 author = {M Wimmer},
 title = {Model-based Image Interpretation with Application to Facial Expression
	Recognition},
 school = {Technische Universität München, Institute for Informatics},
 year = {2007},
 month = {dec},
 keywords = {facial expressions},
 url = {http://nbn-resolving.de/urn/resolver.pl?urn:nbn:de:bvb:91-diss-20071220-618214-1-1},
}

@inproceedings{wimmer_facial_2008,
 author = {M Wimmer and BA MacDonald and D Jayamuni and A Yadav},
 title = {Facial Expression Recognition for Human-robot Interaction – A Prototype},
 booktitle = {2nd Workshop on Robot Vision, Lecture Notes in Computer
	Science},
 year = {2008},
 editor = {Klette, Reinhard and Sommer, Gerald},
 volume = {4931/2008},
 pages = {139--152},
 address = {Auckland, New Zealand},
 month = {feb},
 publisher = {Springer},
 abstract = {To be effective in the human world, robots must respond to human emotional
	states. This paper focuses on the recognition of the six universal
	human facial expressions. In the last decade there has been successful
	research on facial expression recognition ({FER}) in controlled conditions
	suitable for human-computer interaction. However, the human-robot
	scenario presents additional challenges, including a lack of control
	over lighting conditions and over the relative poses and separation
	of the robot and human, the inherent mobility of robots, and stricter
	real time computational requirements dictated by the need for robots
	to respond in a timely fashion. Our approach imposes lower computational
	requirements by specifically adapting model-based techniques to the
	{FER} scenario. It contains adaptive skin color extraction, localization
	of the entire face and facial components, and specifically learned
	objective functions for fitting a deformable face model. Experimental
	evaluation reports a recognition rate of 70\% on the Cohn-Kanade
	facial expression database, and 67\% in a robot scenario, which compare
	well to other {FER} systems.},
 keywords = {facial expressions},
}

@inproceedings{wimmer_tailoring_2008,
 author = {M Wimmer and C Mayer and S Pietzsch and B Radig},
 title = {Tailoring Model-based Techniques for Facial Expression Interpretation},
 booktitle = {The First International Conference on Advances in Computer-Human
	Interaction ({ACHI08})},
 year = {2008},
 address = {Sainte Luce, Martinique},
 month = {feb},
 keywords = {facial expressions},
}

@inproceedings{wimmer_recognizing_2008,
 author = {M Wimmer and C Mayer and B Radig},
 title = {Recognizing Facial Expressions Using Model-based Image Interpretation},
 booktitle = {Verbal and Nonverbal Communication Behaviours, {COST} Action 2102
	International Workshop},
 year = {2008},
 address = {Vietri sul Mare, Italy},
 month = {apr},
 abstract = {Even though electronic devices widely occupy our daily lives, human-machine
	interaction still lacks intuition. Therefore, researchers intend
	to resolve these shortcomings by augmenting traditional systems with
	aspects of human-human interaction, considering human emotion, behavior,
	and intention. This publication focuses on one aspect of this challenge:
	recognizing facial expressions. Our approach achieves real-time performance
	and provides robustness for real-world applicability. This computer
	vision task comprises various phases for which it exploits model-based
	techniques that accurately localize facial features, seamlessly track
	them through image sequences, and finally infer the visible facial
	expressions. We specifically adapt state-of-the-art techniques to each
	of these challenging phases. Our system has been successfully presented
	to industrial, political, and scientific audiences at various events.},
 keywords = {facial expressions},
}

@inproceedings{wimmer_robustly_2008,
 author = {M Wimmer and C Mayer and B Radig},
 title = {Robustly Classifying Facial Components Using a Set of Adjusted Pixel
	Features},
 booktitle = {Proceedings of the International Conference on Automatic Face and
	Gesture Recognition ({FGR08})},
 year = {2008},
 address = {Amsterdam, Netherlands},
 month = {sep},
 abstract = {Efficient and accurate localization of the components of human faces,
	such as skin, lips, eyes, and brows, provides benefit to various
	real-world applications. However, high intra-class and small inter-class
	variations in color prevent simple but quick pixel classifiers from
	yielding robust results. In contrast, more elaborate classifiers
	consider shape or region features but they do not achieve real-time
	performance. In this paper, we show that it definitely is possible
	to robustly determine the facial components and achieve far more
	than real-time performance. We also use quick pixel-level classifiers
	and provide them with a set of pixel features that are adapted to
	the image characteristics beforehand. We do not manually select the
	pixel features or specify the calculation rules. Instead, our
	idea is to provide a multitude of features and let the Machine Learning
	algorithm decide which of them are important. The evaluation draws
	a comparison to fixed approaches that do not adapt the computation
	of the features to the image content in any way. The obtained accuracy
	is precise enough to be used for real-world applications such as
	for model-based interpretation of human faces.},
 keywords = {facial expressions},
}

@inproceedings{wimmer_face_2008,
 author = {M Wimmer and C Mayer and F Stulp and B Radig},
 title = {Face Model Fitting based on Machine Learning from Multi-band Images
	of Facial Components},
 booktitle = {Workshop on Non-Rigid Shape Analysis and Deformable Image Alignment,
	held in conjunction with {CVPR}},
 year = {2008},
 address = {Anchorage, {AK}, {USA}},
 month = {jun},
 abstract = {Geometric models allow us to determine semantic information about real-world
	objects. Model fitting algorithms need to find the best match between
	a parameterized model and a given image. This task inherently requires
	an objective function to estimate the error between a model parameterization
	and an image. The accuracy of this function directly influences
	the accuracy of the entire process of model fitting. Unfortunately,
	building these functions is a non-trivial task. Dedicated to the
	application of face model fitting, this paper proposes to consider
	a multi-band image representation that indicates the facial components,
	from which a large set of image features is computed. Since it is
	not possible to manually formulate an objective function that considers
	this large amount of features, we apply a Machine Learning framework
	to construct them. This automatic approach is capable of considering
	the large amount of features provided and yields highly accurate objective
	functions for face model fitting. Since the Machine Learning framework
	rejects non-relevant image features, we obtain high-performance runtime
	characteristics as well.},
 keywords = {facial expressions},
}

@inproceedings{wimmer_robustly_2008-1,
 author = {M Wimmer and S Pietzsch and C Mayer and B Radig},
 title = {Robustly Estimating the Color of Facial Components Using a Set of
	Adjusted Pixel Features},
 booktitle = {14. Workshop Farbbildverarbeitung},
 year = {2008},
 pages = {85--96},
 address = {Aachen, Germany},
 month = {oct},
 keywords = {facial expressions},
}

@inproceedings{wimmer_learning_2007,
 author = {M Wimmer and S Pietzsch and F Stulp and B Radig},
 title = {Learning Robust Objective Functions with Application to Face Model
	Fitting},
 booktitle = {Proceedings of the 29th {DAGM} Symposium},
 year = {2007},
 volume = {1},
 pages = {486--496},
 address = {Heidelberg, Germany},
 month = {sep},
 abstract = {Model-based image interpretation extracts high-level information from
	images using a priori knowledge about the object of interest. The
	computational challenge is to determine the model parameters that
	best match a given image by searching for the global optimum of the
	involved objective function. Unfortunately, this function is usually
	designed manually, based on implicit and domain-dependent knowledge,
	which prevents the fitting task from yielding accurate results. In
	this paper, we demonstrate how to improve model fitting by learning
	objective functions from annotated training images. Our approach
	automates many critical decisions and the remaining manual steps
	hardly require domain-dependent knowledge. This yields more robust
	objective functions that are able to achieve an accurate model fit.
	Our evaluation uses a publicly available image database and compares
	the obtained results to a recent state-of-the-art approach.},
 keywords = {facial expressions},
}

@article{wimmer_recognizing_2008-1,
 author = {M Wimmer and Z Riaz and C Mayer and B Radig},
 title = {Recognizing Facial Expressions Using Model-based Image Interpretation},
 journal = {Advances in Human-Computer Interaction},
 year = {2008},
 volume = {1},
 pages = {587--600},
 month = {oct},
 editor = {Pinder, Shane},
 keywords = {facial expressions},
}

@article{wimmer_learning_2008,
 author = {M Wimmer and F Stulp and S Pietzsch and B Radig},
 title = {Learning Local Objective Functions for Robust Face Model Fitting},
 journal = {{IEEE} Transactions on Pattern Analysis and Machine Intelligence
	({PAMI})},
 year = {2008},
 volume = {30},
 pages = {1357--1370},
 number = {8},
 doi = {10.1109/TPAMI.2007.70793},
 issn = {0162-8828},
 keywords = {facial expressions},
}

@inproceedings{wimmer_human_2007,
 author = {M Wimmer and U Zucker and B Radig},
 title = {Human Capabilities on Video-based Facial Expression Recognition},
 booktitle = {Proceedings of the 2nd Workshop on Emotion and Computing: Current
	Research and Future Impact},
 year = {2007},
 editor = {Reichardt, Dirk and Levi, Paul},
 pages = {7--10},
 address = {Osnabrück, Germany},
 month = {sep},
 abstract = {A lot of promising computer vision research has been conducted during
	the last decade in order to automatically recognize facial expressions.
	Some of these approaches achieve high accuracy; however, it has not yet
	been investigated how accurately humans accomplish this task, which
	would provide a comparable measure. Therefore, we conducted a survey
	on this issue, and this paper evaluates the gathered information regarding
	the recognition rate and the confusion of facial expressions.},
 keywords = {facial expressions},
}