% % GENERATED FROM https://www.coli.uni-saarland.de % by : anonymous % IP : coli2006.lst.uni-saarland.de % at : Mon, 05 Feb 2024 15:41:12 +0100 GMT % % Selection : Year = 2000 % @Proceedings{Anne_et_al:2000, TITLE = {Proceedings of the Workshop on Linguistically Interpreted Corpora LINC-2000, August 6}, YEAR = {2000}, EDITOR = {Abeillé, Anne and Brants, Thorsten and Uszkoreit, Hans}, ADDRESS = {Luxembourg} } @Article{Alonso Pardo_et_al:2000, AUTHOR = {Alonso Pardo, Miguel and Nederhof, Mark-Jan and Villemonte de la Clergerie, Eric}, TITLE = {Tabulation of Automata for Tree-Adjoining Languages}, YEAR = {2000}, JOURNAL = {Grammars}, VOLUME = {3}, PAGES = {89-110}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/nederhof00c.ps.gz}, ABSTRACT = {We propose a modular design of tabular parsing algorithms for tree-adjoining languages. The modularity is made possible by a separation of the parsing strategy from the mechanism of tabulation. The parsing strategy is expressed in terms of the construction of a nondeterministic automaton from a grammar; three distinct types of automaton will be discussed. The mechanism of tabulation leads to the simulation of these nondeterministic automata in polynomial time, independent of the parsing strategy. The proposed application of this work is the design of efficient parsing algorithms for tree-adjoining grammars and related formalisms.}, ANNOTE = {COLIURL : Pardo:2000:TAT.pdf Pardo:2000:TAT.ps} } @InProceedings{Andreeva_Barry:2000, AUTHOR = {Andreeva, Bistra and Barry, William J.}, TITLE = {Sentence Mode and Emotional Load in Bulgarian: Economy of Intonational Form?}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 7th Conference on Laboratory Phonology (LabPhon7), June 29 -July 1}, ADDRESS = {Nijmegen, Netherlands}, ABSTRACT = {An earlier study [1] illuminated the role of F0 contours on the focus accent in differentiating question and sentence mode in the Sofia variety of Bulgarian.The results showed that placement of the low target of the pitch accent at the beginning and the peak at the end of the accented syllable or in the following syllable (L*+H) is critical for the perception of (syntactically and lexically unmarked) checks, which are used to confirm already known information [2]. By shifting the peak leftwards towards the beginning of the accented syllable (H*) the pragmatic category changed from check to statement. Both categories have a L% boundary tone. However, subjects' judgements indicated that the strength of the intonational information was not equal for the two utterance types. The situational pre-context had a strong influence on the interpretation of the utterance as a check or statement. In case the intonation contour diverged from the unmarked contour for a particular function, the category judgement was accompanied by a change in the emotional message. To examine the general validity of these observations a further experiment was carried out. Three checks, three statements with low terminal boundary tones (L%), and three statements with continuation rises (H%) were selected from Map Task recordings made for a number of male and female speakers [3]. From each of these 9 natural utterances three intonational variants were generated, one for each pragmatic category. Firstly, a stylised resynthesized version of the original (e.g. a check) was produced. Then the intonation contours for the other two pragmatic categories (e.g. statement with terminal fall and continuation rise) were derived from the stylised contour. 
Four repetitions of the stimuli were presented (Roman square design) to 15 native speakers of Sofia Bulgarian in three situational contexts: question, neutral statement and polite statement. The context utterance together with the stimulus form a minimal dialogue. The natural context for the check was a statement, and for the two statement forms it was a question. In the test, each context was offered with each pragmatic category, producing potential tension between context and stimulus. The subjects were required to judge - on a five-point scale - the degree to which each stimulus was suited to its context. The results show that all three intonational contours can be accepted as statements in the context of a preceding question, whereas the change of context cannot shift the interpretation of a statement to a check. The following explanation can be offered for this asymmetry of reinterpretation. The context plays an extremely important role for the interpretation of checks or statements. The context priming a statement (question-answer sequence) provides enough information to uniquely specify the communicative frame. It is a strong enough speech act marker to relegate function of the intonational form to a minor one. Thus the context weakens the distinctive function that intonation has when word sequence and syntactic structure are identical.This does not, however, mean that the intonational form is irrelevant. The shift in the interpretation of the sentence mode (check to statement) can only occur because a compensatory change of modal meaning accompanies it. The check contour cannot be accepted as a neutral statement, it can only be accepted as an emphatic, impatient or angry statement. The statements with a continuation rise were also accepted in the statement context, but the compensatory modal message was of an exaggeratedly polite speaker. Apparently, when a typical (neutral) feature of a particular communicative situation is replaced by a feature typical of another situation, it introduces an additional modal marking. Thus intonation alone, without syntactic and lexical support, can imply a certain shade of modal meaning. This phenomenon is already known at the grammatical level. In Bulgarian, for example, the future tense is the neutral form for referring to events in the future. If the present tense is used, the utterance is immediately modally marked as a firm intention. In English and German the reverse is true.} } @InProceedings{Avgustinova:2000, AUTHOR = {Avgustinova, Tania}, TITLE = {Gaining the Perspective of Language-Family-Oriented Grammar Design: Predicative Special Clitics in Slavic}, YEAR = {2000}, BOOKTITLE = {1st Conference on Generative Linguistics in Poland (GLiP-1), November 13-14}, PAGES = {5-14}, EDITOR = {Banski, P. and Przepiórkowski, Adam}, ADDRESS = {Warsaw, Poland}, URL = {https://www.coli.uni-saarland.de/~tania/ta-pub/glip1.pdf}, NOTE = {also in: Selected Topics in Multilingual Grammar Design (Based on Data from Slavic Language Family), DFG-Zwischenbericht, Mai 1998 - April 1999}, ANNOTE = {COLIURL : Avgustinova:2000:GPL.pdf} } @Misc{Avgustinova:2000_1, AUTHOR = {Avgustinova, Tania}, TITLE = {Review of: Tesar, Bruce; Smolensky, Paul: Learnability in Optimality Theory. 
Cambridge: MIT Press, 2000}, YEAR = {2000}, URL = {http://linguistlist.org/issues/11/11-2024.html}, ANNOTE = {COLIURL : Avgustinova:2000:RTB.pdf} } @InProceedings{Avgustinova:2000_2, AUTHOR = {Avgustinova, Tania}, TITLE = {Arguments, Grammatical Relations, and Diathetic Paradigm}, YEAR = {2000}, BOOKTITLE = {7th International Conference on Head-Driven Phrase Structure Grammar, July 22-23}, PAGES = {23-42}, EDITOR = {Flickinger, Dan and Kathol, Andreas}, ADDRESS = {University of California, Berkeley, USA}, PUBLISHER = {CSLI Publications}, URL = {http://cslipublications.stanford.edu/HPSG/HPSG00/hpsg00avgustinova.pdf}, ANNOTE = {COLIURL : Avgustinova:2000:AGR.pdf} } @InProceedings{Avgustinova_Uszkoreit:2000, AUTHOR = {Avgustinova, Tania and Uszkoreit, Hans}, TITLE = {An Ontology of Systematic Relations for a Shared Grammar of Slavic}, YEAR = {2000}, BOOKTITLE = {18th International Conference on Computational Linguistics (COLING '00), July 31 - August 4}, VOLUME = {1}, PAGES = {28-34}, ADDRESS = {Saarbrücken, Germany}, PUBLISHER = {Morgan Kaufmann Publishers}, URL = {https://www.coli.uni-saarland.de/~tania/ta-pub/ta-hu-coling2000.pdf}, ABSTRACT = {Sharing portions of grammars across languages greatly reduces the costs of multilingual grammar engineering. Related languages share a much wider range of linguistic information than typically assumed in standard multilingual grammar architectures. Taking grammatical relatedness seriously, we are particularly interested in designing linguistically motivated grammatical resources for Slavic languages to be used in applied and theoretical computational linguistics. In order to gain the perspective of a language-family-oriented grammar design, we consider an array of systematic relations that can hold between syntactical units. While the categorisation of primitive linguistic entities tends to be language-specific or even construction-specific, the relations holding between them allow various degrees of abstraction. On the basis of Slavic data, we show how a domain ontology conceptualising morphosyntactic building blocks can serve as a basis of a shared grammar of Slavic.}, ANNOTE = {COLIURL : Avgustinova:2000:OSR.pdf} } @InProceedings{Bateman_et_al:2000, AUTHOR = {Bateman, John and Teich, Elke and Kruijff, Geert-Jan M. and Korbayova, Ivana and Sharoff, Serge and Skoumalová, Hana}, TITLE = {Resources for Multilingual Text Generation in Three Slavic Languages}, YEAR = {2000}, BOOKTITLE = {2nd International Conference on Language Resources and Evaluation (LREC-2000), May 31 - June 2}, PAGES = {1763-1768}, ADDRESS = {Athens, Greece}, URL = {http://ufal.mff.cuni.cz/agile/biblio/publications/2000/LREC00.pdf}, ANNOTE = {COLIURL : Bateman:2000:RMT.pdf} } @InProceedings{Baumann_et_al:2000, AUTHOR = {Baumann, Stefan and Grice, Martine and Benzmüller, Ralf}, TITLE = {GToBI - A Phonological System for the Transcription of German Intonation}, YEAR = {2000}, BOOKTITLE = {Prosody 2000: Speech Recognition and Synthesis Workshop, October 2-5}, PAGES = {21-28}, ADDRESS = {Kraków, Poland}, ANNOTE = {COLIURL : Baumann:2000:GPS.pdf} } @Proceedings{Tilman_Stephan:2000, TITLE = {Impacts in Natural Language Generation: NLG between Technology and Applications.
Workshop at Schloss Dagstuhl, Germany}, YEAR = {2000}, NUMBER = {D-00-01}, EDITOR = {Becker, Tilman and Busemann, Stephan}, SERIES = {DFKI Document}, ADDRESS = {Saarbrücken}, PUBLISHER = {DFKI}, URL = {ftp://ftp.dfki.uni-kl.de/pub/Publications/Documents/2000/D-00-01.tar.gz}, ABSTRACT = {This report contains the presented papers, abstracts of the invited talk and four sessions on 'burning issues' of the IMPACTS workshop on Natural Language Generation between Technology and Applications, held at Schloss Dagstuhl, July 26-28, 2000.}, ANNOTE = {COLIURL : Becker:2000:INL.tar} } @InProceedings{Bos_Gabsdil:2000, AUTHOR = {Bos, Johan and Gabsdil, Malte}, TITLE = {First-Order Inference and the Semantics of Questions and Answers}, YEAR = {2000}, BOOKTITLE = {Götalog 2000: 4th Workshop on the Semantics and Pragmatics of Dialogue, June 15-17}, PAGES = {43-50}, EDITOR = {Poesio, Massimo and Traum, David}, ADDRESS = {Göteborg, Sweden}, PUBLISHER = {Gothenburg Papers in Computational Linguistics 00-5}, URL = {https://www.coli.uni-saarland.de/~gabsdil/papers/goetalog00.ps.gz}, ANNOTE = {COLIURL : Bos:2000:FOI.pdf Bos:2000:FOI.ps} } @InCollection{Bos_Heine:2000, AUTHOR = {Bos, Johan and Heine, Julia}, TITLE = {Discourse and Dialog Semantics for Translation}, YEAR = {2000}, BOOKTITLE = {Verbmobil: Foundations of Speech-to-Speech Translation}, PAGES = {337-348}, EDITOR = {Wahlster, Wolfgang}, ADDRESS = {Berlin - Heidelberg - New York}, PUBLISHER = {Springer} } @Proceedings{Johan_Michael:2000, TITLE = {Proceedings of the Workshop on Inference in Computational Semantics (ICoS-2), International Conference and Research Center for Computer Science, July 29-30}, YEAR = {2000}, PAGES = {149}, EDITOR = {Bos, Johan and Kohlhase, Michael}, ADDRESS = {Schloss Dagstuhl}, URL = {https://www.coli.uni-saarland.de/~bos/icos/icos2.ps.gz}, ANNOTE = {COLIURL : Bos:2000:PWI.ps} } @InProceedings{Brants:2000, AUTHOR = {Brants, Thorsten}, TITLE = {Inter-Annotator Agreement for a German Newspaper Corpus}, YEAR = {2000}, BOOKTITLE = {2nd International Conference on Language Resources and Evaluation (LREC-2000), May 31 - June 2}, ADDRESS = {Athens, Greece}, URL = {https://www.coli.uni-saarland.de/~thorsten/publications/Brants-LREC00.ps.gz}, ABSTRACT = {This paper presents the results of an investigation on inter-annotator agreement for the NEGRA corpus, consisting of German newspaper texts. The corpus is syntactically annotated with part-of-speech and structural information. Agreement for part-of-speech is 98.6%, the labeled F-score for structures is 92.4%. The two annotations are used to create a common final version by discussing differences and by several iterations of cleaning. Initial and final versions are compared. We identify categories causing large numbers of differences and categories that are handled inconsistently.}, ANNOTE = {COLIURL : Brants:2000:IAA.pdf Brants:2000:IAA.ps} } @InProceedings{Brants:2000_1, AUTHOR = {Brants, Thorsten}, TITLE = {TnT - A Statistical Part-of-Speech Tagger}, YEAR = {2000}, BOOKTITLE = {6th Applied Natural Language Processing (ANLP '00), April 29 - May 4}, PAGES = {224-231}, ADDRESS = {Seattle, USA}, PUBLISHER = {Association for Computational Lingusitics}, URL = {https://www.coli.uni-saarland.de/~thorsten/publications/Brants-ANLP00.ps.gz}, ABSTRACT = {Trigrams'n'Tags (TnT) is an efficient statistical part-of-speech tagger. 
Contrary to claims found elsewhere in the literature, we argue that a tagger based on Markov models performs at least as well as other current approaches, including the Maximum Entropy framework. A recent comparison has even shown that TnT performs significantly better for the tested corpora. We describe the basic model of TnT, the techniques used for smoothing and for handling unknown words. Furthermore, we present evaluations on two corpora.}, ANNOTE = {COLIURL : Brants:2000:TSP.pdf Brants:2000:TSP.ps} } @InProceedings{Brants_Crocker:2000, AUTHOR = {Brants, Thorsten and Crocker, Matthew W.}, TITLE = {Probabilistic Parsing and Psychological Plausibility}, YEAR = {2000}, BOOKTITLE = {18th International Conference on Computational Linguistics (COLING '00), July 31 - August 4}, VOLUME = {1}, PAGES = {111-117}, ADDRESS = {Saarbrücken, Luxembourg, Nancy}, PUBLISHER = {Morgan Kaufmann Publishers}, URL = {https://www.coli.uni-saarland.de/~thorsten/publications/Brants-Crocker-COLING00.ps.gz}, ABSTRACT = {Given the recent evidence for probabilistic mechanisms in models of human ambiguity resolution, this paper investigates the plausibility of exploiting current wide-coverage, probabilistic parsing techniques to model human linguistic performance. In particular, we investigate the performance of standard stochastic parsers when they are revised to operate incrementally, and with reduced memory resources. We present techniques for ranking and filtering analyses, together with experimental results. Our results confirm that stochastic parsers which adhere to these psychologically motivated constraints achieve good performance. Memory can be reduced down to 1% (compared to exhaustive search) without reducing recall and precision. Additionally, these models exhibit substantially faster performance. Finally, we argue that this general result is likely to hold for more sophisticated, and psycholinguistically plausible, probabilistic parsing models.}, ANNOTE = {COLIURL : Brants:2000:PPP.pdf Brants:2000:PPP.ps} } @InProceedings{Brants_Plaehn:2000, AUTHOR = {Brants, Thorsten and Plaehn, Oliver}, TITLE = {Interactive Corpus Annotation}, YEAR = {2000}, BOOKTITLE = {2nd International Conference on Language Resources and Evaluation (LREC'00), May 31 - June 2}, EDITOR = {Gavrilidou, M. and Carayannis, G. and Markantonatou, S. and Piperidis, Stelios and Steinhaouer, G.}, ADDRESS = {Athens, Greece}, PUBLISHER = {European Language Resource Association (ELRA)}, URL = {https://www.coli.uni-saarland.de/~plaehn/papers/lrec2000.ps.gz https://www.coli.uni-saarland.de/~plaehn/papers/lrec2000.pdf}, ABSTRACT = {We present an easy-to-use graphical tool for syntactic corpus annotation. This tool, Annotate, interacts with a part-of-speech tagger and a parser running in the background. The parser incrementally suggests single phrases bottom-up based on cascaded Markov models. A human annotator confirms or rejects the parser's suggestions.
This semi-automatic process facilitates a very rapid and efficient annotation.}, ANNOTE = {COLIURL : Brants:2000:ICA.pdf Brants:2000:ICA.ps} } @InProceedings{Braun_et_al:2000, AUTHOR = {Braun, Bettina and Koreman, Jacques and Trouvain, Jürgen}, TITLE = {The Effect of Accentuation on Vowel Recognition}, YEAR = {2000}, BOOKTITLE = {Prosody 2000 Workshop: Speech Recognition and Synthesis, October 2-5}, ADDRESS = {Kraków, Poland}, URL = {https://www.coli.uni-saarland.de/~trouvain/braun_et_al_00.ps.gz}, ANNOTE = {COLIURL : Braun:2000:EAV.pdf} } @InProceedings{Bredenkamp_et_al:2000, AUTHOR = {Bredenkamp, Andrew and Crysmann, Berthold and Petrea, Mirela}, TITLE = {Building Multilingual Controlled Language Performance Checkers}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 3rd International Workshop on Controlled Language Applications (CLAW 2000), April 29-30}, PAGES = {83-89}, ADDRESS = {Seattle, Washington, USA} } @InProceedings{Bredenkamp_et_al:2000_1, AUTHOR = {Bredenkamp, Andrew and Crysmann, Berthold and Petrea, Mirela}, TITLE = {Looking for Errors: A Declarative Formalism for Resource-Adaptive Language Checking}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 2nd International Conference on Language Resources and Evaluation (LREC-2000), May 31 - June 2}, PAGES = {667-673}, ADDRESS = {Athens, Greece}, URL = {http://flag.dfki.de/pdf/LREC.pdf}, ABSTRACT = {The paper describes a phenomenon-based approach to grammar checking, which draws on the integration of different shallow NLP technologies, including morphological and POS taggers, as well as probabilistic and rule-based partial parsers. We present a declarative specification formalism for grammar checking and controlled language applications which greatly facilitates the development of checking components.}, ANNOTE = {COLIURL : Bredenkamp:2000:LED.pdf} } @MastersThesis{Brunklaus:2000, AUTHOR = {Brunklaus, Thorsten}, TITLE = {Der Oz Inspector - Browsen: Interaktiver, einfacher, effizienter}, YEAR = {2000}, ADDRESS = {Saarbrücken}, SCHOOL = {Universität des Saarlandes, Fachbereich 14 Informatik}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/OzInspector.ps.gz}, ABSTRACT = {Diese Arbeit beschreibt Konzept, Entwurf und Implementierung des Inspectors. Der Inspector ist ein interaktives, grafisches Werkzeug zur Darstellung von Oz-Datenstrukturen. Oz-Datenstrukturen sind komplex und deren Darstellung erfordert ein grafisches Werkzeug. Aus Sicht des Benutzers muss ein solches vor allem sehr effizient, flexibel und interaktiv sein. In dieser Arbeit wird ein System vorgestellt, das diese Anforderungen durch einen zweistufigen Ansatz erfüllt. Dieser besteht darin, neben effizienten Basisdiensten einen flexiblen Transformationsmechanismus einzusetzen. Die vorgestellte Implementierung ist hochmodular und sehr kompakt. Deren Effizienz wird schließlich durch Vergleich mit einem ähnlichen System demonstriert.}, ANNOTE = {COLIURL : Brunklaus:2000:OIB.pdf Brunklaus:2000:OIB.ps} } @InProceedings{Buitelaar:2000, AUTHOR = {Buitelaar, Paul}, TITLE = {Reducing Lexical Semantic Complexity with Systematic Polysemous Classes and Underspecification}, YEAR = {2000}, BOOKTITLE = {Workshop on Syntactic and Semantic Complexity in Natural Language Processing Systems (ANLP-NAACL'00), April 29 - May 3}, EDITOR = {Bagga, Amit and Pustejovsky, James and Zadrozny, Wlodek}, ADDRESS = {Seattle, Washington, USA}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/anlp00.ps.
http://dfki.de/~paulb/anlp00.ps}, ABSTRACT = {This paper presents an algorithm for finding systematic polysemous classes in WordNet and similar semantic databases, based on a definition in (Apresjan 1973). The introduction of systematic polysemous classes can reduce the amount of lexical semantic processing, because the number of disambiguation decisions can be restricted more clearly to those cases that involve real ambiguity (homonymy). In many applications, for instance in document categorization, information retrieval, and information extraction, it may be sufficient to know if a given word belongs to a certain class (underspecified sense) rather than to know which of its (related) senses exactly to pick. The approach for finding systematic polysemous classes is based on that of (Buitelaar 1998a, Buitelaar 1998b), while addressing some previous shortcomings.}, ANNOTE = {COLIURL : Buitelaar:2000:RLS.pdf Buitelaar:2000:RLS.ps} } @Proceedings{Paul_Koiti:2000, TITLE = {Proceedings of the COLING '00 Workshop on Semantic Annotation and Intelligent Content, August 5-6}, YEAR = {2000}, EDITOR = {Buitelaar, Paul and Hasida, Koiti}, ADDRESS = {Luxembourg, Centre Universitaire}, URL = {http://dfki.de/~paulb/coling2000.proceedings.pdf}, ABSTRACT = {SEMANTIC ANNOTATION is augmentation of data to facilitate automatic recognition of the underlying semantic structure. A common practice in this respect is labeling of documents with thesaurus classes for the sake of document classification and management. In the medical domain, for instance, there is a long-standing tradition in terminology maintenance and annotation/classification of documents using standard coding systems such as ICD, MeSH and the UMLS meta-thesaurus. Semantic annotation in a broader sense also addresses document structure (title, section, paragraph, etc.), linguistic structure (dependency, coordination, thematic role, co-reference, etc.), and so forth. In NLP, semantic annotation has been used in connection with machine-learning software trainable on annotated corpora for parsing, word-sense disambiguation, co-reference resolution, summarization, information extraction, and other tasks. A still unexplored but important potential of semantic annotation is that it can provide a common I/O format through which to integrate various component technologies in NLP and AI such as speech recognition, parsing, generation, inference, and so on. INTELLIGENT CONTENT is semantically structured data that is used for a wide range of content-oriented applications such as classification, retrieval, extraction, translation, presentation, and question-answering, as the organization of such data provides machines with accurate semantic input to those technologies. Semantically annotated resources as described above are typical examples of intelligent content, whereas another major class includes electronic dictionaries and inter-lingual or knowledge-representation data. Some ongoing projects along these lines are GDA (Global Document Annotation), UNL (Universal Networking Language) and SHOE (Simple HTML Ontology Extension), all of which aim at motivating people to semantically organize electronic documents in machine-understandable formats, and at developing and spreading content-oriented application technologies aware of such formats. Along similar lines, MPEG-7 is a framework for semantically annotating audiovisual data for the sake of content-based retrieval and browsing, among others. 
Incorporation of linguistic annotation into MPEG-7 is on the agenda, because linguistic descriptions already constitute a main part of existing metadata. In short, semantic annotation is a central, basic technology for intelligent content, which in turn is a key notion in systematically coordinating various applications of semantic annotation. In the hope of fueling some of the developments mentioned above and thus promoting the linkage between basic research and practical applications, the workshop invites researchers and practitioners from such fields as computational linguistics, document processing, terminology, information science, and multimedia content, among others, to discuss various aspects of semantic annotation and intelligent content in an interdisciplinary way.}, ANNOTE = {COLIURL : Buitelaar:2000:PCW.pdf} } @InCollection{Busemann:2000, AUTHOR = {Busemann, Stephan}, TITLE = {Generierung natürlichsprachlicher Texte}, YEAR = {2000}, BOOKTITLE = {Handbuch der Künstlichen Intelligenz}, PAGES = {783-814}, EDITOR = {Görz, Günther and Rollinger, C.-R. and Schneeberger, J.}, ADDRESS = {Oldenbourg}, PUBLISHER = {Oldenbourg Wissenschaftsverlag}, ABSTRACT = {This short survey of the state of the art in natural language generation (NLG) in particular aims at readers interested in building practical systems. It defines language generation, identifies possible areas of application and describes the tasks an NLG system should fulfil. The interdependencies of the tasks are discussed and architecture models derived. The discussion of in-depth vs. shallow generation is based on the insight that different types of applications require different kinds of generators. A methodology for the design of NLG applications concludes the paper. (Note: The paper is written in German.)} } @InProceedings{Busemann:2000_1, AUTHOR = {Busemann, Stephan}, TITLE = {Interfacing Constraint-Based Grammars and Generation Algorithms}, YEAR = {2000}, BOOKTITLE = {Workshop Analysis for Generation. 1st International Conference on Natural Language Generation, June 12-16}, ADDRESS = {Mitzpe Ramon, Israel}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/busemann00.ps.gz. http://arXiv.org/abs/cs.CL/0008003}, ABSTRACT = {Constraint-based grammars can, in principle, serve as the major linguistic knowledge source for both parsing and generation. Surface generation starts from input semantic representations that may vary across grammars. For many declarative grammars, the concept of derivation implicitly built in is that of parsing. They may thus not be interpretable by a generation algorithm. We show that linguistically plausible semantic analyses can cause severe problems for semantic-head-driven approaches for generation (SHDG). We use SeReal, a variant of SHDG and the DISCO grammar of German as our source of examples.
We propose a new, general approach that explicitly accounts for the interface between the grammar and the generation algorithm by adding a control-oriented layer to the linguistic knowledge base that reorganizes the semantics in a way suitable for generation.}, ANNOTE = {COLIURL : Busemann:2000:ICB.pdf Busemann:2000:ICB.ps} } @InProceedings{Busemann_et_al:2000, AUTHOR = {Busemann, Stephan and Schmeier, Sven and Arens, Roman G.}, TITLE = {Message Classification in the Call Center}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 6th Applied Natural Language Processing Conference (ANLP'00), April 29 - May 4}, PAGES = {158-165}, EDITOR = {Nirenburg, Sergei and Appelt, Douglas and Ciravegna, Fabio and Dale, Robert}, ADDRESS = {Seattle, Washington, USA}, PUBLISHER = {ACL}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/bus:sch:are:00.ps.gz ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/bus:sch:are:00.entry ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/bus:sch:are:00.dvi.gz. http://arXiv.org/abs/cs.CL/0003060}, ANNOTE = {COLIURL : Busemann:2000:MCC.pdf Busemann:2000:MCC.ps Busemann:2000:MCC.dvi} } @Article{Callmeier:2000, AUTHOR = {Callmeier, Ulrich}, TITLE = {PET. A Platform for Experimentation with Efficient HPSG Processing Techniques}, YEAR = {2000}, JOURNAL = {Journal of Natural Language Engineering}, VOLUME = {6}, NUMBER = {1}, PAGES = {99-108}, URL = {https://www.coli.uni-saarland.de/~uc/pubs/nle00.ps}, ANNOTE = {COLIURL : Callmeier:2000:PPE.pdf Callmeier:2000:PPE.ps} } @InCollection{Corley_Crocker:2000, AUTHOR = {Corley, Steffan and Crocker, Matthew W.}, TITLE = {The Modular Statistical Hypothesis: Exploring Lexical Category Ambiguity}, YEAR = {2000}, BOOKTITLE = {Architectures and Mechanisms for Language Processing}, EDITOR = {Crocker, Matthew W. and Pickering, Martin J. and Clifton, Charles Jr.}, ADDRESS = {Cambridge}, PUBLISHER = {Cambridge University Press} } @InProceedings{Cowie_et_al:2000, AUTHOR = {Cowie, Roddy and Douglas-Cowie, Ellen and Savvidou, Suzie and McMahon, Edelle and Sawey, Martin and Schröder, Marc}, TITLE = {'FEELTRACE': An Instrument for Recording Perceived Emotion in Real Time}, YEAR = {2000}, BOOKTITLE = {Proceedings of the ISCA Workshop on Speech and Emotion: A Conceptual Framework for Research}, PAGES = {19-24}, EDITOR = {Douglas-Cowie, Ellen and Cowie, Roddy and Schröder, Marc}, ADDRESS = {Belfast}, PUBLISHER = {Textflow}, URL = {http://www.dfki.de/~schroed/articles/cowieetal2000.pdf}, ABSTRACT = {FEELTRACE is an instrument developed to let observers track the emotional content of a stimulus as they perceive it over time, allowing the emotional dynamics of speech episodes to be examined. It is based on activation-evaluation space, a representation derived from psychology. The activation dimension measures how dynamic the emotional state is; the evaluation dimension is a global measure of the positive or negative feeling associated with the state. Research suggests that the space is naturally circular, i.e. states which are at the limit of emotional intensity define a circle, with alert neutrality at the centre. To turn those ideas into a recording tool, the space was represented by a circle on a computer screen, and observers described perceived emotional state by moving a pointer (in the form of a disc) to the appropriate point in the circle, using a mouse. Prototypes were tested, and in the light of results, refinements were made to ensure that outputs were as consistent and meaningful as possible. 
They include colour coding the pointer in a way that users readily associate with the relevant emotional state; presenting key emotion words as ‘landmarks’ at the strategic points in the space; and developing an induction procedure to introduce observers to the system. An experiment assessed the reliability of the developed system. Stimuli were 16 clips from TV programs, two showing relatively strong emotions in each quadrant of activation-evaluation space, each paired with one of the same person in a relatively neutral state. 24 raters took part. Differences between clips chosen to contrast were statistically robust. Results were plotted in activation-evaluation space as ellipses, each with its centre at the mean co-ordinates for the clip, and its width proportional to standard deviation across raters. The size of the ellipses meant that about 25 could be fitted into the space, i.e. FEELTRACE has resolving power comparable to an emotion vocabulary of 20 non-overlapping words, with the advantage of allowing intermediate ratings, and above all, the ability to track impressions continuously.}, ANNOTE = {COLIURL : Cowie:2000:FIR.pdf} } @Proceedings{Roddy_et_al:2000, TITLE = {Proceedings of the ISCA Workshop on Speech and Emotion: A Conceptual Framework for Research}, YEAR = {2000}, EDITOR = {Cowie, Roddy and Douglas-Cowie, Ellen and Schröder, Marc}, ADDRESS = {Belfast}, PUBLISHER = {Textflow}, URL = {http://www.qub.ac.uk/en/isca/proceedings} } @Article{Crocker_Brants:2000, AUTHOR = {Crocker, Matthew W. and Brants, Thorsten}, TITLE = {Wide Coverage Probabilistic Sentence Processing}, YEAR = {2000}, JOURNAL = {Journal of Psycholinguistic Research}, VOLUME = {29}, NUMBER = {6}, PAGES = {647-669} } @InProceedings{Crocker_Brants:2000_1, AUTHOR = {Crocker, Matthew W. and Brants, Thorsten}, TITLE = {Incremental Probabilistic Models of Human Linguistic Performance}, YEAR = {2000}, BOOKTITLE = {13th Annual CUNY Conference on Sentence Processing, March 30 - April 1}, ADDRESS = {La Jolla, California, USA} } @Book{Matthew_et_al:2000, TITLE = {Architectures and Mechanisms for Language Processing}, YEAR = {2000}, EDITOR = {Crocker, Matthew W. and Pickering, Martin J. and Clifton, Charles Jr.}, ADDRESS = {Cambridge}, PUBLISHER = {Cambridge University Press} } @InProceedings{de Jong_et_al:2000, AUTHOR = {de Jong, Franciska and Gauvain, Jean-Luc and Hiemstra, Djoerd and Netter, Klaus}, TITLE = {Language-Based Multimedia Information Retrieval}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 6th Conference on Content-Based Multimedia Information Access.
Recherche d'Informations Assistee par Ordinateur (RIAO '00)}, ADDRESS = {Paris, France}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/riao_lbmir_final.ps.gz}, ANNOTE = {COLIURL : Jong:2000:LBM.pdf Jong:2000:LBM.ps} } @PhdThesis{De Kuthy:2000, AUTHOR = {De Kuthy, Kordula}, TITLE = {Discontinuous NPs in German}, YEAR = {2000}, ADDRESS = {Saarbrücken}, SCHOOL = {Universität des Saarlandes}, URL = {http://www.dfki.de/dfkibib/publications/docs/dekuthy-thesis.ps.gz}, ANNOTE = {COLIURL : Kuthy:2000:DNG.pdf Kuthy:2000:DNG.ps} } @InProceedings{Declerck_et_al:2000, AUTHOR = {Declerck, Thierry and Jachmann, Alexander Werner and Uszkoreit, Hans}, TITLE = {The New Edition of the Natural Language Software Registry (an Initiative of ACL hosted at DFKI)}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 2nd International Conference on Language Resources and Evaluation (LREC'00), May 31 - June 2}, ADDRESS = {Athens, Greece}, PUBLISHER = {ELRA}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/lrec00_reg.ps.gz ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/lrec00_reg.entry}, ANNOTE = {COLIURL : Declerck:2000:NEN.pdf Declerck:2000:NEN.ps} } @InProceedings{Declerck_Neumann:2000, AUTHOR = {Declerck, Thierry and Neumann, Günter}, TITLE = {Using a Parameterizable and Domain-Adaptive Information Extraction System for Annotating Large-Scale Corpora?}, YEAR = {2000}, BOOKTITLE = {Proceedings of the Pre-Conference Workshop Information Extraction meets Corpus Linguistics, May 30}, ADDRESS = {Athens, Greece}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/IE-Corpus00_dec.ps.gz ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/IE-Corpus00_dec.entry}, ANNOTE = {COLIURL : Declerck:2000:UPD.pdf Declerck:2000:UPD.ps} } @InProceedings{Douglas-Cowie_et_al:2000, AUTHOR = {Douglas-Cowie, Ellen and Cowie, Roddy and Schröder, Marc}, TITLE = {A New Emotion Database: Considerations, Sources and Scope}, YEAR = {2000}, BOOKTITLE = {Proceedings of the ISCA Workshop on Speech and Emotion: A Conceptual Framework for Research}, PAGES = {39-44}, EDITOR = {Douglas-Cowie, Ellen and Cowie, Roddy and Schröder, Marc}, ADDRESS = {Belfast}, PUBLISHER = {Textflow}, URL = {http://www.dfki.de/~schroed/articles/douglascowieetal2000.pdf}, ABSTRACT = {Research on the expression of emotion is underpinned by databases. Reviewing available resources persuaded us of the need to develop one that prioritised ecological validity. The basic unit of the database is a clip, which is an audiovisual recording of an episode that appears to be reasonably selfcontained. Clips range from 10 -- 60 secs, and are captured as MPEG files. They were drawn from two main sources. People were recorded discussing emotive subjects either with each other, or with one of the research team. We also recorded extracts from television programs where members of the public interact in a way that at least appears essentially spontaneous. Associated with each clip are two additional types of file. An audio file (.wav format) contains speech alone, edited to remove sounds other than the main speaker. An interpretation file describes the emotional state that observers attribute to the main speaker, using the FEELTRACE system to provide a continuous record of the perceived ebb and flow of emotion. 
Clips have been extracted for 100 speakers, with at least two for each speaker (one relatively neutral and others showing marked emotions of different kinds).}, ANNOTE = {COLIURL : Douglas-Cowie:2000:NED.pdf} } @InProceedings{Duchier:2000, AUTHOR = {Duchier, Denys}, TITLE = {A Model-Eliminative Treatment of Quantifier-Free Tree Descriptions}, YEAR = {2000}, BOOKTITLE = {Algebraic Methods in Language Processing (AMILP '00). 16th Twente Workshop on Language Technology (TWLT 16) and 2nd AMAST Workshop, May 20-22}, PAGES = {55-66}, EDITOR = {Heylen, D. and Nijholt, Anton and Scollo, G.}, ADDRESS = {Iowa City, Iowa, USA}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/duchier-amilp.ps.gz}, ABSTRACT = {Tree descriptions are widely used in computational linguistics for talking and reasoning about trees. For practical applications, it is essential to be able to decide satisfiability and enumerate solutions efficiently. This challenge cannot realistically be met by brute force enumeration. However it can be addressed very effectively by constraint propagation as provided by modern constraint technology. Previously, we studied the conjunctive fragment of tree descriptions and showed how the problem of finding minimal models of a conjunctive tree description could be transformed into a constraint satisfaction problem (CSP) on finite set variables. In this paper, we extend our account to the fragment that admits both negation and disjunction, but still leaves out quantification. Again we provide a reduction to a CSP. While our previous encoding introduced the reader to set constraints and disjunctive propagators, we now extend our arsenal with selection propagators.}, ANNOTE = {COLIURL : Duchier:2000:MET.pdf Duchier:2000:MET.ps} } @InProceedings{Duchier:2000_1, AUTHOR = {Duchier, Denys}, TITLE = {Constraint Programming for Natural Language Processing}, YEAR = {2000}, BOOKTITLE = {European Summer School in Logic, Language, and Information (ESSLLI '00), August 6-18}, ADDRESS = {Birmingham, England}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/duchier-esslli2000.ps.gz}, ABSTRACT = {This course demonstrates how constraint programming can be used effectively in practice, for linguistic applications. It shows how many forms of ambiguity arising in linguistics can be represented compactly and elegantly, and processed efficiently with constraints. A key idea to derive the most benefit from constraint propagation is that intended models should be characterized as solutions of Constraint Satisfaction Problems (CSPs) rather than defined inductively or in a generative fashion. We examine several topics in detail: encodings of finite domains, tree descriptions using dominance constraints, and parsing with dependency grammars. In each case, we present a formal characterization of the problem as a CSP and illustrate how to derive a corresponding constraint program. The course includes 4 complete interactive applications written in Oz, with full code supplied.
Through these programmatic vignettes the reader is exposed to the practice of constraint programming with finite domain and finite set variables, and introduced to some of the more powerful types of constraints available today, such as reified constraints, disjunctive propagators, and selection constraints.}, ANNOTE = {COLIURL : Duchier:2000:CPN.pdf Duchier:2000:CPN.ps} } @InProceedings{Duchier_Niehren:2000, AUTHOR = {Duchier, Denys and Niehren, Joachim}, TITLE = {Dominance Constraints with Set Operators}, YEAR = {2000}, BOOKTITLE = {1st International Conference on Computational Logic (CL '00), July 24-28}, NUMBER = {1861}, PAGES = {326-341}, EDITOR = {Lloyd, J. and Dahl, V.}, SERIES = {Lecture Notes in Computer Science}, ADDRESS = {Imperial College, London, UK}, PUBLISHER = {Springer}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/dombool.ps.gz}, ABSTRACT = {Dominance constraints are widely used in computational linguistics as a language for talking and reasoning about trees. In this paper, we extend dominance constraints by admitting set operators. Set operators contribute a controlled form of disjunction that is eminently well-suited for constraint propagation. We present a solver for dominance constraints with set operators as a system of abstract propagation and distribution rules, and prove its soundness and completeness. We then derive an efficient implementation in a constraint programming language with finite sets and prove its faithfulness to the abstract inference rules.}, ANNOTE = {COLIURL : Duchier:2000:DCS.pdf Duchier:2000:DCS.ps} } @InProceedings{Düsterhöft_et_al:2000, AUTHOR = {Düsterhöft, Antje and Neumann, Günter and Becker, Markus and Bedersdorfer, Jochen and Bruder, Ilvio}, TITLE = {GETESS: Constructing a Linguistic Search Index for an Internet Search Engine}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 5th International Conference on Applications of Natural Language to Information Systems (NLDB'00), June}, ADDRESS = {Versailles, France}, URL = {http://www.dfki.de/~neumann/publications/new-ps/nldb-ohne.ps.gz}, ABSTRACT = {In this paper we illustrate how Internet documents can be automatically analyzed in order to capture the content of a document in a more detailed way than usual. The result of the document analysis is called an abstract and will be used as a linguistic search index for the Internet search engine GETESS. We show how the linguistic analysis system SMES can be used for a Harvest based search engine for constructing a linguistic search index. Further, we denote how the linguistic index can be exploited for answering user search inquiries.}, ANNOTE = {COLIURL : Dusterhoft:2000:GCL.pdf Dusterhoft:2000:GCL.ps} } @PhdThesis{Egg:2000, AUTHOR = {Egg, Markus}, TITLE = {Flexible Semantic Construction: The Case of Reinterpretation}, YEAR = {2000}, ADDRESS = {Saarbrücken}, SCHOOL = {Universität des Saarlandes, Department of Computational Linguistics} } @InCollection{Egg:2000_1, AUTHOR = {Egg, Markus}, TITLE = {Reinterpretation from a Synchronic and Diachronic Point of View}, YEAR = {2000}, BOOKTITLE = {Meaning Change - Meaning Variation, vol.2. Arbeitspapier Nr. 106}, EDITOR = {Eckardt, R.
and Heusinger, K.}, ADDRESS = {Konstanz}, PUBLISHER = {FG Sprachwissenschaft, Universität Konstanz}, URL = {https://www.coli.uni-saarland.de/cl/projects/chorus/papers/egg99.ps.gz}, ABSTRACT = {Frequently an utterance can only be understood if one integrates additional material into its meaning, which mediates between semantically conflicting parts of the utterance. This process is known as reinterpretation. From a synchronic viewpoint, it is a 'creative' or 'dynamic' aspect of natural language, to be described and integrated in a formal description of natural language semantics. But reinterpretation phenomena can also be regarded as a gateway for linguistic change, since they may get conventionalized and thus enlarge the domain of compositional semantics. Analyzing reinterpretation will therefore also provide insights into mechanisms of linguistic change. The proposed account of reinterpretation goes as follows. Semantic construction yields ambiguous structures for reinterpretation cases, which are then monotonically enriched with information from extralinguistic sources (e.g. world knowledge). Semantic ambiguities are described in the framework of underspecification. This account of reinterpretation allows a straightforward modelling of its synchronic and diachronic aspects.}, ANNOTE = {COLIURL : Egg:2000:RSD.pdf Egg:2000:RSD.ps} } @Article{Erbach_Saurer:2000, AUTHOR = {Erbach, Gregor and Saurer, Werner}, TITLE = {Review of Handbook of Logic in Artificial Intelligence and Logic Programming}, YEAR = {2000}, JOURNAL = {Artificial Intelligence Review}, VOLUME = {14}, NUMBER = {6}, PAGES = {615-617}, URL = {http://purl.org/net/gregor/pub/gabbay.txt}, NOTE = {Original publication: Dov M. Gabbay, Christopher J. Hogger, John Alan Robinson (Eds.), Handbook of Logic in Artificial Intelligence and Logic Programming, Volume 2: Deduction Methodologies}, ANNOTE = {COLIURL : Erbach:2000:RHL.pdf Erbach:2000:RHL.ps} } @TechReport{Ericsson_et_al:2000, AUTHOR = {Ericsson, Stina and Lewin, Ian and Rupp, Christopher J. and Cooper, Robin}, TITLE = {Dialogue Moves in Negotiative Dialogues}, YEAR = {2000}, MONTH = {September}, NUMBER = {1.2}, ADDRESS = {Göteborg}, TYPE = {Siridus Report}, INSTITUTION = {Göteborg University, Department of Linguistics}, URL = {http://www.ling.gu.se/projekt/siridus/Publications/deliv1-2.ps.gz}, ANNOTE = {COLIURL : Ericsson:2000:DMN.pdf Ericsson:2000:DMN.ps} } @InProceedings{Erk:2000, AUTHOR = {Erk, Katrin}, TITLE = {Die Verarbeitung von Parallelismus-Constraints}, YEAR = {2000}, BOOKTITLE = {Informatik 2000 - 30. Jahrestagung der Gesellschaft für Informatik, 19.-22. September}, EDITOR = {Mehlhorn, Kurt and Snelting, G.}, SERIES = {Informatik Aktuell}, ADDRESS = {Berlin, Germany}, PUBLISHER = {Springer}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/GI00.ps.gz}, ABSTRACT = {Parallelismus-Constraints sind partielle Beschreibungen von Bäumen. Wir verwenden sie als Repräsentationsformalismus in der unterspezifizierten natürlichsprachlichen Semantik. Parallelismus-Constraints sind gleichmächtig wie Kontext-Unifikation, deren Entscheidbarkeit ein bekanntes offenes Problem ist. Dieser Text beschreibt ein Semi-Entscheidungs-Verfahren für Parallelismus-Constraints und eine erste Implementierung.
Anders als alle bekannten Verfahren für Kontext-Unifikation terminiert diese Prozedur für Dominanz-Constraints, eine für die linguistische Anwendung wichtige Teilklasse.}, ANNOTE = {COLIURL : Erk:2000:VPC.pdf Erk:2000:VPC.ps} } @InProceedings{Erk_Niehren:2000, AUTHOR = {Erk, Katrin and Niehren, Joachim}, TITLE = {Parallelism Constraints}, YEAR = {2000}, BOOKTITLE = {11th International Conference on Rewriting Techniques and Applications (RTA '00), July 10-12}, EDITOR = {Bachmair, L.}, SERIES = {LNCS}, ADDRESS = {University of East Anglia, Norwich, UK}, PUBLISHER = {Springer}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/parallelism.ps.gz}, ABSTRACT = {Parallelism constraints are logical desciptions of trees. They are as expressive as context unification, i.e. second-order linear unification. We present a semi-decision procedure enumerating all most general unifiers of a parallelism constraint and prove it sound and complete. In contrast to all known procedures for context unification, the presented procedure terminates for the important fragment of dominance constraints and performs reasonably well in a recent application to underspecified natural language semantics.}, ANNOTE = {COLIURL : Erk:2000:PC.ps} } @Book{Erk_Priese:2000, AUTHOR = {Erk, Katrin and Priese, Lutz}, TITLE = {Theoretische Informatik. Eine umfassende Einführung}, YEAR = {2000}, ADDRESS = {Berlin}, PUBLISHER = {Springer} } @Article{Flickinger_et_al:2000, AUTHOR = {Flickinger, Dan and Oepen, Stefan and Uszkoreit, Hans and Tsujii, Jun-ichi}, TITLE = {Introduction}, YEAR = {2000}, JOURNAL = {Journal of Natural Language Engineering}, VOLUME = {6}, NUMBER = {1}, PAGES = {1-14} } @Book{Dan_et_al:2000, TITLE = {Journal of Natural Language Engineering. Special Issue on Efficient processing with HPSG: Methods, Systems, Evaluation.}, YEAR = {2000}, VOLUME = {6}, EDITOR = {Flickinger, Dan and Oepen, Stefan and Uszkoreit, Hans and Tsujii, Jun-ichi}, ADDRESS = {Cambridge}, PUBLISHER = {Cambridge University Press}, URL = {http://www.dfki.de/dfkibib/publications/docs/Flickinger_2000_JNLE.pdf}, ANNOTE = {COLIURL : Flickinger:2000:JNL.pdf} } @Article{Flickinger_et_al:2000_1, AUTHOR = {Flickinger, Dan and Oepen, Stephan and Uszkoreit, Hans and Tsujii, Jun-ichi}, TITLE = {Introduction}, YEAR = {2000}, JOURNAL = {Journal of Natural Language Engineering}, VOLUME = {6}, NUMBER = {1}, PAGES = {1-14}, URL = {http://www.dfki.de/dfkibib/publications/docs/Flickinger_2000_JNLE.pdf}, ABSTRACT = {This issue of Natural Language Engineering journal reports on recent achievements in the domain of hpsg-based parsing. Research groups at Saarbrücken, CSLI Stanford and the University of Tokyo have worked on grammar development and processing systems that allow the use of hpsg-based processing in practical application contexts. Much of the research reported here has been collaborative, and all of the work shares a commitment to producing comparable results on wide-coverage grammars with substantial test suites. The focus of this special issue is deliberately narrow, to allow detailed technical reports on the results obtained among the collaborating groups. Thus, the volume cannot aim at providing a complete survey on the current state of the field. This introduction summarizes the research background for the work reported in the issue, and puts the major new approaches and results into perspective. 
Relationships to similar efforts pursued elsewhere are included, along with a brief summary of the research and development efforts reflected in the volume, the joint reference grammar, and the common sets of reference data.}, ANNOTE = {COLIURL : Flickinger:2000:IB.pdf} } @Article{Gabsdil_Striegnitz:2000, AUTHOR = {Gabsdil, Malte and Striegnitz, Kristina}, TITLE = {Classifying Scope Ambiguities}, YEAR = {2000}, JOURNAL = {Journal of Language and Computation}, VOLUME = {1}, NUMBER = {2}, PAGES = {291-297}, URL = {https://www.coli.uni-saarland.de/~kris/papers/jlac2000.ps.gz}, ABSTRACT = {We describe the architecture and implementation of a system which compares semantic representations of natural language input w.r.t. equivalence of logical content and context change potential. By using automated theorem proving we compute a graph-like structure which represents the relationships that hold between different readings of a given sentence. The information encoded in the graph-structure can be useful for discourse processing systems where knowledge about the relative logical strength of readings might be used to reduce the number of readings that have to be considered during processing. The system relies heavily on existing implementations and code available via the internet. These are integrated and put to the desired use by a Prolog interface. By illustrating the architecture of this system, we want to argue that it is possible to build rather complex systems involving multiple levels of linguistic processing without having to spend an unreasonably large amount of time on the implementation of basic functionalities.}, ANNOTE = {COLIURL : Gabsdil:2000:CSA.pdf Gabsdil:2000:CSA.ps} } @TechReport{Gardent_Webber:2000, AUTHOR = {Gardent, Claire and Webber, Bonnie}, TITLE = {Automated Reasoning and Discourse Disambiguation}, YEAR = {2000}, MONTH = {January}, NUMBER = {113}, PAGES = {24}, ADDRESS = {Saarbrücken}, TYPE = {CLAUS-Report}, INSTITUTION = {Universität des Saarlandes}, URL = {ftp://ftp.coli.uni-sb.de/pub/coli/claus/claus113.ps}, ABSTRACT = {The performance of first-order automated reasoning systems has been steadily improving, stimulated in part by the availability of test suites of mathematical problems on which the systems can be tested, tuned and compared. But discourse understanding in Natural Language poses different inference problems than mathematics. In order to tailor automated reasoning systems to the needs of Natural Language understanding, similar test suites need to be developed. In this paper, we claim that several kinds of ambiguity in discourse can be resolved through automated reasoning checks for consistency, informativity and minimality. Future test suites should therefore include problems of these sorts. The overall goal then is to characterise the range of inference problems that discourse understanding gives rise to and that test suites should include.}, ANNOTE = {COLIURL : Gardent:2000:ARD.pdf Gardent:2000:ARD.ps} } @Article{Grice_et_al:2000, AUTHOR = {Grice, Martine and Ladd, Robert D. and Arvaniti, Amalia}, TITLE = {On the Place of Phrase Accents in Intonational Phonology}, YEAR = {2000}, JOURNAL = {Phonology}, VOLUME = {17}, NUMBER = {2}, PAGES = {143-185} } @InCollection{Grice_et_al:2000_1, AUTHOR = {Grice, Martine and Leech, Geoffrey and Weisser, Martin and Wilson, Andrew}, TITLE = {Representation and Annotation of Dialogue}, YEAR = {2000}, BOOKTITLE = {Handbook of Multimodal and Spoken Dialogue Systems. 
Resources, Terminology and Product Evaluation}, PAGES = {1-101}, EDITOR = {Gibbon, Dafydd and Mertins, I. and Moore, Robert}, ADDRESS = {Dordrecht}, PUBLISHER = {Kluwer Academic Publishers} } @InCollection{Hemforth_et_al:2000, AUTHOR = {Hemforth, Barbara and Konieczny, Lars and Scheepers, Christoph}, TITLE = {Modifier Attachment: Relative Clauses and Coordinations}, YEAR = {2000}, BOOKTITLE = {German Sentence Processing}, PAGES = {159-183}, EDITOR = {Hemforth, Barbara and Konieczny, Lars}, ADDRESS = {Dordrecht}, PUBLISHER = {Kluwer Academic Publishers} } @InCollection{Hemforth_et_al:2000_1, AUTHOR = {Hemforth, Barbara and Konieczny, Lars and Scheepers, Christoph}, TITLE = {Syntactic Attachment and Anaphor Resolution: The two Sides of Relative Clause Attachment}, YEAR = {2000}, BOOKTITLE = {Architectures and Mechanisms for Language Processing}, PAGES = {259-281}, EDITOR = {Crocker, Matthew W. and Pickering, Martin J. and Clifton, Charles Jr.}, ADDRESS = {Cambridge, UK}, PUBLISHER = {Cambridge University Press} } @InProceedings{Henz_Müller:2000, AUTHOR = {Henz, Martin and Müller, Tobias}, TITLE = {An Overview of Finite Domain Constraint Programming}, YEAR = {2000}, BOOKTITLE = {5th Conference of the Association of Asia-Pacific Operations Research Societies (APORS '00), July 5-7}, ADDRESS = {Singapore, Republic of Singapore}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/amai2000.ps.gz}, ABSTRACT = {In recent years, the repertoire of available techniques for solving combinatorial problems has seen a significant addition: constraint programming. Constraint programming is best seen as a framework for combining software components to achieve problem-specific solvers. The strength of constraint programming depends on the synergy that can be achieved between these components. In this tutorial introduction, we give an overview of constraint programming for solving combinatorial problems.}, ANNOTE = {COLIURL : Henz:2000:OFD.pdf Henz:2000:OFD.ps} } @InProceedings{Holloway King_et_al:2000, AUTHOR = {Holloway King, Tracy and Dipper, Stefanie and Frank, Anette and Kuhn, Jonas and Maxwell, John}, TITLE = {Ambiguity Management in Grammar Writing}, YEAR = {2000}, BOOKTITLE = {Proceedings of the Workshop on Linguistic Theory and Grammar Implementation (ESSLLI-2000)}, PAGES = {5-19}, EDITOR = {Hinrichs, Erhard and Meurers, Detmar and Wintner, Shuly}, ADDRESS = {Birmingham, UK}, URL = {http://www.dfki.de/~frank/papers/ESSLLI00-Dipperetal.ps.gz}, ABSTRACT = {When linguistically motivated grammars are implemented on a larger scale, and applied to real-life corpora, keeping track of ambiguity sources becomes a difficult task. Yet it is of great importance, since unintended ambiguities arising from underrestricted rules or interactions have to be distinguished from linguistically warranted ambiguities. In this paper we report on various tools in the XLE grammar development platform which can be used for ambiguity management in grammar writing. In particular, we look at packed representations of ambiguities that allow the grammar writer to view sorted descriptions of ambiguity sources.
Also discussed are tools forspecifying desired tree structures and for cutting down the solution space prior to parsing.}, NOTE = {Revised and extended version to appear 2002 in: Special issue of the Journal of Language and Computation}, ANNOTE = {COLIURL : King:2000:AMG.pdf King:2000:AMG.ps} } @Article{Hutter_Kohlhase:2000, AUTHOR = {Hutter, Dieter and Kohlhase, Michael}, TITLE = {Managing Structural Information by Higher-Order Colored Unification}, YEAR = {2000}, JOURNAL = {Journal of Automated Reasoning}, VOLUME = {25}, PAGES = {123-164} } @InProceedings{Karagjosova:2000, AUTHOR = {Karagjosova, Elena}, TITLE = {A Unified Approach to the Meaning and Function of Modal Particles in Dialogue}, YEAR = {2000}, BOOKTITLE = {Proceedings of the ESSLLI 2000 Student Session, August 6-18}, EDITOR = {Pilire, Catherine}, ADDRESS = {University of Birmingham, UK}, URL = {https://www.coli.uni-saarland.de/~elka/Papers/karagjosova.ps}, ANNOTE = {COLIURL : Karagjosova:2000:UAM.pdf Karagjosova:2000:UAM.ps} } @InCollection{Kasper:2000, AUTHOR = {Kasper, Walter}, TITLE = {Multilingual Semantic Databases}, YEAR = {2000}, BOOKTITLE = {Verbmobil: Foundations of Speech-to-Speech Translation}, PAGES = {348-358}, EDITOR = {Wahlster, Wolfgang}, SERIES = {Artificial Intelligence}, ADDRESS = {Berlin}, PUBLISHER = {Springer}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/284.entry} } @InProceedings{Kiefer_Krieger:2000, AUTHOR = {Kiefer, Bernd and Krieger, Hans-Ulrich}, TITLE = {A Context-Free Approximation of Head-Driven Phrase Structure Grammar}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 6th International Workshop on Parsing Technologies (IWPT'00), February 23-25}, PAGES = {135-146}, ADDRESS = {Trento, Italy}, ABSTRACT = {We present a context-free approximation of unification-based grammars, such as HPSG or PATR-II. The theoretical underpinning is established through a least fixpoint construction over a certain monotonic function. In order to reach a finite fixpoint, the concrete implementation can be parameterized in several ways, either by specifying a finite iteration depth, by using different restrictors, or by making the symbols of the CFG more complex adding annotations à la GPSG. We also present several methods that speed up the approximation process and help to limit the size of the resulting CF grammar.} } @InCollection{Kiefer_et_al:2000, AUTHOR = {Kiefer, Bernd and Krieger, Hans-Ulrich and Nederhof, Mark-Jan}, TITLE = {Efficient and Robust Parsing of Word Hypotheses Graphs}, YEAR = {2000}, BOOKTITLE = {Verbmobil: Foundations of Speech-to-Speech Translation}, PAGES = {280-295}, EDITOR = {Wahlster, Wolfgang}, ADDRESS = {Berlin}, PUBLISHER = {Springer}, ABSTRACT = {This paper describes new and improved techniques which help a unification-based parser to process input efficiently and robustly. In combination these methods result in a speed-up in parsing time of more than an order of magnitude. 
The methods are correct in the sense that none of them rule out legal rule applications.} } @InProceedings{Kiefer_et_al:2000_1, AUTHOR = {Kiefer, Bernd and Krieger, Hans-Ulrich and Siegel, Melanie}, TITLE = {An HPSG-to-CFG Approximation of Japanese}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 18th International Conference on Computational Linguistics (COLING'00), July 31 - August 4}, VOLUME = {2}, PAGES = {1046-1050}, ADDRESS = {Saarbrücken, Deutschland}, PUBLISHER = {Morgan Kaufmann Publishers}, URL = {http://www.dfki.de/~siegel/coling00.ps.gz}, ABSTRACT = {We present a simple approximation method for turning a Head-Driven Phrase Structure Grammar into a context-free grammar. The approximation method can be seen as the construction of the least fixpoint of a certain monotonic function. We discuss an experiment with a large HPSG for Japanese.}, ANNOTE = {COLIURL : Kiefer:2000:HCA.pdf Kiefer:2000:HCA.ps} } @InProceedings{Kohlhase_Koller:2000, AUTHOR = {Kohlhase, Michael and Koller, Alexander}, TITLE = {Towards a Tableaux Machine for Language Understanding (ICoS '00)}, YEAR = {2000}, BOOKTITLE = {2nd Workshop on Inference in Computational Semantics (ICoS-2), July 30}, ADDRESS = {Schloss Dagstuhl, Germany}, URL = {https://www.coli.uni-saarland.de/~koller/papers/txm.ps.gz}, ABSTRACT = {We outline an abstract inference machine for producing discourse models in natural language understanding. This machine has tableaux as its central data structure and can operate in model generation and theorem proving modes. Search spaces are controlled by keeping track of NP saliences and equipping proof rules with costs.}, ANNOTE = {COLIURL : Kohlhase:2000:TTM.pdf Kohlhase:2000:TTM.ps} } @InProceedings{Koller_et_al:2000, AUTHOR = {Koller, Alexander and Mehlhorn, Kurt and Niehren, Joachim}, TITLE = {A Polynomial-Time Fragment of Dominance Constraints}, YEAR = {2000}, BOOKTITLE = {38th Annual Meeting of the Association for Computational Linguistics (ACL '00), October 1-8}, ADDRESS = {Hong Kong}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/poly-dom.ps.gz}, ABSTRACT = {Dominance constraints are a logical language for describing trees that is widely used in computational linguistics. Their general satisfiability problem is known to be NP-complete. Here we identify normal dominance constraints, a natural fragment whose satisfiability problem we show to be in polynomial time. We present a quadratic satisfiability algorithm and use it in another algorithm that enumerates solutions efficiently. Our result is useful for various applications of dominance constraints and related formalisms.}, ANNOTE = {COLIURL : Koller:2000:PTF.pdf Koller:2000:PTF.ps} } @InProceedings{Koller_Niehren:2000, AUTHOR = {Koller, Alexander and Niehren, Joachim}, TITLE = {Constraint Programming in Computational Linguistics}, YEAR = {2000}, BOOKTITLE = {8th CSLI Workshop on Logic Language and Computation, May 30}, EDITOR = {Barker-Plummer, Dave and Beaver, D. and van Benthem, Johan and Scotto di Luzio, P.}, ADDRESS = {Stanford}, PUBLISHER = {CSLI}, URL = {http://www.ps.uni-sb.de/Papers/abstracts/CP-NL.html ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/CP-NL.ps.gz}, ABSTRACT = {Constraint programming is a programming paradigm that was originally invented in computer science to deal with hard combinatorial problems. Recently, constraint programming has evolved into a technology which permits to solve hard industrial scheduling and optimization problems. 
We argue that existing constraint programming technology can be useful for applications in natural language processing. Some problems whose treatment with traditional methods requires great care to avoid combinatorial explosion of (potential) readings seem to be solvable in an efficient and elegant manner using constraint programming. We illustrate our claim by two recent examples, one from the area of underspecified semantics and one from parsing.}, ANNOTE = {COLIURL : Koller:2000:CPC.pdf Koller:2000:CPC.ps} } @InProceedings{Koller_Niehren:2000_1, AUTHOR = {Koller, Alexander and Niehren, Joachim}, TITLE = {On Underspecified Processing of Dynamic Semantics}, YEAR = {2000}, BOOKTITLE = {18th International Conference on Computational Linguistics (COLING '00), July 31 - August 4}, PAGES = {460-466}, ADDRESS = {Saarbrücken, Germany}, PUBLISHER = {Morgan Kaufmann}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/dynamic.ps.gz}, ABSTRACT = {We propose a new inference system which operates on underspecified semantic representations of scope and anaphora. This inference system exploits anaphoric accessibility conditions known from dynamic semantics to decide scope ambiguities if possible. The main feature of the system is that it deals with underspecified descriptions directly, i.e. without enumerating readings.}, ANNOTE = {COLIURL : Koller:2000:UPD.pdf Koller:2000:UPD.ps} } @Article{Koller_et_al:2000_1, AUTHOR = {Koller, Alexander and Niehren, Joachim and Striegnitz, Kristina}, TITLE = {Relaxing Underspecified Semantic Representations for Reinterpretation}, YEAR = {2000}, JOURNAL = {Grammars}, VOLUME = {3}, NUMBER = {2-3}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/relax2000.ps.gz}, ABSTRACT = {Type and sort conflicts in semantics are usually resolved by a process of reinterpretation, which introduces an operator into the semantic representation. We elaborate on the foundations of a recent approach to reinterpretation within a framework for semantic underspecification. In this approach, relaxed underspecified semantic representations are inferred from the syntactic structure, leaving space for subsequent addition of reinterpretation operators. Unfortunately, a structural danger of overgeneration is inherent to the relaxation of underspecified semantic representations. We identify the problem and distinguish structural properties that avoid it. We furthermore develop techniques for proving these properties and apply them to prove the safety of relaxation in a prototypical syntax/semantics interface. 
In doing so, we present some novel properties of tree descriptions in the constraint language over lambda structures (CLLS).}, ANNOTE = {COLIURL : Koller:2000:RUS.pdf Koller:2000:RUS.ps} } @InCollection{Konieczny_et_al:2000, AUTHOR = {Konieczny, Lars and Hemforth, Barbara and Scheepers, Christoph}, TITLE = {Head Position and Clause Boundary Effects in Reanalysis}, YEAR = {2000}, BOOKTITLE = {German Sentence Processing}, PAGES = {247-278}, EDITOR = {Hemforth, Barbara and Konieczny, Lars}, ADDRESS = {Dordrecht}, PUBLISHER = {Kluwer Academic Publishers} } @InProceedings{Koreman_Andreeva:2000, AUTHOR = {Koreman, Jacques and Andreeva, Bistra}, TITLE = {Phonetic features in ASR: A linguistic solution to acoustic variation?}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 7th Conference on Laboratory Phonology (LabPhon7), June 29 - July 1}, ADDRESS = {Nijmegen, Netherlands}, ABSTRACT = {In most phonological theories, phonemes are considered as a set (or hierarchy) of (possibly underspecified) phonetic features, which are the minimal number of formal properties needed to distinguish the phonemes in the language system from each other. In most state-of-the-art automatic speech recognition (ASR) systems, however, phonetic features do not play any role. The statistical models for each phone or phoneme are based on a spectral parameterisation of the signals, like mel-frequency cepstral coefficients (MFCC's) and energy. Three questions are dealt with in this paper: Can we successfully bridge this gap between phonological theory and ASR by using phonetic features in ASR? Which phonetic feature set is most appropriate for ASR? Can we attain the same result by using more complex non-linguistic modelling? 1. PHONETIC FEATURES IN ASR To bridge the gap between phonologists' formal representation of the phoneme and the almost purely acoustic description of the signal used in ASR systems, we have used phonetic features to create statistical phone models for automatic speech recognition. The phonetic features were derived from the spectral representation of the signal used in most standard ASR systems (MFCC's + energy) by means of a neural network. Not only do we find a clear increase in the phoneme identification rate (see under 2 below) [1], the confusions between phonemes are also much easier to interpret, since phonemes which are confused are usually very similar in terms of the phonetic features they are made up of. This is not the case when acoustic parameters are used to create phoneme models [2]. 2. DIFFERENT PHONETIC FEATURE SETS It is not self-evident which set of phonetic features is most appropriate to describe phonological categories and the processes that operate on them, since the various feature theories have different phonological implications. To evaluate how appropriate the different feature sets are for application in an ASR system, we have used several different feature sets, both articulatory-phonetic (IPA) and phonological (SPE) [3]. We have so far compared the phoneme identification results for both underspecified and fully specified SPE features with those for the set of features used in the IPA to distinguish all phonemes. In addition, the results were compared to those in a standard ASR system using acoustic parameters (MFCC's) directly to create phone models. We found a clear improvement in the phoneme identification rate when phonetic features were used to model the phones, in comparison to directly using acoustic parameters. 
Underspecified SPE features led to the best performance (for multi-language Eurom0 data, without the use of a lexicon or language model) of all: acoustic parameters: 15.6% IPA features: 42.6% SPE features: 36.2% Underspecified SPE features: 46.1% In addition to the features sets reported so far, the phoneme identification results for articulatory features [4] will be reported and their relative merits will be discussed. 3. VARIATION MODELLING VERSUS LINGUISTIC MODELLING The acoustic-phonetic mapping in a neural network combines two advantages, namely 1) variation modelling: different acoustic realisations of the same phoneme (e.g. allophonic variants) can be discerned by the neural network 2) linguistic modelling: these different realisations are mapped onto more homogeneous, distinctive features Even if the neural network can reduce the variation in the input parameters for statistical modelling by mapping different acoustic realisations of a phoneme onto phonetic features, the question remains whether the same result can be reached by using a non-linguistic approach. Variation modelling can also be achieved by using more complex acoustic phoneme models (multiple mixtures per state in HMM), so that we do not necessarily have to make a mapping onto phonetic features to achieve this goal. A comparison of the performance of a standard system which does not use phonetic features with the performance of a system in which phonetic features are used to train the phoneme models shows the merits of using a signal representation derived from phonological theory.} } @Article{Koreman_et_al:2000, AUTHOR = {Koreman, Jacques and Andreeva, Bistra and Erriquez, Attilio and Barry, William J.}, TITLE = {Can we use the linguistic information in the signal?}, YEAR = {2000}, JOURNAL = {PHONUS}, VOLUME = {5}, PAGES = {47-58}, URL = {https://www.coli.uni-saarland.de/Phonetics/Research/PHONUS_research_reports/Phonus5/Koreman_PHONUS5.pdf}, ABSTRACT = {This article discusses the use of phonetic features in automatic speech recognition. The phonetic features are derived from acoustic parameters by means of Kohonen networks. Behind the use of phonetic features instead of standard acoustic parameters lies the assumption that it is useful to help the system to focus on linguistically relevant signal properties. Previous experiments using very simple hidden Markov models to represent the phones (with only one mixture for each state and without a lexicon or language model) have indeed shown that the phoneme identification rates on the basis of phonetic features were considerably higher than on the basis of acoustic parameters. When eight mixtures per state are used in hidden Markov modelling, the phoneme identification rates for three different sets of phonetic features were found to be lower than those obtained from a system in which the acoustic parameters are modelled directly. 
It is suggested that the results are still good enough, however, to further explore the use of phonetic features in a complete automatic speech recognition system: if each phone sequence representing a word in the lexicon is replaced by a sequence of underspecified phonetic feature vectors, the use of phonetic features in the acoustic decoding may have certain advantages.}, ANNOTE = {COLIURL : Koreman:2000:CWU.pdf} } @InProceedings{Krenn:2000, AUTHOR = {Krenn, Brigitte}, TITLE = {Empirical Implications on Lexical Association Measures}, YEAR = {2000}, BOOKTITLE = {9th EURALEX International Conference, August 8-12}, ADDRESS = {Stuttgart, Germany} } @InProceedings{Krenn:2000_1, AUTHOR = {Krenn, Brigitte}, TITLE = {CDB - A Database of Lexical Collocations}, YEAR = {2000}, BOOKTITLE = {2nd International Conference on Language Resources & Evaluation (LREC '00), May 31 - June 2}, ADDRESS = {Athens, Greece}, PUBLISHER = {ELRA} } @InProceedings{Krenn:2000_2, AUTHOR = {Krenn, Brigitte}, TITLE = {Collocation Mining: Exploiting Corpora for Collocation Identification and Representation}, YEAR = {2000}, BOOKTITLE = {5. Konferenz zur Verarbeitung natürlicher Sprache (KONVENS '00), 9.-10. Oktober}, ADDRESS = {Ilmenau, Germany} } @PhdThesis{Krenn:2000_3, AUTHOR = {Krenn, Brigitte}, TITLE = {The Usual Suspects: Data-Oriented Models for Identification and Representation of Lexical Collocations. Saarbrücken Dissertations in Computational Linguistics and Language Technology, Volume 7}, YEAR = {2000}, ADDRESS = {Saarbrücken}, SCHOOL = {Universität des Saarlandes, Department of Computational Linguistics} } @InCollection{Kruijff_Kruijff-Korbayová:2000, AUTHOR = {Kruijff, Geert-Jan M. and Korbayova, Ivana}, TITLE = {Aggregation and Contextual Reference in Automatically Generated Instructions}, YEAR = {2000}, BOOKTITLE = {Proceedings of the Conference on Text, Speech and Dialogue - Third International Workshop, TSD 2000, Brno, Czech Republic, September 13-16}, VOLUME = {1902}, PAGES = {87-92}, EDITOR = {Sojka, Petr and Kopecek, Ivan and Pala, Karel}, SERIES = {Lecture Notes in Computer Science}, ADDRESS = {Berlin}, PUBLISHER = {Springer}, URL = {https://www.coli.uni-saarland.de/~korbay/Publications/tsd2000.ps.gz}, ANNOTE = {COLIURL : Kruijff:2000:ACR.pdf Kruijff:2000:ACR.ps} } @InProceedings{Kruijff_et_al:2000, AUTHOR = {Kruijff, Geert-Jan M. and Teich, Elke and Bateman, John and Korbayova, Ivana and Skoumalová, Hana and Sharoff, Serge and Sokolova, Lena and Hartley, Tony and Staykova, Kamy and Hana, Jirí}, TITLE = {A Multilingual System for Text Generation in Three Slavic Languages}, YEAR = {2000}, BOOKTITLE = {18th International Conference on Computational Linguistics (COLING '00), July 31 - August 4}, VOLUME = {2}, PAGES = {474-480}, ADDRESS = {Universität des Saarlandes, Saarbrücken, Germany}, PUBLISHER = {Morgan Kaufmann Publishers}, URL = {https://www.coli.uni-saarland.de/~korbay/Publications/agile-coling00.ps.gz}, ANNOTE = {COLIURL : Kruijff:2000:MST.pdf Kruijff:2000:MST.ps} } @Article{Kruijff-Korbayová:2000, AUTHOR = {Korbayova, Ivana}, TITLE = {Review of: Ladd, Robert: Intonational Phonology. 
Cambridge Studies in Linguistics 79, Cambridge University Press, 1997.}, YEAR = {2000}, JOURNAL = {Prague Bulletin of Mathematical Linguistics}, NUMBER = {73-74}, PAGES = {117-120} } @Article{Kruijff-Korbayová:2000_1, AUTHOR = {Korbayova, Ivana}, TITLE = {Discourse Meaning: Papers in Honour of Eva Hajicová}, YEAR = {2000}, JOURNAL = {Linguistica Pragensia}, VOLUME = {10}, NUMBER = {2}, PAGES = {105-108} } @InProceedings{Kruijff-Korbayová_Webber:2000, AUTHOR = {Korbayova, Ivana and Webber, Bonnie}, TITLE = {Discourse Connectives, Inference and Information Structure}, YEAR = {2000}, BOOKTITLE = {Workshop on Inference in Computational Semantics (ICoS-2), July 29-30}, PAGES = {105-120}, EDITOR = {Bos, Johan and Kohlhase, Michael}, ADDRESS = {International Conference and Research Center for Computer Science, Schloß Dagstuhl, Germany}, URL = {https://www.coli.uni-saarland.de/~bos/icos/kruijff.ps}, ANNOTE = {COLIURL : Kruijff-Korbayova:2000:DCI.pdf Kruijff-Korbayova:2000:DCI.ps} } @InProceedings{Kruijff-Korbayová_Webber:2000_1, AUTHOR = {Korbayova, Ivana and Webber, Bonnie}, TITLE = {Information Structure and the Interpretation of Discourse Connectives in English and Czech}, YEAR = {2000}, BOOKTITLE = {2nd International Conference in Contrastive Semantics and Pragmatics (SIC-CSP 2000), September 11-13}, EDITOR = {Jaszcolt, Kasia and Turner, Ken}, ADDRESS = {Cambridge, UK} } @MastersThesis{Kurz:2000, AUTHOR = {Kurz, Daniela}, TITLE = {Wortstellungspräferenzen im Deutschen}, YEAR = {2000}, ADDRESS = {Saarbrücken}, SCHOOL = {Universität des Saarlandes, Computerlinguistik} } @InProceedings{Kurz:2000_1, AUTHOR = {Kurz, Daniela}, TITLE = {A Statistical Account on Word Order Variation in German}, YEAR = {2000}, BOOKTITLE = {COLING Workshop on Linguistically Interpreted Corpora (LINC '00), August 6}, ADDRESS = {Luxembourg}, URL = {https://www.coli.uni-saarland.de/~kurz/linc00.ps.gz}, ABSTRACT = {In this paper we present a corpus-based study involving the linear order of subject, indirect object and direct object in German. The aim was to examine several hypotheses derived from Hawkins' (1994) performance theory. In this context it was crucial to examine whether and to which extent length influences the order of subject and objects. The analysis was based on data extracted from the annotated NEGRA corpus (Skut et al., 1998) and the untagged Frankfurter Rundschau corpus. We developed an analysis system operating on the untagged corpus that facilitates the acquisition of data and subsequent statistical analysis. We describe this system and discuss the results drawn from the analysis of the data. These results do not support the theoretical assumptions made by Hawkins. Furthermore, they suggest the investigation of other factors than length.}, ANNOTE = {COLIURL : Kurz:2000:SAW.pdf Kurz:2000:SAW.ps} } @InProceedings{Kurz_et_al:2000, AUTHOR = {Kurz, Daniela and Skut, Wojciech and Uszkoreit, Hans}, TITLE = {German Factors Constraining Word Order Variation}, YEAR = {2000}, BOOKTITLE = {13th Annual Conference on Human Sentence Processing, Poster presentation (CUNY 2000), March 30 - April 1}, ADDRESS = {La Jolla, California, USA}, NOTE = {poster presentation} } @PhdThesis{Lehmann:2000, AUTHOR = {Lehmann, Sabine}, TITLE = {Towards a Theory of Syntactic Phenomena. 
Saarbrücken Dissertations in Computational Linguistics and Language Technology, Volume 11}, YEAR = {2000}, ADDRESS = {Saarbrücken}, SCHOOL = {Universität des Saarlandes, Fachbereich Computerlinguistik}, URL = {http://www.dfki.de/~slehmann/thesis.pdf} } @TechReport{Lewin_et_al:2000, AUTHOR = {Lewin, Ian and Rupp, Christopher J. and Hieronymus, Jim and Milward, David and Larsson, Staffan and Berman, Alexander}, TITLE = {Siridus System Architecture and Interface Report}, YEAR = {2000}, MONTH = {September}, NUMBER = {6.1}, ADDRESS = {Göteborg}, TYPE = {Siridus Report}, INSTITUTION = {Göteborg University, Department of Linguistics}, URL = {http://www.ling.gu.se/projekt/siridus/Publications/deliv6-1.ps.gz}, ANNOTE = {COLIURL : Lewin:2000:SSA.pdf Lewin:2000:SSA.ps} } @Article{Müller_Niehren:2000, AUTHOR = {Müller, Martin and Niehren, Joachim}, TITLE = {Ordering Constraints over Feature Trees Expressed in Second-Order Monadic Logic}, YEAR = {2000}, JOURNAL = {Information and Computation}, VOLUME = {159}, NUMBER = {1-2}, PAGES = {22-58}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/SWSJournal99.ps.gz}, ABSTRACT = {The system FT$_{\leq}$ of ordering constraints over feature trees has been introduced as an extension of the system FT of equality constraints over feature trees. While the first-order theory of FT is well understood, only a few complexity and decidability results are known for fragments of the first-order theory of FT$_{\leq}$. We introduce a new handle for such decidability questions by showing how to express ordering constraints over feature trees in second-order monadic logic (S2S or WS2S). Our relationship implies a new decidability result for feature logics, namely that the entailment problem of FT$_{\leq}$ with existential quantifiers $\phi_1 \models \exists x_1 \ldots \exists x_n\, \phi_2$ is decidable. We also show that this problem is PSPACE-hard even though the quantifier-free case can be solved in cubic time. To our knowledge, this is the first time that a non-trivial decidability result of feature logic is reduced to Rabin's famous tree theorem.}, ANNOTE = {COLIURL : Muller:2000:OCFa.pdf Muller:2000:OCFa.ps} } @Article{Müller_et_al:2000, AUTHOR = {Müller, Martin and Niehren, Joachim and Podelski, Andreas}, TITLE = {Ordering Constraints over Feature Trees}, YEAR = {2000}, JOURNAL = {Constraints, an International Journal}, VOLUME = {5}, NUMBER = {1-2}, PAGES = {7-42}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/ftsub-constraints-99.ps.gz}, ABSTRACT = {Feature trees are the formal basis for algorithms manipulating record-like structures in constraint programming, computational linguistics and in concrete applications like software configuration management. Feature trees model records, and constraints over feature trees yield extensible and modular record descriptions. We introduce the constraint system FT$_{\leq}$ of ordering constraints interpreted over feature trees. Under the view that feature trees represent symbolic information, the relation $\leq$ corresponds to the information ordering ("carries less information than"). We present two algorithms in cubic time, one for the satisfiability problem and one for the entailment problem of FT$_{\leq}$. We show that FT$_{\leq}$ has the independence property. We are thus able to handle negative conjuncts via entailment and obtain a cubic algorithm that decides the satisfiability of conjunctions of positive and negated ordering constraints over feature trees. 
Furthermore, we reduce the satisfiability problem of Dörre's weak subsumption constraints to the satisfiability problem of FT$_{\leq}$ and improve the complexity bound for solving weak subsumption constraints from $O(n^5)$ to $O(n^3)$.}, ANNOTE = {COLIURL : Muller:2000:OCFb.pdf Muller:2000:OCFb.ps} } @InProceedings{Müller:2000, AUTHOR = {Müller, Stefan}, TITLE = {Continuous or Discontinuous Constituents}, YEAR = {2000}, BOOKTITLE = {Proceedings of the ESSLLI'00 Workshop on Linguistic Theory and Grammar Implementation, August 6-18}, PAGES = {133-152}, EDITOR = {Hinrichs, Erhard and Meurers, Detmar and Wintner, Shuly}, ADDRESS = {Birmingham}, URL = {http://www.dfki.de/~stefan/PS/esslli00.ps}, ABSTRACT = {In diesem Aufsatz diskutiere ich verschiedene HPSG-Ansätze zur Beschreibung der Konstituentenstellung im Deutschen. Ansätze, die von kontinuierlichen Konstituenten ausgehen, werden mit einem Ansatz, der diskontinuierliche Konstituenten annimmt, verglichen. Die Anzahl der passiven Kanten, die beim Parsen von 24.602 Äußerungen aus dem Verbmobil-Korpus von der Verbmobil-Grammatik erzeugt werden, wird mit der Anzahl der passiven Kanten verglichen, die die Babel-Grammatik erzeugt. In this paper I discuss several possible analyses for constituent order in German. Approaches that assume continuous constituents are compared with an approach that assumes discontinuous constituents. I will show that certain proposals that have been made to analyze constituent order are either not adequate or cannot be implemented with currently available systems. For the proposals that can be implemented I will discuss the amount of work a parser has to do. I then compare two implementations of larger fragments of German: the Verbmobil grammar and the Babel grammar. It is shown that the amount of work to be done to parse the Verbmobil grammar is significantly higher than the work that has to be done parsing with the Babel grammar.}, ANNOTE = {COLIURL : Muller:2000:CDC.pdf Muller:2000:CDC.ps} } @InProceedings{Müller:2000_1, AUTHOR = {Müller, Stefan}, TITLE = {Object-to-Subject Raising or Lexical Rule - An HPSG Analysis of the German Passive}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 5. Konferenz zur Verarbeitung natürlicher Sprache (KONVENS'00) Sprachkommunikation}, PAGES = {157-162}, EDITOR = {Zühlke, Werner and Schukat-Talamazzini, Ernst G.}, ADDRESS = {Ilmenau, Deutschland}, PUBLISHER = {VDE Verlag}, URL = {http://www.dfki.de/~stefan/PS/konvens2000.ps.gz}, ABSTRACT = {In dem Aufsatz zeige ich, daß Objekt-zu-Subjektanhebungsansätze wie der von Pollard (1994) und mir (1999) vorgeschlagene problematisch sind, weil sie die Adjektivbildung nicht zufriedenstellend erklären können. Der Ansatz von Heinz und Matiasek (1994) kommt mit modalen Infinitiven und Kontrolle nicht klar. Ich entwickle dann eine auf Lexikonregeln basierende Analyse, die auch komplizierte Fälle des Fernpassivs erklären kann.}, ANNOTE = {COLIURL : Muller:2000:OSR.pdf Muller:2000:OSR.ps} } @InCollection{Müller:2000_2, AUTHOR = {Müller, Stefan}, TITLE = {German Particle Verbs and the Predicate Complex}, YEAR = {2000}, BOOKTITLE = {Grammatical Interfaces in HPSG}, VOLUME = {8}, PAGES = {215-229}, EDITOR = {Cann, Ronnie and Grover, Claire and Miller, Philip}, SERIES = {Studies in Constraint-Based Lexicalism}, ADDRESS = {Stanford}, PUBLISHER = {CSLI}, URL = {http://www.dfki.de/~stefan/PS/part-complex.ps}, ABSTRACT = {In German there is a class of verbs that can appear discontinuously (1). 
The part that appears to the left of the main verb in verb final position and that is stranded when the finite verb is in initial position is traditionally called a separable prefix. Since prefixes are by definition not separable, the terms particle and preverb are used in more recent work. (1) a. Setzt der Fährmann Karl über? takes the ferryman Karl across 'Does the ferryman take Karl across?' b. daß der Fährmann Karl übersetzt. that the ferryman Karl across.takes In (1a), where the verb is in initial position, the preverb is stranded. Below I will argue that separable verbs in German behave like other elements in the predicate complex. This view is supported by the following facts: Preverbs are serialized like verbal or predicative adjectival complements in the right sentence bracket (the right periphery of a clause that does not contain extraposed elements); they can be fronted as can be done with single verbs or predicative adjectives. If preverbs are analyzed as part of the predicate complex, the fronting data can be accounted for as an instance of complex fronting (Partial Verb Phrase Fronting (PVP)). The inability of particles and predicates in resultative constructions to co-occur and the non-iterability of preverbs will be explained by the fact that particles and resultative predicates occupy a designated valence position that does not allow more than one particle.}, ANNOTE = {COLIURL : Muller:2000:GPV.pdf Muller:2000:GPV.ps} } @PhdThesis{Müller:2000_3, AUTHOR = {Müller, Stefan}, TITLE = {Complex Predicates: Verbal Complexes, Resultative Constructions, and Particle Verbs in German}, YEAR = {2000}, ADDRESS = {Saarbrücken}, SCHOOL = {Universität des Saarlandes}, TYPE = {Habilitationsschrift}, ABSTRACT = {In dem Buch entwickle ich eine Theorie der komplexen Prädikate, die normale Kopulakonstruktionen, kohärente Infinitive einschließlich der AcI-Konstruktionen, Subjekt- und Objektprädikative, Resultativkonstruktionen und Partikelverben erfaßt. (1) Er fährt das Auto kaputt / zu Schrott. (2) Er ißt das Fleisch roh. Zur Abgrenzung der Resultativkonstruktionen (1) von anderen sekundären Prädikaten wird auch auf depiktive Prädikate (2) eingegangen und eine entsprechende Analyse für diese entwickelt. Ich zeige, daß depiktive Prädikate als Adjunkte zu behandeln sind, die mit einem Element in der Argumentstruktur des Verbs, das sie modifizieren, koindiziert sind. Ich zeige auch, daß die Elemente in der Argumentstruktur des Verbs je nach ihrer Obliqueness als Antezedenten unterschiedlich gut geeignet sind. Im Teil, der sich mit Partikelverben beschäftigt, argumentiere ich, daß sich Verbzusätze wie andere Teile des Prädikatskomplexes verhalten. Sie werden wie Verben in der rechten Satzklammer angeordnet (3)-(4), können wie Adjektive oder Verben einzeln im Vorfeld stehen (5) oder bei Fokussierung wie Adjektive auch im Mittelfeld links vom Verbalkomplex angeordnet werden. (3) Karl kommt abends in Berlin an. (4) Karl kommt abends in der Stadt an, in der ich wohne. (5) Fest steht, daß Karl nicht der Mörder war. Es scheint also angebracht zu sein, die Verbzusatz-Verb-Konstruktionen syntaktisch wie Verbalkomplexe, also in der Syntax, zu behandeln. Wenn Verbzusätze analog zu anderen Elementen des Verbalkomplexes behandelt werden, kann (5) als Fall von (Partial) Verb Phrase Fronting behandelt werden. Für nicht-transparente Partikelverben nehme ich Lexikoneinträge an, die denen von verbalkomplexbildenden Verben ähneln. 
Lexikoneinträge, die in transparenten Partikelverbkombinationen verwendet werden, werden durch Lexikonregeln lizenziert. Bei Behandlung der Partikeln als eigenständige syntaktische Einheiten stellt sich natürlich die Frage, wie die morphologischen Fakten, insbesondere die Derivation, erklärt werden können. In einer breiten empirischen Untersuchung werden Ähnlichkeiten zur Derivation mit Resultativkonstruktionen aufgezeigt und es wird deutlich gemacht, daß weder bei Resultativkonstruktionen noch bei Partikelverben Listedness für die Derivation entscheidend ist. Produktive Partikel-Verb-Verbindungen und die eindeutig syntaktischen Resultativkonstruktionen treten z.B. in ähnlicher Weise in vielen Nominalisierungsformen auf. Zur Beschreibung von Flexion und Derivation nehme ich Lexikonregeln an, die Stämme auf flektierte Formen bzw. Stämme auf andere Stämme abbilden. Im Fall der Partikelverben wird ein Stamm, der für eine Partikel subkategorisiert ist, durch Flexionsregeln auf ein Wort abgebildet, das dann in der Syntax mit der Partikel kombiniert werden kann. Alternativ kann der Stamm aber auch durch eine Derivationsregel auf einen anderen Stamm abgebildet werden, dieser Stamm muß dann natürlich auch flektiert werden. Da der Stamm für die Partikel subkategorisiert ist, haben Derivationsregeln Zugriff auf Information, die von der Partikel beigesteuert wird (z.B. semantische Information bzw. Information über zusätzliche Komplemente). Die vieldiskutierten Klammerparadoxa existieren für diesen Ansatz nicht. Mächtige Mittel wie Umklammerung (Rebracketing) werden nicht benötigt. Die Daten aus dem in diesem Buch enthaltenen Kapitel über Partikelverben wurden teilweise auf der HPSG 99 und dem Partikelverbworkshop in Leipzig präsentiert. Die Passivanalyse mit Lexikonregeln wurde auf der HPSG 2000 in Berkeley vorgestellt. Über komplexe Prädikate und Partikelverben habe ich auf der GGS 2000 in Potsdam vorgetragen. Über eine Lösung der Klammerparadoxa in der morphologischen Analyse der Partikelverben habe ich auf der HPSG 2001 in Trondheim gesprochen und die Analyse für depiktive Prädikate wurde auf der FG 2001 in Helsinki vorgestellt.} } @InProceedings{Müller:2000_4, AUTHOR = {Müller, Stefan}, TITLE = {The Passive as a Lexical Rule}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 7th International Conference on Head-Driven Phrase Structure Grammar (HPSG'00), July 22-23}, PAGES = {247-266}, EDITOR = {Flickinger, Dan and Kathol, Andreas}, ADDRESS = {Berkeley, University of California, USA}, PUBLISHER = {CSLI}, URL = {http://cslipublications.stanford.edu/HPSG/1/hpsg00mueller.pdf}, ABSTRACT = {In this paper I show that object to subject raising approaches as suggested by Pollard (1994) and Müller (1999) are problematic since they cannot account for adjective formation in a satisfying way. The approach by Heinz and Matiasek (1994), which is a formalization of Haider's (1986) ideas, cannot account for modal infinitives and control. 
I develop a lexical rule based approach and it will be shown that this approach also extends to tricky cases of remote passive.}, ANNOTE = {COLIURL : Muller:2000:PLR.pdf} } @InCollection{Müller_Kasper:2000, AUTHOR = {Müller, Stefan and Kasper, Walter}, TITLE = {HPSG Analysis of German}, YEAR = {2000}, BOOKTITLE = {Verbmobil: Foundations of Speech-to-Speech Translation}, PAGES = {238-253}, EDITOR = {Wahlster, Wolfgang}, SERIES = {Artificial Intelligence}, ADDRESS = {Berlin}, PUBLISHER = {Springer}, ABSTRACT = {Es wird ein Überblick über die HPSG-Grammatik für die tiefe Analyse im Verbmobil-System gegeben. Die Verarbeitung von Spontansprache und der Sprache spezifischer Anwendungsdomänen wird diskutiert. Extralinguistische Information, die es in der geschriebenen Sprache nicht gibt, wie z.B. die Prosodie, wird berücksichtigt. Eine empirische Evaluation der Grammatik in bezug auf Verbmobil-Korpora wird durchgeführt.} } @InProceedings{Müller:2000_5, AUTHOR = {Müller, Tobias}, TITLE = {Promoting Constraints to First-Class Status}, YEAR = {2000}, BOOKTITLE = {1st International Conference on Computational Logic (CL '00), July 24-28}, NUMBER = {1861}, PAGES = {429-447}, EDITOR = {Lloyd, J. and Dahl, V. and Furbach, U. and Kerber, Manfred and Lau, K.-K. and Palamidessi, C. and Pereira, L. M. and Sagiv, Y. and Stuckey, P.J.}, SERIES = {Lecture Notes in Artificial Intelligence}, ADDRESS = {Imperial College, London, UK}, PUBLISHER = {Springer}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/Mueller-00a.ps.gz}, ABSTRACT = {This paper proposes to promote constraints to first-class status. In contrast to constraint propagation, which performs inference on values of variables, first-class constraints allow reasoning about the constraints themselves. This lets the programmer access the current state of a constraint and control a constraint's behavior directly, thus making powerful new programming and inference techniques possible, such as the combination of constraint propagation and rewriting constraints à la term rewriting. First-class constraints allow for meta constraint programming. Promising applications in the field of combinatorial optimization include early unsatisfiability detection, constraint reformulation to improve propagation, garbage collection of redundant but not yet entailed constraints, and finding minimal inconsistent subsets of a given set of constraints for debugging immediately failing constraint programs. We demonstrate the above-mentioned applications by means of examples. The experiments were done with Mozart Oz but can be easily ported to other constraint solvers.}, ANNOTE = {COLIURL : Muller:2000:PCF.pdf Muller:2000:PCF.ps} } @InProceedings{Müller:2000_6, AUTHOR = {Müller, Tobias}, TITLE = {Practical Investigation of Constraints with Graph Views}, YEAR = {2000}, BOOKTITLE = {6th International Conference on Principles and Practice of Constraint Programming (CP '00), September 18-22}, NUMBER = {1984}, PAGES = {320-336}, EDITOR = {Dechter, R.}, SERIES = {Lecture Notes in Computer Science}, ADDRESS = {Singapore}, PUBLISHER = {Springer}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/Mueller00b.ps.gz}, ABSTRACT = {Combinatorial problems can be efficiently tackled with constraint programming systems. The main tasks of the development of a constraint-based application are modeling the problem at hand and subsequently implementing that model. 
Typically, erroneous behavior of a constraint-based application is caused by either the model or the implementation (or both of them). Current constraint programming systems provide limited debugging support for modeling and implementing a problem. This paper proposes the Constraint Investigator, an interactive tool for debugging the model and the implementation of a constraint-based application. In particular, the Investigator is targeted at problems like wrong, void, or partial solutions. A graph metaphor is used to reflect the constraints in the solver and to present them to the user. The paper shows that this metaphor is intuitive and proposes approaches to deal with real-life problem sizes. The Investigator has been implemented in Mozart Oz and complements other constraint programming tools as an interactive visual search engine, forming the base for an integrated constraint debugging environment.}, ANNOTE = {COLIURL : Muller:2000:PIC.ps} } @PhdThesis{Müller-Landmann:2000, AUTHOR = {Müller-Landmann, Sonja}, TITLE = {Corpus-Based Parse Pruning - Applying Empirical Data to Symbolic Knowledge}, YEAR = {2000}, ADDRESS = {Saarbrücken}, SCHOOL = {Universität des Saarlandes, Department of Computational Linguistics} } @Article{Nederhof:2000, AUTHOR = {Nederhof, Mark-Jan}, TITLE = {Practical Experiments with Regular Approximation of Context-Free Languages}, YEAR = {2000}, JOURNAL = {Computational Linguistics}, VOLUME = {26}, NUMBER = {1}, PAGES = {17-44}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/nederhof00.ps.gz ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/nederhof00.entry}, ANNOTE = {COLIURL : Nederhof:2000:PER.pdf Nederhof:2000:PER.ps} } @InProceedings{Nederhof:2000_1, AUTHOR = {Nederhof, Mark-Jan}, TITLE = {Preprocessing for Unification Parsing of Spoken Language}, YEAR = {2000}, BOOKTITLE = {Proceedings of the Natural Language Processing - NLP 2000, June 2-4}, NUMBER = {1835}, PAGES = {118-129}, EDITOR = {Christodoulakis, Dimitris}, SERIES = {Lecture Notes in Artificial Intelligence}, ADDRESS = {Patras, Greece}, PUBLISHER = {Springer}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/nederhof00a.ps.gz ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/nederhof00a.entry}, ANNOTE = {COLIURL : Nederhof:2000:PUP.pdf Nederhof:2000:PUP.ps} } @InProceedings{Nederhof:2000_2, AUTHOR = {Nederhof, Mark-Jan}, TITLE = {Preprocessing for Unification Parsing of Spoken Language}, YEAR = {2000}, BOOKTITLE = {Proceedings of the Natural Language Processing - NLP 2000, June 2-4}, NUMBER = {1835}, PAGES = {118-129}, EDITOR = {Christodoulakis, Dimitris}, SERIES = {Lecture Notes in Artificial Intelligence}, ADDRESS = {Patras, Greece}, PUBLISHER = {Springer}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/nederhof00a.ps.gz}, ABSTRACT = {Wordgraphs are structures that may be output by speech recognizers. We discuss various methods for turning wordgraphs into smaller structures. One of these methods is novel; this method relies on a new kind of determinization of acyclic weighted finite automata that is language-preserving but not fully weight-preserving, and results in smaller automata than in the case of traditional determinization of weighted finite automata. We present empirical data comparing the respective methods. 
The methods are relevant for systems in which wordgraphs form the input to kinds of syntactic analysis that are very time consuming, such as unification parsing.}, ANNOTE = {COLIURL : Nederhof:2000:PUPa.pdf Nederhof:2000:PUPa.ps} } @InCollection{Nederhof:2000_3, AUTHOR = {Nederhof, Mark-Jan}, TITLE = {Regular Approximation of CFLs: A Grammatical View}, YEAR = {2000}, BOOKTITLE = {Advances in Probabilistic and other Parsing Technologies}, PAGES = {221-241}, EDITOR = {Bunt, Harry and Nijholt, Anton}, PUBLISHER = {Kluwer Academic Publishers}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/nederhof00b.ps.gz}, ABSTRACT = {We show that for each context-free grammar a new grammar can be constructed that generates a regular language. This construction differs from some existing methods of approximation in that use of a pushdown automaton is avoided. This allows better insight into how the generated language is affected.}, ANNOTE = {COLIURL : Nederhof:2000:RACa.pdf Nederhof:2000:RACa.ps} } @InCollection{Nederhof:2000_4, AUTHOR = {Nederhof, Mark-Jan}, TITLE = {Regular Approximation of CFLs: A Grammatical View}, YEAR = {2000}, BOOKTITLE = {Advances in Probabilistic and other Parsing Technologies}, PAGES = {221-241}, EDITOR = {Bunt, Harry and Nijholt, Anton}, PUBLISHER = {Kluwer Academic Publishers}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/nederhof00b.ps.gz ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/nederhof00b.entry}, ANNOTE = {COLIURL : Nederhof:2000:RAC.pdf Nederhof:2000:RAC.ps} } @InProceedings{Nederhof_Satta:2000, AUTHOR = {Nederhof, Mark-Jan and Satta, Giorgio}, TITLE = {Left-To-Right Parsing and Bilexical Context-Free Grammars}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 6th Applied Natural Language Processing Conference and 1st Meeting of the North American Chapter of the Association for Computational Linguistics (ANLP-NAACL '00), April 29 - May 3}, PAGES = {272-279}, EDITOR = {Nirenburg, Sergei and Appelt, Douglas and Ciravegna, Fabio and Dale, Robert}, ADDRESS = {Seattle, Washington, USA}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/nederhof-satta00.ps.gz ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/nederhof-satta00.entry}, ANNOTE = {COLIURL : Nederhof:2000:LRP.pdf Nederhof:2000:LRP.ps} } @InProceedings{Nederhof_Satta:2000_1, AUTHOR = {Nederhof, Mark-Jan and Satta, Giorgio}, TITLE = {Left-To-Right Parsing and Bilexical Context-Free Grammars}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 6th Applied Natural Language Processing Conference and 1st Meeting of the North American Chapter of the Association for Computational Linguistics (ANLP-NAACL'00), April 29 - May 3}, PAGES = {272-279}, EDITOR = {Nirenburg, Sergei and Appelt, Douglas and Ciravegna, Fabio and Dale, Robert}, ADDRESS = {Seattle, Washington, USA}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/nederhof-satta00.ps.gz}, ABSTRACT = {We compare the asymptotic time complexity of left-to-right and bidirectional parsing techniques for bilexical context-free grammars, a grammar formalism that is an abstraction of language models used in several state-of-the-art real-world parsers. We provide evidence that left-to-right parsing cannot be realised within acceptable time-bounds if the so called correct-prefix property is to be ensured. 
Our evidence is based on complexity results for the representation of regular languages.}, ANNOTE = {COLIURL : Nederhof:2000:LRPb.pdf Nederhof:2000:LRPb.ps} } @InCollection{Neumann:2000, AUTHOR = {Neumann, Günter}, TITLE = {A Uniform Method for Automatically Extracting Stochastic Lexicalized Tree Grammars from Treebanks and HPSG}, YEAR = {2000}, BOOKTITLE = {Building and Using Syntactically Annotated Corpora}, EDITOR = {Abeillé, Anne}, SERIES = {Language and Speech Series}, ADDRESS = {Dordrecht}, PUBLISHER = {Kluwer Academic Publishers}, URL = {http://www.dfki.de/~neumann/publications/new-ps/kluwer-tag+.pdf}, ANNOTE = {COLIURL : Neumann:2000:UMA.pdf} } @InProceedings{Neumann_et_al:2000, AUTHOR = {Neumann, Günter and Piskorski, Jakub and Braun, Christian}, TITLE = {An Intelligent Text Extraction and Navigation System}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 6th Applied Natural Language Processing Conference (ANLP'00). 1st Meeting of the North American Chapter of the Association for Computational Linguistics (NAACL'00), April 29 - May 4}, PAGES = {239-246}, EDITOR = {Nirenburg, Sergei and Appelt, Douglas and Ciravegna, Fabio and Dale, Robert}, ADDRESS = {Seattle, Washington, USA}, PUBLISHER = {ACL}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/final-acl2000.ps.gz ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/final-acl2000.entry}, ANNOTE = {COLIURL : Neumann:2000:ITE.pdf Neumann:2000:ITE.ps} } @InProceedings{Ng_et_al:2000, AUTHOR = {Ng, Ka Boon and Choi, Chiu Wo and Henz, Martin and Müller, Tobias}, TITLE = {GIFT: A Generic Interface for Reusing Filtering Algorithms}, YEAR = {2000}, BOOKTITLE = {Workshop on Techniques for Implementing Constraint Programming Systems (TRICS), September}, PAGES = {86-100}, EDITOR = {Beldiceanu, N. and Harvey, W. and Henz, Martin and Laburthe, F. and Monfroy, E. and Müller, Tobias and Perron, L. and Schulte, Christian}, ADDRESS = {Singapore}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/KaboonChoiHenzMueller00a.ps.gz}, ABSTRACT = {Many different constraint programming (CP) systems exist today. For each CP system, there are many different filtering algorithms. Researchers and developers usually choose a CP system of their choice to implement their filtering algorithms. To use these filtering algorithms on another system, we have to port the code over. This situation is clearly not desirable. In this paper, we propose a generic C++ interface for writing filtering algorithms called GIFT (Generic Interface for FilTers). By providing the generic interface on different CP systems, we can reuse any filtering algorithms easily. A case study on reusing scheduling filtering algorithms between Mozart and Figaro further highlights the feasibility of this approach.}, ANNOTE = {COLIURL : Ng:2000:GGI.pdf Ng:2000:GGI.ps} } @Article{Niehren:2000, AUTHOR = {Niehren, Joachim}, TITLE = {Uniform Confluence in Concurrent Computation}, YEAR = {2000}, JOURNAL = {Journal of Functional Programming}, VOLUME = {10}, NUMBER = {3}, PAGES = {1-47}, URL = {www.ps.uni-sb.de/Papers/abstracts/Uniform:99.html ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/Uniform:2000.ps.gz}, ABSTRACT = {Indeterminism is typical for concurrent computation. If several concurrent actors compete for the same resource then at most one of them may succeed, whereby the choice of the successful actor is indeterministic. As a consequence, the execution of a concurrent program may be nonconfluent. 
Even worse, most observables (termination, computational result, and time complexity) typically depend on the scheduling of actors created during program execution. This property contrasts concurrent programs with purely functional programs. A functional program is uniformly confluent in the sense that all its possible executions coincide modulo reordering of execution steps. In this paper, we investigate concurrent programs that are uniformly confluent and their relation to eager and lazy functional programs. We study uniform confluence in concurrent computation within the applicative core of the $\pi$-calculus which is widely used in different models of concurrent programming (with interleaving semantics). In particular, the applicative core of the $\pi$-calculus serves as a kernel in foundations of concurrent constraint programming with first-class procedures (as provided by the programming language Oz). We model eager functional programming in the $\lambda$-calculus with weak call-by-value reduction and lazy functional programming in the call-by-need $\lambda$-calculus with standard reduction. As a measure of time complexity, we count application steps. We encode the $\lambda$-calculus with both above reduction strategies into the applicative core of the $\pi$-calculus and show that time complexity is preserved. Our correctness proofs employ a new technique based on uniform confluence and simulations. The strength of our technique is illustrated by proving a folk theorem, namely that the call-by-need complexity of a functional program is smaller than its call-by-value complexity. A shortened version of this report will appear in the Journal of Functional Programming. Due to lack of space, this journal version does not contain the encoding of the $\delta$-calculus (introduced in the paper) into the applicative core of the $\pi$-calculus (given here), which is of its own interest.}, ANNOTE = {COLIURL : Niehren:2000:UCC.pdf Niehren:2000:UCC.ps} } @Article{Niehren_et_al:2000, AUTHOR = {Niehren, Joachim and Treinen, Ralf and Tison, Sophie}, TITLE = {On Rewrite Constraints and Context Unification}, YEAR = {2000}, JOURNAL = {Information Processing Letters}, VOLUME = {74}, NUMBER = {1-2}, PAGES = {35-40}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/rewrite-context.ps.gz}, ABSTRACT = {We show that stratified context unification, which is one of the most expressive fragments of context unification known to be decidable, is equivalent to the satisfiability problem of slightly generalized rewriting constraints.}, ANNOTE = {COLIURL : Niehren:2000:RCC.pdf Niehren:2000:RCC.ps} } @InProceedings{Oepen_Callmeier:2000, AUTHOR = {Oepen, Stefan and Callmeier, Ulrich}, TITLE = {Measure for Measure: Parser Cross-Fertilization. Towards Increased Component Comparability and Exchange}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 6th International Workshop on Parsing Technology (IWPT '00), February 23-25}, PAGES = {183-194}, ADDRESS = {Trento, Italy}, URL = {https://www.coli.uni-saarland.de/itsdb/publications/fertilization.ps.gz https://www.coli.uni-saarland.de/~uc/pubs/iwpt00.pdf}, ABSTRACT = {Over the past few years significant progress was accomplished in efficient processing with wide-coverage HPSG grammars. HPSG-based parsing systems are now available that can process medium-complexity sentences (of ten to twenty words, say) in average parse times equivalent to real (i.e. human reading) time. 
A large number of engineering improvements in current HPSG systems were achieved through collaboration of multiple research centers and mutual exchange of experience, encoding techniques, algorithms, and even pieces of software. This article presents an approach to grammar and system engineering, termed competence & performance profiling, that makes systematic experimentation and the precise empirical study of system properties a focal point in development. Adapting the profiling metaphor familiar from software engineering to constraint-based grammars and parsers, enables developers to maintain an accurate record of system evolution, identify grammar and system deficiencies quickly, and compare to earlier versions or between different systems. We discuss a number of exemplary problems that motivate the experimental approach, and apply the empirical methodology in a fairly detailed discussion of what was achieved during a development period of three years. Given the collaborative nature in setup, the empirical results we present involve research and achievements of a large group of people.}, ANNOTE = {COLIURL : Oepen:2000:MMP.pdf Oepen:2000:MMP.ps} } @InProceedings{Oepen_Carroll:2000, AUTHOR = {Oepen, Stefan and Carroll, John}, TITLE = {Ambiguity Packing in Constraint-Based Parsing - Practical Results}, YEAR = {2000}, BOOKTITLE = {1st Meeting of the North American Chapter of the Association of Computational Linguistics (NAACL '00), April 29 - May 3}, PAGES = {162-169}, ADDRESS = {Seattle, Washington, USA}, ANNOTE = {COLIURL : Oepen:2000:APC.pdf} } @Article{Oepen_Carroll:2000_1, AUTHOR = {Oepen, Stefan and Carroll, John}, TITLE = {Parser Engineering and Performance Profiling}, YEAR = {2000}, JOURNAL = {Journal of Natural Language Engineering}, VOLUME = {6}, NUMBER = {1}, PAGES = {81-98} } @InProceedings{Oliva:2000, AUTHOR = {Oliva, Karel}, TITLE = {Hovory k sobe/si/sebe/se}, YEAR = {2000}, BOOKTITLE = {Ceština - univerzália a specifika 2 (sborník konference ve Šlapanicích u Brna 17-19.11.1999)}, ADDRESS = {Šlapanice} } @InProceedings{Oliva_et_al:2000, AUTHOR = {Oliva, Karel and Hnátková, Milena and Petkevic, Vladimir and Kveton, Pavel}, TITLE = {The Linguistic Basis of a Rule-Based Tagger of Czech}, YEAR = {2000}, BOOKTITLE = {3rd International Workshop Text, Speech and Dialogue 2000 (TSD '00), September 13-16}, NUMBER = {1902}, PAGES = {3-8}, EDITOR = {Sojka, Petr and Kopecek, Ivan and Pala, Karel}, SERIES = {Lecture Notes in Artificial Intelligence}, ADDRESS = {Brno, Czech Republic}, PUBLISHER = {Springer} } @Book{Peetz_Pützer:2000, AUTHOR = {Peetz, Anna and Pützer, Manfred}, TITLE = {Wörterbuch der Beurener Mundart. Mundart-Hochdeutsch, Hochdeutsch-Mundart}, YEAR = {2000}, ADDRESS = {Kell am See, Germany}, PUBLISHER = {Alta Silva} } @InCollection{Pickering_et_al:2000, AUTHOR = {Pickering, Martin J. and Crocker, Matthew W.}, TITLE = {Introduction}, YEAR = {2000}, BOOKTITLE = {Architectures and Mechanisms for Language Processing}, EDITOR = {Crocker, Matthew W. and Pickering, Martin J. and Clifton, Charles Jr.}, ADDRESS = {Cambridge}, PUBLISHER = {Cambridge University Press} } @Article{Pickering_et_al:2000_1, AUTHOR = {Pickering, Martin J. 
and Traxler, Matthew and Crocker, Matthew W.}, TITLE = {Ambiguity Resolution in Sentence Processing: Evidence Against Likelihood}, YEAR = {2000}, JOURNAL = {Journal of Memory and Language}, VOLUME = {43}, NUMBER = {3}, PAGES = {447-475} } @InProceedings{Pinkal_Kohlhase:2000, AUTHOR = {Pinkal, Manfred and Kohlhase, Michael}, TITLE = {Feature Logic for Dotted Types: A Formalism for Complex Word Meanings}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 38th Annual Meeting of the Association for Computational Linguistics (ACL '00), October 1-8}, ADDRESS = {Hong Kong}, PUBLISHER = {Morgan Kaufmann Publishers}, URL = {http://www.cs.cmu.edu/~kohlhase/papers/acl00.ps}, ABSTRACT = {In this paper we revisit Pustejovsky's proposal to treat ontologically complex word meaning by so-called dotted pairs. We use a higher-order feature logic based on Ohori's record lambda-calculus to model the semantics of words like book and library, in particular their behavior in the context of quantification and cardinality statements.}, ANNOTE = {COLIURL : Pinkal:2000:FLD.pdf Pinkal:2000:FLD.ps} } @InCollection{Pinkal_et_al:2000, AUTHOR = {Pinkal, Manfred and Rupp, Christopher J. and Worm, Karsten}, TITLE = {Robust Semantic Processing of Spoken Language}, YEAR = {2000}, BOOKTITLE = {Verbmobil: Foundations of Speech-to-Speech Translation}, PAGES = {322-336}, EDITOR = {Wahlster, Wolfgang}, SERIES = {Artificial Intelligence}, ADDRESS = {Berlin}, PUBLISHER = {Springer} } @InProceedings{Piskorski_Neumann:2000, AUTHOR = {Piskorski, Jakub and Neumann, Günter}, TITLE = {An Intelligent Text Extraction and Navigation System}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 6th International Conference on Computer-Assisted Information Retrieval (RIAO'00)}, ADDRESS = {Paris, France}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/sppc.ps.gz}, ABSTRACT = {We present SPPC, a high-performance system for intelligent text extraction and navigation from German free text documents. SPPC consists of a set of domain-independent shallow core components which are realized by means of cascaded weighted finite state machines and generic dynamic tries. All extracted information is represented uniformly in one data structure (called the text chart) in a highly compact and linked form in order to support indexing and navigation through the set of solutions. German text processing includes (among others) compound processing, high performance named entity recognition and chunk parsing based on a divide-and-conquer strategy. SPPC has a good performance (4380 words per second on standard PC environments) and high linguistic coverage.}, ANNOTE = {COLIURL : Piskorski:2000:ITE.pdf Piskorski:2000:ITE.ps} } @InProceedings{Piskorski_Skut:2000, AUTHOR = {Piskorski, Jakub and Skut, Wojciech}, TITLE = {Intelligent Information Extraction}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 4th International Conference on Business Information Systems, April 24-25}, ADDRESS = {Poznan, Poland}, ABSTRACT = {New developments in Information Technology and an ever-growing amount of unstructured business text documents in digital form require intelligent tools for precisely determining their content and relevance. In this paper we give an overview of the natural language processing approach to information extraction and information retrieval. 
Our article contains a brief description of efficient linguistic core components.} } @InProceedings{Plaehn:2000, AUTHOR = {Plaehn, Oliver}, TITLE = {Computing the Most Probable Parse for a Discontinuous Phrase Structure Grammar}, YEAR = {2000}, BOOKTITLE = {6th International Workshop on Parsing Technologies (IWPT '00), February 23-25}, EDITOR = {Bunt, Harry}, ADDRESS = {Trento, Italy}, URL = {https://www.coli.uni-saarland.de/~plaehn/papers/iwpt2000.ps https://www.coli.uni-saarland.de/~plaehn/papers/iwpt2000.ps.gz https://www.coli.uni-saarland.de/~plaehn/papers/iwpt2000.html}, ABSTRACT = {This paper presents a probabilistic extension of Discontinuous Phrase Structure Grammar (DPSG), a formalism designed to describe discontinuous constituency phenomena adequately and perspicuously by means of trees with crossing branches. We outline an implementation of an agenda-based chart parsing algorithm that is capable of computing the Most Probable Parse for a given input sentence for probabilistic versions of both DPSG and Context-Free Grammar. Experiments were conducted with both types of grammars extracted from the NEGRA corpus. In spite of the much greater complexity of DPSG parsing in terms of the number of (partial) analyses that can be constructed for an input sentence, accuracy results from both experiments are comparable. We also briefly hint at future lines of research aimed at more efficient ways of probabilistic parsing with discontinuous constituents.}, ANNOTE = {COLIURL : Plaehn:2000:CMP.pdf Plaehn:2000:CMP.ps} } @InProceedings{Plaehn_Brants:2000, AUTHOR = {Plaehn, Oliver and Brants, Thorsten}, TITLE = {Annotate - An Efficient Interactive Annotation Tool}, YEAR = {2000}, BOOKTITLE = {6th Applied Natural Language Processing Conference (ANLP '00), April 29 - May 4}, ADDRESS = {Seattle, Washington, USA}, URL = {https://www.coli.uni-saarland.de/~plaehn/papers/anlp2000.ps.gz https://www.coli.uni-saarland.de/~plaehn/papers/anlp2000.pdf https://www.coli.uni-saarland.de/~plaehn/papers/anlp2000.html}, ABSTRACT = {During the creation of the NEGRA corpus, we developed very efficient interactive annotation tools. An easy-to-use graphical tool, Annotate, is used to manipulate syntactic structures. Annotate interacts with a part-of-speech tagger and a parser running in the background, thus facilitating rapid semi-automatic corpus annotation.}, ANNOTE = {COLIURL : Plaehn:2000:AEI.pdf Plaehn:2000:AEI.ps} } @MastersThesis{Priesnitz:2000, AUTHOR = {Priesnitz, Tim}, TITLE = {Entailment von nicht-strukturellen Teiltyp-Constraints}, YEAR = {2000}, ADDRESS = {Saarbrücken}, SCHOOL = {Universität des Saarlandes, Fachbereich Informatik}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/Priesnitz-2000.ps.gz}, ABSTRACT = {Teiltyp-Entailment ist die Frage, ob eine Implikation zwischen Teiltyp-Constraints $t_1 \leq t_2$ gilt. Algorithmen, die diese Frage lösen, sind für die Vereinfachung von Teiltyp-Constraints relevant. Die Vereinfachung von Teiltyp-Constraints ist hingegen in constraintbasierten Typ-Systemen dringend erforderlich. Die Komplexität von Teiltyp-Entailment hängt von der gewählten Typsprache ab; schon für ausdrucksschwache Typsprachen ist es überraschend schwierig, einen Algorithmus für Teiltyp-Entailment zu entwerfen. So ist Teiltyp-Entailment für einfache Typen coNP-vollständig und wird durch die Hinzunahme von rekursiven Typen PSPACE-vollständig. Entailment wird durch die Erweiterung um einen kleinsten und größten Typ nicht-strukturell. 
Henglein and Rehof have proved that non-structural subtype entailment, both in the simple and in the recursive case, is PSPACE-hard; they were, however, not able to give a complete algorithm. Whether non-structural subtype entailment is decidable at all is a well-known open question. Starting from this situation, this thesis investigates where the difficulties lie. We isolate a subproblem of non-structural subtype entailment which we show to be PSPACE-complete. This establishes, for the first time, decidability of a non-trivial fragment of non-structural entailment. We characterize subtype entailment in automata-theoretic terms: we extend the concept of finite automata to so-called P-automata, whose properties we analyze systematically, and in a next step reduce subtype entailment to the universality problem of restricted P-automata. For our distinguished fragment we can indeed fall back on finite automata, whose universality problem is PSPACE-complete.}, ANNOTE = {COLIURL : Priesnitz:2000:EST.pdf Priesnitz:2000:EST.ps} } @Article{Pützer_Marasek:2000, AUTHOR = {Pützer, Manfred and Marasek, Krzysztof}, TITLE = {Differenzierung gesunder Stimmqualitäten und Stimmqualitäten bei Rekurrensparese mit Hilfe elektroglottographischer Messungen und RBH-System}, YEAR = {2000}, JOURNAL = {Sprache Stimme Gehör}, VOLUME = {24}, PAGES = {154-163} } @InProceedings{Ramírez Bustamante_et_al:2000, AUTHOR = {Ramírez Bustamante, Flora and Declerck, Thierry and Sánchez León, Fernando}, TITLE = {Towards a Theory of Textual Errors}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 3rd International Workshop on Controlled Language Applications (CLAW'00), April 29-30}, ADDRESS = {Seattle, USA}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/claw00_ram.ps.gz}, ABSTRACT = {In this paper we present a discussion on the current state of checking technology (both Controlled Language and Grammar Checking), and we stress the need for a generalized theory of textual errors which leads to a hierarchical organization of errors and illegal structures in relation to linguistic text processing. We then discuss the issue of an integrated checking approach.}, ANNOTE = {COLIURL : Bustamante:2000:TTT.pdf Bustamante:2000:TTT.ps} } @TechReport{Rupp_Milward:2000, AUTHOR = {Rupp, Christopher J. and Milward, David}, TITLE = {A Robust Linguistic Processing Architecture}, YEAR = {2000}, MONTH = {September}, NUMBER = {4.1}, ADDRESS = {Göteborg}, TYPE = {Siridus Report}, INSTITUTION = {Göteborg University, Department of Linguistics}, URL = {http://www.ling.gu.se/projekt/siridus/Publications/deliv4-1.ps.gz}, ANNOTE = {COLIURL : Rupp:2000:RLP.pdf Rupp:2000:RLP.ps} } @InCollection{Rupp_et_al:2000, AUTHOR = {Rupp, Christopher J. and Spilker, Jörg and Klarner, Martin and Worm, Karsten L.}, TITLE = {Combining Analyses from Various Parsers}, YEAR = {2000}, BOOKTITLE = {Verbmobil: Foundations of Speech-to-Speech Translation}, PAGES = {311-320}, EDITOR = {Wahlster, Wolfgang}, ADDRESS = {Berlin}, PUBLISHER = {Springer} } @InProceedings{Scheepers_Corley:2000, AUTHOR = {Scheepers, Christoph and Corley, Martin}, TITLE = {Syntactic Priming in German Sentence Production}, YEAR = {2000}, BOOKTITLE = {Proceedings of the Twenty-Second Meeting of the Cognitive Science Society (CogSci2000), August 13-15}, PAGES = {435-440}, EDITOR = {Gleitman, L. R. and Joshi, A.
K.}, ADDRESS = {Philadelphia, Pennsylvania, USA}, PUBLISHER = {Lawrence Erlbaum Associates} } @InCollection{Scheepers_et_al:2000, AUTHOR = {Scheepers, Christoph and Hemforth, Barbara and Konieczny, Lars}, TITLE = {Linking Syntactic Functions with Thematic Roles: Psych-Verbs and the Resolution of Subject-Object Ambiguity}, YEAR = {2000}, BOOKTITLE = {German Sentence Processing}, PAGES = {95-135}, EDITOR = {Hemforth, Barbara and Konieczny, Lars}, ADDRESS = {Dordrecht}, PUBLISHER = {Kluwer Academic Publishers} } @InCollection{Schiehlen_et_al:2000, AUTHOR = {Schiehlen, Michael and Bos, Johan and Dorna, Michael}, TITLE = {Verbmobil Interface Terms (VITs)}, YEAR = {2000}, BOOKTITLE = {Verbmobil: Foundations of Speech-to-Speech Translation}, PAGES = {183-199}, EDITOR = {Wahlster, Wolfgang}, ADDRESS = {Berlin - Heidelberg - New York}, PUBLISHER = {Springer} } @InProceedings{Schröder:2000, AUTHOR = {Schröder, Marc}, TITLE = {Experimental Study of Affect Bursts}, YEAR = {2000}, BOOKTITLE = {ISCA Workshop on Speech and Emotion}, PAGES = {132-137}, EDITOR = {Cowie, Roddy and Douglas-Cowie, Ellen and Schröder, Marc}, ADDRESS = {Belfast}, PUBLISHER = {Textflow}, URL = {http://www.dfki.de/~schroed/articles/schroeder2000.pdf}, ABSTRACT = {The study described here investigates the perceived emotional content of “affect bursts” for German. Affect bursts are defined as short emotional non-speech expressions interrupting speech. This study shows that affect bursts, presented without context, can convey a clearly identifiable emotional meaning. Affect bursts expressing ten emotions were produced by actors. After a pre-selection procedure, “good examples” for each emotion were presented in a perception test. The mean recognition score of 81% indicates that affect bursts seem to be an effective means of expressing emotions. Affect bursts are grouped into classes on the basis of phonetic similarity. Recognition and confusion patterns are examined for these classes.}, ANNOTE = {COLIURL : Schroder:2000:ESA.pdf} } @InProceedings{Schulte:2000, AUTHOR = {Schulte, Christian}, TITLE = {Programming Deep Concurrent Constraint Combinators}, YEAR = {2000}, BOOKTITLE = {Practical Aspects of Declarative Languages, Second International Workshop, PADL 2000, January 17-18}, NUMBER = {1753}, PAGES = {215-229}, EDITOR = {Pontelli, E. and Santos Costa, V.}, SERIES = {Lecture Notes in Computer Science}, ADDRESS = {Boston, Massachusetts, USA}, PUBLISHER = {Springer}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/combinators.ps.gz}, ABSTRACT = {Constraint combination methods are essential for a flexible constraint programming system. This paper presents deep concurrent constraint combinators based on computation spaces as combination mechanism. It introduces primitives and techniques needed to program constraint combinators from computation spaces. The paper applies computation spaces to a broad range of combinators: negation, generalized reification, disjunction, and implication. Even though computation spaces have been conceived in the context of Oz, they are mainly programming language independent.
This point is stressed by discussing them here in the context of Standard ML with concurrency features.}, ANNOTE = {COLIURL : Schulte:2000:PDC.pdf Schulte:2000:PDC.ps} } @TechReport{Schulte:2000_1, AUTHOR = {Schulte, Christian}, TITLE = {Parallel Search Made Simple}, YEAR = {2000}, MONTH = {September}, NUMBER = {TRA9/00}, ADDRESS = {Singapore}, TYPE = {Technical Report}, INSTITUTION = {National University of Singapore, School of Computing}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/par-trics.ps.gz}, ABSTRACT = {Search in constraint programming is a time consuming task. Search can be speeded up by exploring subtrees of a search tree in parallel. This paper presents distributed search engines that achieve parallelism by distribution across networked computers. The main point of the paper is a simple design of the parallel search engine. Simplicity comes as an immediate consequence of clearly separating search, concurrency, and distribution. The obtained distributed search engines are simple yet offer substantial speedup on standard network computers.}, ANNOTE = {COLIURL : Schulte:2000:PSM.pdf Schulte:2000:PSM.ps} } @InProceedings{Schwenter_Vasishth:2000, AUTHOR = {Schwenter, Scott A. and Vasishth, Shravan}, TITLE = {Absolute and Relative Scalar Particles in Spanish and Hindi}, YEAR = {2000}, BOOKTITLE = {26th Berkeley Linguistics Society Conference (BLS 26), February 18-21}, PAGES = {225-233}, ADDRESS = {University of California, Berkeley, USA}, URL = {ftp://ftp.ling.ohio-state.edu/pub/Students/Vasishth/Published/BLS2000/BLS00paper.pdf}, ANNOTE = {COLIURL : Schwenter:2000:ARS.pdf} } @InCollection{Siegel:2000, AUTHOR = {Siegel, Melanie}, TITLE = {HPSG Analysis of Japanese}, YEAR = {2000}, BOOKTITLE = {Verbmobil: Foundations of Speech-to-Speech Translation}, PAGES = {265-280}, EDITOR = {Wahlster, Wolfgang}, SERIES = {Artificial Intelligence}, ADDRESS = {Berlin}, PUBLISHER = {Springer}, URL = {http://www.dfki.de/~siegel/vm-buch.ps.gz ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/ms1.entry}, ANNOTE = {COLIURL : Siegel:2000:HAJ.pdf Siegel:2000:HAJ.ps} } @InProceedings{Siegel:2000_1, AUTHOR = {Siegel, Melanie}, TITLE = {Japanese Honorification in an HPSG Framework}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 14th Pacific Asia Conference on Language, Information and Computation, February 15-17}, PAGES = {289-300}, EDITOR = {Ikeya, Akira and Kawamori, Masahito}, ADDRESS = {Tokyo, Japan}, PUBLISHER = {Waseda University International Conference Center}, URL = {http://www.dfki.de/~siegel/paclic2000.ps.gz}, ABSTRACT = {We present a solution for the representation of Japanese honorificational information in the HPSG framework. Basically, there are three dimensions of honorification. We show that a treatment is necessary that involves both the syntactic and the contextual level of information. 
The Japanese grammar is part of a machine translation system.}, ANNOTE = {COLIURL : Siegel:2000:JHH.pdf Siegel:2000:JHH.ps} } @InProceedings{Siegel_Scherf:2000, AUTHOR = {Siegel, Melanie and Scherf, Oliver}, TITLE = {Morphological Parsing of Japanese}, YEAR = {2000}, BOOKTITLE = {Conference Handbook of the 2nd International Conference on Practical Linguistics of Japanese}, PAGES = {116-117}, ADDRESS = {San Francisco State University, USA}, URL = {http://www.dfki.de/~siegel/icplj.ps.gz ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/ms2.entry} } @MastersThesis{Simon:2000, AUTHOR = {Simon, Daniel}, TITLE = {An Implementation of the Programming Language DML in Java: Runtime Environment}, YEAR = {2000}, ADDRESS = {Saarbrücken}, SCHOOL = {Universität des Saarlandes, Fachbereich Informatik}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/Simon:2000.ps.gz}, ABSTRACT = {DML is an experimental language that has emerged from the development of the Oz dialect Alice. DML is dynamically typed, functional, and concurrent. It supports transients and provides a distributed programming model. To translate DML to the Java Virtual Machine, a runtime environment is needed. This work presents a simple and secure implementation of the basic DML runtime classes and elaborates on relevant improvements. Pickling, a mechanism to make higher order values persistent, is provided on top of the Java Object Serialization. In addition, a high-level distributed programming model for DML is implemented based on Java's Remote Method Invocation architecture. Finally, the implemented compiler and the runtime environment of DML are compared to similar projects.}, ANNOTE = {COLIURL : Simon:2000:IPL.pdf Simon:2000:IPL.ps} } @Article{Sturt_et_al:2000, AUTHOR = {Sturt, Patrick and Pickering, Martin J. and Crocker, Matthew W.}, TITLE = {Search Strategies in Syntactic Reanalysis}, YEAR = {2000}, JOURNAL = {Journal of Psycholinguistic Research}, VOLUME = {29}, NUMBER = {2}, PAGES = {183-194} } @InProceedings{Sturt_et_al:2000_1, AUTHOR = {Sturt, Patrick and Scheepers, Christoph and Pickering, Martin J. and Crocker, Matthew W.}, TITLE = {The Interaction of Structure Preservation and Recency in First- and Second-Pass Ambiguity Resolution}, YEAR = {2000}, BOOKTITLE = {6th Conference on Architectures and Mechanisms for Language Processing (AMLaP-2000), September 20-23}, ADDRESS = {Leiden, The Netherlands} } @InProceedings{Trouvain_Barry:2000, AUTHOR = {Trouvain, Jürgen and Barry, William J.}, TITLE = {The Prosody of Excitement in Horse Race Commentaries}, YEAR = {2000}, BOOKTITLE = {ISCA-Workshop on Speech and Emotion: A Conceptual Framework for Research, September 5-7}, PAGES = {86-91}, ADDRESS = {Belfast, Northern Ireland}, URL = {http://www.qub.ac.uk/en/isca/proceedings/pdfs/trouvain.pdf}, ABSTRACT = {This study investigates examples of horse race commentaries and compares the acoustic properties with an auditorily based description of the typical suspense pattern from calm to very excited at the finish and relaxation after the finish. With the exception of tempo, the auditory impressions were basically confirmed. The examination shows further that the results of the investigated prosodic parameters pause duration, pausing and breathing rate, F0 level and range, intensity, and spectral tilt fit well with other forms of excitement such as anger or elation. Additionally, it is discussed how the specific speaking style of horse race commentators can be classified.
Finally, the role of prosodic descriptions for modelling those speaking styles and emotions, especially for speech technology, is considered.}, ANNOTE = {COLIURL : Trouvain:2000:PEH.pdf} } @InCollection{Uszkoreit:2000, AUTHOR = {Uszkoreit, Hans}, TITLE = {Sprache und Sprachtechnologie bei der Strukturierung digitalen Wissens}, YEAR = {2000}, BOOKTITLE = {Sprache in neuen Medien. Institut für Deutsche Sprache, Jahrbuch 1999}, EDITOR = {Kallmeyer, W.}, ADDRESS = {Berlin}, PUBLISHER = {De Gruyter} } @InCollection{Uszkoreit_et_al:2000, AUTHOR = {Uszkoreit, Hans and Flickinger, Dan and Kasper, Walter and Sag, Ivan A.}, TITLE = {Deep Linguistic Analysis with HPSG}, YEAR = {2000}, BOOKTITLE = {Verbmobil: Foundations of Speech-to-Speech Translation}, PAGES = {216-237}, EDITOR = {Wahlster, Wolfgang}, SERIES = {Artificial Intelligence}, ADDRESS = {Berlin}, PUBLISHER = {Springer} } @MastersThesis{Walter:2000, AUTHOR = {Walter, Andreas}, TITLE = {An Implementation of the Programming Language DML in Java: Compiler}, YEAR = {2000}, ADDRESS = {Saarbrücken}, SCHOOL = {Universität des Saarlandes, Fachbereich Informatik}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/Walter:2000.ps.gz}, ABSTRACT = {DML is an experimental language that has emerged from the development of the Oz dialect Alice. DML is dynamically typed, functional, and concurrent. It supports transients and provides a distributed programming model. The subject of this work is the implementation of a compiler backend that translates DML programs to Java Virtual Machine code. Code-optimizing techniques and possibilities for the treatment of tail calls are described. Finally, the implemented compiler and the runtime environment of DML are compared to similar projects.}, ANNOTE = {COLIURL : Walter:2000:IPL.pdf Walter:2000:IPL.ps} } @InProceedings{Xu_et_al:2000, AUTHOR = {Xu, Feiyu and Netter, Klaus and Stenzhorn, Holger}, TITLE = {MIETTA - A Framework for Uniform and Multilingual Access to Structured Database and Web Information}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 5th International Workshop on Information Retrieval with Asian Languages (IRAL'00)}, ADDRESS = {Hong Kong}, URL = {http://www.dfki.uni-sb.de/~feiyu/iral00.pdf}, ABSTRACT = {We describe a WWW-based information system called MIETTA, which allows uniform and multilingual access to heterogeneous data sources in the tourism domain. The design of the search engine is based on a new crosslingual framework. The framework integrates a crosslingual retrieval strategy with a strategy using natural language techniques: information extraction and multilingual generation. The combination of information extraction and multilingual generation enables the multilingual presentation of the database content and free text crosslingual information retrieval of the structured data entries.
We will demonstrate that the new framework is useful for domain-specific and multilingual applications.}, ANNOTE = {COLIURL : Xu:2000:MFU.pdf} } @InProceedings{Xu_et_al:2000_1, AUTHOR = {Xu, Feiyu and Netter, Klaus and Stenzhorn, Holger}, TITLE = {A System for Uniform and Multilingual Access to Structured Database and Web Information in a Tourism Domain}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 38th Annual Meeting of the Association for Computational Linguistics. Demo Session (ACL'00), October 3-6}, ADDRESS = {Hong Kong}, URL = {http://www.dfki.de/~feiyu/acl00.pdf}, ABSTRACT = {We present an information system, which was developed within the project MIETTA (Multilingual Information Extraction for Tourism and Travel Assistance), a project in the Language Engineering Sector of the Telematics Application Program of the European Commission. MIETTA facilitates multilingual information access in a number of languages (English, Finnish, French, German, Italian) to the tourist information (web documents and database information) provided by three different geographical regions: the German federal state of Saarland, the Finnish region around Turku and the Italian city of Rome. The challenge of the approach is to merge the technologies of crosslingual information retrieval (Jamie Carbonell et al., 1997) and natural language processing to achieve the following goals: · Provide full access to all information independent of the language the information was originally encoded in and independent of the query language; · Provide transparent natural language access to structured database information; · Provide hybrid and flexible query options to enable users to obtain maximally precise information.}, ANNOTE = {COLIURL : Xu:2000:SUM.pdf} } @TechReport{Zaenen_et_al:2000, AUTHOR = {Zaenen, Annie and Ericsson, Stina and Larsson, Staffan and Mikheev, A. and Milward, David and Pinkal, Manfred and Poesio, Massimo and Rupp, Christopher J. and Worm, Karsten L.}, TITLE = {Robust Interpretation and Dialogue Dynamics}, YEAR = {2000}, ADDRESS = {Göteborg}, TYPE = {Technical Report}, INSTITUTION = {Göteborg University, Department of Linguistics}, URL = {http://www.ling.gu.se/research/projects/trindi/private/deliverables/D5.2/D5.2.pdf}, ANNOTE = {COLIURL : Zaenen:2000:RID.pdf} } @InProceedings{Weber:2000, AUTHOR = {Weber, Andrea}, TITLE = {The role of phonotactics in the segmentation of native and non-native continuous speech}, YEAR = {2000}, BOOKTITLE = {Proceedings of the Workshop on Spoken Access Processes}, PAGES = {143-146}, EDITOR = {Cutler, Anne and McQueen, James and Zondervan, R.}, ADDRESS = {Max Planck Institute for Psycholinguistics, Nijmegen, the Netherlands} } @InProceedings{Weber:2000_1, AUTHOR = {Weber, Andrea}, TITLE = {Phonotactic and acoustic cues for word segmentation in English}, YEAR = {2000}, BOOKTITLE = {Proceedings of the 6th International Conference on Spoken Language Processing}, VOLUME = {3}, PAGES = {782-785}, EDITOR = {Yuan, B. and Huang, T. and Tang, X.}, ADDRESS = {Beijing International Convention Center, Beijing, China} }