% GENERATED FROM https://www.coli.uni-saarland.de
% Selection : Year = 1999
@InProceedings{Andreeva_et_al:1999, AUTHOR = {Andreeva, Bistra and Koreman, Jacques and Barry, William J.}, TITLE = {On the Role of the Burst and Transitions for the Identification of Palatalized and Non-Palatalized Plosives in Bulgarian}, YEAR = {1999}, BOOKTITLE = {Proceedings of the 14th International Congress of Phonetic Sciences (ICPhS'99), August 1-7}, ADDRESS = {San Francisco, USA}, URL = {https://www.coli.uni-saarland.de/~koreman/Publications/1999/ICPhS99_BulgPal.ps.gz}, ANNOTE = {COLIURL : Andreeva:1999:RBT.pdf Andreeva:1999:RBT.ps} } @TechReport{Areces_et_al:1999, AUTHOR = {Areces, Carlos and Blackburn, Patrick and Marx, Maarten}, TITLE = {Hybrid Logics: Characterization, Interpolation and Complexity}, YEAR = {1999}, MONTH = {February}, NUMBER = {108}, PAGES = {35}, ADDRESS = {Saarbrücken}, TYPE = {CLAUS-Report}, INSTITUTION = {Universität des Saarlandes}, URL = {ftp://ftp.coli.uni-sb.de/pub/coli/claus/claus108.ps ftp://ftp.coli.uni-sb.de/pub/coli/claus/claus108.dvi}, ABSTRACT = {Hybrid languages are extended modal languages which can refer to (or even quantify over) worlds. The use of strong hybrid languages dates back to at least 1967 (in the work of Arthur Prior), but recent work has focussed on more constrained systems. The purpose of the present paper is to examine one such system in detail. We begin by studying its expressivity, and provide both model theoretic characterizations (via a restricted notion of Ehrenfeucht-Fraïssé game, and an enriched notion of bisimulation) and a syntactic characterization (in terms of bounded formulas). The key result to emerge is that the system corresponds precisely to the first-order fragment which is invariant for generated submodels. We further establish that it has (strong) interpolation, and provide failure results in the finite variable fragments.
We also show that weak interpolation holds for an important sublanguage and provide complexity results for this sublanguage and other fragments and variants (the full logic being undecidable).}, ANNOTE = {COLIURL : Areces:1999:HLC.pdf Areces:1999:HLC.ps Areces:1999:HLC.dvi} } @Article{Avgustinova:1999, AUTHOR = {Avgustinova, Tania}, TITLE = {Prosodic Constraints in Morphosyntactic Domains}, YEAR = {1999}, JOURNAL = {Beiträge der Europäischen Slavistischen Linguistik (POLYSLAV-2)}, VOLUME = {4}, PAGES = {10-15}, URL = {https://www.coli.uni-saarland.de/~tania/ta-pub/polyslav2.pdf}, ANNOTE = {COLIURL : Avgustinova:1999:PCM.pdf} } @TechReport{Avgustinova:1999_1, AUTHOR = {Avgustinova, Tania}, TITLE = {Shared Grammatical Resources for Slavic Languages (Selected Topics in Multilingual Grammar Design)}, YEAR = {1999}, ADDRESS = {Saarbrücken}, TYPE = {DFG-Abschlußbericht}, INSTITUTION = {Universität des Saarlandes} } @InProceedings{Avgustinova_Andreeva:1999, AUTHOR = {Avgustinova, Tania and Andreeva, Bistra}, TITLE = {Link-Associated Accent Patterns in Bulgarian}, YEAR = {1999}, BOOKTITLE = {3rd Conference on Formal Description of Slavic Languages (FDSL-3), December 1-3}, ADDRESS = {Leipzig, Germany}, URL = {https://www.coli.uni-saarland.de/~tania/ta-pub/avg-andr-fdsl3.pdf}, NOTE = {Abstract in URL}, ANNOTE = {COLIURL : Avgustinova:1999:LAA.pdf} } @InProceedings{Avgustinova_Andreeva:1999_1, AUTHOR = {Avgustinova, Tania and Andreeva, Bistra}, TITLE = {Intonational Aspects of Bulgarian Clitic Replication}, YEAR = {1999}, BOOKTITLE = {The 14th International Congress of Phonetic Sciences, August 1-7}, PAGES = {1501-1504}, EDITOR = {Ohala, J. and Hasegawa, Y. and Ohala, M. and Granville, D. and Bailey, A.}, ADDRESS = {San Francisco, USA}, URL = {https://www.coli.uni-saarland.de/~tania/ta-pub/1501.pdf}, ANNOTE = {COLIURL : Avgustinova:1999:IAB.pdf} } @TechReport{Avgustinova_et_al:1999, AUTHOR = {Avgustinova, Tania and Gardent, Claire and Oliva, Karel}, TITLE = {Binding of Reciprocals with Particular Respect to Czech}, YEAR = {1999}, MONTH = {February}, NUMBER = {109}, ADDRESS = {Saarbrücken}, TYPE = {CLAUS-Report}, INSTITUTION = {Universität des Saarlandes}, URL = {ftp://ftp.coli.uni-sb.de/pub/coli/claus/claus109.ps ftp://ftp.coli.uni-sb.de/pub/coli/claus/claus109.dvi}, ABSTRACT = {Drawing on data from Czech and English, we first argue against a uniform syntactic treatment of reciprocals and reflexives. We then define a binding theory for Czech which differs from HPSG binding theory in two main points. First, it is based on an ordering (the D-ordering) which is more general than HPSG's obliqueness ordering -- this permits a natural treatment of adjuncts. Second, it distinguishes between reflexives and reciprocals and submits them to different binding constraints. Finally, we provide a semantics for reciprocals with summated antecedents.}, ANNOTE = {COLIURL : Avgustinova:1999:BRP.pdf Avgustinova:1999:BRP.ps Avgustinova:1999:BRP.dvi} } @InCollection{Avgustinova_et_al:1999_1, AUTHOR = {Avgustinova, Tania and Skut, Wojciech and Uszkoreit, Hans}, TITLE = {Typological Similarities in HPSG: A Case Study on Slavic Verb Diathesis}, YEAR = {1999}, BOOKTITLE = {Slavic in Head-Driven Phrase Structure Grammar}, PAGES = {1-28}, EDITOR = {Borsley, R. D.
and Przepiórkowski, Adam}, ADDRESS = {Stanford}, PUBLISHER = {CSLI Publications} } @Article{Barry:1999, AUTHOR = {Barry, William J.}, TITLE = {Trend und Ergebnisse der phonetischen Forschung und ihr Nutzen für den Fremdsprachenunterricht}, YEAR = {1999}, JOURNAL = {Deutsch als Fremdsprache}, VOLUME = {36}, NUMBER = {2}, PAGES = {81-87} } @Article{Barry_et_al:1999, AUTHOR = {Barry, William J. and Klein, Cordula and Köser, Stephanie}, TITLE = {Speech Production Evidence for Ambisyllabicity in German}, YEAR = {1999}, JOURNAL = {PHONUS}, VOLUME = {4}, PAGES = {87-102}, URL = {https://www.coli.uni-saarland.de/Phonetics/Research/PHONUS_research_reports/Phonus4/Barry_PHONUS4.ps.gz}, ABSTRACT = {Speech production behaviour for cases where, in German, the assumed syllable structure demands of the Maximum Onset Principle (MOP) and the short-stressed vowel Compulsory Coda Principle (CCP) are in conflict, is compared with cases where there is no conflict. The results of two word manipulation tasks are presented in which subjects were required to divide first-syllable-stressed disyllabic words into two parts, in one case (scanning) introducing a pause between the first and second part, in the other case (swapping) speaking the parts in reverse order. Production of an intervocalic single consonant both as a coda to the (original) first syllable and as an onset to the (original) second syllable is seen as behavioural evidence for the psychological reality of ambisyllabicity, which, as a theoretical construct, resolves the conflict in syllable structure demands. We also discuss the relation of the results to orthographic and task-related factors and their implication for the phonological status of the syllable.}, ANNOTE = {COLIURL : Barry:1999:SPE.pdf} } @InCollection{Baumann:1999, AUTHOR = {Baumann, Stefan}, TITLE = {Zum Verhältnis von Akzentform und kognitivem Status von Diskurseinheiten}, YEAR = {1999}, BOOKTITLE = {Convivium. Germanistisches Jahrbuch Polen 1999}, PAGES = {201-224}, EDITOR = {Joachimsthaler, J. and Engel, U. and Kaszynski, S. H.}, SERIES = {Reihe Germanistik}, ADDRESS = {Bonn}, PUBLISHER = {DAAD}, ABSTRACT = {This article deals with accent placement in spontaneous German speech. Using data from an extempore narrative as an example, the author discusses to what extent accent placement on a discourse unit is influenced by semantic-syntactic principles on the one hand and pragmatic considerations on the other. Of particular interest is the question whether there is a correspondence between the phonetic form of an accent a speaker assigns to a constituent (low or high pitch accent) and the constituent's assumed activation state in the addressee's consciousness (given or new) at the time of utterance.}, ANNOTE = {COLIURL : Baumann:1999:VAK.pdf} } @MastersThesis{Becker:1999, AUTHOR = {Becker, Markus}, TITLE = {Unsupervised Training of a Rule-Based Part-of-Speech Tagger}, YEAR = {1999}, ADDRESS = {Saarbrücken}, SCHOOL = {Computational Linguistics, University of Saarland}, URL = {http://www.dfki.de/~mbecker/diplom.ps}, NOTE = {unpublished master's thesis}, ANNOTE = {COLIURL : Becker:1999:UTR.pdf Becker:1999:UTR.ps} } @Proceedings{Tilman_Stephan:1999, TITLE = {May I Speak Freely? Between Templates and Free Choice in Natural Language Generation.
Workshop at the 23rd German Annual Conference for Artificial Intelligence (KI'99), Bonn}, YEAR = {1999}, NUMBER = {D-99-01}, EDITOR = {Becker, Tilman and Busemann, Stephan}, SERIES = {DFKI Document}, ADDRESS = {Saarbrücken}, PUBLISHER = {DFKI}, URL = {ftp://ftp.dfki.uni-kl.de/pub/Publications/Documents/1999/D-99-01.tar.gz}, ABSTRACT = {This workshop is, to our knowledge, the first one topicalizing the relation between application tasks and technologies used. It aims at exploring the tension between more general and more specific approaches to NLG, thereby clarifying what NLG technology is suited best for which task. It is intended to be an opportunity to get an overview of existing state-of-the-art technology and its optimal usage. It will be relevant for both developers and users of NLG systems. Exploring conditions for successful NLG applications is a step that should be taken jointly by technology providers and current and potential users of NLG software. The invited speaker, Paul Heisterkamp of DaimlerChrysler AG, will focus on the industrial usage of NLG software, and we appreciate his contribution to this volume. The workshop is embedded into the German Annual AI conference KI'99, following its tradition of hosting small hot-topic workshops. At the same time it is an activity of the Special Interest Group for Natural Language Systems (Fachgruppe 1.3.1) of the German association for computer science, Gesellschaft für Informatik (GI). The ten contributions to this volume are unpublished research reports reviewed by the workshop organizers. The authors agreed to make available to each other the submitted papers before preparing the final versions. The papers can also be downloaded from the workshop's web page at http://www.dfki.de/service/NLG/KI99.html.}, ANNOTE = {COLIURL : Becker:1999:MSF.tar} } @Article{Bertsch_Nederhof:1999, AUTHOR = {Bertsch, Eberhard and Nederhof, Mark-Jan}, TITLE = {On Failure of the Pruning Technique in Error Repair in Shift-Reduce Parsers}, YEAR = {1999}, JOURNAL = {ACM Transactions on Programming Languages and Systems (TOPLAS)}, VOLUME = {21}, NUMBER = {1}, PAGES = {1-10}, URL = {http://www.dfki.de/dfkibib/publications/docs/nederhof99d.ps.gz}, ABSTRACT = {A previous article presented a technique to compute the least-cost error repair by incrementally generating configurations that result from inserting and deleting tokens in a syntactically incorrect input. An additional mechanism to improve the run-time efficiency of this algorithm by pruning some of the configurations was discussed as well. In this communication we show that the pruning mechanism may lead to suboptimal repairs or may block all repairs. Certain grammatical errors in a common construct of the Java programming language also lead to the above kind of failure.}, ANNOTE = {COLIURL : Bertsch:1999:FPT.pdf Bertsch:1999:FPT.ps} } @Article{Bertsch_Nederhof:1999_1, AUTHOR = {Bertsch, Eberhard and Nederhof, Mark-Jan}, TITLE = {Regular Closure of Deterministic Languages}, YEAR = {1999}, JOURNAL = {SIAM Journal on Computing}, VOLUME = {29}, NUMBER = {1}, PAGES = {81-102}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/nederhof99c.entry ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/nederhof99c.ps.gz}, ABSTRACT = {We recall the notion of regular closure of classes of languages. We present two important results. The first result is that all languages which are in the regular closure of the class of deterministic (context-free) languages can be recognized in linear time.
This is a nontrivial result, since this closure contains many inherently ambiguous languages. The second result is that the class of deterministic languages is contained in the closure of the class of deterministic languages with the prefix property or, stated in an equivalent way, all LR(k) languages are in the regular closure of the class of LR(0) languages.}, ANNOTE = {COLIURL : Bertsch:1999:RCD.pdf Bertsch:1999:RCD.ps} } @InProceedings{Branco_Crysmann:1999, AUTHOR = {Branco, António and Crysmann, Berthold}, TITLE = {Negative Concord and Linear Constraints on Quantification}, YEAR = {1999}, BOOKTITLE = {Romance Languages and Linguistic Theory 1999.Selected papers from 'Going Romance' 1999, December 9-11}, EDITOR = {d'Hulst, Y. and Rooryck, J. and Schroten, J.}, ADDRESS = {Leiden}, PUBLISHER = {John Benjamins Publishing}, URL = {https://www.coli.uni-saarland.de/~crysmann/papers/NC.html} } @PhdThesis{Brants:1999, AUTHOR = {Brants, Thorsten}, TITLE = {Tagging and Parsing with Cascaded Markov Models - Automation of Corpus Annotation. Saarbrücken Dissertations in Computational Linguistics and Language Technology, Volume 6.}, YEAR = {1999}, ADDRESS = {Saarbrücken}, SCHOOL = {Universität des Saarlandes}, URL = {http://www.dfki.de/lt/diss/diss_en.htm}, ABSTRACT = {This thesis presents new techniques for parsing natural language. They are based on Markov Models, which are commonly used in part-of-speech tagging for sequential processing on the word level. We show that Markov Models can be successfully applied to other levels of syntactic processing. First, two classification tasks are handled: the assignment of grammatical functions and the labeling of non-terminal nodes. Then, Markov Models are used to recognize hierarchical syntactic structures. Each layer of a structure is represented by a separate Markov Model. The output of a lower layer is passed as input to a higher layer, hence the name: Cascaded Markov Models. Instead of simple symbols, the states emit partial context-free structures. The new techniques are applied to corpus annotation and partial parsing and are evaluated using corpora of different languages and domains.} } @InProceedings{Brants:1999_1, AUTHOR = {Brants, Thorsten}, TITLE = {Cascaded Markov Models}, YEAR = {1999}, BOOKTITLE = {9th Conference of the European Chapter of the Association for Computational Linguistics (EACL '99), June 8-12}, ADDRESS = {Bergen}, URL = {https://www.coli.uni-saarland.de/~thorsten/publications/Brants-EACL99.ps.gz}, ABSTRACT = {This paper presents a new approach to partial parsing of context-free structures. The approach is based on Markov Models. Each layer of the resulting structure is represented by its own Markov Model, and output of a lower layer is passed as input to the next higher layer. An empirical evaluation of the method yields very good results for NP/PP chunking of German newspaper texts.}, ANNOTE = {COLIURL : Brants:1999:CMM.pdf Brants:1999:CMM.ps} } @InProceedings{Brants_et_al:1999, AUTHOR = {Brants, Thorsten and Skut, Wojciech and Uszkoreit, Hans}, TITLE = {Syntactic Annotation of a German Newspaper Corpus}, YEAR = {1999}, BOOKTITLE = {ATALA sur le Corpus Annotés pour la Syntaxe Treebanks, June 18-19}, PAGES = {69-76}, EDITOR = {Abeillé, Anne}, ADDRESS = {Paris, France}, URL = {https://www.coli.uni-saarland.de/~thorsten/publications/Brants-ea-ATALA99.pdf}, ABSTRACT = {We report on the syntactic annotation of a German newspaper corpus. 
The annotation consists of context-free structures, additionally allowing crossing branches, with labeled nodes (phrases) and edges (grammatical functions). Furthermore, we present a new, interactive semi-automatic annotation process that allows efficient and reliable annotations.}, ANNOTE = {COLIURL : Brants:1999:SAG.pdf} } @InProceedings{Bredenkamp_et_al:1999, AUTHOR = {Bredenkamp, Andrew and Klein, Judith and Crysmann, Berthold}, TITLE = {Annotation of Error Types for a German News Corpus}, YEAR = {1999}, BOOKTITLE = {ATALA sur les Corpus Annotés pour la Syntaxe Treebanks, June 18-19}, ADDRESS = {Paris, France}, URL = {http://flag.dfki.de/pdf/ErrAnnot.pdf}, ABSTRACT = {This paper will discuss the corpus annotation effort in the FLAG project and its application for assisting in the development of controlled language and grammar checking applications. The main aim of the German government-funded FLAG project is to develop technologies for controlled language (CL) and grammar checking applications for German. The project work has therefore been divided into two separate but complementary streams of activity. Firstly, the aim was to develop a modular NLP software architecture for quickly developing different kinds of CL and grammar checking applications. Secondly, to validate the first activity, it was seen as important to build up an empirical base for testing and formally evaluating checking components. Given the lack of existing annotated corpora of errors for German (or indeed for any language as far as the authors know), the construction of such a corpus was a high priority task. This would enable us not only to perform quantitative tests, but also to derive an empirically based typology of errors which the project could use for orientation. The corpus was particularly important given the approach which the FLAG project was taking to the task of grammar and controlled language checking, which relies on a phenomenon-oriented approach to the problem of identifying errors, using shallow processing techniques. In order to fine-tune the heuristics which are central to such an approach, i.e. one based on identifying “candidate errors” of increasing probability, it is essential to have good test suites annotated with respect to the phenomena under investigation. The annotation of the corpus was to be carried out in such a way that we could easily access and quantify snapshots of the data, for producing test suites for testing purposes and for producing statistics on the frequency of particular error types. The research community not only lacked an annotated corpus of errors; there was also no existing ontology of errors which could be easily translated into an annotation schema. The definition of such a schema based on traditional descriptions of errors (such as Luik, 1993a; Luik, 1993b) thus formed the first major workpackage.
Fortunately, tools for the annotation of corpora, and the management thereof are becoming increasingly sophisticated; it was therefore necessary to evaluate a number of tools in the light of our specific needs.}, ANNOTE = {COLIURL : Bredenkamp:1999:AET.pdf} } @InProceedings{Brinckmann_Benzmüller:1999, AUTHOR = {Brinckmann, Caren and Benzmüller, Ralf}, TITLE = {The Relationship Between Utterance Type and F0 Contour in German}, YEAR = {1999}, MONTH = {5 September}, BOOKTITLE = {Proceedings 6th European Conference on Speech Communication and Technology (EUROSPEECH '99)}, VOLUME = {1}, PAGES = {21-24}, ADDRESS = {Budapest, Hungary}, PUBLISHER = {ESCA}, URL = {https://www.coli.uni-saarland.de/~cabr/Eurospeech99/}, ABSTRACT = {In this study we investigate the intonational characteristics of the four utterance types statement, wh-question, yes/no-question and declarative question. Readings of two German scripted dialogues were examined to ascertain characteristic features of the F0 contour for each utterance type. Final boundary tone, nuclear pitch accent, F0 offset, F0 onset, F0 range, and the slopes of a topline and a bottomline were determined for each utterance and compared for the four utterance types. Results show that for an average speaker, the final boundary tone, the F0 range, and the slope of the topline can be used to distinguish between the four utterance types. However, speakers may deviate from this pattern and exploit other intonational means to distinguish certain utterance types or choose not to mark a syntactic difference at all.}, ANNOTE = {COLIURL : Brinckmann:1999:RBU.pdf Brinckmann:1999:RBU.ps} } @InProceedings{Buitelaar:1999, AUTHOR = {Buitelaar, Paul}, TITLE = {Ambiguity in Semantic Annotation Submitted}, YEAR = {1999}, BOOKTITLE = {Standardizing Lexical Resources Workshop (SIGLEX '99), June 21-22}, ADDRESS = {University of Maryland College Park, USA} } @InProceedings{Busemann:1999, AUTHOR = {Busemann, Stephan}, TITLE = {Constraint-Based Techniques for Interfacing Software Modules}, YEAR = {1999}, BOOKTITLE = {Proceedings of the AISB'99 Workshop on Reference Architectures and Data Standards for NLP, April 6-9}, PAGES = {48-54}, EDITOR = {Mellish, Chris and Scott, Donia}, ADDRESS = {Edinburgh, Scotland}, PUBLISHER = {The Society for the Study of Artificial Intelligence and Simulation of Behaviour}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/busemann99.ps.gz}, ABSTRACT = {The reuse of standardized software is among the primary goals of application builders. The vision of a building block scenario of pieces of software that can be configured to form a new application is becoming real. However, this vision places strong requirements on the interfaces. Practice dictates that it must be easy to combine building blocks. Hence the interfaces should be flexible and, ideally, adaptable to new tasks or domains. This paper presents a simple method to structurally relate interface languages and to check the syntactic correctness of expressions.}, ANNOTE = {COLIURL : Busemann:1999:CBT.pdf Busemann:1999:CBT.ps} } @Article{Capstick_et_al:1999, AUTHOR = {Capstick, Joanne and Diagne, Abdel Kader and Erbach, Gregor and Uszkoreit, Hans and Leisenberg, Anne and Leisenberg, Manfred}, TITLE = {A System for Supporting Cross-Lingual Retrieval}, YEAR = {1999}, JOURNAL = {Information Processing and Management. 
An International Journal}, VOLUME = {36}, NUMBER = {2}, PAGES = {275-289}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/mulinex-ipm99.pdf ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/mulinex-ipm99.ps}, ANNOTE = {COLIURL : Capstick:1999:SSC.pdf Capstick:1999:SSC.ps} } @InBook{PCC1999, AUTHOR = {Pickering, Martin J. and Clifton, Charles Jr. and Crocker, Matthew W.}, TITLE = {Introduction}, YEAR = {1999}, BOOKTITLE = {Architectures and Mechanisms for Language Processing}, EDITOR = {Crocker, Matthew W. and Pickering, Martin J. and Clifton, Charles Jr.}, ADDRESS = {Cambridge, UK}, PUBLISHER = {Cambridge University Press} } @InProceedings{Crocker_Brants:1999, AUTHOR = {Crocker, Matthew W. and Brants, Thorsten}, TITLE = {Incremental Probabilistic Models of Human Linguistic Performance}, YEAR = {1999}, BOOKTITLE = {5th Conference on Architectures and Mechanisms for Language Processing (AMLaP '99), September 23-26}, ADDRESS = {Edinburgh, Scotland}, ABSTRACT = {Models of human language processing increasingly advocate probabilistic mechanisms for parsing and disambiguation (e.g. Jurafsky, 1996; MacDonald et al 1994; Crocker and Corley, to appear). These models resolve local syntactic and lexical ambiguity by promoting the analysis which has the greatest probability of being correct. In this talk we will outline a new probabilistic parsing model which is a generalisation of the Hidden Markov Models which have previously been defended as psychological models of lexical category disambiguation (Corley and Crocker, in press). The model uses layered, or cascaded, Markov models (CMMs) to build up a syntactic analysis (Brants, 1999). In contrast with many probabilistic parsing models, CMMs can easily be implemented to parse incrementally. Incremental CMMs have the property of generating partial structures including hypothetical continuations after receiving each new word in the input. New material is incorporated into the existing structure and ambiguities are resolved based on local context. Alternative hypotheses are assigned probabilities which are used for ranking, and only a bounded number of parallel alternatives are pursued. Simple bounds on the model straightforwardly predict the recency effects often attributed only to connectionist-based models (Stevenson, 1994; MacDonald et al, 1994; Kempen and Vosse, 1987). In contrast with several current models, the combination of weights in CMMs is motivated directly by probability theory. The parameters of the model are acquired automatically from a corpus, and there are relatively few stipulations about how probabilities are combined (contra Jurafsky, 1996; Tanenhaus et al, in press). An important cognitive parameter concerns the number of analyses which are maintained in parallel. We will present results of experiments which evaluate the performance of the model for both general language processing, and on several critical ambiguities where human performance is well understood. The model is a first step in exploring the role of optimal models of human linguistic performance, as motivated by Chater, Crocker, and Pickering (1998). Recently, Pickering, Traxler and Crocker (to appear) have provided experimental evidence which challenges a pure maximum likelihood model of syntactic ambiguity resolution. As an alternative, they propose a measure, termed Informativity, which they derive from a rational analysis of the parsing and interpretation problem.
In the final part of the talk we will outline how the presented model can be adapted to implement Informativity, which combines probability with a newly proposed measure termed Specificity.} } @InProceedings{Crysmann:1999, AUTHOR = {Crysmann, Berthold}, TITLE = {Licensing Proclisis in European Portuguese}, YEAR = {1999}, BOOKTITLE = {Empirical Issues in Formal Syntax and Semantics. Selected papers from the Colloque de Syntaxe et de Sémantique de Paris (CSSP'97), October 16-18}, PAGES = {255-276}, EDITOR = {Corblin, F. and Marandin, J.-M. and Dobrovie-Sorin, C.}, ADDRESS = {Paris, France}, PUBLISHER = {Thesus}, ABSTRACT = {In this paper, I will address the interaction between quantification and linearisation in the grammar of European Portuguese (EP) clitic placement. In particular, I will suggest that a licensing relation holds between a subset of the natural language quantifiers identified in Generalised Quantifier Theory (GQT) and the order in which the clitic and its host must surface. More specifically, I will argue that the class of proclisis licensors is best described in semantic terms (i.e. in terms of logical entailment), whereas the relation between proclisis licensor and licensee should be conceived of as entirely surface-syntactic. It will be shown that approaches which mediate the licensing relation by means of syntactic movement (cf. e.g. Barbosa, 1996; Duarte, 1983; Madeira, 1992) are faced with both motivational and empirical problems. Instead, I claim that surface-syntactic linearisation constraints will relate clitic placement directly to a class of lexical items, which in turn is defined on the basis of semantic properties. Therefore, an integrated model of syntax and semantics is called for which builds on highly articulate lexical information. The analysis will, thus, be carried out in the framework of Head-driven Phrase Structure Grammar (HPSG) (Pollard and Sag, 1987; 1994), using multiple inheritance type hierarchies and linearisation constraints. The paper is organised as follows: in the first section, I shall briefly describe the basic empirical observations regarding EP proclisis. In section two, I shall review the empirical problems faced by previous (mostly syntactic) approaches. Section three provides the details of the proposal, starting with a semantic typology of proclisis licensors. In the remainder of the section, I outline the surface-syntactic constraints which define the phenogrammatical relation between licensor and licensee, analysing EP proclisis in an essentially similar way to English negative polarity items (NPI) (cf. Ladusaw, 1996, and references cited there).} } @InCollection{Crysmann:1999_1, AUTHOR = {Crysmann, Berthold}, TITLE = {Morphosyntactic Paradoxa in Fox}, YEAR = {1999}, BOOKTITLE = {Constraints and Resources in Natural Language Syntax and Semantics}, EDITOR = {Bouma, Gosse and Hinrichs, Erhard and Kruijff, Geert-Jan M. and Oehrle, Richard T.}, SERIES = {Studies in Constraint-Based Lexicalism}, ADDRESS = {Stanford}, PUBLISHER = {CSLI Publications}, URL = {https://www.coli.uni-saarland.de/~crysmann/}, ABSTRACT = {In this paper, I shall discuss an apparent paradox in the morphology and syntax of Fox (Mesquakie) complex verbs. In Fox, verbs can be modified by one or more of a variety of preverbs including modals, aspectuals, manner adverbials, numerals, quantifiers, as well as preverbs which increase the valence of the main verb (Dahlstrom, 1997a).
While preverb and verb can be separated by words, phrases, or even embedded sentences, suggesting a status as syntactically independent words, inflection (cf. Dahlstrom, 1997a) and derivation (cf. Ackerman and LeSourd, 1994) appear to treat preverb-verb complexes as a single morphological unit. Following the basic assumptions of lexicalist syntax, I claim that Fox preverb-verb combinations are indeed morphologically derived and that inflectional affixes are attached to complex morphological objects in the word-formation component already. In order to account for the syntactic effects, I propose an analysis in Linearisation HPSG (Reape, 1994, Kathol, 1995), which builds on the assumption that Fox preverb-verb complexes introduce more than one domain object into syntax (cf. Kathol, 1996 for German, Crysmann, 1997 for European Portuguese). Further morphological material will then be distributed across preverb and verb by imposing partial morphological (order) constraints on PHON-values.} } @InProceedings{de Jong_et_al:1999, AUTHOR = {de Jong, Franciska and Gauvain, Jean-Luc and den Hartog, Jurgen and Netter, Klaus}, TITLE = {OLIVE: Speech-Based Video Retrieval}, YEAR = {1999}, BOOKTITLE = {Proceedings of the European Workshop on Content-Based Multimedia Indexing (CBMI'99), October 25-27}, PAGES = {75-80}, ADDRESS = {Toulouse, France}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/kn-cbmi99-final.ps ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/kn-cbmi99-final.pdf}, ANNOTE = {COLIURL : Jong:1999:OSB.pdf Jong:1999:OSB.ps} } @InProceedings{De Kuthy_Meurers:1999, AUTHOR = {De Kuthy, Kordula and Meurers, Detmar}, TITLE = {Argument Raising Meets Adjuncts-as-Dependents and Traceless Extraction}, YEAR = {1999}, BOOKTITLE = {6th International Conference on HPSG, August 4-6}, PAGES = {45-50}, ADDRESS = {Edinburgh, Scotland}, URL = {http://ling.osu.edu/~dm/papers/dekuthy-meurers-hpsg99.ps.gz}, ANNOTE = {COLIURL : Kuthy:1999:ARM.pdf Kuthy:1999:ARM.ps} } @InProceedings{Duchier:1999, AUTHOR = {Duchier, Denys}, TITLE = {Axiomatizing Dependency Parsing using Set Constraints}, YEAR = {1999}, BOOKTITLE = {6th Meeting on Mathematics of Language (MOL6), July 23-25}, PAGES = {115-126}, ADDRESS = {Orlando, Florida, USA}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/duchier-mol6.ps.gz}, ABSTRACT = {We propose a new formulation of dependency grammar and develop a corresponding axiomatization of syntactic well-formedness with a natural reading as a concurrent constraint program. We demonstrate the expressivity and effectiveness of set constraints, and describe a treatment of ambiguity with wide applicability. Further, we provide a constraint programming account of dependent disjunctions that is both simple and efficient and additionally provides the benefits of constructive disjunctions. Our approach was implemented in Oz and yields parsers with very good performance for our currently middle scale grammars.
Constraint propagation can be observed to be remarkably effective in pruning the search space.}, ANNOTE = {COLIURL : Duchier:1999:ADP.pdf Duchier:1999:ADP.ps} } @InProceedings{Duchier:1999_1, AUTHOR = {Duchier, Denys}, TITLE = {Set Constraints in Computational Linguistics - Solving Tree Descriptions}, YEAR = {1999}, BOOKTITLE = {Workshop on Declarative Programming with Sets (DPS '99), September 28}, PAGES = {91-98}, ADDRESS = {Paris, France}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/duchier-wdps99.ps.gz}, ABSTRACT = {We describe our application of set constraints to the problem of finding solutions of tree descriptions. The encoding that turns a description into a CSP is given here in full in an axiomatic style.}, ANNOTE = {COLIURL : Duchier:1999:SCC.pdf Duchier:1999:SCC.ps} } @InProceedings{Duchier_Gardent:1999, AUTHOR = {Duchier, Denys and Gardent, Claire}, TITLE = {A Constraint-Based Treatment of Descriptions}, YEAR = {1999}, BOOKTITLE = {3rd International Workshop on Computational Semantics (IWCS 3), January 13-15}, PAGES = {71-85}, EDITOR = {Bunt, Harry and Thijsse, Elias}, ADDRESS = {Tilburg, The Netherlands}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/iwcs99.ps.gz}, ABSTRACT = {Both in computational linguistics and in formal semantics, tree (or graph) descriptions stated in terms of dominance have become common. Yet the issue of how such descriptions are processed has been little explored. In this paper, we present a constraint-based treatment of descriptions: we develop a formulation in terms of sets, which is simple and declarative, and, at the same time, constitutes an efficient implementation. We further show how the treatement of tree descriptions can be extended to DAG descriptions and apply it to a description-based account of discourse.}, ANNOTE = {COLIURL : Duchier:1999:CBT.pdf Duchier:1999:CBT.ps} } @InProceedings{Duchier_Thater:1999, AUTHOR = {Duchier, Denys and Thater, Stefan}, TITLE = {Parsing with Tree Descriptions: A Constraint-Based Approach}, YEAR = {1999}, BOOKTITLE = {6th International Workshop on Natural Language Understanding and Logic Programming (NLULP '99), December 3-4}, PAGES = {17-32}, ADDRESS = {Las Cruces, New Mexico, USA}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/duchier-thater-nlulp99.ps.gz}, ABSTRACT = {We describe a grammatical formalism based on tree descriptions and develop a constraint-based treatment of parsing in that framework. We introduce the language of electrostatic tree descriptions to write lexical entries: these are tree descriptions using neutral, as well as positively and negatively charged variables. We develop an appropriate notion of model. We then extend the framework to disjunctive systems of electrostatic descriptions, and we correspondingly extend the notion of model. 
Then we show how the search for minimal models can be realized by reduction to a CSP solvable by constraint programming and we provide the full encoding in an axiomatic style.}, ANNOTE = {COLIURL : Duchier:1999:PTD.pdf Duchier:1999:PTD.ps} } @InProceedings{Egg:1999, AUTHOR = {Egg, Markus}, TITLE = {Derivation and Resolution of Ambiguities in Wieder-Sentences}, YEAR = {1999}, BOOKTITLE = {12th Amsterdam Colloquium}, PAGES = {109-114}, EDITOR = {Dekker, Paul}, ADDRESS = {Amsterdam, The Netherlands}, PUBLISHER = {ILLC} } @InCollection{Erbach_et_al:1999, AUTHOR = {Erbach, Gregor and Unz, Dagmar and Capstick, Joanne}, TITLE = {Interface-Design zur Unterstützung von Selektionsentscheidungen}, YEAR = {1999}, BOOKTITLE = {Selektion im Internet}, EDITOR = {Wirth, Werner and Schweiger, Wolfgang}, PUBLISHER = {Westdeutscher Verlag}, URL = {https://www.coli.uni-saarland.de/~erbach/pub/mulinex-mefis/unz-et-al.pdf}, ANNOTE = {COLIURL : Erbach:1999:IDU.pdf} } @InProceedings{Erk:1999, AUTHOR = {Erk, Katrin}, TITLE = {Simulating Boolean Circuits by Finite Splicing}, YEAR = {1999}, BOOKTITLE = {Congress on Evolutionary Computation (CEC '99), July 16-19}, PAGES = {1279-1285}, ADDRESS = {La Jolla Marriott, San Diego, USA}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/ErkBoolCirc99.ps.gz}, ABSTRACT = {As a computational model to be simulated in a DNA computing context, Boolean circuits are especially interesting because of their parallelism. Simulations in concrete biochemical computing settings have been given by [Ogihara/Ray 96] and [Amos/Dunne97]. In this paper, we show how to simulate Boolean circuits by finite splicing systems, an abstract model of enzymatic recombination. We argue that using an abstract model of DNA computation as a basis leads to simulations of greater clarity and generality. In our construction, the running time of the simulating system is proportional to the depth, and the use of material is proportional to the size of the Boolean circuit simulated. However, the rules of the simulating splicing system depend on the size of the Boolean circuit, but not on the connectives used.}, ANNOTE = {COLIURL : Erk:1999:SBC.pdf Erk:1999:SBC.ps} } @InProceedings{Gabsdil_Striegnitz:1999, AUTHOR = {Gabsdil, Malte and Striegnitz, Kristina}, TITLE = {Classifying Scope Ambiguities}, YEAR = {1999}, BOOKTITLE = {1st Workshop on Inference in Computational Semantics (ICoS-1), August 15}, PAGES = {125-131}, EDITOR = {Monz, Christof and de Rijke, Maarten}, ADDRESS = {Amsterdam, The Netherlands}, URL = {https://www.coli.uni-saarland.de/~kris/papers/icos99.ps.gz}, ABSTRACT = {We describe the architecture and implementation of a system which compares semantic representations of natural language input w.r.t. equivalence of logical content and context change potential. Giving a clear graphical representation of the relationship between different readings, the stand-alone version of the system can be used as a classroom tool. Furthermore the core system can be incorporated into other discourse processing systems (e.g. Johan Bos' DORIS system (Bos 1998) where one might want to ignore logically equivalent readings in order to keep the number of readings small and thus improve efficiency. The system relies heavily on existing implementations and code available via the internet. These are integrated and put to the desired use by a Prolog interface. 
By illustrating the architecture of this system, we want to argue that it is possible to build rather complex systems involving multiple levels of linguistic processing without having to spend an unreasonably large amount of time on the implementation of basic functionalities.}, ANNOTE = {COLIURL : Gabsdil:1999:CSA.pdf Gabsdil:1999:CSA.ps} } @TechReport{Gardent:1999, AUTHOR = {Gardent, Claire}, TITLE = {Deaccenting and Higher-Order Unification}, YEAR = {1999}, MONTH = {October}, NUMBER = {112}, PAGES = {26}, ADDRESS = {Saarbrücken}, TYPE = {CLAUS-Report}, INSTITUTION = {Universität des Saarlandes}, URL = {ftp://ftp.coli.uni-sb.de/pub/coli/claus/claus112.ps}, ABSTRACT = {The HOU based analysis of ellipsis presented in (Dalrymple, Shieber and Pereira 1991) was shown to correctly capture the complex interaction of VP-ellipsis, scope and anaphora and claimed to extend to further related phenomena. When applied to deaccenting, the analysis makes a strong prediction, namely that all anaphors occurring in the deaccented part of a deaccented utterance are parallel anaphors that is, anaphors that resolve to their parallel counterpart in the source. I argue that this prediction is supported by the data and show that it correctly captures the interaction of deaccenting with anaphora, (in)definiteness and focus.}, ANNOTE = {COLIURL : Gardent:1999:DHO.pdf Gardent:1999:DHO.ps} } @Article{Gardent_et_al:1999, AUTHOR = {Gardent, Claire and Kohlhase, Michael and Konrad, Karsten}, TITLE = {Higher-Order Coloured Unification: a Linguistic Application}, YEAR = {1999}, JOURNAL = {Technique et Science Informatiques}, VOLUME = {18}, NUMBER = {2}, PAGES = {181-209} } @TechReport{Gardent_Konrad:1999, AUTHOR = {Gardent, Claire and Konrad, Karsten}, TITLE = {Definites or the proper treatment of rabbits}, YEAR = {1999}, MONTH = {June}, NUMBER = {111}, PAGES = {12}, ADDRESS = {Saarbrücken}, TYPE = {CLAUS-Report}, INSTITUTION = {Universität des Saarlandes}, URL = {ftp://ftp.coli.uni-sb.de/pub/coli/claus/claus111.ps}, ABSTRACT = {We argue that model generation programs, i.e., deduction systems that automatically compute the interpretations satisfying a given formula, can provide a procedural interpretation for semantic theories of natural language. We illustrate this claim by describing how the higher-order model generator kimba interprets definite descriptions.}, ANNOTE = {COLIURL : Gardent:1999:DPT.pdf Gardent:1999:DPT.ps} } @InProceedings{Grigorova_et_al:1999, AUTHOR = {Grigorova, Evelina and Filipov, Vladimir and Andreeva, Bistra}, TITLE = {A Contrastive Investigation of Discourse Intonational Characteristic Features of Sofia Bulgarian and Hamburg German in MAP Task Dialogues}, YEAR = {1999}, BOOKTITLE = {Proceedings of the 6th European Conference on Speech Communication and Technology (EUROSPEECH 99), September 5-9}, VOLUME = {1}, PAGES = {25-28}, ADDRESS = {Budapest}, URL = {http://www.telecom.tuc.gr/paperdb/eurospeech99/PAPERS/S1O2/G002.PDF}, ABSTRACT = {Ten MAP Task dialogues for Sofia Bulgarian (SB) and six for Hamburg German (HG) are recorded and analyzed by means of X-Waves Software Package. The discourse intonation features focused on are denial and convergence. It has been observed that for German denial can be integrated into discourse-listing through intonation: Ja-acknowledge and Nein-/Ne-denial moves are both manifested by intonation rises. For Bulgarian, intonation rises in answering moves occur only in the acknowledge subtype: rises in denials (Ne-) are associated with uncertainty and surprise. 
The HG Ne- and SB Ne-moves are resynthesized by means of PSOLA, twelve stimuli being obtained for SB and sixteen for HG. Two appropriate contexts marked for discourse-listing and follow-up moves are excerpted from the MAP Task and are included in perceptual tests whereby native speakers are asked to determine the appropriateness of each stimulus in relation to each context. The results for Bulgarian contradict our preliminary observations. Convergence is defined as the matching of corresponding movements in pitch ranges and signals sympathetic agreement with the other speaker's point of view. The check: answer move sequence can be viewed as instantiating convergence and exemplifies both lexical and F0 movement repetition, especially where ellipted moves are concerned. The two resynthesized sequences for HG and SB respectively are "Im Westen" and "Pravo nagore" as manifested in check and answering contexts. As above, native speakers are expected to determine the appropriateness of each stimulus in relation to each context. It has been observed that the differences between checks and answering moves for both HG and SB are phonetically manifested and are also established as being relevant by the perceptual tests, yet they cannot be accounted for phonologically by tone alignment: convergence seems to attenuate the phonological differentiation between checks and answering moves.}, ANNOTE = {COLIURL : Grigorova:1999:CID.pdf} } @InCollection{Hajicová_et_al:1999, AUTHOR = {Hajicová, Eva and Korbayova, Ivana and Sgall, Petr}, TITLE = {Prague Dependency Treebank: Restoration of Deletions}, YEAR = {1999}, BOOKTITLE = {Text, Speech and Dialogue - Second International Workshop, TSD'99, Plzen, Czech Republic, September 1999}, VOLUME = {1692}, PAGES = {44-49}, EDITOR = {Matousek, Václav and Mautner, Pavel and Ocelíková, Jana and Sojka, Petr}, ADDRESS = {Berlin}, PUBLISHER = {Springer}, URL = {http://shadow.ms.mff.cuni.cz/pdt/Corpora/PDT_1.0/References/tsd99-deletion.pdf}, ANNOTE = {COLIURL : Hajicova:1999:PDT.pdf} } @Article{Haridi_et_al:1999, AUTHOR = {Haridi, Seif and Van Roy, Peter and Brand, Per and Mehl, Michael and Scheidhauer, Ralf and Smolka, Gert}, TITLE = {Efficient Logic Variables for Distributed Computing}, YEAR = {1999}, JOURNAL = {ACM Transactions on Programming Languages and Systems}, VOLUME = {21}, NUMBER = {3}, PAGES = {569-626}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/TOPLAS99.ps.gz}, ABSTRACT = {We define a practical algorithm for distributed rational tree unification and prove its correctness in both the off-line and on-line cases. We derive the distributed algorithm from a centralized one, showing clearly the trade-offs between local and distributed execution. The algorithm is used to realize logic variables in the Mozart Programming System, which implements the Oz language (see http://www.mozart-oz.org/). Oz appears to the programmer as a concurrent object-oriented language with dataflow synchronization. Logic variables implement the dataflow behavior. We show that logic variables can easily be added to the more restricted models of Java and ML, thus providing an alternative way to do concurrent programming in these languages. We present common distributed programming idioms in a network-transparent way using logic variables. We show that in common cases the algorithm maintains the same message latency as explicit message passing. In addition, it is able to handle uncommon cases that arise from the properties of latency tolerance and third-party independence.
This is evidence that using logic variables in distributed computing is beneficial at both the system and language levels. At the system level, they improve latency tolerance and third-party independence. At the language level, they help make network-transparent distribution practical.}, ANNOTE = {COLIURL : Haridi:1999:ELV.pdf Haridi:1999:ELV.ps} } @InProceedings{Henz_et_al:1999, AUTHOR = {Henz, Martin and Müller, Tobias and Boon Ng, Ka}, TITLE = {Figaro: Yet another Constraint Programming Library}, YEAR = {1999}, BOOKTITLE = {Workshop on Parallelism and Implementation Technology for (Constraint) Logic Programming Languages, December 1}, ADDRESS = {Las Cruces, New Mexico, USA}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/figaro-parimplws99.ps.gz}, ABSTRACT = {Existing libraries and languages for finite domain constraint programming usually have depth-first search (with branch and bound) built-in as the only search algorithm. Exceptions are the languages CLAIRE and Oz, which support the programming of different search algorithms through special purpose programming language constructs. The goal of this work is to make abstractions for programming search algorithms available in a language-independent setting by using the concept of a room. Figaro is an experimentation platform being designed to study non-standard search algorithms, different memory policies for search (trailing vs copying), consistency algorithms, failure handling and support for modeling. Figaro is conceived as a C++ library providing abstractions based on the concept of a room. This paper focuses on the use and implementation of such abstractions for investigating programmable search algorithms and memory policies in a C++ constraint programming library.}, ANNOTE = {COLIURL : Henz:1999:FYA.ps} } @InProceedings{Jaspars_Koller:1999, AUTHOR = {Jaspars, Jan and Koller, Alexander}, TITLE = {A Calculus for Direct Deduction with Dominance Constraints}, YEAR = {1999}, BOOKTITLE = {12th Amsterdam Colloquium (AC '99)}, ADDRESS = {Amsterdam, The Netherlands}, URL = {https://www.coli.uni-saarland.de/~koller/papers/domded.ps.gz}, ABSTRACT = {Underspecification has recently been a popular approach to dealing with ambiguity. An important operation in this context is direct deduction, deduction on underspecified descriptions which is justified by the meaning of the described formulae. Here we instantiate an abstract approach to direct deduction to dominance constraints, a concrete underspecification formalism, and obtain a sound and complete calculus for this formalism.}, ANNOTE = {COLIURL : Jaspars:1999:CDD.pdf Jaspars:1999:CDD.ps} } @InProceedings{Kasper_et_al:1999, AUTHOR = {Kasper, Walter and Kiefer, Bernd and Krieger, Hans-Ulrich and Rupp, Christopher J. and Worm, Karsten}, TITLE = {Charting the Depths of Robust Speech Parsing}, YEAR = {1999}, BOOKTITLE = {37th Annual Meeting of the Association for Computational Linguistics (ACLANNUAL '99), June 20-26}, PAGES = {405-412}, EDITOR = {ACL}, ADDRESS = {University of Maryland, College Park, USA}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/131.entry} } @InProceedings{Kasper_et_al:1999_1, AUTHOR = {Kasper, Walter and Kiefer, Bernd and Krieger, Hans-Ulrich and Rupp, Christopher J. 
and Worm, Karsten L.}, TITLE = {Charting the Depths of Robust Speech Parsing}, YEAR = {1999}, BOOKTITLE = {Proceedings of the 37th Annual Meeting of the Association for Computational Linguistics (ACL'99), June 20-26}, PAGES = {405-412}, ADDRESS = {University of Maryland, College Park, USA}, URL = {http://acl.ldc.upenn.edu/P/P99/P99-1052.pdf}, ABSTRACT = {We describe a novel method for coping with ungrammatical input based on the use of chart-like data structures, which permit anytime processing. Priority is given to deep syntactic analysis. Should this fail, the best partial analyses are selected, according to a shortest-paths algorithm, and assembled in a robust processing phase. The method has been applied in a speech translation project with large HPSG grammars.}, ANNOTE = {COLIURL : Kasper:1999:CDRb.pdf} } @InProceedings{Keller_et_al:1999, AUTHOR = {Keller, Frank and Corley, Martin and Corley, Steffan and Crocker, Matthew W. and Trewin, Shari}, TITLE = {GSEARCH: A Tool for Syntactic Investigation of Unparsed Corpora}, YEAR = {1999}, BOOKTITLE = {The EACL Workshop on Linguistically Interpreted Corpora (LINC '99), June 12}, ADDRESS = {Bergen, Norway} } @InProceedings{Kiefer_et_al:1999, AUTHOR = {Kiefer, Bernd and Krieger, Hans-Ulrich and Carroll, John and Malouf, Robert}, TITLE = {A Bag of Useful Techniques for Efficient and Robust Parsing}, YEAR = {1999}, BOOKTITLE = {Proceedings of the 37th Annual Meeting of the Association for Computational Linguistics (ACL-ANNUAL'99), June 20 - 26}, PAGES = {473-480}, ADDRESS = {College Park, Maryland, USA}, PUBLISHER = {ACL}, URL = {http://www.cogs.susx.ac.uk/lab/nlp/carroll/papers/acl99.pdf}, ABSTRACT = {This paper describes new and improved techniques which help a unification based parser to process input efficiently and robustly. In combination these methods result in a speed up in parsing time of more than an order of magnitude. The methods are correct in the sense that none of them rule out legal rule applications.}, ANNOTE = {COLIURL : Kiefer:1999:BUT.pdf} } @MastersThesis{Koller:1999, AUTHOR = {Koller, Alexander}, TITLE = {Constraint Languages for Semantic Underspecification}, YEAR = {1999}, ADDRESS = {Saarbrücken}, SCHOOL = {Computational Linguistics}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/Koller99.ps.gz}, ABSTRACT = {At all levels of linguistic analysis, natural language can be ambiguous. The numbers of readings of different ambiguous components of a sentence or discourse multiply over all these components, yielding a number of readings that can be exponential in the number of ambiguities. Both from a computational and a cognitive point of view, it seems necessary to find small representations for ambiguities that describe all readings in a compact way. This approach is called underspecification, and it has received increasing attention in the past few years. Lately, two particularly elegant formalisms for the underspecified treatment of scope ambiguities in semantics have been proposed: Context Unification and the Constraint Language for Lambda Structures, CLLS. Common to both is that they regard the term representing the semantics of a sentence as a tree and describe it by imposing tree constraints. Furthermore, both offer the expressive power to describe simple ellipses and their interaction with scope ambiguities. This thesis investigates some formal properties of these two formalisms. 
It examines their relation and shows that, except for a few additional constructs of CLLS, both languages are equivalent in expressive power. In terms of computational complexity, this gives us the immediate result that the complexity of the satisfiability problem of CLLS is exactly the same as that of context unification, which, unfortunately, is unknown. The thesis further investigates the complexity of the satisfiability problem of dominance constraints, an important sublanguage of CLLS, and shows that it is NP-complete. In the course of the discussion of complexity, it also briefly explains how techniques from concurrent constraint programming can be applied to implement solution algorithms for these formalisms.}, ANNOTE = {COLIURL : Koller:1999:CLS.pdf Koller:1999:CLS.ps} } @TechReport{Koller_Niehren:1999, AUTHOR = {Koller, Alexander and Niehren, Joachim}, TITLE = {Scope Underspecification and Processing}, YEAR = {1999}, TYPE = {Reader for the ESSLLI summer school}, URL = {http://www.ps.uni-sb.de/~niehren/ESSLLI99/ ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/ESSLLI:99.ps.gz}, ABSTRACT = {This reader contains material for the ESSLLI '99 course, Scope Underspecification and Processing''. The reader and course are aimed at a pretty broad audience; we have tried to only presuppose a very general idea of natural language processing and of first-order logic. Underspecification is a general approach to dealing with ambiguity. In the course, we'll be particularly concerned with scope underspecification, which deals with scope ambiguity, a structural ambiguity of the semantics of a sentence. As scope underspecification is at least partially motivated by computational issues, we will pay particular attention to processing aspects. We're going to show how dominance constraints can be used for scope underspecification and how they can be processed efficiently by using concurrent constraint programming technology.}, ANNOTE = {COLIURL : Koller:1999:SUP.pdf Koller:1999:SUP.ps} } @InProceedings{Koller_et_al:1999, AUTHOR = {Koller, Alexander and Niehren, Joachim and Striegnitz, Kristina}, TITLE = {Relaxing Underspecified Semantic Representations for Reinterpretation}, YEAR = {1999}, BOOKTITLE = {6th Meeting on Mathematics of Language (MOL6), July 23-25}, PAGES = {74-87}, ADDRESS = {Orlando, Florida, USA}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/Relax99.ps.gz}, ABSTRACT = {Type and sort conflicts in semantics are usually resolved by a process of reinterpretation. Recently, Egg (1999) has proposed an alternative account in which conflicts are avoided by underspecification. The main idea is to derive sufficiently relaxed underspecified semantic representations; addition of reinterpretation operators then simply is further specialization. But in principle, relaxing underspecified representations bears the danger of overgeneration. In this paper, we investigate this problem in the framework of CLLS, where underspecified representations are expressed by tree descriptions subsuming dominance constraints. We introduce some novel properties of dominance constraints and present a safety criterion that ensures that an underspecified description can be relaxed without adding unwanted readings. 
We then apply this criterion systematically to Egg's analysis and show why its relaxation operation does not lead to overgeneration.}, ANNOTE = {COLIURL : Koller:1999:RUS.pdf Koller:1999:RUS.ps} } @InProceedings{Koreman_et_al:1999, AUTHOR = {Koreman, Jacques and Andreeva, Bistra and Strik, Helmer}, TITLE = {Acoustic Parameters Versus Phonetic Features in ASR}, YEAR = {1999}, BOOKTITLE = {Proceedings of the 14th International Congress of Phonetic Sciences (ICPhS'99), August 1-7}, ADDRESS = {San Francisco, USA}, URL = {http://lands.let.kun.nl/literature/strik.1999.2.ps}, ABSTRACT = {By mapping acoustic parameters onto phonetic features, it is possible to explicitly address the linguistic information in the signal. For the experiments presented in this paper, we mapped cepstral parameters onto two sets of phonetic features, one based on the IPA chart and the other on SPE. As a result, the phoneme identification rates in a hidden Markov modelling framework increase from 15.6% for the cepstral parameters to 42.3% and 31.7% for the IPA and SPE features, respectively. Furthermore, for phonetic features the resulting confusions between phonemes are often less severe from a phonetic point of view. The theoretical implications of the differences are addressed.}, ANNOTE = {COLIURL : Koreman:1999:APV.pdf Koreman:1999:APV.ps} } @InProceedings{Koreman_et_al:1999_1, AUTHOR = {Koreman, Jacques and Pützer, Manfred and Just, Manfred}, TITLE = {Acoustic, Electroglottographic and Perceptual Correlates of Vocal Fold Adduction Deficiencies}, YEAR = {1999}, BOOKTITLE = {Workshop on Non-Modal Vocal-Fold Vibration and Voice Quality, Satellite Meeting of the 14th International Congress of Phonetic Sciences (ICPhS '99). July 31}, ADDRESS = {UC Berkeley, San Francisco, USA}, NOTE = {Poster presentation} } @PhdThesis{Krenn:1999, AUTHOR = {Krenn, Brigitte}, TITLE = {The Usual Suspects: Data-Oriented Models for Identification and Representation of Lexical Collocations}, YEAR = {1999}, ADDRESS = {Saarbrücken}, SCHOOL = {Universität des Saarlandes} } @InCollection{Kruijff_Kruijff-Korbayová:1999, AUTHOR = {Kruijff, Geert-Jan M. and Korbayova, Ivana}, TITLE = {Text Structuring in a Multilingual System for Generation of Instructions}, YEAR = {1999}, BOOKTITLE = {Text, Speech and Dialogue - Second International Workshop, TSD'99, Plzen, Czech Republic, September 1999}, VOLUME = {1692}, PAGES = {89-94}, EDITOR = {Matousek, Václav and Mautner, Pavel and Ocelíková, Jana and Sojka, Petr}, ADDRESS = {Berlin}, PUBLISHER = {Springer}, URL = {https://www.coli.uni-saarland.de/~korbay/Publications/tsd99-ts.ps.gz}, ANNOTE = {COLIURL : Kruijff:1999:TSM.pdf Kruijff:1999:TSM.ps} } @Article{Kruijff-Korbayová:1999, AUTHOR = {Korbayova, Ivana}, TITLE = {Review of: Bosch, P.; van der Sandt, R. (Eds.): Focus.
Cambridge University Press, 1999.}, YEAR = {1999}, JOURNAL = {Prague Bulletin of Mathematical Linguistics}, VOLUME = {7}, PAGES = {80-82} } @Article{Kruijff-Korbayová:1999_1, AUTHOR = {Korbayova, Ivana}, TITLE = {Review of: Issues of Valency and Meaning: Studies in Honour of Jarmila Panevová}, YEAR = {1999}, JOURNAL = {Slovo a slovesnost}, VOLUME = {LX/2}, NUMBER = {60}, PAGES = {150-153}, NOTE = {in Czech} } @InCollection{Kruijff-Korbayová_Kruijff:1999, AUTHOR = {Korbayova, Ivana and Kruijff, Geert-Jan M.}, TITLE = {Handling Word Order in a Multilingual System for Generation of Instructions}, YEAR = {1999}, BOOKTITLE = {Text, Speech and Dialogue - Second International Workshop, TSD'99, Plzen, Czech Republic, September 1999}, VOLUME = {1692}, PAGES = {83-88}, EDITOR = {Matousek, Václav and Mautner, Pavel and Ocelíková, Jana and Sojka, Petr}, ADDRESS = {Berlin}, PUBLISHER = {Springer}, URL = {https://www.coli.uni-saarland.de/~korbay/Publications/tsd99-wo.ps.gz}, ANNOTE = {COLIURL : Kruijff-Korbayova:1999:HWO.pdf Kruijff-Korbayova:1999:HWO.ps} } @InProceedings{Kruijff-Korbayová_Kruijff:1999_1, AUTHOR = {Korbayova, Ivana and Kruijff, Geert-Jan M.}, TITLE = {Contextually Appropriate Ordering of Nominal Expressions}, YEAR = {1999}, BOOKTITLE = {11th European Summer School Logic Linguistics and Information (ESSLLI '99). Workshop on Generating Nominal Expressions, August 9-20}, EDITOR = {Kibble, Rodger and van Deemter, Kees}, ADDRESS = {Utrecht University, The Netherlands}, PUBLISHER = {European Summer School in Logic, Language and Information}, URL = {https://www.coli.uni-saarland.de/~korbay/Publications/gennom99.ps.gz}, ANNOTE = {COLIURL : Kruijff-Korbayova:1999:CAO.pdf Kruijff-Korbayova:1999:CAO.ps} } @MastersThesis{Lorenz:1999, AUTHOR = {Lorenz, Benjamin}, TITLE = {Ein Debugger für Oz}, YEAR = {1999}, ADDRESS = {Saarbrücken}, SCHOOL = {Universität des Saarlandes, Fachbereich Informatik}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/OzDebugger.ps.gz}, ABSTRACT = {Debugging plays an important role in the software development process. Syntactic errors can already be caught by the compiler; semantic errors, by contrast, are often hard to find, and tracking them down can be enormously time-consuming. Many professional programming environments therefore provide a powerful tool, a debugger, which eases the search for errors by allowing the faulty program to be halted at arbitrary points and inspected together with its data. This thesis describes the design of a debugger for the programming language Oz and its implementation in the Mozart programming system.}, ANNOTE = {COLIURL : Lorenz:1999:DO.pdf Lorenz:1999:DO.ps} } @InProceedings{Marimon_et_al:1999, AUTHOR = {Marimon, Montserrat and Theofilidis, Axel and Declerck, Thierry and Bredenkamp, Andrew}, TITLE = {Natural Language Understanding for Natural Language Interfaces}, YEAR = {1999}, BOOKTITLE = {Proceedings of the Procesamiento del Lenguaje Natural, September 8-10}, VOLUME = {25}, ADDRESS = {Lleida, Spain}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/sepln99_mel.ps.gz}, ABSTRACT = {In this paper we present the linguistic resources - the text handling and the linguistic processing modules - which have been developed for the MELISSA project using the ALEP platform.
In particular, we will see how generic (large scale) grammars involving deep linguistic analysis can be efficiently used for NL interfaces, and how a modularized design of the linguistic resources allows us to deal with the peculiarities of sub-languages, while at the same time keeping the resources as general as possible.}, ANNOTE = {COLIURL : Marimon:1999:NLU.pdf Marimon:1999:NLU.ps} } @PhdThesis{Mehl:1999, AUTHOR = {Mehl, Michael}, TITLE = {The Oz Virtual Machine - Records, Transients, and Deep Guards}, YEAR = {1999}, ADDRESS = {Saarbrücken}, SCHOOL = {Universität des Saarlandes}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/mehl-thesis.ps.gz}, ABSTRACT = {In this thesis we describe the design and implementation of a virtual machine LVM for the execution of Oz programs. Oz is a concurrent, dynamically typed, functional language with logic variables, futures, by-need synchronization, records, feature constraints, and deep guard conditionals. The LVM supports light-weight threads, first-class procedures, exception handling, transients as generalization of logic variables, futures, and constraint variables, records and open records, and multiple computation spaces to implement the deep guard conditional. We discuss the modular, open, and extensible design of the LVM. Techniques for the efficient implementation of the store on standard hardware are shown. The LVM subsumes well-known virtual machines for functional, logic, and imperative languages.}, ANNOTE = {COLIURL : Mehl:1999:OVM.pdf Mehl:1999:OVM.ps} } @InProceedings{Müller:1999, AUTHOR = {Müller, Stefan}, TITLE = {Restricting Discontinuity}, YEAR = {1999}, BOOKTITLE = {Proceedings of the 5th Natural Language Processing Pacific Rim Symposium 1999 (NLPRS '99), November 5-7}, PAGES = {85-90}, ADDRESS = {Beijing, China}, URL = {http://www.dfki.de/~stefan/PS/restricting.ps}, ANNOTE = {COLIURL : Muller:1999:RD.pdf Muller:1999:RD.ps} } @Article{Müller:1999_1, AUTHOR = {Müller, Stefan}, TITLE = {An HPSG-Analysis for Free Relative Clauses in German}, YEAR = {1999}, JOURNAL = {Grammars}, VOLUME = {2}, NUMBER = {1}, PAGES = {53-105}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/mueller99b.entry http://www.dfki.de/~stefan/PS/freeRel.ps}, ANNOTE = {COLIURL : Muller:1999:HAF.pdf Muller:1999:HAF.ps} } @Book{Müller:1999_2, AUTHOR = {Müller, Stefan}, TITLE = {Deutsche Syntax deklarativ. Head-Driven Phrase Structure Grammar für das Deutsche}, YEAR = {1999}, VOLUME = {394}, SERIES = {Linguistische Arbeiten}, ADDRESS = {Tübingen}, PUBLISHER = {Max Niemeyer Verlag}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/mueller99.entry} } @InProceedings{Müller:1999_3, AUTHOR = {Müller, Stefan}, TITLE = {Syntactic Properties of German Particle Verbs}, YEAR = {1999}, BOOKTITLE = {6th International Conference on Head-Driven Phrase Structure Grammar (HPSG '99), August 4-6}, PAGES = {83-87}, ADDRESS = {University of Edinburgh, Scotland} } @InProceedings{Müller:1999_4, AUTHOR = {Müller, Stefan}, TITLE = {Parsing of an HPSG Grammar for German: Word Order Domains and Discontinuous Constituents}, YEAR = {1999}, BOOKTITLE = {Proceedings of the 11. Jahrestagung der Gesellschaft für Linguistische DatenVerarbeitung. Multilinguale Corpora: Codierung, Strukturierung, Analyse}, EDITOR = {Gippert, Jost and Olivier, Peter}, ADDRESS = {Frankfurt a. 
M.}, PUBLISHER = {Enigma corporation}, URL = {http://titus.fkidg1.uni-frankfurt.de/curric/gldv99/paper/mueller/Muellerx.pdf}, ANNOTE = {COLIURL : Muller:1999:PHG.pdf} } @InProceedings{Müller:1999_5, AUTHOR = {Müller, Tobias}, TITLE = {Practical Investigation of Constraints with Graph Views}, YEAR = {1999}, BOOKTITLE = {International Workshop on Implementation of Declarative Languages (IDL'99), September 27-28}, EDITOR = {Sagonas, K. and Tarau, P.}, ADDRESS = {Paris, France}, URL = {http://www.cs.unt.edu/~idl99/Proceedings/ProceedingsIDL99.html ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/Mueller:99b.ps.gz}, ABSTRACT = {Combinatorial problems can be efficiently tackled with constraint programming systems. The main tasks of the development of a constraint-based application are modeling the problem at hand and subsequently implementing that model. Typically, erroneous behavior of a constraint-based application is caused by either the model or the implementation (or both of them). Current constraint programming systems provide limited debugging support for modeling and implementing a problem. This paper proposes the Constraint Investigator, an interactive tool for debugging the model and the implementation of a constraint-based application. In particular, the Investigator is targeted at problems like wrong, void, or partial solutions. A graph metaphor is used to reflect the constraints in the solver and to present them to the user. The paper shows that this metaphor is intuitive and that it scales up to real-life problem sizes. The Constraint Investigator has been implemented in Mozart Oz. It complements other constraint debugging tools as an interactive search tree visualizer, forming the base for an integrated constraint debugging environment.}, ANNOTE = {COLIURL : Muller:1999:PIC.pdf Muller:1999:PIC.ps} } @Article{Müller_Würtz:1999, AUTHOR = {Müller, Tobias and Würtz, Jörg}, TITLE = {Embedding Propagators in a Concurrent Constraint Language}, YEAR = {1999}, JOURNAL = {The Journal of Functional and Logic Programming}, NUMBER = {Special Issue 1}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/MuellerWuertz:99a.ps.gz}, ABSTRACT = {Solving large and hard discrete combinatorial problems often requires the design of new constraints. Current constraint systems focus on either high-level modeling or efficient implementation technology. While each approach lacks the advantages of the other one, this paper describes the combination of them in the high-level concurrent constraint language Oz. We describe an interface to Oz providing abstractions to program new efficient constraints in CPP, preserving the benefits of Oz for problem modeling. While constraints and the Oz runtime system are linked through the interface, and adequate interface abstractions are supplied to implement advanced algorithmic techniques. In particular, it provides the means to reflect the validity of a constraint and to control and inspect the state of a constraint. This allows the user to solve demanding combinatorial problems, such as hard scheduling problems. It is desirable to execute concurrent constraint programs in parallel to profit from multiprocessor architectures. 
We discuss how the proposed interface can be adapted to parallel execution, avoiding the recoding of constraint implementations for sequential solvers.}, NOTE = {Published on the Internet: http://mitpress.mit.edu/JFLP/, ISSN 1080--5230, MIT Press Journals, Five Cambridge Center, Cambridge, USA}, ANNOTE = {COLIURL : Muller:1999:EPC.pdf Muller:1999:EPC.ps} } @InCollection{Muskens:1999, AUTHOR = {Muskens, Reinhard}, TITLE = {Underspecified Semantics}, YEAR = {1999}, BOOKTITLE = {Reference and Anaphoric Relations}, VOLUME = {72}, PAGES = {311--338}, EDITOR = {Egli, U. and Heusinger, K.}, SERIES = {Studies in Linguistics and Philosophy}, ADDRESS = {Dordrecht}, PUBLISHER = {Kluwer Academic Publishers} } @InProceedings{Nederhof:1999, AUTHOR = {Nederhof, Mark-Jan}, TITLE = {Models of Tabulation for TAG Parsing}, YEAR = {1999}, BOOKTITLE = {Proceedings of the 6th Meeting on Mathematics of Language (MOL6), July 23-25}, PAGES = {143-158}, ADDRESS = {Orlando, Florida, USA}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/mol99.ps ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/mol99.entry}, ANNOTE = {COLIURL : Nederhof:1999:MTT.pdf Nederhof:1999:MTT.ps} } @Article{Nederhof:1999_1, AUTHOR = {Nederhof, Mark-Jan}, TITLE = {The Computational Complexity of the Correct-Prefix Property for TAGs}, YEAR = {1999}, JOURNAL = {Computational Linguistics}, VOLUME = {25}, NUMBER = {3}, PAGES = {345-360}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/nederhof99b.ps.gz ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/nederhof99b.entry}, ANNOTE = {COLIURL : Nederhof:1999:CCC.pdf Nederhof:1999:CCC.ps} } @InCollection{Nederhof:1999_2, AUTHOR = {Nederhof, Mark-Jan}, TITLE = {Efficient Generation of Random Sentences}, YEAR = {1999}, BOOKTITLE = {An Encyclopedia of Computer Science and Technology}, VOLUME = {41}, PAGES = {45-65}, PUBLISHER = {Marcel Dekker Verlag}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/nederhof99a.ps.gz ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/nederhof99a.entry}, ANNOTE = {COLIURL : Nederhof:1999:EGR.pdf Nederhof:1999:EGR.ps} } @InCollection{Nederhof_Bertsch:1999, AUTHOR = {Nederhof, Mark-Jan and Bertsch, Eberhard}, TITLE = {An Innovative Finite State Concept for Recognition and Parsing of Context-Free Languages}, YEAR = {1999}, BOOKTITLE = {Extended Finite State Models of Language}, PAGES = {226-243}, EDITOR = {Kornai, A.}, ADDRESS = {Stanford}, PUBLISHER = {Cambridge University Press}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/nebe99.ps.gz ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/nebe99.entry}, ANNOTE = {COLIURL : Nederhof:1999:IFS.pdf Nederhof:1999:IFS.ps} } @TechReport{Netter_Wegst:1999, AUTHOR = {Netter, Klaus and Wegst, Tillmann}, TITLE = {Project Update: DiET - Diagnostic and Evaluation Tools for Natural Language Application}, YEAR = {1999}, NUMBER = {8.2}, PAGES = {8-9}, ADDRESS = {Brighton, Sussex}, TYPE = {ELSNews}, INSTITUTION = {European Network in Language and Speech}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/kn-tw-elsnews99.ps ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/kn-tw-elsnews99.pdf}, ANNOTE = {COLIURL : Netter:1999:PUD.pdf Netter:1999:PUD.ps} } @InProceedings{Neumann_Declerck:1999, AUTHOR = {Neumann, Günter and Declerck, Thierry}, TITLE = {PARADIME Parametrizable Domain-Adaptive Information and Message Extraction}, YEAR = {1999}, BOOKTITLE = {Les journées d'Etude de l'ATALA, June 18-19}, ADDRESS = {Paris, France} } @TechReport{Neumann_Flickinger:1999, AUTHOR = {Neumann, Günter and Flickinger, Dan}, TITLE = {Learning Stochastic Lexicalized 
Tree Grammars from HPSG}, YEAR = {1999}, ADDRESS = {Saarbrücken}, TYPE = {Technical Report}, INSTITUTION = {DFKI}, URL = {http://www.dfki.de/~neumann/publications/new-ps/sltg.ps.gz}, ANNOTE = {COLIURL : Neumann:1999:LSL.pdf Neumann:1999:LSL.ps} } @InProceedings{Neumann_Schmeier:1999, AUTHOR = {Neumann, Günter and Schmeier, Sven}, TITLE = {Combining Shallow Text Processing and Machine Learning in Real World Applications}, YEAR = {1999}, BOOKTITLE = {Proceedings of the 16th International Joint Conference on Artificial Intelligence (IJCAI '99). Workshop on Machine Learning for Information Filtering, July 31 - August 6}, ADDRESS = {Stockholm, Sweden}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/SchmeierNeumann99.pdf http://www.dfki.de/~neumann/publications/new-ps/ijcai99-ws.pdf}, ANNOTE = {COLIURL : Neumann:1999:CST.pdf} } @TechReport{Niehren:1999, AUTHOR = {Niehren, Joachim}, TITLE = {Uniform Confluence in Concurrent Computation}, YEAR = {1999}, ADDRESS = {Saarbrücken}, TYPE = {Technical Report}, INSTITUTION = {Universität des Saarlandes, Programming Systems Lab}, URL = {http://www.ps.uni-sb.de/Papers/abstracts/Uniform:2000.html ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/Uniform-97.ps.gz}, ABSTRACT = {Indeterminism is typical of concurrent computation. If several concurrent actors compete for the same resource, then at most one of them may succeed, whereby the choice of the successful actor is indeterministic. As a consequence, the execution of a concurrent program may be nonconfluent. Even worse, most observables (termination, computational result, and time complexity) typically depend on the scheduling of actors created during program execution. This property contrasts concurrent programs with purely functional programs. A functional program is uniformly confluent in the sense that all its possible executions coincide modulo reordering of execution steps. In this paper, we investigate concurrent programs that are uniformly confluent and their relation to eager and lazy functional programs. We study uniform confluence in concurrent computation within the applicative core of the $\pi$-calculus, which is widely used in different models of concurrent programming (with interleaving semantics). In particular, the applicative core of the $\pi$-calculus serves as a kernel in foundations of concurrent constraint programming with first-class procedures (as provided by the programming language Oz). We model eager functional programming in the $\lambda$-calculus with weak call-by-value reduction and lazy functional programming in the call-by-need $\lambda$-calculus with standard reduction. As a measure of time complexity, we count application steps. We encode the $\lambda$-calculus with both of the above reduction strategies into the applicative core of the $\pi$-calculus and show that time complexity is preserved. Our correctness proofs employ a new technique based on uniform confluence and simulations. 
The strength of our technique is illustrated by proving a folk theorem, namely that the call-by-need complexity of a functional program is smaller than its call-by-value complexity.}, ANNOTE = {COLIURL : Niehren:1999:UCC.pdf Niehren:1999:UCC.ps} } @InProceedings{Niehren_et_al:1999, AUTHOR = {Niehren, Joachim and Müller, Martin and Talbot, Jean-Marc}, TITLE = {Entailment of Atomic Set Constraints is PSPACE-Complete}, YEAR = {1999}, BOOKTITLE = {14th Annual IEEE Symposium on Logic in Computer Science (LICS '99), July 2-5}, PAGES = {285-294}, ADDRESS = {Trento, Italy}, PUBLISHER = {IEEE Press}, URL = {www.ps.uni-sb.de/Papers/abstracts/atomic:98.html ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/atomic-lics-99.ps.gz}, ABSTRACT = {The complexity of set constraints has been extensively studied over the last years and was often found quite high. At the lower end of expressiveness, there are atomic set constraints which are conjunctions of inclusions t1 $subseteq$ t2 between first-order terms without set operators. It is well-known that satisfiability of atomic set constraints can be tested in cubic time. Also, entailment of atomic set constraints has been claimed decidable in polynomial time. We refute this claim. We show that entailment between atomic set constraints can express validity of quantified boolean formulas and is thus PSPACE hard. For infinite signatures, we also present a PSPACE-algorithm for solving atomic set constraints with negation. This proves that entailment of atomic set constraints is PSPACE-complete for infinite signatures. In case of finite signatures, this problem is even DEXPTIME-hard.}, ANNOTE = {COLIURL : Niehren:1999:EAS.pdf Niehren:1999:EAS.ps} } @TechReport{Niehren_Priesnitz:1999, AUTHOR = {Niehren, Joachim and Priesnitz, Tim}, TITLE = {Characterizing Subtype Entailment in Automata Theory}, YEAR = {1999}, ADDRESS = {Saarbrücken}, TYPE = {Technical Report}, INSTITUTION = {Universität des Saarlandes}, URL = {http://www.ps.uni-sb.de/Papers/abstracts/pauto.html ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/pauto.ps.gz}, ABSTRACT = {Subtype entailment is the entailment problem of subtype constraints for some type language. Understanding the algorithmic properties of subtype entailment is relevant to several subtype inference systems. For simple types, subtype entailment is coNP complete; when extended with recursive types it becomes PSPACE complete. Adding the least and greatest type renders subtyping non-structural. Whether non-structural subtype entailment is decidable is a prominent open problem. We characterize subtype entailment in automata theory. This yields a uniform proof method by which all known complexity results on subtype entailment in the literature can be derived. The main contribution of the paper is an equivalent characterization of non-structural subtype entailment in automata theory (by so called P-automata). On the one hand side, our characterization implies that several variants of non-structural subtype entailment are polynomial time equivalent (with or without contravariant function types or recursive types). This robustness result is new and nontrivial. 
On the other hand side, we believe that our characterization contributes an important and necessary step towards answering the open question on decidability of non-structural subtype entailment.}, ANNOTE = {COLIURL : Niehren:1999:CSE.pdf Niehren:1999:CSE.ps} } @InProceedings{Niehren_Priesnitz:1999_1, AUTHOR = {Niehren, Joachim and Priesnitz, Tim}, TITLE = {Entailment of Non-Structural Subtype Constraints}, YEAR = {1999}, BOOKTITLE = {Asian Computing Science Conference, December 10-12}, NUMBER = {1742}, PAGES = {251-265}, SERIES = {Lecture Notes in Computer Science}, ADDRESS = {Phuket, Thailand}, PUBLISHER = {Springer}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/SubTypeEntailment:99.ps.gz}, ABSTRACT = {Entailment of subtype constraints was introduced for constraint simplification in subtype inference systems. Designing an efficient algorithm for subtype entailment turned out to be surprisingly difficult. The situation was clarified by Rehof and Henglein who proved entailment of structural subtype constraints to be coNP-complete for simple types and PSPACE-complete for recursive types. For entailment of non-structural subtype constraints of both simple and recursive types they proved PSPACE-hardness and conjectured PSPACE-completeness but failed in finding a complete algorithm. In this paper, we investigate the source of complications and isolate a natural subproblem of non-structural subtype entailment that we prove PSPACE-complete. We conjecture (but this is left open) that the presented approach can be extended to the general case.}, ANNOTE = {COLIURL : Niehren:1999:ENS.pdf Niehren:1999:ENS.ps} } @TechReport{Oliva:1999, AUTHOR = {Oliva, Karel}, TITLE = {Formal Complexity of Word Order: Linguistic-theoretical Considerations}, YEAR = {1999}, MONTH = {June}, NUMBER = {110}, PAGES = {27}, ADDRESS = {Saarbrücken}, TYPE = {CLAUS-Report}, INSTITUTION = {Universität des Saarlandes}, URL = {ftp://ftp.coli.uni-sb.de/pub/coli/claus/claus110.ps}, ABSTRACT = {In this paper, we review the traditional term word order freedom and show that it can be understood in two ways: first, as the freedom of order of elements within a continuous head domain, and second, as the freedom of extraction out of a finite head domain, that is, as the freedom of making head domain(s) discontinuous. Further on, we concentrate on the more linguistical aspects of the latter understanding. In particular, we compare the pair of languages Czech and English, whose considerably different status as to the severity of constraints on ordering of elements within a continuous head domain of the finite verb is notorious, and aim this comparison at the possibilities which these languages offer for discontinuity of head domains. In this respect, we demonstrate that the two possible ways of understanding of word order freedom correlate, that is to say that English with its rather fixed order freedom within a continuos head domain also imposes severe constraints on extraction out of these domains, while Czech with its almost free order within a domain is also much more liberal as to extraction. The paper contains a longer discussion of this issue, together with a number of relevant examples from both languages.}, ANNOTE = {COLIURL : Oliva:1999:FCW.pdf Oliva:1999:FCW.ps} } @InProceedings{Oliva_et_al:1999, AUTHOR = {Oliva, Karel and Moshier, M. 
Andrew and Lehmann, Sabine}, TITLE = {Grammar Engineering for the Next Millenium}, YEAR = {1999}, BOOKTITLE = {Proceedings of the 5th Natural Language Processing Pacific Rim Symposium 1999 Closing the Millenium, November 5-7}, ADDRESS = {Beijing, China}, PUBLISHER = {Tsinghua University Press}, URL = {http://korterm.kaist.ac.kr/nlprs99/finalpaper/528-10.rtf}, ABSTRACT = {The prevailing current view of a (symbolic, computational) grammar is basically that of a set of rewriting rules using feature-structured categories. However, whenever such a grammar is aimed at the development of a real-world application project, at least two disadvantages become clear. First, it breaks with the traditional understanding of a grammar as a network of phenomena (such as agreement, subcategorization, etc.), thus impeding the (direct) incorporation of this knowledge into such grammars. Second, a realistic grammar is inevitably huge and simultaneously contains very complex interdependencies among rules. This makes any modularization of grammar engineering (aka division of labour within a team), and above all maintaining and debugging realistic grammars, a virtually impossible task. This paper presents an alternative view of formal (computational) grammars of natural language allowing for smooth modularization of the grammar-writing process and hence for meeting the pressing task of distributed grammar development. The examples of both problems and their solutions are related to grammars in HPSG style; the problems discussed are, however, in no way HPSG-specific; on the contrary, they concern any approach making use of feature-structured categories.}, ANNOTE = {COLIURL : Oliva:1999:GEN.pdf} } @InProceedings{Pinkal:1999, AUTHOR = {Pinkal, Manfred}, TITLE = {On semantic underspecification}, YEAR = {1999}, BOOKTITLE = {Proceedings of the 2nd International Workshop on Computational Semantics (IWCS 2), January 13-15}, EDITOR = {Bunt, Harry and Muskens, Reinhard}, ADDRESS = {Tilburg University, The Netherlands}, ABSTRACT = {Underspecification has become fashionable in computational semantics. In this chapter, I will try to give an idea of what semantic underspecification is, what it has been good for, and what the perspectives for future applications are. I will start with the inspection of specific phenomena and techniques which are usually associated with the notion of underspecification (in Sections 1 and 2, respectively). In Sect. 3, I will try to indicate the main motivations for using underspecification techniques. Then, I will point out one important use of the concept in some detail, i.e., direct reasoning with incomplete semantic information, discussing first the appropriate truth-conditional basis (Sect. 4), and second perspectives on efficient reasoning systems (Sect. 5). I will conclude with some remarks about the general status of the semantic underspecification concept in Sect. 6.} } @MastersThesis{Plaehn:1999, AUTHOR = {Plaehn, Oliver}, TITLE = {Probabilistic Parsing with Discontinuous Phrase Structure Grammar}, YEAR = {1999}, ADDRESS = {Saarbrücken}, SCHOOL = {Universität des Saarlandes}, URL = {https://www.coli.uni-saarland.de/~plaehn/papers/dt.ps}, ANNOTE = {COLIURL : Plaehn:1999:PPD.pdf Plaehn:1999:PPD.ps} } @InProceedings{Podelski_et_al:1999, AUTHOR = {Podelski, Andreas and Charatonik, Witold and Müller, Martin}, TITLE = {Set-Based Failure Analysis for Logic Programs and Concurrent Constraint Programs}, YEAR = {1999}, BOOKTITLE = {8th European Symposium on Programming (ESOP '99). 
Programming Languages and Systems, March 22-28}, NUMBER = {1576}, PAGES = {177-192}, EDITOR = {Swierstra, S. Doaitse}, SERIES = {Lecture Notes in Computer Science}, ADDRESS = {Amsterdam, The Netherlands}, PUBLISHER = {Springer}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/esop99.ps.gz}, ABSTRACT = {This paper presents the first approximation method of the finite-failure set of a logic program by set-based analysis. In a dual view, the method yields a type analysis for programs with ongoing behaviors (perpetual processes). Our technical contributions are (1) the semantical characterization of finite failure of logic programs over infinite trees and (2) the design and soundness proof of the first set-based analysis of logic programs with the greatest-model semantics. Finally, we exhibit the connection between finite failure and the inevitability of the 'inconsistent-store' error in fair executions of concurrent constraint programs where no process suspends forever. This indicates a potential application to error diagnosis for concurrent constraint programs.}, ANNOTE = {COLIURL : Podelski:1999:SBF.pdf Podelski:1999:SBF.ps} } @Article{Pützer_Barry:1999, AUTHOR = {Pützer, Manfred and Barry, William J.}, TITLE = {Soziophonetische Betrachtungen zu deutschen Dialekten in Lothringen (Frankreich)}, YEAR = {1999}, JOURNAL = {Folia Linguistica}, VOLUME = {32}, NUMBER = {3-4}, PAGES = {161-199} } @Article{Pützer_Just:1999, AUTHOR = {Pützer, Manfred and Just, Manfred}, TITLE = {Akustische und elektrophysiologische Stimmanalyse nach laserchirurgischer Larynxkarzinomresektion: Ein Fallstudie}, YEAR = {1999}, JOURNAL = {PHONUS}, VOLUME = {4}, PAGES = {103-121}, URL = {https://www.coli.uni-saarland.de/Phonetics/Research/PHONUS_research_reports/Phonus4/Puetzer_PHONUS4.ps.gz}, ABSTRACT = {Phonus 4, Institute of Phonetics, University of the Saarland, 1999, 103-121. In this paper the course of the therapy after minimally invasive laser surgery for a carcinoma of the vocal cord (T1) is evaluated using acoustic and electroglottographic methods. The evaluation includes preoperative, postoperative and post-rehabilitative conditions. With the help of frequency and amplitude perturbation parameters derived from the acoustic output, and parameters based on electroglottographic properties of single periods (contact and skewing quotient; jitter), respectively, we are able to provide statistical evidence for the effectiveness of the therapeutic measures. Comparisons with the tolerance values given by the producers of the analysis programs and with empirically derived variation ranges from a large sample of normal speakers allow us to quantify the effect. 
The course of the recovery, also described in this study, provides the anatomical and physiological basis to explain the functional improvement of phonation observed in the acoustic and electroglottographic correlates.}, ANNOTE = {COLIURL : Putzer:1999:AES.pdf} } @InProceedings{Ramírez Bustamante_et_al:1999, AUTHOR = {Ramírez Bustamante, Flora and Declerck, Thierry and Sánchez León, Fernando}, TITLE = {Integrated Set of Tools for Robust Text Processing}, YEAR = {1999}, BOOKTITLE = {Proceedings of the Conference Venezia per il Trattamento Automatico delle Lingue (VEXTAL)}, ADDRESS = {Venice, Italy}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/vextal99_ram.ps.gz}, ABSTRACT = {This paper describes a set of tools for the morpho-syntactic annotation of Spanish texts, based both on well-known public-domain tools (emerging from MULTEXT) and on proprietary technologies (such as Constraint Grammars). In addition, a complete set of new specific modules, ranging from morphological analyzers to form, typographical and morphosyntactic checkers, has been integrated into this NLP tool.}, ANNOTE = {COLIURL : Bustamante:1999:IST.pdf Bustamante:1999:IST.ps} } @InProceedings{Scheepers_et_al:1999, AUTHOR = {Scheepers, Christoph and Hemforth, Barbara and Konieczny, Lars}, TITLE = {Incremental Processing of German Verb-Final Constructions: Predicting the Verb's Minimum (!) Valency}, YEAR = {1999}, BOOKTITLE = {Proceedings of the 2nd International Conference on Cognitive Science (ICCS/JCSS99), July 27-30}, ADDRESS = {Tokyo, Japan} } @InCollection{Schröder:1999, AUTHOR = {Schröder, Marc}, TITLE = {Zur Machbarkeit von Synthese emotionaler Sprache ohne Modellierung der Stimmqualität}, YEAR = {1999}, BOOKTITLE = {Elektronische Sprachsignalverarbeitung}, PAGES = {222-229}, ADDRESS = {Görlitz}, URL = {http://www.dfki.de/~schroed/articles/schroeder1999a.pdf}, ABSTRACT = {The present study addresses the question of whether emotional speech can be modelled in concatenative speech synthesis without manipulating voice quality. One sentence was produced by three speakers with four emotions (anger, joy, fear and sadness) as well as in a neutral speaking style. The best-recognized of these natural emotional utterances were analysed acoustically (segment durations, segment energy, and F0 extrema) and re-created by means of copy synthesis. While some of the resulting synthetic stimuli were assigned to the intended emotion almost as reliably as the natural originals, for others recognition was lost completely. In an open perception test, a pronounced preference for the category 'disappointment' was found, which depended only to a limited degree on the stimulus.}, ANNOTE = {COLIURL : Schroder:1999:MSE.pdf} } @Article{Schröder:1999_1, AUTHOR = {Schröder, Marc}, TITLE = {Can Emotions be Synthesized without Controlling Voice Quality?}, YEAR = {1999}, JOURNAL = {PHONUS}, VOLUME = {4}, PAGES = {37-55}, URL = {http://www.dfki.de/~schroed/articles/schroeder1999b.pdf}, ABSTRACT = {The present study addresses the question of whether it is in principle feasible to convey emotion in synthesized speech using a restricted parameter set which can usually be controlled in concatenation-based synthesizers. Using copy synthesis, the prosodic structure of one sentence uttered with five emotional expressions (anger, joy, fear, sadness, and neutral) was transferred to synthetic stimuli. 
Perception tests show that for some synthetic stimuli, the high recognition rates for the corresponding natural stimuli are almost reproduced, while for other stimuli the emotional information is lost. In a free-association perception test, a tendency towards the perception of the unintended category disappointment was found that varied only to a limited extent across stimuli.}, ANNOTE = {COLIURL : Schroder:1999:CES.pdf} } @InProceedings{Schulte:1999, AUTHOR = {Schulte, Christian}, TITLE = {Comparing Trailing and Copying for Constraint Programming}, YEAR = {1999}, BOOKTITLE = {16th International Conference on Logic Programming (ICLP '99), November 29 - December 4}, PAGES = {275-289}, EDITOR = {De Schreye, D.}, ADDRESS = {Las Cruces, New Mexico, USA}, PUBLISHER = {MIT Press}, URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/copying.ps.gz}, ABSTRACT = {A central service of a constraint programming system is search. In almost all constraint programming systems search is based on trailing, which is well understood and known to be efficient. This paper compares trailing to copying. Copying offers more expressiveness as required by parallel and concurrent systems. However, little is known about how trailing compares to copying when it comes to implementation effort, runtime efficiency, and memory requirements. This paper discusses these issues. Execution speed of a copying-based system is shown to be competitive with state-of-the-art trailing-based systems. For the first time, a detailed analysis and comparison with respect to memory usage is made. It is shown how recomputation decreases memory requirements, which can be prohibitive for large problems with copying alone. The paper introduces an adaptive recomputation strategy that is shown to speed up search while keeping memory consumption low. It is demonstrated that copying with recomputation outperforms trailing on large problems with respect to both space and time.}, ANNOTE = {COLIURL : Schulte:1999:CTC.pdf Schulte:1999:CTC.ps} } @InProceedings{Siegel:1999, AUTHOR = {Siegel, Melanie}, TITLE = {The Syntactic Processing of Particles in Japanese Spoken Language}, YEAR = {1999}, BOOKTITLE = {Proceedings of the 13th Pacific Asia Conference on Language, Information and Computation (PACLIC 13), February 10-12}, EDITOR = {Wang, Jhing-Fa and Wu, Chung-Hsien}, ADDRESS = {Taipei, Taiwan}, URL = {http://www.dfki.de/~siegel/paclic99.ps.gz}, ABSTRACT = {Particles fulfill several distinct central roles in the Japanese language. They can mark arguments as well as adjuncts, and can be functional or have semantic functions. 
There is, however, no straightforward matching from particles to functions, as, e.g., ga can mark the subject, the object or an adjunct of a sentence. Particles can cooccur. Verbal arguments that could be identified by particles can be eliminated in the Japanese sentence. And finally, in spoken language particles are often omitted. A proper treatment of particles is thus necessary to make an analysis of Japanese sentences possible. Our treatment is based on an empirical investigation of 800 dialogues. We set up a type hierarchy of particles motivated by their subcategorizational and modificational behaviour. This type hierarchy is part of the Japanese syntax in VERBMOBIL.}, ANNOTE = {COLIURL : Siegel:1999:SPP.pdf Siegel:1999:SPP.ps} } @PhdThesis{Skut:1999, AUTHOR = {Skut, Wojciech}, TITLE = {Partial Parsing for Corpus Annotation and Text Processing}, YEAR = {1999}, ADDRESS = {Saarbrücken}, SCHOOL = {Universität des Saarlandes} } @InProceedings{Staab_et_al:1999, AUTHOR = {Staab, Steffen and Braun, Christian and Bruder, Ilvio and Düsterhöft, Antje and Heuer, Andreas and Klettke, Meike and Neumann, Günter and Prager, Bernd and Petzel, Jan and Schnurr, Hans-Peter and Studer, Rudi and Uszkoreit, Hans and Wrenger, Burkhard}, TITLE = {GETESS - Searching the Web Exploiting German Texts}, YEAR = {1999}, BOOKTITLE = {Proceedings of the 3rd International Workshop on Cooperative Information Agents (CIA'99), July 31 - August 2}, NUMBER = {1652}, PAGES = {113-124}, EDITOR = {Klusch, Matthias and Shehory, Onn and Weiß, Gerhard}, SERIES = {Lecture Notes in Artificial Intelligence}, ADDRESS = {Uppsala, Sweden}, PUBLISHER = {Springer}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/cia99.ps.gz}, ABSTRACT = {We present an intelligent information agent that uses semantic methods and natural language processing capabilities in order to gather tourist information from the WWW and present it to the human user in an intuitive, user-friendly way. The information agent is designed such that as background knowledge and linguistic coverage increase, its benefits improve, while it guarantees state-of-the-art information and database retrieval capabilities as its bottom line.}, ANNOTE = {COLIURL : Staab:1999:GSWa.pdf Staab:1999:GSWa.ps} } @InProceedings{Staab_et_al:1999_1, AUTHOR = {Staab, Steffen and Braun, Christian and Bruder, Ilvio and Düsterhöft, Antje and Heuer, Andreas and Klettke, Meike and Neumann, Günter and Prager, Bernd and Petzel, Jan and Schnurr, Hans-Peter and Studer, Rudi and Uszkoreit, Hans and Wrenger, Burkhard}, TITLE = {A System for Facilitating and Enhancing Web Search}, YEAR = {1999}, BOOKTITLE = {Proceedings of the 5th International Work-Conference on Artificial and Natural Neural Networks (IWANN'99), June 2-4}, ADDRESS = {Alicante, Spain}, PUBLISHER = {Springer}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/iwann99.ps.gz}, ABSTRACT = {We present a system that uses semantic methods and natural language processing capabilities in order to provide comprehensive and easy-to-use access to tourist information in the WWW. 
The system is designed such that as background knowledge and linguistic coverage increase, the benefits of the system improve, while it guarantees state-of-the-art information and database retrieval capabilities as its bottom line.}, ANNOTE = {COLIURL : Staab:1999:SFEa.pdf Staab:1999:SFEa.ps} } @InProceedings{Staab_et_al:1999_2, AUTHOR = {Staab, Steffen and Braun, Christian and Bruder, Ilvio and Düsterhöft, Antje and Heuer, Andreas and Klettke, Meike and Neumann, Günter and Prager, Bernd and Pretzel, Jan and Schnurr, Hans-Peter and Studer, Rudi and Uszkoreit, Hans and Wrenger, Burkhard}, TITLE = {A System for Facilitating and Enhancing Web Search}, YEAR = {1999}, BOOKTITLE = {5th International Work-Conference on Artificial and Natural Neural Networks (IWANN '99), June 2-4}, ADDRESS = {Alicante, Spain}, PUBLISHER = {Springer}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/iwann99.ps.gz ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/iwann99.entry}, ANNOTE = {COLIURL : Staab:1999:SFE.pdf Staab:1999:SFE.ps} } @InProceedings{Staab_et_al:1999_3, AUTHOR = {Staab, Steffen and Braun, Christian and Bruder, Ilvio and Düsterhöft, Antje and Heuer, Andreas and Klettke, Meike and Neumann, Günter and Prager, Bernd and Pretzel, Jan and Schnurr, Hans-Peter and Studer, Rudi and Uszkoreit, Hans and Wrenger, Burkhard}, TITLE = {GETESS - Searching the Web Exploiting German Texts}, YEAR = {1999}, BOOKTITLE = {3rd International Workshop on Cooperative Information Agents (CIA '99), July 31 - August 2}, NUMBER = {1652}, PAGES = {113-124}, EDITOR = {Klusch, Matthias and Shehory, Onn and Weiss, G.}, SERIES = {Lecture Notes in Artificial Intelligence}, ADDRESS = {Uppsala, Sweden}, PUBLISHER = {Springer}, URL = {ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/cia99.ps.gz ftp://lt-ftp.dfki.uni-sb.de/pub/papers/local/cia99.entry}, ANNOTE = {COLIURL : Staab:1999:GSW.pdf Staab:1999:GSW.ps} } @Book{Rudolf_et_al:1999, TITLE = {Memorandum zu einem Institut für Evolutionswissenschaft. Suchprozesse für innovative Fragestellungen in der Wissenschaft}, YEAR = {1999}, VOLUME = {1}, EDITOR = {Stichweh, Rudolf and Reyer, Heinz-Ulrich and Uszkoreit, Hans}, ADDRESS = {Bad Homburg}, PUBLISHER = {Werner Reimers Stiftung}, URL = {http://www.reimers-stiftung.de/suchprozesse/heft1.doc}, ABSTRACT = {The authors identify two major gaps in the current research landscape: 1. A transdisciplinary general theory of evolution is missing. 2. Lateral links between the disciplines involved are missing. All of the disciplines mentioned stand in more or less close relation to evolutionary biology. The research approaches in the individual disciplines are, however, isolated from one another, so that cross-connections are lacking and, for example, the mutual stimulation between evolutionary theories of culture and evolutionary economics is minimal. The proposed Institute for Evolutionary Science is therefore intended 1. to develop a transdisciplinary general theory of evolution or general theory of selection, and 2. 
to advance theory and model building in the individual disciplines, in order to optimize the opportunities for stimulation and learning offered by interdisciplinary contact.}, ANNOTE = {COLIURL : Stichweh:1999:MEI.pdf} } @MastersThesis{Striegnitz:1999, AUTHOR = {Striegnitz, Kristina}, TITLE = {On Modeling Meaning Shifts by Relaxing Underspecified Semantic Representations}, YEAR = {1999}, ADDRESS = {Saarbrücken}, SCHOOL = {Universität des Saarlandes}, TYPE = {Diplomarbeit}, URL = {https://www.coli.uni-saarland.de/cl/projects/chorus/papers/kris99.html}, ABSTRACT = {The context in which a word appears in natural language often influences its interpretation in such a way that the base meaning of the word is changed or made more specific. Polysemy and metonymy are examples of this phenomenon. These meaning shifts of words can be modeled by augmenting the semantic representation of a natural language utterance with the information that is missing to make the shift in meaning explicit. This information can be provided by linguistic or non-linguistic sources or an interaction of both. Recently, Egg (1999) has suggested an account of meaning shifts which exploits underspecification methods to yield a monotonic augmentation process. The main idea is to have semantic construction derive a sufficiently relaxed (i.e. made less specific) semantic representation, so that adding the missing information is simply further specification of this representation. This thesis will examine a treatment of meaning shifts due to systematic polysemy or metonymy within Egg's framework. We will present a syntax/semantics interface which derives appropriately relaxed semantic representations. To account for meaning shifts these representations can be augmented monotonically with additional information. We will point out a potential problem for this approach: making underspecified semantic representations less specific may cause overgeneration. However, as we will show, for our applications relaxation is safe, i.e. there is no danger of overgeneration. The underspecification formalism that we will use throughout this thesis is in the class of tree description languages subsuming dominance constraints. We will distinguish a novel class of subconstraints with a certain structure which powerfully support the type of inferences on dominance and disjointness which we have to make for proving safety of relaxation.}, ANNOTE = {COLIURL : Striegnitz:1999:MMS.pdf Striegnitz:1999:MMS.ps} } @Article{Trouvain:1999, AUTHOR = {Trouvain, Jürgen}, TITLE = {Phonological Aspects of Reading Rate Strategies}, YEAR = {1999}, JOURNAL = {PHONUS}, VOLUME = {4}, PAGES = {15-35}, ABSTRACT = {This paper deals with the effect of tempo on phonological structure. In two production experiments, German speakers were asked to read texts at three self-selected rates, normal, fast, and slow. Different speaker strategies were inspected in terms of pausing, phrasing, pitch accent structure and segmental reductions. The first aim of the study is to describe the reorganisation of the phonological structure as a function of the three speech rate categories. The second goal is to discuss the strategies used in speaking faster and slower than normal, considering in particular the homogeneity among speakers and the symmetry within speakers. The differences found between and within speakers provide a basis for modelling individual tempo profiles at the phonological level, which could be exploited e.g. 
for the synthesis of individual voices and speaking styles.}, ANNOTE = {COLIURL : Trouvain:1999:PAR.pdf Trouvain:1999:PAR.ps} } @InProceedings{Trouvain_Grice:1999, AUTHOR = {Trouvain, Jürgen and Grice, Martine}, TITLE = {The Effect of Tempo on Prosodic Structure}, YEAR = {1999}, BOOKTITLE = {14th International Congress of Phonetic Sciences (ICPhS), August 1-7}, PAGES = {1067-1070}, ADDRESS = {San Francisco, USA}, ABSTRACT = {This study investigates the effect of tempo on prosodic structuring at both temporal and melodic levels. Readings of a German text are examined to ascertain to what extent changes in pausing, prosodic phrasing, pitch accent structure and F0 realisations contribute towards strategies for speaking at a faster or slower speed than normal. Furthermore, speeding-up strategies are compared to those for slowing down, to investigate how far the speakers' behaviour is symmetrical with respect to each of the parameters examined.}, ANNOTE = {COLIURL : Trouvain:1999:ETP.pdf Trouvain:1999:ETP.ps} } @InCollection{Uszkoreit:1999, AUTHOR = {Uszkoreit, Hans}, TITLE = {Sprachtechnologie für die Wissensgesellschaft: Herausforderungen und Chancen für die Computerlinguistik und die theoretische Sprachwissenschaft}, YEAR = {1999}, BOOKTITLE = {Geisteswissenschaften und Innovationen}, PAGES = {137-174}, EDITOR = {Meyer-Krahmer, Frieder and Lange, Siegfried}, PUBLISHER = {Physica Verlag} } @Proceedings{Hans_et_al:1999, TITLE = {Linguistically Interpreted Corpora. Proceedings of the Workshop LINC-1999 at the 9th Conference of the European Chapter of the Association of Computational Linguistics}, YEAR = {1999}, EDITOR = {Uszkoreit, Hans and Brants, Thorsten and Krenn, Brigitte}, ADDRESS = {Bergen, Norway} } @Article{van Noord_et_al:1999, AUTHOR = {van Noord, Gertjan and Bouma, Gosse and Koeling, Rob and Nederhof, Mark-Jan}, TITLE = {Robust Grammatical Analysis for Spoken Dialogue Systems}, YEAR = {1999}, JOURNAL = {Natural Language Engineering}, VOLUME = {5}, NUMBER = {1}, PAGES = {45-93}, URL = {http://odur.let.rug.nl/~vannoord/papers/nle/}, ABSTRACT = {We argue that grammatical analysis is a viable alternative to concept spotting for processing spoken input in a practical spoken dialogue system. We discuss the structure of the grammar, and a model for robust parsing which combines linguistic sources of information and statistical sources of information. We discuss test results suggesting that grammatical processing allows fast and accurate processing of spoken input.} } @InCollection{Villiger_et_al:1999, AUTHOR = {Villiger, Claudia and Rothkegel, Annely and Jakobs, Eva-Maria}, TITLE = {Das versteht kein Mensch ... Verständliche Gestaltung von Hilfesystemen für Softwareprogramme}, YEAR = {1999}, BOOKTITLE = {Textproduktion. HyperText, Text, KonText}, NUMBER = {5}, PAGES = {217-233}, EDITOR = {Jakobs, Eva-Maria and Knorr, Dagmar and Pogner, Karl-Heinz}, SERIES = {Textproduktion und Medium}, ADDRESS = {Frankfurt a. M}, PUBLISHER = {Peter Lang}, ABSTRACT = {Word processing programs generally require explanation. In this contribution we examine how online help systems support users in solving tasks with word processing software, which problems arise when using online help, and what causes them. 
The argumentation is based on a study with professional and semi-professional users and concludes with considerations on the comprehensible design of help systems.} } @InProceedings{Xu:1999, AUTHOR = {Xu, Hui}, TITLE = {English-Style and Chinese-Style Topic: A Uniform Semantic Analysis}, YEAR = {1999}, BOOKTITLE = {13th Pacific Asia Conference on Language, Information and Computation (PACLIC 13), February 10-12}, EDITOR = {Wang, Jhing-Fa and Wu, Chung-Hsien}, ADDRESS = {Taipei, Taiwan} } @InProceedings{Xu:1999_1, AUTHOR = {Xu, Hui}, TITLE = {DRT-Analysis for Topic-Comment Constructions in Chinese}, YEAR = {1999}, BOOKTITLE = {3rd International Workshop on Computational Semantics (IWCS-3), January 13-15}, EDITOR = {Bunt, Harry}, ADDRESS = {Tilburg, The Netherlands}, URL = {https://www.coli.uni-saarland.de/~xu/iwcs_p.ps}, ANNOTE = {COLIURL : Xu:1999:DAT.pdf Xu:1999:DAT.ps} } @InCollection{Hajicova_Kruijff-Korbayova:1999, AUTHOR = {Hajicová, Eva and Korbayova, Ivana}, TITLE = {On the notion of topic}, YEAR = {1999}, VOLUME = {3}, PAGES = {225-236}, EDITOR = {Hajicová, Eva and Hoskovec, Tomás and Leka, Oldich and Sgall, Petr and Skoumalová, Zdena}, ADDRESS = {Amsterdam, the Netherlands} } @InCollection{Hajicova_et_al:1999_1, AUTHOR = {Hajicová, Eva and Korbayova, Ivana and Sgall, Petr}, TITLE = {Prague Dependency Treebank: Restoration of Deletions}, YEAR = {1999}, BOOKTITLE = {Text, Speech and Dialogue - Second International Workshop, TSD'99, Plzen, Czech Republic, September 1999}, VOLUME = {1692}, PAGES = {44-49}, EDITOR = {Matousek, Václav and Mautner, Pavel and Ocelíková, Jana and Sojka, Petr}, ADDRESS = {Berlin}, PUBLISHER = {Springer}, URL = {http://shadow.ms.mff.cuni.cz/pdt/Corpora/PDT_1.0/References/tsd99-deletion.pdf} } @InCollection{Kruijff_Kruijff-Korbayova:1999_1, AUTHOR = {Kruijff, Geert-Jan M. and Kruijff-Korbayova, Ivana}, TITLE = {Text structuring in a multilingual system for generation of instructions}, YEAR = {1999}, BOOKTITLE = {Proceedings of the Conference on Text, Speech and Dialogue (TSD'99), Marianske Lazne, Czech Republic}, PAGES = {89-94}, EDITOR = {Matousek, Václav and Mautner, Pavel and Ocelíková, Jana and Sojka, Petr}, URL = {https://www.coli.uni-saarland.de/~korbay/Publications/tsd99-ts.ps.gz} } @InCollection{Kruijff-Korbayova_Kruijff:1999_2, AUTHOR = {Kruijff-Korbayova, Ivana and Kruijff, Geert-Jan M.}, TITLE = {Handling Word Order in a multilingual system for generation of instructions}, YEAR = {1999}, BOOKTITLE = {Proceedings of the Conference on Text, Speech and Dialogue (TSD'99), Marianske Lazne, Czech Republic}, PAGES = {83-88}, EDITOR = {Matousek, Václav and Mautner, Pavel and Ocelíková, Jana and Sojka, Petr}, PUBLISHER = {Springer-Verlag}, URL = {https://www.coli.uni-saarland.de/~korbay/Publications/tsd99-wo.ps.gz} } @TechReport{Adonova_et_al:1999, AUTHOR = {Adonova, Elena and Bateman, John and Gromova, Nevena and Hartley, Anthony and Kruijff, Geert-Jan M. 
and Kruijff-Korbayova, Ivana and Sharoff, Serge and Skoumalová, Hana and Sokolova, Lena and Staykova, Kamenka and Teich, Elke}, TITLE = {Formal specification of extended grammar models}, YEAR = {1999}, TYPE = {AGILE project deliverable LSPEC2}, INSTITUTION = {ITRI, University of Brighton, UK}, URL = {http://ufal.mff.cuni.cz/~agile/reports.html} } @InProceedings{Weber:1999, AUTHOR = {Weber, Andrea}, TITLE = {Help or hindrance: How violation of different assimilation rules affects spoken-language processing.}, YEAR = {1999}, BOOKTITLE = {137th meeting of the Acoustical Society of America}, ADDRESS = {Berlin, Germany} } @Article{ICM1999, AUTHOR = {Ishizaki, Masato and Crocker, Matthew W. and Mellish, Chris}, TITLE = {Exploring Mixed-Initiative Dialogue Using Computer Dialogue Simulation}, YEAR = {1999}, JOURNAL = {Journal of User-Modeling and User-Adapted Interaction}, NUMBER = {1/2}, PAGES = {79-91} } @InBook{C1999, AUTHOR = {Crocker, Matthew W.}, TITLE = {Mechanisms for Sentence Processing}, YEAR = {1999}, BOOKTITLE = {Language Processing}, EDITOR = {Garrod, Simon and Pickering, Martin J.}, ADDRESS = {London, UK}, PUBLISHER = {Psychology Press} } @Article{SPC1999, AUTHOR = {Sturt, Patrick and Pickering, Martin J. and Crocker, Matthew W.}, TITLE = {Structural Change and Reanalysis Difficulty in Language Comprehension}, YEAR = {1999}, JOURNAL = {Journal of Memory and Language}, VOLUME = {40}, NUMBER = {1}, PAGES = {136-150} } @InBook{CCP1999, AUTHOR = {Chater, Nicholas and Crocker, Matthew W. and Pickering, Martin J.}, TITLE = {The Rational Analysis of Inquiry: The Case for Parsing}, YEAR = {1999}, BOOKTITLE = {Rational Models of Cognition}, EDITOR = {Chater, Nicholas and Oaksford, Mike}, ADDRESS = {Oxford}, PUBLISHER = {Oxford University Press} }