

%
% GENERATED FROM https://www.coli.uni-saarland.de
%    by   : anonymous
%    IP   : coli2006.lst.uni-saarland.de
%    at   : Mon, 05 Feb 2024 15:43:17 +0100 GMT
%    
% Selection : Author: Christian_Schulte
%




@TechReport{Duchier_et_al:1998,
      AUTHOR = {Duchier, Denys and Kornstaedt, Leif and Schulte, Christian and Smolka, Gert},
      TITLE = {A Higher-Order Module Discipline with Separate Compilation, Dynamic Linking, and Pickling},
      YEAR = {1998},
      ADDRESS = {Saarbr{\"u}cken},
      TYPE = {Technical Report},
      INSTITUTION = {Programming Systems Lab, DFKI and Universit{\"a}t des Saarlandes},
      URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/modules-98.ps.gz},
      ABSTRACT = {We present a higher-order module discipline with separate compilation and concurrent dynamic linking. Based on first-order modules one can program security policies for systems that link modules from untrusted locations (e.g., Java). We introduce a pickling operation that writes persistent clones of volatile, possibly higher-order data structures on the file system. Our pickling operation respects lexical binding. Our module discipline is based on functors, which are annotated functions that are applied to modules and return modules. Pickled computed functors can be used interchangeably with compiled functors. In contrast to compiled functors, pickled computed functors can carry computed data structures with them, which has significant practical applications.},
      ANNOTE = {COLIURL : Duchier:1998:HOM.pdf Duchier:1998:HOM.ps}
}

@Article{Haridi_et_al:1998,
      AUTHOR = {Haridi, Seif and Van Roy, Peter and Brand, Per and Schulte, Christian},
      TITLE = {Programming Languages for Distributed Applications},
      YEAR = {1998},
      JOURNAL = {New Generation Computing},
      VOLUME = {16},
      NUMBER = {3},
      PAGES = {223--261},
      URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/ngc98.ps.gz},
      ABSTRACT = {Much progress has been made in distributed computing in the areas of distribution structure, open computing, fault tolerance, and security. Yet, writing distributed applications remains difficult because the programmer has to manage models of these areas explicitly. A major challenge is to integrate the four models into a coherent development platform. Such a platform should make it possible to cleanly separate an application's functionality from the other four concerns. Concurrent constraint programming, an evolution of concurrent logic programming, has both the expressiveness and the formal foundation needed to attempt this integration. As a first step, we have designed and built a platform that separates an application's functionality from its distribution structure. We have prototyped several collaborative tools with this platform, including a shared graphic editor whose design is presented in detail. The platform efficiently implements Distributed Oz, which extends the Oz language with constructs to express the distribution structure and with basic primitives for open computing, failure detection and handling, and resource control. Oz appears to the programmer as a concurrent object-oriented language with dataflow synchronization. Oz is based on a higher-order, state-aware, concurrent constraint computation model.},
      ANNOTE = {COLIURL : Haridi:1998:PLD.pdf Haridi:1998:PLD.ps}
}

@TechReport{Mehl_et_al:1998,
      AUTHOR = {Mehl, Michael and Schulte, Christian and Smolka, Gert},
      TITLE = {Futures and By-need Synchronization},
      YEAR = {1998},
      MONTH = may,
      ADDRESS = {Saarbr{\"u}cken},
      TYPE = {Draft},
      INSTITUTION = {DFKI and Universit{\"a}t des Saarlandes},
      URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/oz-futures.ps.gz},
      ABSTRACT = {We propose a conservative extension of Oz that adds futures and by-need synchronization. Futures are read-only views of logic variables that make it possible to statically limit the scope in which a variable can be constrained. For instance, one can express with futures safe streams that cannot be assigned by their readers. By-need synchronization makes it possible to synchronize a thread on the event that a thread blocks on a future. It is used to express dynamic linking and lazy functions.}
}

@InProceedings{Ng_et_al:2000,
      AUTHOR = {Ng, Ka Boon and Choi, Chiu Wo and Henz, Martin and M{\"u}ller, Tobias},
      TITLE = {{GIFT}: A Generic Interface for Reusing Filtering Algorithms},
      YEAR = {2000},
      BOOKTITLE = {Workshop on Techniques for Implementing Constraint Programming Systems (TRICS), September},
      PAGES = {86--100},
      EDITOR = {Beldiceanu, N. and Harvey, W. and Henz, Martin and Laburthe, F. and Monfroy, E. and M{\"u}ller, Tobias and Perron, L. and Schulte, Christian},
      ADDRESS = {Singapore},
      URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/KaboonChoiHenzMueller00a.ps.gz},
      ABSTRACT = {Many different constraint programming (CP) systems exist today. For each CP system, there are many different filtering algorithms. Researchers and developers usually choose a CP system of their choice to implement their filtering algorithms. To use these filtering algorithms on another system, we have to port the code over. This situation is clearly not desirable. In this paper, we propose a generic C++ interface for writing filtering algorithms called GIFT (Generic Interface for FilTers). By providing the generic interface on different CP systems, we can reuse any filtering algorithms easily. A case study on reusing scheduling filtering algorithms between Mozart and Figaro further highlights the feasibility of this approach.},
      ANNOTE = {COLIURL : Ng:2000:GGI.pdf Ng:2000:GGI.ps}
}

@InProceedings{Schulte:1997,
      AUTHOR = {Schulte, Christian},
      TITLE = {Programming Constraint Inference Engines},
      YEAR = {1997},
      BOOKTITLE = {3rd International Conference on Principles and Practice of Constraint Programming (CP '97), October 29 - November 1},
      VOLUME = {1330},
      PAGES = {519--533},
      EDITOR = {Smolka, Gert},
      SERIES = {Lecture Notes in Computer Science},
      ADDRESS = {Schlo{\ss} Hagenberg, Austria},
      PUBLISHER = {Springer},
      URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/Engines.ps.gz},
      ABSTRACT = {Existing constraint programming systems offer a fixed set of inference engines implementing search strategies such as single, all, and best solution search. This is unfortunate, since new engines cannot be integrated by the user. The paper presents first-class computation spaces as abstractions with which the user can program inference engines at a high level. Using computation spaces, the paper covers several inference engines ranging from standard search strategies to techniques new to constraint programming, including limited discrepancy search, visual search, and saturation. Saturation is an inference method for tautology-checking used in industrial practice. Computation spaces have shown their practicability in the constraint programming system Oz.},
      ANNOTE = {COLIURL : Schulte:1997:PCI.pdf Schulte:1997:PCI.ps}
}

@InProceedings{Schulte:1997_1,
      AUTHOR = {Schulte, Christian},
      TITLE = {{Oz} Explorer: A Visual Constraint Programming Tool},
      YEAR = {1997},
      BOOKTITLE = {14th International Conference on Logic Programming (ICLP '97), June},
      PAGES = {286--300},
      EDITOR = {Naish, L.},
      ADDRESS = {Leuven, Belgium},
      PUBLISHER = {MIT Press},
      URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/Explorer.ps.gz},
      ABSTRACT = {This paper describes the Oz Explorer and its implementation. The Explorer is a visual constraint programming tool intended to support the development of constraint programs. It uses the search tree of a constraint problem as its central metaphor. Exploration and visualization of the search tree are user-driven and interactive. The constraints of any node in the tree are available first-class: predefined or user-defined procedures can be used to display or analyze them. The Explorer is a fast and memory efficient tool intended for the development of real-world constraint programs. The Explorer is implemented in Oz using first-class computation spaces. There is no fixed search strategy in Oz. Instead, first-class computation spaces allow to program search engines. The Explorer is one particular example of a user-guided search engine. The use of recomputation to trade space for time makes it possible to solve large real-world problems, which would use too much memory otherwise.},
      ANNOTE = {COLIURL : Schulte:1997:OEV.pdf Schulte:1997:OEV.ps}
}

@InProceedings{Schulte:1999,
      AUTHOR = {Schulte, Christian},
      TITLE = {Comparing Trailing and Copying for Constraint Programming},
      YEAR = {1999},
      BOOKTITLE = {16th International Conference on Logic Programming (ICLP '99), November 29 - December 4},
      PAGES = {275--289},
      EDITOR = {De Schreye, D.},
      ADDRESS = {Las Cruces, New Mexico, USA},
      PUBLISHER = {MIT Press},
      URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/copying.ps.gz},
      ABSTRACT = {A central service of a constraint programming system is search. In almost all constraint programming systems search is based on trailing, which is well understood and known to be efficient. This paper compares trailing to copying. Copying offers more expressiveness as required by parallel and concurrent systems. However, little is known how trailing compares to copying as it comes to implementation effort, runtime efficiency, and memory requirements. This paper discusses these issues. Execution speed of a copying-based system is shown to be competitive with state-of-the-art trailing-based systems. For the first time, a detailed analysis and comparison with respect to memory usage is made. It is shown how recomputation decreases memory requirements which can be prohibitive for large problems with copying alone. The paper introduces an adaptive recomputation strategy that is shown to speedup search while keeping memory consumption low. It is demonstrated that copying with recomputation outperforms trailing on large problems with respect to both space and time.},
      ANNOTE = {COLIURL : Schulte:1999:CTC.pdf Schulte:1999:CTC.ps}
}

@InProceedings{Schulte:2000,
      AUTHOR = {Schulte, Christian},
      TITLE = {Programming Deep Concurrent Constraint Combinators},
      YEAR = {2000},
      BOOKTITLE = {Practical Aspects of Declarative Languages, Second International Workshop, PADL 2000, January 17-18},
      VOLUME = {1753},
      PAGES = {215--229},
      EDITOR = {Pontelli, E. and Santos Costa, V.},
      SERIES = {Lecture Notes in Computer Science},
      ADDRESS = {Boston, Massachusetts, USA},
      PUBLISHER = {Springer},
      URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/combinators.ps.gz},
      ABSTRACT = {Constraint combination methods are essential for a flexible constraint programming system. This paper presents deep concurrent constraint combinators based on computation spaces as combination mechanism. It introduces primitives and techniques needed to program constraint combinators from computation spaces. The paper applies computation spaces to a broad range of combinators: negation, generalized reification, disjunction, and implication. Even though computation spaces have been conceived in the context of Oz, they are mainly programming language independent. This point is stressed by discussing them here in the context of Standard ML with concurrency features.},
      ANNOTE = {COLIURL : Schulte:2000:PDC.pdf Schulte:2000:PDC.ps}
}

@TechReport{Schulte:2000_1,
      AUTHOR = {Schulte, Christian},
      TITLE = {Parallel Search Made Simple},
      YEAR = {2000},
      MONTH = sep,
      NUMBER = {TRA9/00},
      ADDRESS = {Singapore},
      TYPE = {Technical Report},
      INSTITUTION = {National University of Singapore, School of Computing},
      URL = {ftp://ftp.ps.uni-sb.de/pub/papers/ProgrammingSysLab/par-trics.ps.gz},
      ABSTRACT = {Search in constraint programming is a time consuming task. Search can be speeded up by exploring subtrees of a search tree in parallel. This paper presents distributed search engines that achieve parallelism by distribution across networked computers. The main point of the paper is a simple design of the parallel search engine. Simplicity comes as an immediate consequence of clearly separating search, concurrency, and distribution. The obtained distributed search engines are simple yet offer substantial speedup on standard network computers.},
      ANNOTE = {COLIURL : Schulte:2000:PSM.pdf Schulte:2000:PSM.ps}
}

