% publications.bib -- BibTeX bibliography database (378 lines, 22.3 KB in original listing)
% Encoding: US-ASCII
@Article{bizon2014variant,
  Title = {Variant calling in low-coverage whole genome sequencing of a {Native American} population sample},
  Author = {Bizon, Chris and Spiegel, Michael and Chasse, Scott A. and Gizer, Ian R. and Li, Yun and Malc, Ewa P. and Mieczkowski, Piotr A. and Sailsbery, Josh K. and Wang, Xiaoshu and Ehlers, Cindy L. and others},
  Journal = {{BMC} Genomics},
  Year = {2014},
  Number = {1},
  Pages = {85},
  Volume = {15},
  Publisher = {BioMed Central Ltd}
}
@Article{Boker2011,
  Title = {{OpenMx}: An Open Source Extended Structural Equation Modeling Framework},
  Author = {Boker, Steven and Neale, Michael and Maes, Hermine and Wilde, Michael and Spiegel, Michael and Brick, Timothy and Spies, Jeffrey and Estabrook, Ryne and Kenny, Sarah and Bates, Timothy and Mehta, Paras and Fox, John},
  Journal = {Psychometrika},
  Year = {2011},
  Note = {[561 citations]},
  Number = {2},
  Pages = {306--317},
  Volume = {76},
  Abstract = {OpenMx is free, full-featured, open source, structural equation modeling
    (SEM) software. OpenMx runs within the R statistical programming
    environment on Windows, Mac OS-X, and Linux computers. The rationale
    for developing OpenMx is discussed along with the philosophy behind
    the user interface. The OpenMx data structures are introduced --
    these novel structures define the user interface framework and provide
    new opportunities for model specification. Two short example scripts
    for the specification and fitting of a confirmatory factor model
    are next presented. We end with an abbreviated list of modeling applications
    available in OpenMx 1.0 and a discussion of directions for future
    development.},
  Doi = {10.1007/s11336-010-9200-6},
  Owner = {Michael},
  Timestamp = {2010.07.24},
  URL = {http://www.springerlink.com/content/dg37445107026711/}
}
@InProceedings{Newhall2003,
  Title = {{Nswap}: A Network Swap Module for {Linux} Clusters},
  Author = {Newhall, Tia and Finney, Sean and Ganchev, Kuzman and Spiegel, Michael},
  Booktitle = {Proceedings of the 9th European Conference on Parallel Processing ({Euro-Par})},
  Year = {2003},
  Editor = {Kosch, H. and Boszormenyi, L. and Hellwagner, H.},
  Note = {[47 citations]},
  Pages = {1160--1169},
  Series = {Springer Lecture Notes in Computer Science},
  Volume = {2790},
  Abstract = {Cluster applications that process large amounts of data, such as parallel
    scientific or multimedia applications, are likely to cause swapping
    on individual cluster nodes. These applications will perform better
    on clusters with network swapping support. Network swapping allows
    any cluster node with over-committed memory to use idle memory of
    a remote node as its backing store and to ``swap'' its pages over the
    network. As the disparity between network speeds and disk speeds
    continues to grow, network swapping will be faster than traditional
    swapping to local disk. We present Nswap, a network swapping system
    for heterogeneous Linux clusters and networks of Linux machines.
    Nswap is implemented as a loadable kernel module for version 2.4
    of the Linux kernel. It is a space-efficient and time-efficient implementation
    that transparently performs network swapping. Nswap scales to larger
    clusters, supports migration of remotely swapped pages, and supports
    dynamic growing and shrinking of Nswap cache (the amount of RAM available
    to store remote pages) in response to a node's local memory needs.
    Results comparing Nswap running on an eight node Linux cluster with
    100BaseT Ethernet interconnect and faster disk show that Nswap is
    comparable to swapping to local, faster disk; depending on the workload,
    Nswap's performance is up to 1.7 times faster than disk to between
    1.3 and 4.6 times slower than disk for most workloads. We show that
    with faster networking technology, Nswap will outperform swapping
    to disk.},
  Owner = {mspiegel},
  Timestamp = {2008.08.11}
}
@Article{Olivier2011,
  Title = {{OpenMP} Task Scheduling Strategies for Multicore {NUMA} Systems},
  Author = {Stephen L. Olivier and Allan K. Porterfield and Kyle B. Wheeler and Michael Spiegel and Jan F. Prins},
  Journal = {International Journal of High Performance Computing Applications},
  Year = {2012},
  Month = may,
  Note = {[45 citations]},
  Number = {2},
  Pages = {110--124},
  Volume = {26},
  Owner = {Michael},
  Timestamp = {2011.09.05},
  URL = {http://hpc.sagepub.com/content/26/2/110}
}
@InProceedings{Reynolds2006,
  Title = {Capturing scientists' insight for {DDDAS}},
  Author = {Reynolds, Jr., Paul F. and Brogan, David C. and Carnahan, Joe and Loitiere, Yannick and Spiegel, Michael},
  Booktitle = {Proceedings of the 2006 International Conference on Computational Science ({ICCS})},
  Year = {2006},
  Address = {Reading, U.K.},
  Editor = {Alexandrov, V. N. and van Albada, G. D. and Sloot, P. M. A. and Dongarra, J.},
  Pages = {570--577},
  Series = {Springer Lecture Notes in Computer Science},
  Abstract = {One of the intended consequences of utilizing simulations in dynamic,
    data-driven application systems is that the simulations will adjust
    to new data as it arrives. These adjustments will be difficult because
    of the unpredictable nature of the world and because simulations
    are so carefully tuned to model specific operating conditions. Accommodating
    new data may require adapting or replacing numerical methods, simulation
    parameters, or the analytical scientific models from which the simulation
    is derived. In this research, we emphasize the important role a scientist's
    insight can play in facilitating the runtime adaptation of a simulation
    to accurately utilize new data. We present the tools that serve to
    capture and apply a scientist's insight about opportunities for,
    and limitations of, simulation adaptation. Additionally, we report
    on the two ongoing collaborations that serve to guide and evaluate
    our research.},
  Owner = {mspiegel},
  Timestamp = {2008.08.11}
}
@InProceedings{Reynolds2007,
  Title = {Validating evolving simulations in {COERCE}},
  Author = {Reynolds, Jr., Paul F. and Spiegel, Michael and Liu, Xinyu and Gore, Ross},
  Booktitle = {Proceedings of the 2007 International Conference on Computational Science ({ICCS})},
  Year = {2007},
  Month = may,
  Pages = {1238--1245},
  Abstract = {We seek to increase user confidence in simulations as they are adapted
    to meet new requirements. Our approach includes formal representation
    of uncertainty, lightweight validation, and novel techniques for
    exploring emergent behavior. Uncertainty representation, using formalisms
    such as Dempster-Shafer theory, can capture designer insight about
    uncertainty, enabling formal analysis and improving communication
    with decision and policy makers. Lightweight validation employs targeted
    program analysis and automated regression testing to maintain user
    confidence as adaptations occur. Emergent behavior validation exploits
    the semi-automatic adaptation capability of COERCE to make exploration
    of such behavior efficient and productive. We describe our research
    on these three technologies and their impact on validating dynamically
    evolving simulations.},
  Owner = {mspiegel},
  Timestamp = {2008.08.11}
}
@InProceedings{Spiegel2005,
  Title = {A case study of model context for simulation composability and reusability},
  Author = {Spiegel, Michael and Reynolds, Paul F. and Brogan, David C.},
  Booktitle = {Proceedings of the 2005 Winter Simulation Conference ({WSC})},
  Year = {2005},
  Address = {Piscataway, New Jersey},
  Editor = {Kuhl, M. E. and Steiger, N. M. and Armstrong, F. B. and Joines, J. A.},
  Note = {[48 citations]},
  Organization = {Institute of Electrical and Electronics Engineers},
  Pages = {437--444},
  Abstract = {How much effort will be required to compose or reuse simulations?
    What factors need to be considered? It is generally known that composability
    and reusability are daunting challenges for both simulations and
    more broadly software design as a whole. We have conducted a small
    case study in order to clarify the role that model context plays
    in simulation composability and reusability. For a simple problem:
    compute the position and velocity of a falling body, we found that
    a reasonable formulation of a solution included a surprising number
    of implicit constraints. Equally surprising, in a challenge posed
    to a small group of capable individuals, no one of them was able
    to identify more than three-quarters of the ultimate set of validation
    constraints. We document the challenge, interpret its results, and
    discuss the utility our study will have in future investigations
    into simulation composition and reuse.},
  Owner = {mspiegel},
  Timestamp = {2008.08.11}
}
@InCollection{Spiegel2006,
  Title = {{Perfect} {Developer} tool suite},
  Author = {Michael Spiegel},
  Booktitle = {A Survey of Tools for Model Checking and Model-Based Development},
  Publisher = {Department of Computer Science, University of Virginia},
  Year = {2006},
  Note = {Technical Report CS-2006-17},
  Editor = {Strunk, E. and Aiello, A. and Knight, J.},
  Pages = {53--57},
  Owner = {mspiegel},
  Timestamp = {2008.08.11}
}
@InProceedings{Spiegel2006a,
  Title = {Grand challenge case studies in a simulation curriculum},
  Author = {Spiegel, Michael and Reynolds, Paul F. and Brogan, David C.},
  Booktitle = {Proceedings of the 2006 Winter Simulation Conference ({WSC})},
  Year = {2006},
  Address = {Piscataway, New Jersey},
  Editor = {Perrone, L. F. and Wieland, F. P. and Liu, J. and Lawson, B. G. and Nicol, D. M. and Fujimoto, R. M.},
  Organization = {Institute of Electrical and Electronics Engineers},
  Pages = {2242--2249},
  Abstract = {Students wishing to become experts in modeling and simulation (M\&S)
    need to appreciate limitations of the technology. Our goal is to
    expose students to the current boundaries of simulation technology.
    To achieve this, we propose the incorporation of grand challenge
    case studies into a modeling and simulation curriculum. Grand challenge
    problems are defined as problems for which there does not exist a
    universally accepted solution (at present). We argue that grand challenge
    case studies are an excellent vehicle for discovering and appreciating
    current boundaries of M\&S technology. We present three candidate
    case studies, one in detail - the ongoing U.S. Department of Energy
    analysis of Yucca Mountain as a location for nuclear waste storage
    - with supporting discussion about how these cases can enhance exploration
    of the challenges in M\&S technology. We discuss the proposed Yucca
    Mountain storage facility, along with two other case studies, and
    examine their integration into M\&S curricula.},
  Owner = {mspiegel},
  Timestamp = {2008.08.11}
}
@TechReport{Spiegel2007,
  Title = {A proposal for computing with imprecise probabilities: A framework for multiple representations of uncertainty in simulation software},
  Author = {Michael Spiegel},
  Institution = {Department of Computer Science, University of Virginia},
  Year = {2007},
  Number = {CS-2007-16},
  Abstract = {We propose the design and construction of a programming language for
    the formal representation of uncertainty in modeling and simulation.
    Modeling under uncertainty has been of paramount importance in the
    past half century, as quantitative methods of analysis have been
    developed to take advantage of computational resources. Simulation
    is gaining prominence as the proper tool of scientific analysis under
    circumstances where it is infeasible or impractical to directly study
    the system in question. This programming language will be built as
    an extension to the Modelica programming language, which is an acausal
    object-oriented language for hybrid continuous and discrete-event
    simulations [22]. Our language extensions will serve as a platform
    for the research into representation and calibration of imprecise
    probabilities in quantitative risk analysis simulations. Imprecise
    probability is used as a generic term for any mathematical model which
    measures chance or uncertainty without crisp numerical probabilities.
    The explicit representation of imprecise probability theories in
    a domain-specific programming language will facilitate the development
    of efficient algorithms for expressing, computing, and calibrating
    imprecise probability structures. Computation with imprecise probability
    structures will lead to quantitative risk analyses that are more
    informative than analyses using traditional probability theory. We
    have three primary research objectives: (i) the exploration of efficient
    representational structures and computational algorithms of Dempster-Shafer
    belief structures; (ii) the application of the imprecise probabilities
    to representing variable dependence; and (iii) the exploration of
    various Dempster-Shafer combination rules for model calibration.
    At the completion of this dissertation, we will have produced the
    end-to-end design, implementation, and analysis of a programming
    language that will facilitate the future exploration of algorithms,
    software, and theory for quantitative uncertainty analysis in computer
    science.},
  Owner = {mspiegel},
  Timestamp = {2008.08.11}
}
@InProceedings{Spiegel2008,
  Title = {Quantifying and Analyzing Uncertainty in Simulations to Enable User Understanding},
  Author = {Spiegel, Michael and Gore, Ross and Reynolds, Paul F.},
  Booktitle = {Proceedings of the Modeling, Simulation, \& Gaming Student Capstone Conference},
  Year = {2008},
  Address = {Suffolk, VA},
  Note = {Recipient of best paper award in "Discipline of Modeling \& Simulation" track.},
  Organization = {The Virginia Modeling, Analysis, \& Simulation Center (VMASC)},
  Abstract = {Quantitative methods of analysis have progressed faster than quantitative
    methods of capturing, representing, propagating and analyzing uncertainty
    in the realm of computational thinking, adversely affecting the quality
    of both scientific computational analysis, and important policy decisions.
    Uncertainty arises from incomplete model input information (aleatory
    uncertainty), incomplete model structure information (epistemic uncertainty),
    and incomplete understanding of model dynamics. We describe a work
    in progress computational approach, framework, and language, RiskModelica,
    that will 1) support representation, propagation, and calibration
    of aleatory uncertainty using probability theory, probability boxes,
    and Dempster-Shafer theory of evidence; 2) develop reliable methodologies
    - algorithms, data acquisition and management procedures, software
    and theory - for quantifying uncertainty in computer predictions;
    3) support exploration of epistemic uncertainty utilizing causal
    analysis, and static and dynamic program slicing to characterize
    the dependencies, causal relationships, and interactions of design
    decisions; and 4) as a way of gaining insight into uncertainties,
    enable subject matter experts to observe model characteristics under
    novel conditions of interest. These capabilities represent a revolutionary
    approach to capturing, representing, propagating, and analyzing quantitatively,
    uncertainties that arise in the process of computational thinking.},
  Owner = {mspiegel},
  Timestamp = {2008.08.11}
}
@techreport{Spiegel2009,
  author      = {Michael Spiegel and Reynolds, Jr., Paul F.},
  title       = {The Dense Skip Tree: A Cache-Conscious Randomized Data Structure},
  institution = {Department of Computer Science, University of Virginia},
  number      = {CS-2009-05},
  year        = {2009},
  abstract    = {We introduce the dense skip tree, a novel cache-conscious randomized
    data structure. Algorithms for search, insertion, and deletion are
    presented, and they are shown to have expected cost O(log n). The
    dense skip tree obeys the same asymptotic properties as the skip
    list and the skip tree. A series of properties on the dense skip
    tree is proven, in order to show the probabilistic organization of
    data in a cache-conscious design. Performance benchmarks show the
    dense skip tree to outperform the skip list and the self-balancing
    binary search tree when the working set cannot be contained in cache.},
  owner       = {ms6ep},
  timestamp   = {2009.04.27}
}
@InProceedings{Spiegel2010,
  Title = {Lock-Free Multiway Search Trees},
  Author = {Michael Spiegel and Reynolds, Jr., Paul F.},
  Booktitle = {Proceedings of the 39th Annual International Conference on Parallel Processing ({ICPP})},
  Year = {2010},
  Address = {San Diego, CA},
  Month = sep # "~13--16",
  Publisher = {IEEE Computer Society},
  Abstract = {We propose a lock-free multiway search tree algorithm for concurrent
    applications with large working set sizes. Our algorithm is a variation
    of the randomized skip tree. We relax the ordering constraints among
    the nodes in the original skip tree definition. Optimal paths through
    the tree are temporarily violated by mutation operations, and eventually
    restored using online node compaction. Experimental evidence shows
    that our lock-free skip tree outperforms a highly tuned concurrent
    skip list under workloads of various proportions of operations and
    working set sizes. The max throughput of our algorithm is on average
    41\% higher than the throughput of the skip list, and 129\% higher
    on the workload of the largest working set size and read-dominated
    operations.},
  Doi = {10.1109/ICPP.2010.68},
  Owner = {Michael},
  Timestamp = {2010.07.06},
  URL = {http://dx.doi.org/10.1109/ICPP.2010.68}
}
@TechReport{Spiegel2011,
  Title = {Lock-Free Multiway Search Trees as Priority Queues in Parallel Branch and Bound Applications},
  Author = {Michael Spiegel and Reynolds, Jr., Paul F.},
  Institution = {Department of Computer Science, University of Virginia},
  Year = {2011},
  Month = feb,
  Number = {CS-2011-01},
  Abstract = {The lock-free skip tree is a cache-conscious concurrent data structure
    for many-core systems that shows significant performance improvements
    over the state of the art in concurrent data structure designs for
    those applications that must contend with the deleterious effects
    of the memory wall. In a previous study using a series of synthetic
    benchmarks, the lock-free skip tree was found to improve peak throughput
    by x1.8 to x2.3 relative to a state of the art lock-free skip list
    implementation when the working set size exceeds cache size. In this
    work, we study a class of application benchmarks that can be used
    to characterize the relative merits of the lock-free skip tree as
    compared to the lock-free skip list. In a series of four parallel
    branch-and-bound applications, two of the applications are x2.3 and
    x3.1 faster when using the skip tree as a concurrent priority queue
    as compared to the lock-free skip list priority queue. On a shared-memory
    supercomputer architecture the two branch-and-bound applications
    are x1.6 and x2.1 faster with the skip tree versus the skip list
    running at 80 hardware threads. Based on the four application benchmarks
    and a synthetic branch-and-bound application, a set of guidelines
    is offered for selecting the lock-free skip tree to use as a centralized
    priority queue in parallel branch-and-bound applications.},
  Owner = {ms6ep},
  Timestamp = {2009.04.27},
  URL = {http://www.unc.edu/~mspiegel/publications/CS-2011-01.pdf}
}
@phdthesis{Spiegel2011a,
  author    = {Michael Spiegel},
  title     = {Cache-conscious concurrent data structures},
  school    = {University of Virginia},
  year      = {2011},
  owner     = {Michael},
  timestamp = {2011.04.26},
  url       = {http://mspiegel.github.io/publications/michael-spiegel-dissertation.pdf}
}
@comment{jabref-meta: selector_author:}
@comment{jabref-meta: selector_journal:}
@comment{jabref-meta: selector_keywords:}
@comment{jabref-meta: selector_publisher:}