forked from WdeNooy/Statistical-Inference
-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathRefList.bib
More file actions
455 lines (444 loc) · 29.5 KB
/
RefList.bib
File metadata and controls
455 lines (444 loc) · 29.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
% NOTE(review): entry looks complete; `note` and BISAC `keywords` are Google Books
% export artifacts — harmless, but could be pruned if the file is ever cleaned up.
@book{deciIntrinsicMotivationSelfDetermination2013,
	title = {Intrinsic {Motivation} and {Self}-{Determination} in {Human} {Behavior}},
	isbn = {978-1-4899-2271-7},
	abstract = {Early in this century, most empirically oriented psychologists believed that all motivation was based in the physiology of a set of non-nervous system tissue needs. The theories of that era reflected this belief and used it in an attempt to explain an increasing number of phenomena. It was not until the 1950s that it became irrefutably clear that much of human motivation is based not in these drives, but rather in a set of innate psychological needs. Their physiological basis is less understood; and as concepts, these needs lend themselves more easily to psycho logical than to physiological theorizing. The convergence of evidence from a variety of scholarly efforts suggests that there are three such needs: self-determination, competence, and interpersonal relatedness. This book is primarily about self-determination and competence (with particular emphasis on the former), and about the processes and structures that relate to these needs. The need for interpersonal relat edness, while no less important, remains to be explored, and the findings from those explorations will need to be integrated with the present theory to develop a broad, organismic theory of human motivation. Thus far, we have articulated self-determination theory, which is offered as a working theory-a theory in the making. To stimulate the research that will allow it to evolve further, we have stated self-determination theory in the form of minitheories that relate to more circumscribed domains, and we have developed paradigms for testing predictions from the various minitheories.},
	language = {en},
	publisher = {Springer Science \& Business Media},
	author = {Deci, Edward L. and Ryan, Richard M.},
	month = jun,
	year = {2013},
	note = {Google-Books-ID: M3CpBgAAQBAJ},
	keywords = {Psychology / General, Psychology / Movements / General, Psychology / Personality, Social Science / Sociology / General},
}
% NOTE(review): title previously contained publisher-site boilerplate
% ("...Change1: The Journal of Psychology: Vol 91, No 1") and the URL carried a
% session token; both removed. DOI taken from the stored tandfonline URL.
@article{rogersProtectionMotivationTheory1975,
	title = {A {Protection} {Motivation} {Theory} of {Fear} {Appeals} and {Attitude} {Change}},
	volume = {91},
	url = {https://www.tandfonline.com/doi/abs/10.1080/00223980.1975.9915803},
	doi = {10.1080/00223980.1975.9915803},
	number = {1},
	urldate = {2020-03-22},
	journal = {The Journal of Psychology},
	author = {Rogers, Ronald W.},
	year = {1975},
	pages = {93--114},
	file = {A Protection Motivation Theory of Fear Appeals and Attitude Change1\: The Journal of Psychology\: Vol 91, No 1:/Users/cscholz/Zotero/storage/5EYJIVVK/00223980.1975.html:text/html},
}
% NOTE(review): DOI added (derived from the stored onlinelibrary.wiley.com URL);
% title entered in Title Case so styles can downcase but never lose capitals.
@article{fishbeinRoleTheoryDeveloping2006,
	title = {The Role of Theory in Developing Effective Health Communications},
	volume = {56},
	url = {http://onlinelibrary.wiley.com/doi/10.1111/j.1460-2466.2006.00280.x/full},
	doi = {10.1111/j.1460-2466.2006.00280.x},
	number = {s1},
	urldate = {2015-01-23},
	journal = {Journal of Communication},
	author = {Fishbein, M. and Cappella, J. N.},
	year = {2006},
	pages = {S1--S17},
	file = {Snapshot:/Users/cscholz/Zotero/storage/JN85ZVRG/full.html:text/html},
}
% NOTE(review): dropped `URL` and `eprint` — both were just the DOI resolver;
% store the bare DOI and let the style build links. Page range now uses `--`.
@article{doi:10.1080/19312450701641375,
author = {O'Keefe, Daniel J.},
title = {Brief Report: Post Hoc Power, Observed Power, A Priori Power, Retrospective Power, Prospective Power, Achieved Power: Sorting Out Appropriate Uses of Statistical Power Analyses},
journal = {Communication Methods and Measures},
volume = {1},
number = {4},
pages = {291--299},
year = {2007},
publisher = {Routledge},
doi = {10.1080/19312450701641375}
}
% NOTE(review): author converted to unambiguous "Last, First" form.
@book{RefWorks:3933,
author={Cohen, Jacob},
year={1969},
title={Statistical power analysis for the behavioral sciences},
publisher={Academic Press},
address={San Diego, CA},
keywords={effect size}
}
% NOTE(review): author converted to "Last, First" form; other fields unchanged.
@book{RefWorks:3883,
author={Cumming, Geoff},
year={2012},
title={Understanding the new statistics: Effect sizes, confidence intervals, and meta-analysis},
publisher={Routledge},
address={New York},
note={Includes bibliographical references and index.; ID: UVA_ED003487901},
keywords={Multivariate analysis; Confidence intervals; meta analysis; new statistics}
}
% NOTE(review): author converted to "Last, First" form. The Dutch annotation in
% `note` is bibliographic data and is preserved verbatim.
@book{RefWorks:1494,
author={Davis, James A.},
year={1985},
title={The logic of causal order},
publisher={Sage},
address={Beverly Hills, CA},
note={Ch. 1 geeft zeer helder overzicht over de relatie tussen causaliteit en associatie, de rol van tijd, kenmerken van causale netwerken (ordered, loop, partially ordered/blocks), soorten variabelen (prior, independent, intervening, dependent, consequent & suppressor), soorten effecten (direct, indirect, spurious) en soorten causale systemen of modellen (consistent, inconsistent). Het specificatieverband (interactie-effect) komt in het boek niet aan de orde. Ch. 2 geeft een helder overzicht over de manieren waarop variabelen gecontroleerd kunnen worden zodat directe, indirecte en schijnverbanden berekend kunnen worden: tabelelaboratie, effectanalyse en padanalyse, waarbij correlatie- en regressieanalyse als technieken vrij centraal staan. Oordeel: tamelijk geschikt om studenten na te laten denken over hun causale schema (binnen het conceptueel model - mbv Ch. 1) en hen een opstap te geven naar multivariate analyse (Ch. 2 en 3). Het interactie-effect apart behandelen (als samengestelde variabele in een causaal schema?).; x},
keywords={Causaliteit, Wiskundige methoden; 31.02 filosofie en theorie van de wiskunde; 70.03 methoden, technieken en organisatie van sociaal-wetenschappelijk onderzoek; causale analyse},
isbn={0-8039-2553-0}
}
% NOTE(review): same work as @Book{deGrootMethodologyFoundationsInference1969}
% further down this file — consider consolidating the duplicates.
% Author now uses "von Last, First" so the particle "de" sorts correctly.
@book{RefWorks:3909,
author={de Groot, Adrianus Dingeman},
year={1969},
title={Methodology: Foundations of Inference and Research in the Behavioral Sciences},
publisher={Mouton},
address={The Hague},
keywords={empirical cycle}
}
% NOTE(review): author in "von Last, First" form so "de" is parsed as a particle.
@book{RefWorks:3937,
author={de Laplace, Pierre Simon},
year={1812},
title={Théorie analytique des probabilités},
publisher={Courcier},
volume={7}
}
% NOTE(review): journal name spelled out (was "Ann.Statist."); month uses the
% bare macro; pages use `--`; author expanded to match the 1987 Efron entry.
% DOI added for this well-known paper — verify against the publisher record.
@article{RefWorks:3956,
author={Efron, Bradley},
year={1979},
month=jan,
title={Bootstrap Methods: Another Look at the Jackknife},
journal={The Annals of Statistics},
volume={7},
number={1},
pages={1--26},
doi={10.1214/aos/1176344552}
}
% NOTE(review): fixed invalid month "03/01" -> mar; pages `--`; the abstract was
% pasted twice and contained a "?" where theta was lost in export; the `note`
% merely repeated the DOI; the ISSN was stored in `isbn`.
@article{RefWorks:3957,
author={Efron, Bradley},
year={1987},
month=mar,
title={Better Bootstrap Confidence Intervals},
journal={Journal of the American Statistical Association},
volume={82},
number={397},
pages={171--185},
abstract={We consider the problem of setting approximate confidence intervals for a single parameter {$\theta$} in a multiparameter family. The standard approximate intervals based on maximum likelihood theory, , can be quite misleading. In practice, tricks based on transformations, bias corrections, and so forth, are often used to improve their accuracy. The bootstrap confidence intervals discussed in this article automatically incorporate such tricks without requiring the statistician to think them through for each new application, at the price of a considerable increase in computational effort. The new intervals incorporate an improvement over previously suggested methods, which results in second-order correctness in a wide variety of problems. In addition to parametric families, bootstrap intervals are also developed for nonparametric situations.},
issn={0162-1459},
url={http://www.tandfonline.com/doi/abs/10.1080/01621459.1987.10478410},
doi={10.1080/01621459.1987.10478410}
}
% NOTE(review): author in "Last, First" form; journal name capitalised; pages `--`.
@article{RefWorks:3940,
author={Erdogan, B. Zafer},
year={1999},
title={Celebrity endorsement: A literature review},
journal={Journal of Marketing Management},
volume={15},
number={4},
pages={291--314},
keywords={celebrity endorsement; literature review}
}
% NOTE(review): author in "Last, First" form; stray trailing period dropped from
% title; pages `--`; the value in `isbn` is an ISSN, now stored as such.
@article{RefWorks:3955,
author={Fisher, R. A.},
year={1919},
title={The Correlation between Relatives on the Supposition of Mendelian Inheritance},
journal={Transactions of the Royal Society of Edinburgh},
volume={52},
number={2},
pages={399--433},
abstract={Several attempts have already been made to interpret the well-established results of biometry in accordance with the Mendelian scheme of inheritance. It is here attempted to ascertain the biometrical properties of a population of a more general type than has hitherto been examined, inheritance in which follows this scheme. It is hoped that in this way it will be possible to make a more exact analysis of the causes of human variability. The great body of available statistics show us that the deviations of a human measurement from its mean follow very closely the Normal Law of Errors, and, therefore, that the variability may be uniformly measured by the standard deviation corresponding to the square root of the mean square error. When there are two independent causes of variability capable of producing in an otherwise uniform population distributions with standard deviations σ1 and σ2, it is found that the distribution, when both causes act together, has a standard deviation . It is therefore desirable in analysing the causes of variability to deal with the square of the standard deviation as the measure of variability. We shall term this quantity the Variance of the normal population to which it refers, and we may now ascribe to the constituent causes fractions or percentages of the total variance which they together produce. It is desirable on the one hand that the elementary ideas at the basis of the calculus of correlations should be clearly understood, and easily expressed in ordinary language, and on the other that loose phrases about the “percentage of causation,” which obscure the essential distinction between the individual and the population, should be carefully avoided.},
issn={0080-4568},
doi={10.1017/S0080456800012163}
}
% NOTE(review): author in "Last, First" form; missing space restored in the
% journal name; pages `--`; the unhyphenated ISSN was stored in `isbn`.
@article{RefWorks:3907,
author={Fisher, Ronald Aylmer},
year={1955},
title={Statistical Methods and Scientific Induction},
journal={Journal of the Royal Statistical Society. Series B (Methodological)},
volume={17},
number={1},
pages={69--78},
abstract={The attempt to reinterpret the common tests of significance used in scientific research as though they constituted some kind of acceptance procedure and led to "decisions" in Wald's sense, originated in several misapprehensions and has led, apparently, to several more. The three phrases examined here, with a view to elucidating the fallacies they embody, are: (i) "Repeated sampling from the same population", (ii) Errors of the "second kind", (iii) "Inductive behaviour". Mathematicians without personal contact with the Natural Sciences have often been misled by such phrases. The errors to which they lead are not always only numerical.},
keywords={statistical theory; hypothesis testing},
issn={0035-9246},
url={http://www.jstor.org/stable/2983785}
}
% NOTE(review): author in "Last, First" form; title in Title Case without the
% stray trailing period.
@book{RefWorks:3932,
author={Fisher, Ronald Aylmer},
year={1935},
title={The Design of Experiments},
publisher={Oliver and Boyd},
address={Edinburgh}
}
% NOTE(review): author in "Last, First" form. The original 1925 edition was
% published by Oliver and Boyd (Edinburgh); "Genesis Publishing Pvt Ltd" looks
% like a reprint — verify which printing is actually being cited.
@book{RefWorks:3908,
author={Fisher, Ronald Aylmer},
year={1925},
title={Statistical methods for research workers},
publisher={Genesis Publishing Pvt Ltd},
keywords={statistical theory; hypothesis testing}
}
% NOTE(review): author in "Last, First" form; pages `--`; the value in `isbn`
% is an ISSN, now stored as such.
@article{RefWorks:3925,
author={Frick, Robert W.},
year={1998},
title={Interpreting statistical testing: Process and propensity, not population and random sampling},
journal={Behavior Research Methods, Instruments, & Computers},
volume={30},
number={3},
pages={527--535},
note={ID: Frick1998},
abstract={The standard textbook treatment of conventional statistical tests assumes random sampling from a population and interprets the outcome of the statistical testing as being about a population. Problems with this interpretation include that (1) experimenters rarely make any attempt to randomly sample, (2) if random sampling occurred, conventional statistical tests would not precisely describe the population, and (3) experimenters do not use statistical testing to generalize to a population. The assumption of random sampling can be replaced with the assumption that scores were produced by a process. Rejecting the null hypothesis then leads to a conclusion about process, applying to only the subjects in the experiment (e.g., that some difference in the treatment of two groups caused the difference in average scores). This interpretation avoids the problems noted and fits how statistical testing is used in psychology.},
keywords={process-based inference; model-based inference},
issn={1532-5970},
url={http://dx.doi.org/10.3758/BF03200686},
doi={10.3758/BF03200686}
}
% NOTE(review): author in "Last, First" form; the Latin title (which itself
% names the author) is left untouched.
@book{RefWorks:3936,
author={Gauss, Carl Friedrich},
year={1809},
title={Theoria motus corporum coelestium in sectionibus conicis solem ambientium auctore Carolo Friderico Gauss},
publisher={sumtibus Frid. Perthes et IH Besser}
}
% NOTE(review): authors in "Last, First" form; everything else unchanged.
@misc{RefWorks:3838,
author = {Hainmueller, Jens and Mummolo, Jonathan and Xu, Yiqing},
year = {2016},
title = {How Much Should We Trust Estimates from Multiplicative Interaction Models? Simple Tools to Improve Empirical Practice},
keywords = {interaction effects; regression; moderation},
url = {https://ssrn.com/abstract=2739221},
doi={10.2139/ssrn.2739221}
}
% NOTE(review): title garble "Fisher and Neyman: Pearson" repaired to
% "Fisher and Neyman-Pearson" (the abstract uses the hyphenated form);
% authors in "Last, First" form; pages `--`; ISSN moved out of `isbn`.
@article{RefWorks:3931,
author={Halpin, Peter F. and Stam, Henderikus J.},
year={2006},
title={Inductive Inference or Inductive Behavior: Fisher and Neyman-Pearson Approaches to Statistical Testing in Psychological Research (1940-1960)},
journal={The American Journal of Psychology},
volume={119},
number={4},
pages={625--653},
abstract={The application of statistical testing in psychological research over the period of 1940-1960 is examined in order to address psychologists' reconciliation of the extant controversy between the Fisher and Neyman-Pearson approaches. Textbooks of psychological statistics and the psychological journal literature are reviewed to examine the presence of what Gigerenzer (1993) called a hybrid model of statistical testing. Such a model is present in the textbooks, although the mathematically incomplete character of this model precludes the appearance of a similarly hybridized approach to statistical testing in the research literature. The implications of this hybrid model for psychological research and the statistical testing controversy are discussed.},
issn={0002-9556},
url={http://www.jstor.org/stable/20445367},
doi={10.2307/20445367}
}
% NOTE(review): author in "Last, First" form.
@book{RefWorks:3873,
author={Hayes, Andrew F.},
year={2013},
title={Introduction to Mediation, Moderation, and Conditional Process Analysis: A Regression-Based Approach},
publisher={Guilford Press},
keywords={mediation; moderation; regression},
isbn={9781609182304}
}
% NOTE(review): the containing book's title was stored in `series`; it belongs
% in `booktitle`, and a chapter with named editors is best typed @incollection.
% Editor spacing fixed; pages use `--`. Citation key unchanged.
@incollection{RefWorks:3927,
author={Kline, Rex B.},
editor={Kline, Rex B.},
year={2004},
title={What's Wrong With Statistical Tests--And Where We Go From Here},
booktitle={Beyond Significance Testing: Reforming Data Analysis Methods in Behavioral Research},
publisher={American Psychological Association},
address={Washington DC},
pages={61--91}
}
% NOTE(review): fixed invalid month "12/01" -> dec; pages `--`; the abstract
% was pasted twice and "2 ? 2" is a garbled multiplication sign; the `note`
% merely repeated the DOI; the ISSN was stored in `isbn`.
@article{RefWorks:3930,
author={Lehmann, E. L.},
year={1993},
month=dec,
title={The Fisher, Neyman-Pearson Theories of Testing Hypotheses: One Theory or Two?},
journal={Journal of the American Statistical Association},
volume={88},
number={424},
pages={1242--1249},
abstract={The Fisher and Neyman-Pearson approaches to testing statistical hypotheses are compared with respect to their attitudes to the interpretation of the outcome, to power, to conditioning, and to the use of fixed significance levels. It is argued that despite basic philosophical differences, in their main practical aspects the two theories are complementary rather than contradictory and that a unified approach is possible that combines the best features of both. As applications, the controversies about the Behrens-Fisher problem and the comparison of two binomials ({$2 \times 2$} tables) are considered from the present point of view.},
issn={0162-1459},
url={http://www.tandfonline.com/doi/abs/10.1080/01621459.1993.10476404},
doi={10.1080/01621459.1993.10476404}
}
% NOTE(review): month "March 1" replaced by the bare macro; pages `--`; author
% in "Last, First" form; the run-together section headings that the web export
% appended to the abstract ("IntroductionModels in statistics...") removed.
@article{RefWorks:3924,
author={Lenhard, Johannes},
year={2006},
month=mar,
title={Models and Statistical Inference: The Controversy between Fisher and Neyman-Pearson},
journal={The British Journal for the Philosophy of Science},
volume={57},
number={1},
pages={69--91},
abstract={The main thesis of the paper is that in the case of modern statistics, the differences between the various concepts of models were the key to its formative controversies. The mathematical theory of statistical inference was mainly developed by Ronald A. Fisher, Jerzy Neyman, and Egon S. Pearson. Fisher on the one side and Neyman–Pearson on the other were involved often in a polemic controversy. The common view is that Neyman and Pearson made Fisher's account more stringent mathematically. It is argued, however, that there is a profound theoretical basis for the controversy: both sides held conflicting views about the role of mathematical modelling. At the end, the influential programme of Exploratory Data Analysis is considered to be advocating another, more instrumental conception of models.},
keywords={model-based inference; statistical inference},
doi={10.1093/bjps/axi152}
}
% NOTE(review): author in "Last, First" form; pages `--`.
@article{RefWorks:3935,
author={Lyon, Aidan},
year={2014},
title={Why are normal distributions normal?},
journal={The British Journal for the Philosophy of Science},
volume={65},
number={3},
pages={621--649}
}
% NOTE(review): author in "Last, First" form; journal name capitalised; pages `--`.
@article{RefWorks:3941,
author={McCracken, Grant},
year={1989},
title={Who is the celebrity endorser? Cultural foundations of the endorsement process},
journal={Journal of Consumer Research},
volume={16},
number={3},
pages={310--321}
}
% NOTE(review): author in "Last, First" form; missing space restored in the
% journal name; pages `--`.
@article{RefWorks:3929,
author={Neyman, Jerzy},
year={1937},
title={Outline of a theory of statistical estimation based on the classical theory of probability},
journal={Philosophical Transactions of the Royal Society of London. Series A, Mathematical and Physical Sciences},
volume={236},
number={767},
pages={333--380},
keywords={Confidence intervals}
}
% NOTE(review): authors in "Last, First" form; invalid month "01/01" -> jan;
% journal abbreviation expanded; `pages={289}` looked truncated — the paper
% spans 289--337 — verify against the publisher record.
@article{RefWorks:3906,
author={Neyman, J. and Pearson, E. S.},
year={1933},
month=jan,
title={On the Problem of the Most Efficient Tests of Statistical Hypotheses},
journal={Philosophical Transactions of the Royal Society of London. Series A},
volume={231},
number={694-706},
pages={289--337},
keywords={statistical theory; hypothesis testing; Type II error},
url={http://rsta.royalsocietypublishing.org/content/231/694-706/289.abstract}
}
% NOTE(review): authors in "Last, First" form; pages `--`; volume/issue added
% (Epidemiology 3(2), 1992) — verify against the publisher record; abstract
% typo "exposure-inuced" corrected.
@article{RefWorks:3938,
author={Robins, James M. and Greenland, Sander},
year={1992},
title={Identifiability and exchangeability for direct and indirect effects},
journal={Epidemiology},
volume={3},
number={2},
pages={143--155},
abstract={We consider the problem of separating the direct effects of an exposure from effects relayed through an intermediate variable (indirect effects). We show that adjustment for the intermediate variable, which is the most common method of estimating direct effects, can be biased. We also show that, even in a randomized crossover trial of exposure, direct and indirect effects cannot be separated without special assumptions; in other words, direct and indirect effects are not separately identifiable when only exposure is randomized. If the exposure and intermediate never interact to cause disease and if intermediate effects can be controlled, that is, blocked by a suitable intervention, then a trial randomizing both exposure and the intervention can separate direct from indirect effects. Nonetheless, the estimation must be carried out using the G-computation algorithm. Conventional adjustment methods remain biased. When exposure and the intermediate interact to cause disease, direct and indirect effects will not be separable even in a trial in which both the exposure and the intervention blocking intermediate effects are randomly assigned. Nonetheless, in such a trial, one can still estimate the fraction of exposure-induced disease that could be prevented by control of the intermediate. Even in the absence of an intervention blocking the intermediate effect, the fraction of exposure-induced disease that could be prevented by control of the intermediate can be estimated with the G-computation algorithm if data are obtained on additional confounding variables.},
keywords={mediation}
}
% NOTE(review): authors in "Last, First" form; journal name capitalised;
% `pages={331}` looked truncated — the article runs 331--340 — verify.
@article{RefWorks:3922,
author={Rosnow, Ralph L. and Rosenthal, Robert},
year={1996},
title={Computing contrasts, effect sizes, and counternulls on other people's published data: General procedures for research consumers.},
journal={Psychological Methods},
volume={1},
number={4},
pages={331--340}
}
% NOTE(review): authors in "Last, First" form; everything else unchanged.
@book{RefWorks:3882,
author={Salmon, Wesley C. and Jeffrey, Richard C. and Greeno, James G.},
year={1971},
title={Statistical Explanation and Statistical Relevance},
publisher={University of Pittsburgh Press},
abstract={According to modern physics, many objectively improbable events actually occur, such as the spontaneous disintegration of radioactive atoms. Because of high levels of improbability, scientists are often at a loss to explain such phenomena. In this main essay of this book, Wesley Salmon offers a solution to scientific explanation based on the concept of statistical relevance (the S-R model). In this vein, the other two essays herein discuss "Statistical Relevance vs. Statistical Inference," and "Explanation and Information."},
isbn={9780822952251},
url={http://www.jstor.org/stable/j.ctt6wrd9p}
}
% NOTE(review): the `doi` field contained keywords, not a DOI — moved them to
% `keywords` and dropped the bogus `doi`. Restored the opening bracket the
% title lost ("[with Discussion and Reply]"); authors in "Last, First" form;
% pages `--`; the two unhyphenated ISSNs were stored in `isbn`.
@article{RefWorks:3923,
author={Särndal, Carl-Erik and Thomsen, Ib and Hoem, Jan M. and Lindley, D. V. and Barndorff-Nielsen, O. and Dalenius, Tore},
year={1978},
title={Design-Based and Model-Based Inference in Survey Sampling [with Discussion and Reply]},
journal={Scandinavian Journal of Statistics},
volume={5},
number={1},
pages={27--52},
abstract={In recent years, a model-based theory of inference for survey sampling populations has been proposed as an alternative to classical, design-based theory. The two approaches are compared in this paper for the problem of estimating the mean of the finite population. Following some notes on the historical development, criteria for "best estimator" within each approach are discussed. These criteria are applied to a series of examples, which show that several of the classical results can be obtained and reinterpreted without difficulty through the model-based theory. The role of "artificial randomization" is discussed. It is argued that the model-based framework often offers advantages over the design-based one when it comes to presenting a lucid argument in favor of some given sampling procedure.},
issn={0303-6898, 1467-9469},
url={http://www.jstor.org/stable/4615682},
keywords={model-based inference; design-based inference}
}
% NOTE(review): author in "Last, First" form; journal name capitalised;
% `pages={594}` looked truncated — the article runs 594--604 — verify.
@article{RefWorks:3934,
author={Wilkinson, Leland},
year={1999},
title={Statistical methods in psychology journals: Guidelines and explanations.},
journal={American Psychologist},
volume={54},
number={8},
pages={594--604}
}
% NOTE(review): same work as @book{RefWorks:3909} earlier in this file —
% consider consolidating. Removed `number = {Book, Whole}`, a RefWorks export
% artifact that is not a valid bibliographic field value for a book.
@Book{deGrootMethodologyFoundationsInference1969,
  location = {{The Hague}},
  title = {Methodology: {{Foundations}} of {{Inference}} and {{Research}} in the {{Behavioral Sciences}}},
  publisher = {{Mouton}},
  date = {1969},
  keywords = {empirical cycle},
  author = {Adrianus Dingeman {de Groot}},
  options = {useprefix=true},
}
% NOTE(review): the abstract below is truncated mid-sentence ("...offered and a")
% — a web-export artifact; volume, issue, and pages are missing. Verify and
% complete against the published Communication Theory record for this DOI.
@Article{HolbertConceptualizingOrganizingPositing2019,
  title = {Conceptualizing, {{Organizing}}, and {{Positing Moderation}} in {{Communication Research}}},
  abstract = {Abstract. Meta-theoretical focus is given to how communication researchers are approaching and hypothesizing moderation. A moderation typology is offered and a},
  language = {en},
  journal = {Communication Theory},
  doi = {10.1093/ct/qtz006},
  author = {R. Lance Holbert and Esul Park},
  month = {apr},
  year = {2019},
  keywords = {moderation,regression,typology},
  file = {C\:\\Users\\Wouter\\Zotero\\storage\\R6AE2AVF\\Holbert and Park - Conceptualizing, Organizing, and Positing Moderati.pdf;C\:\\Users\\Wouter\\Zotero\\storage\\ATMJ7TN7\\5481797.html},
}
% NOTE(review): repaired garbled editor "ArthurEditors Lupia" -> "Lupia, Arthur";
% all names in "Last, First" form; nonstandard `place` replaced with `address`.
@InCollection{bullock_ha_2011,
  title = {Mediation Analysis Is Harder than It Looks},
  booktitle = {Cambridge Handbook of Experimental Political Science},
  author = {Bullock, John G. and Ha, Shang E.},
  editor = {Druckman, James N. and Green, Donald P. and Kuklinski, James H. and Lupia, Arthur},
  year = {2011},
  pages = {508--522},
  publisher = {{Cambridge University Press}},
  doi = {10.1017/CBO9780511921452.035},
  file = {C\:\\Users\\wdnooy1\\Dropbox\\Bibliography\\BullockHa2011.pdf},
  keywords = {mediation},
  address = {Cambridge},
}
% NOTE(review): entry is complete (volume/number/pages/doi all present); the
% `annotation` field is a Zotero eprint artifact and is harmless.
@Article{wassersteinASAStatementPValues2016,
  title = {The {{ASA Statement}} on P-{{Values}}: {{Context}}, {{Process}}, and {{Purpose}}},
  shorttitle = {The {{ASA Statement}} on P-{{Values}}},
  author = {Ronald L. Wasserstein and Nicole A. Lazar},
  year = {2016},
  month = {apr},
  volume = {70},
  pages = {129--133},
  publisher = {{Taylor \& Francis}},
  issn = {0003-1305},
  doi = {10.1080/00031305.2016.1154108},
  annotation = {\_eprint: https://doi.org/10.1080/00031305.2016.1154108},
  file = {C\:\\Users\\wdnooy1\\Zotero\\storage\\SHVLPIWH\\Wasserstein and Lazar - 2016 - The ASA Statement on p-Values Context, Process, a.pdf;C\:\\Users\\wdnooy1\\Zotero\\storage\\4EYIKBLH\\00031305.2016.html},
  journal = {The American Statistician},
  keywords = {Statistical Significance},
  number = {2},
}
% NOTE(review): entry is complete (volume/number/pages/doi/issn all present);
% no changes required.
@Article{smithsonCorrectConfidenceIntervals2001,
  title = {Correct {{Confidence Intervals}} for {{Various Regression Effect Sizes}} and {{Parameters}}: {{The Importance}} of {{Noncentral Distributions}} in {{Computing Intervals}}},
  shorttitle = {Correct {{Confidence Intervals}} for {{Various Regression Effect Sizes}} and {{Parameters}}},
  author = {Michael Smithson},
  year = {2001},
  month = {aug},
  volume = {61},
  pages = {605--632},
  publisher = {{SAGE Publications Inc}},
  issn = {0013-1644},
  doi = {10.1177/00131640121971392},
  abstract = {The advantages that confidence intervals have over null-hypothesis significance testing have been presented on many occasions to researchers in psychology. This article provides a practical introduction to methods of constructing confidence intervals for multiple and partial R2 and related parameters in multiple regression models based on ``noncentral''F and {$\chi$}2 distributions. Until recently, these techniques have not been widely available due to their neglect in popular statistical textbooks and software. These difficulties are addressed here via freely available SPSS scripts and software and illustrations of their use. The article concludes with discussions of implications for the interpretation of findings in terms of noncentral confidence intervals, alternative measures of effect size, the relationship between noncentral confidence intervals and power analysis, and the design of studies.},
  file = {C\:\\Users\\wdnooy1\\Zotero\\storage\\IBYNVIYI\\Smithson - 2001 - Correct Confidence Intervals for Various Regressio.pdf},
  journal = {Educational and Psychological Measurement},
  keywords = {Confidence intervals,noncentral distribution},
  language = {en},
  number = {4},
}
% NOTE(review): malformed `issn = {1538 - 9472}` normalised; month now uses the
% bare macro; author in "Last, First" form. Pages are missing — verify against
% the journal record.
@Article{sawilowskyNewEffectSize2009,
  ids = {SawilowskyNeweffectsize2009},
  title = {New {{Effect Size Rules}} of {{Thumb}}},
  author = {Sawilowsky, Shlomo},
  year = {2009},
  month = nov,
  volume = {8},
  issn = {1538-9472},
  doi = {10.22237/jmasm/1257035100},
  file = {C\:\\Users\\wdnooy1\\Zotero\\storage\\EX2YHVF8\\Sawilowsky - 2009 - New Effect Size Rules of Thumb.pdf;C\:\\Users\\wdnooy1\\Zotero\\storage\\TCZTWHII\\26.html},
  journal = {Journal of Modern Applied Statistical Methods},
  keywords = {Cohen's d,effect size,Monte Carlo simulation},
  number = {2},
}
% NOTE(review): journal name capitalised; issue number added (Psychonomic
% Bulletin \& Review 21(5)) — verify against the publisher record.
@article{hoekstra2014robust,
  title={Robust misinterpretation of confidence intervals},
  author={Hoekstra, Rink and Morey, Richard D and Rouder, Jeffrey N and Wagenmakers, Eric-Jan},
  journal={Psychonomic Bulletin \& Review},
  volume={21},
  number={5},
  pages={1157--1164},
  year={2014},
  publisher={Springer}
}
% NOTE(review): author field used "&" as a separator (BibTeX requires " and ")
% and misspelled "Richar"; both fixed, names in "Last, First" form; pages `--`.
@article{RefWorks:5051,
author={Petty, Richard E. and Cacioppo, John T.},
year={1986},
title={The Elaboration Likelihood Model of Persuasion},
journal={Advances in Experimental Social Psychology},
volume={19},
pages={123--205},
doi={10.1016/S0065-2601(08)60214-2}
}