# install.packages("MASS") - run this line if you do not have the MASS package
library(MASS)
Assumptions: The population has a normal N(mu, sigma) distribution with unknown mean mu and known standard deviation sigma.
Formula: The sample mean x' of n observations from the N(mu, sigma) distribution has a normal distribution N(mu, sigma/sqrt(n)).
A level C confidence interval for the population mean mu is [x' - m; x' + m], where x' is the sample mean.
The margin of error for mu is m = z*sigma/sqrt(n), where z is the critical value of the standard normal distribution N(0,1) such that the area between -z and z equals C.
C = 90% z = 1.645
C = 95% z = 1.960
C = 99% z = 2.576
How to obtain z at a specified C:
C <- 0.5
qnorm(C) #gives a value z such that P(X < z) = C
## [1] 0
#to find z for the interval in between -z and z
C <- 0.9
qnorm(C + (1-C)/2) #gives a value of z such that P(-z < X < z) = C
## [1] 1.644854
Q1.1. Find the critical values -z and z such that the central area under the standard normal curve between them is 99.99%.
Q1.2. The average credit card balance for a random sample of 1200 graduates was $3173, with a median of $1645 and sigma of $3500. Does this sample come from a normal distribution?
How to compute the 95% confidence interval for the population mean.
z <- qnorm(0.975)
sigma <- 3500
n <- 1200
m <- z*sigma/sqrt(n) #margin of error
sample_mean <- 3173
left_interval <- sample_mean - m
right_interval <- sample_mean + m
The 95% confidence interval for the average (mean) credit card debt for all graduates is between $2975 and $3371.
Let's draw it in a graph.
plot(sample_mean,2,xlim=c(2500,4000),ylim=c(0,12),axes=F)
axis(1)
segments(left_interval, 2, right_interval, 2, lwd=4)
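For the exercises below it can be convenient to wrap this computation in a small helper function (a sketch; the name ci_mean_z is just illustrative):
ci_mean_z <- function(sample_mean, sigma, n, conf = 0.95) {
  z <- qnorm(conf + (1 - conf)/2)  # central critical value
  m <- z * sigma / sqrt(n)         # margin of error
  c(lower = sample_mean - m, upper = sample_mean + m)
}
ci_mean_z(3173, 3500, 1200)        # reproduces the 95% interval computed above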
Q1.3. Compute and draw the 90%, 99% and 99.9% confidence intervals for the population mean from the sample of Q1.2. How does the interval change when the confidence level increases?
sample_mean <- 3173
sigma <- 3500
n <- 1200
m90 <- qnorm(0.95) * sigma/sqrt(n) #margin of error for the 90% confidence interval
m99 <- qnorm(0.995) * sigma/sqrt(n) #margin of error for the 99% confidence interval
m999 <- qnorm(0.9995) * sigma/sqrt(n) #margin of error for the 99.9% confidence interval
left_interval <- sample_mean - m90
right_interval <- sample_mean + m90
y <- 1
points(sample_mean, y)
segments(left_interval, y, right_interval, y, lwd=4)
left_interval <- sample_mean - m99
right_interval <- sample_mean + m99
y <- 3
points(sample_mean, y)
segments(left_interval, y, right_interval, y, lwd=4)
left_interval <- sample_mean - m999
right_interval <- sample_mean + m999
y <- 4
points(sample_mean, y)
segments(left_interval, y, right_interval, y, lwd=4)
Q1.4. Compute and draw the 90%, 95%, 99% and 99.9% confidence intervals for the population mean from the sample of Q1.2, but with n=300. How do the intervals change in comparison with n=1200?
sample_mean <- 3173
sigma <- 3500
n <- 300
m90 <- qnorm(0.95) * sigma/sqrt(n) #margin of error for the 90% confidence interval
m95 <- qnorm(0.975) * sigma/sqrt(n) #margin of error for the 95% confidence interval
m99 <- qnorm(0.995) * sigma/sqrt(n) #margin of error for the 99% confidence interval
m999 <- qnorm(0.9995) * sigma/sqrt(n) #margin of error for the 99.9% confidence interval
left_interval <- sample_mean - m90
right_interval <- sample_mean + m90
y <- 5
points(sample_mean, y)
segments(left_interval, y, right_interval, y, lwd=4, col="red")
left_interval <- sample_mean - m95
right_interval <- sample_mean + m95
y <- 6
points(sample_mean, y)
segments(left_interval, y, right_interval, y, lwd=4, col="red")
left_interval <- sample_mean - m99
right_interval <- sample_mean + m99
y <- 7
points(sample_mean, y)
segments(left_interval, y, right_interval, y, lwd=4, col="red")
left_interval <- sample_mean - m999
right_interval <- sample_mean + m999
y <- 8
points(sample_mean, y)
segments(left_interval, y, right_interval, y, lwd=4, col="red")
Q1.5. Compute the 90%, 95%, 99%, and 99.9% confidence intervals for the population mean from the sample of Q1.2, but with a smaller sigma=$2000, n=1200.
sample_mean <- 3173
sigma <- 2000
n <- 1200
m90 <- qnorm(0.95) * sigma/sqrt(n) #margin of error for the 90% confidence interval
m95 <- qnorm(0.975) * sigma/sqrt(n) #margin of error for the 95% confidence interval
m99 <- qnorm(0.995) * sigma/sqrt(n) #margin of error for the 99% confidence interval
m999 <- qnorm(0.9995) * sigma/sqrt(n) #margin of error for the 99.9% confidence interval
left_interval <- sample_mean - m90
right_interval <- sample_mean + m90
y <- 9
points(sample_mean, y)
segments(left_interval, y, right_interval, y, lwd=4, col="blue")
left_interval <- sample_mean - m95
right_interval <- sample_mean + m95
y <- 10
points(sample_mean, y)
segments(left_interval, y, right_interval, y, lwd=4, col="blue")
left_interval <- sample_mean - m99
right_interval <- sample_mean + m99
y <- 11
points(sample_mean, y)
segments(left_interval, y, right_interval, y, lwd=4, col="blue")
left_interval <- sample_mean - m999
right_interval <- sample_mean + m999
y <- 12
points(sample_mean, y)
segments(left_interval, y, right_interval, y, lwd=4, col="blue")
Q1.6. If we are designing a survey to estimate the average credit card debt among graduates as in Q1.2 (sigma = $3500) and want the margin of error to be $150 with 95% confidence, what sample size n do we need?
sigma <- 3500
m <- 150
z <- qnorm(0.975)
n <- (z * sigma / m)^2
ceiling(n) # round the required sample size up
Assumptions: When n is large, the sample proportion p' has an approximately Normal distribution with mean p and standard deviation sqrt(p(1-p)/n).
Formula: n = (z/m)^2 * p(1-p), where p is a guessed value for the proportion. The margin of error m is largest when p = 0.5, so p = 0.5 gives a conservative estimate of the sample size (possibly larger than needed).
Q1.7. You design a survey assessing the proportion of people satisfied versus unsatisfied with some organizational policy. At 95% confidence and a margin of error <= 5%, find the required sample size n (always round n up).
p <- 0.5
m <- 0.05
z <- qnorm(0.975)
n <- (z/m)^2 * p * (1-p)
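A small helper (the name n_prop is just illustrative) wraps this formula, rounds up, and can be reused for Q1.8 and Q1.9:
n_prop <- function(m, p = 0.5, conf = 0.95) {
  z <- qnorm(conf + (1 - conf)/2)  # central critical value
  ceiling((z/m)^2 * p * (1 - p))   # always round the sample size up
}
n_prop(0.05)                       # reproduces the n computed above, after rounding up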
Q1.8. Find n at m <= 3% and m <= 1%.
Q1.9. In the above problem, how will n change if a preliminary study gave p'=0.2 and we want m <= 5%?
Q1.10. Build a graph showing how the margin of error m of a 95% confidence interval depends on p at different n.
z <- qnorm(0.975)
p <- c(0, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 1)
n <- 50
m <- z * sqrt(p*(1-p)/n)
plot(p,m, xlim=c(0,1.0), ylim=c(0,0.15), axes=F)
axis(1, at = seq(0, 1.1, by = 0.1))
axis(2, at = seq(0, 0.15, by = 0.01))
lines(p,m, lwd=4)
n <- 100
m <- z * sqrt(p*(1-p)/n)
lines(p,m, lwd=4, col="red")
n <- 200
m <- z * sqrt(p*(1-p)/n)
lines(p,m, lwd=4, col="blue")
n <- 500
m <- z * sqrt(p*(1-p)/n)
lines(p,m, lwd=4, col="darkgreen")
n <- 2000
m <- z * sqrt(p*(1-p)/n)
lines(p,m, lwd=4, col="magenta")
“What we actually call type I or type II error depends directly on the null hypothesis. Negation of the null hypothesis causes type I and type II errors to switch roles.
The goal of the test is to determine if the null hypothesis can be rejected. A statistical test can either reject or fail to reject a null hypothesis, but never prove it true.” (Wikipedia)
Reminder: The one-sample t test is used for inference about a population mean mu based on the sample mean x', assuming that the distribution is normal and that its standard deviation is estimated by the sample standard deviation s. The two-sample t test is used for inference about the difference between two population means. (A minimal t.test sketch follows the guidelines below.)
If n >= 40, the t test is suitable even for skewed distributions.
If n >= 15, it is acceptable as long as there are no outliers and only slight skewness.
If n < 15, the t test can be used for normally distributed data with no outliers.
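A minimal sketch of how t.test is called in R; the numbers below are made up purely for illustration:
before <- c(5.1, 4.8, 5.6, 5.0, 4.9, 5.3, 5.2, 4.7)      # made-up "before" measurements
after  <- c(5.6, 5.0, 5.9, 5.4, 5.1, 5.8, 5.5, 5.0)      # made-up "after" measurements
t.test(after - before, mu = 0, alternative = "greater")  # one-sample t test on paired differences
t.test(after, before, var.equal = TRUE)                  # two-sample t test with pooled variance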
Q2.1. The effect of a treatment was tested on 20 subjects (some parameter W was measured before and after treatment). The average difference was x' = 2.43 and the standard deviation was s = 1.46 (for a safe estimate it is recommended to round it up; let's take s = 1.5).
The hypotheses are H0: mu = 0 vs. Ha: mu > 0, and we want the power against the specific alternative mu = 1.0, at alpha = 0.05.
The t test with n observations rejects H0 at the 5% significance level if the t statistic t = ( x' - 0) / (s/sqrt(n)) exceeds the critical value for the t-distribution at df=19 (= n-1).
qt(c(0.025, 0.975), df=19) # two-sided
## [1] -2.093024  2.093024
t <- abs(qt(0.95,df=19)) # one-sided
t
## [1] 1.729133
The test rejects H0 when x' / (s/sqrt(n)) >= t, that is, when x' >= t*s/sqrt(n).
n <- 20
s <- 1.5
x <- t*s/sqrt(n) # the smallest sample mean for which H0 is rejected
The power is the probability that x' >= x when mu = 1.0: P( (x' - 1.0) / (1.5/sqrt(20)) >= (x - 1.0) / (1.5/sqrt(20)) ) = P( Z >= -1.25 ).
power <- 1 - pnorm(-1.25)
power
## [1] 0.8943502
This is the power to detect an increase of 1.0 in the measured parameter W.
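As a cross-check, the exact noncentral-t calculation in stats::power.t.test gives a very similar value to the normal approximation used above:
power.t.test(n = 20, delta = 1.0, sd = 1.5, sig.level = 0.05,
             type = "one.sample", alternative = "one.sided")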
Q2.2. If the sample size is increased to n=100, what will be the power of the test to detect an alternative increase of W of 1.0 (same as in Q2.1)?
n <- 100
t <- abs(qt(0.95,df=n-1)) # one-sided
s <- 1.5
x <- t*s/sqrt(n)
muu <- 1.0
z <- (x - muu) / ( s / sqrt(n))
power <- 1 - pnorm(z)
Q2.3. With n=100, at the 5% significance level and a power of 95%, what increase of the parameter W can be detected?
power <- 0.95
z <- qnorm(1 - power)
muu <- x - z * (s / sqrt(n))
Q2.4. An experiment was carried out to study whether calcium intake reduces blood pressure. The calcium-treated group consisted of n1=10 subjects, with an average decrease in blood pressure of x1=5.000 and s1=8.743. The placebo group: n2=11, x2=-0.273, s2=5.901. Both samples were approximately normally distributed and had no outliers (the assumptions for carrying out the t test with n < 15). The pooled sample standard deviation was s = 7.385. The P-value was 0.059, meaning that there was not enough evidence to reject the null hypothesis that calcium intake does not lower blood pressure.
Now a new experiment is planned that should provide convincing evidence for the effect of calcium intake, at the 1% significance level, for an alternative difference in means of 5.0. The sizes of both groups have been increased to 45: n1=45, n2=45. What is the power of this test?
n1 <- 45
n2 <- 45
s <- 7.385 #the pooled sample standard deviation
muu <- 5.0 #the alternative difference in means
df <- n1 + n2 - 2 # the degrees of freedom
alpha <- 0.01
delta <- muu / (s * sqrt( (1/n1) + (1/n2) )) # the non-centrality parameter
t <- abs(qt(1 - alpha, df=df)) # one-sided critical value of t distribution
power <- 1 - pt(t, df, delta)
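As a cross-check, stats::power.t.test performs the same noncentral-t calculation and should give the same value:
power.t.test(n = 45, delta = 5.0, sd = 7.385, sig.level = 0.01,
             type = "two.sample", alternative = "one.sided")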
Q2.5. How would the power of the above test change at the 5% significance level and a reduced group size of n1 = n2 = 30?
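One way to answer this is to reuse the Q2.4 code with the new settings:
n1 <- 30
n2 <- 30
s <- 7.385 #the pooled sample standard deviation
muu <- 5.0 #the alternative difference in means
df <- n1 + n2 - 2 # the degrees of freedom
alpha <- 0.05
delta <- muu / (s * sqrt( (1/n1) + (1/n2) )) # the non-centrality parameter
t <- abs(qt(1 - alpha, df=df)) # one-sided critical value of t distribution
power <- 1 - pt(t, df, delta)
power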
## [1] 0.8280672
(from http://www.ats.ucla.edu/stat/r/dae/t_test_power3.htm)
Q2.6. “A company markets an eight week long weight loss program and claims that at the end of the program on average a participant will have lost 5 pounds. On the other hand, you have studied the program and you believe that their program is scientifically unsound and shouldn't work at all. With some limited funding at hand, you want to test the hypothesis that the weight loss program does not help people lose weight. Your plan is to get a random sample of people and put them on the program. You will measure their weight at the beginning of the program and then measure their weight again at the end of the program. Based on some previous research, you believe that the standard deviation of the weight difference over eight weeks will be 5 pounds. You now want to know how many people you should enroll in the program to test your hypothesis.”
#install.packages("pwr")
library(pwr)
mu_0 <- 0 # mu for H0
mu_a <- 5 # mu for Ha
s <- 5 # standard deviation for the difference in means
power <- 0.8
alpha <- 0.05
d <- (mu_0 - mu_a) / s # the standardized effect size (Cohen's d)
#calculates sample size n
test <- pwr.t.test(d=d,power=power,sig.level=alpha,type="paired",alternative="two.sided")
test
##
## Paired t test power calculation
##
## n = 9.93785
## d = 1
## sig.level = 0.05
## power = 0.8
## alternative = two.sided
##
## NOTE: n is number of *pairs*
#n is
test$n
## [1] 9.93785
#calculates power for a given sample size n
n <- 35
test <- pwr.t.test(d=d,n=n,sig.level=alpha,type="paired",alternative="two.sided")
#power
test$power
## [1] 0.9999224
Q2.7. For the above experiment, calculate n for power = 0.95, s = 10 and alpha=0.00001
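A possible sketch for Q2.7, following the same pwr.t.test pattern as above (the parameter values come from the question):
mu_0 <- 0 # mu for H0
mu_a <- 5 # mu for Ha
s <- 10   # the larger standard deviation
d <- (mu_0 - mu_a) / s # standardized effect size
pwr.t.test(d=d, power=0.95, sig.level=0.00001, type="paired", alternative="two.sided")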
Q2.8. For the above experiment, draw power curves (power vs. effect size) for different n = (5,15,25,35,45).
n <- seq(5, 50, by=10)
alpha <- 0.05
ds <- seq(0, 1, length.out=10) # effect sizes d for t-Test
get_power <- function(n){
test <- pwr.t.test(d=ds,n=n,sig.level=alpha,type="paired",alternative="two.sided")
test$power
}
ps <- sapply(n, get_power)
par(mfrow = c(1,1))
colors <- c("black", "blue", "red", "darkgreen", "magenta")
matplot(ds, ps, type="l", lty=1, lwd=2, xlab="Effect size d = |mu1 - mu2| / sigma", ylab="Power", main="Power of the paired two-sample t-Test (alpha=0.05, two-tailed)", xaxs="i", xlim=c(-0.05, 1.1), col=colors)
legend(x="bottomright", legend=paste("N =", n), lwd=2, col= colors)
Q2.9. Solve Problem 3.4 from Lecture 4 in R.
alpha <- 0.05
d <- 2/20
power <- 0.85
pwr.t.test(d=d,power=power,sig.level=alpha,type="paired",alternative="greater")
##
## Paired t test power calculation
##
## n = 720.2848
## d = 0.1
## sig.level = 0.05
## power = 0.85
## alternative = greater
##
## NOTE: n is number of *pairs*
Let's examine a randomly generated sample from a normal distribution.
set.seed(42)
x <- rnorm(100)
shapiro.test(x)
##
## Shapiro-Wilk normality test
##
## data: x
## W = 0.98122, p-value = 0.1654
qqnorm(x)
qqline(x, col = "red", lwd=3)
Q3.1. Repeat the above procedure for the sample of size 10000 with mu=3 and sigma = 2.
set.seed(42)
x <- rnorm(10000, mean = 3, sd = 2)
qqnorm(x)
qqline(x, col = "red", lwd=3)
shapiro.test(x)
## Error in shapiro.test(x): sample size must be between 3 and 5000
ks.test(x,"pnorm",mean(x),sqrt(var(x)))
##
## One-sample Kolmogorov-Smirnov test
##
## data: x
## D = 0.0060756, p-value = 0.8541
## alternative hypothesis: two-sided
#install.packages("moments")
library(moments)
# skewness and kurtosis; for a normal distribution they should be around 0 and 3, respectively
skewness(x)
## [1] 0.00601486
kurtosis(x)
## [1] 2.99009
#install.packages("car")
library(car)
## Warning: package 'car' was built under R version 3.2.4
#qqplot with confidence interval
qqPlot(x)
Q3.2. Test normality of random samples of size 100 taken from a Gamma distribution; use rgamma(n, shape, scale = scale) for shape = 1, 2 and scale = 1, 2 (note that the third positional argument of rgamma is rate, so pass scale by name).
shape <- 1
scale <- 2
x <- rgamma(100, shape, scale = scale)
hist(x)
qqPlot(x) #library(car)
skewness(x)
## [1] 1.118146
kurtosis(x)
## [1] 3.713509
shapiro.test(x)
##
## Shapiro-Wilk normality test
##
## data: x
## W = 0.88468, p-value = 2.879e-07
Q3.3. Log-transformation of right-skewed data (long right tail; positive skewness). Apply a log-transformation (try log, log10, log2) to a gamma sample (shape = 3, scale = 2) and run normality tests on the transformed data. Which transformation is best according to the Shapiro test? According to qqPlot(x)?
shape <- 3
scale <- 2
x <- rgamma(100, shape, scale = scale)
x <- log(x + 1) # try also log10, log2
hist(x)
qqPlot(x) #library(car)
skewness(x)
## [1] -0.1056408
kurtosis(x)
## [1] 2.623482
shapiro.test(x)
##
## Shapiro-Wilk normality test
##
## data: x
## W = 0.99124, p-value = 0.764
#install.packages("e1071")
library(e1071)
##
## Attaching package: 'e1071'
## The following objects are masked from 'package:moments':
##
## kurtosis, moment, skewness
library(datasets)
data(airquality)
par(mfrow = c(3,1))
x <- airquality$Ozone
x <- x[!is.na(x)]
skewness(x)
## [1] 1.209866
hist(x)
shapiro.test(x)
##
## Shapiro-Wilk normality test
##
## data: x
## W = 0.87867, p-value = 2.79e-08
ks.test(x,"pnorm",mean(x),sqrt(var(x)))
## Warning in ks.test(x, "pnorm", mean(x), sqrt(var(x))): ties should not be
## present for the Kolmogorov-Smirnov test
##
## One-sample Kolmogorov-Smirnov test
##
## data: x
## D = 0.14799, p-value = 0.01243
## alternative hypothesis: two-sided
y1 <- log(x+1)
skewness(y1)
## [1] -0.320222
hist(y1)
shapiro.test(y1)
##
## Shapiro-Wilk normality test
##
## data: y1
## W = 0.98199, p-value = 0.1219
ks.test(y1,"pnorm",mean(y1),sqrt(var(y1)))
## Warning in ks.test(y1, "pnorm", mean(y1), sqrt(var(y1))): ties should not
## be present for the Kolmogorov-Smirnov test
##
## One-sample Kolmogorov-Smirnov test
##
## data: y1
## D = 0.06415, p-value = 0.7263
## alternative hypothesis: two-sided
skew.score <- function(c, x) (skewness(log(x + c)))^2 # squared skewness of log(x + c)
coef <- optimise(skew.score, c(0, 20), x = x)$minimum # the shift c that makes log(x + c) least skewed
y2 <- log(x + coef)
skewness(y2)
## [1] -4.930632e-08
hist(y2)
shapiro.test(y2)
##
## Shapiro-Wilk normality test
##
## data: y2
## W = 0.98504, p-value = 0.2263
ks.test(y2,"pnorm",mean(y2),sqrt(var(y2)))
## Warning in ks.test(y2, "pnorm", mean(y2), sqrt(var(y2))): ties should not
## be present for the Kolmogorov-Smirnov test
##
## One-sample Kolmogorov-Smirnov test
##
## data: y2
## D = 0.074619, p-value = 0.5382
## alternative hypothesis: two-sided
qqPlot(x)
qqPlot(y1)
qqPlot(y2)
par(mfrow = c(1,1))
x <- c(8.11, 8.11, 8.09, 8.08, 8.06, 8.02, 7.99, 7.99, 7.97, 7.95, 7.92, 7.92, 7.92, 7.89, 7.87, 7.84, 7.79, 7.79, 7.77, 7.76, 7.72, 7.71, 7.66, 7.62, 7.61, 7.59, 7.55, 7.53, 7.50, 7.50, 7.42, 7.38, 7.38, 7.26, 7.25, 7.08, 6.96, 6.84, 6.55) # London 2012 Olympics, men's jumps (left-skewed data)
par(mfrow = c(3,1))
skewness(x)
## [1] -1.052988
hist(x)
shapiro.test(x)
##
## Shapiro-Wilk normality test
##
## data: x
## W = 0.90884, p-value = 0.003995
ks.test(x,"pnorm",mean(x),sqrt(var(x)))
## Warning in ks.test(x, "pnorm", mean(x), sqrt(var(x))): ties should not be
## present for the Kolmogorov-Smirnov test
##
## One-sample Kolmogorov-Smirnov test
##
## data: x
## D = 0.11491, p-value = 0.6817
## alternative hypothesis: two-sided
y1 <- x^2 # squaring, a power transformation that reduces left skew
skewness(y1)
## [1] -0.9371294
hist(y1)
shapiro.test(y1)
##
## Shapiro-Wilk normality test
##
## data: y1
## W = 0.92177, p-value = 0.009872
ks.test(y1,"pnorm",mean(y1),sqrt(var(y1)))
## Warning in ks.test(y1, "pnorm", mean(y1), sqrt(var(y1))): ties should not
## be present for the Kolmogorov-Smirnov test
##
## One-sample Kolmogorov-Smirnov test
##
## data: y1
## D = 0.10755, p-value = 0.7578
## alternative hypothesis: two-sided
y2 <- x^10 # a stronger power transformation
hist(y2)
shapiro.test(y2)
##
## Shapiro-Wilk normality test
##
## data: y2
## W = 0.96987, p-value = 0.3721
ks.test(y2,"pnorm",mean(y2),sqrt(var(y2)))
## Warning in ks.test(y2, "pnorm", mean(y2), sqrt(var(y2))): ties should not
## be present for the Kolmogorov-Smirnov test
##
## One-sample Kolmogorov-Smirnov test
##
## data: y2
## D = 0.089004, p-value = 0.9168
## alternative hypothesis: two-sided
qqPlot(x)
qqPlot(y1)
qqPlot(y2)
par(mfrow = c(1,1))
lambda <- 4
x <- rpois(100, lambda) # a discrete (Poisson) sample, for comparison: clearly non-normal
hist(x)
qqPlot(x) #library(car)
See also http://stats.stackexchange.com/questions/101274/how-to-interpret-a-qq-plot?rq=1
(adapted from http://blog.minitab.com/blog/adventures-in-statistics/choosing-between-a-nonparametric-test-and-a-parametric-test and http://blog.minitab.com/blog/statistics-and-quality-data-analysis/truth-beauty-nonparametrics-and-symmetry-plots):
Problem 4.6 of the lecture (10 patients: 5 reduced weight, 3 gained). Do these data suggest that the weight-control treatment works (that is, can we reject the null hypothesis that there is no difference in patients' weight before and after treatment)?
#install.packages("BSDA")
library(BSDA)
## Loading required package: lattice
##
## Attaching package: 'BSDA'
## The following objects are masked from 'package:car':
##
## Vocab, Wool
## The following object is masked from 'package:datasets':
##
## Orange
x1 <- c(200,202,194,188,166,196,180,188,180,210)
x2 <- c(202,204,167,192,166,190,176,182,180,202)
x <- x2 - x1
hist(x)
size <- sum(x != 0) # zero differences (ties) are excluded from the sign test
win <- sum(x < 0)   # number of patients who lost weight
p <- 1 - pbinom(win - 1, size, p=0.5) # P(at least this many losses under H0); matches SIGN.test below
SIGN.test(x, md=0, alternative = "less")
##
## One-sample Sign-Test
##
## data: x
## s = 3, p-value = 0.3633
## alternative hypothesis: true median is less than 0
## 95 percent confidence interval:
## -Inf 2
## sample estimates:
## median of x
## -2
## Conf.Level L.E.pt U.E.pt
## Lower Achieved CI 0.9453 -Inf 2
## Interpolated CI 0.9500 -Inf 2
## Upper Achieved CI 0.9893 -Inf 2
There is not enough evidence to reject H0.
Q4.1. Test H0 that the weight in group x1 is above 180 pounds. Do we have enough evidence to reject this H0?
hist(x1)
size <- sum(x1 != 180) # values equal to 180 (ties) are excluded
win <- sum(x1 > 180)
p <- pbinom(win, size, p=0.5) # matches the SIGN.test p-value below
SIGN.test(x1 - 180, md=0, alternative = "less")
##
## One-sample Sign-Test
##
## data: x1 - 180
## s = 7, p-value = 0.9961
## alternative hypothesis: true median is less than 0
## 95 percent confidence interval:
## -Inf 20.21333
## sample estimates:
## median of x
## 11
## Conf.Level L.E.pt U.E.pt
## Lower Achieved CI 0.9453 -Inf 20.0000
## Interpolated CI 0.9500 -Inf 20.2133
## Upper Achieved CI 0.9893 -Inf 22.0000
Q4.2. Test H0 that the weight in group x2 is below 167 pounds. Do we have enough evidence to reject this H0?
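A call that reproduces the output displayed below (note that it uses x1 - 167; to test group x2 one would pass x2 - 167 instead):
SIGN.test(x1 - 167, md=0, alternative = "greater")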
##
## One-sample Sign-Test
##
## data: x1 - 167
## s = 9, p-value = 0.01074
## alternative hypothesis: true median is greater than 0
## 95 percent confidence interval:
## 13 Inf
## sample estimates:
## median of x
## 24
## Conf.Level L.E.pt U.E.pt
## Lower Achieved CI 0.9453 13 Inf
## Interpolated CI 0.9500 13 Inf
## Upper Achieved CI 0.9893 13 Inf
Q4.3. Test H0 that the median of the weight difference is 0. For this two-sided test, the null hypothesis is that the numbers of + and - signs are equal, and the alternative hypothesis is that they are not equal. Do we have enough evidence to reject this H0?
hist(x)
median <- 0
SIGN.test(x - median, md=0, alternative = "two.sided")
##
## One-sample Sign-Test
##
## data: x - median
## s = 3, p-value = 0.7266
## alternative hypothesis: true median is not equal to 0
## 95 percent confidence interval:
## -7.351111 2.000000
## sample estimates:
## median of x
## -2
## Conf.Level L.E.pt U.E.pt
## Lower Achieved CI 0.8906 -6.0000 2
## Interpolated CI 0.9500 -7.3511 2
## Upper Achieved CI 0.9785 -8.0000 2
Wilcoxon signed rank test (wilcox.test runs with a continuity correction by default; it can be switched off with correct=FALSE).
H0: the differences from the hypothesized median (one-sample test) or the paired differences (paired test) follow a distribution that is symmetric around zero.
Consider Problem 4.8 from Lecture 4.
x1 <- c(125.3, 101, 117.2, 133.7, 96.4, 124.5, 118.7, 106.2, 116.3, 120.2, 125, 128.8)
x2 <- c(127.3, 120.2, 126.2, 125.4, 115.1, 118.5, 135.5, 118.2, 122.9, 120.1, 120.8, 130.7)
df <- data.frame(cbind(x1,x2))
boxplot(df)
boxplot(x2-x1)
wilcox.test(x1,x2, paired=T, alternative="less")
##
## Wilcoxon signed rank test
##
## data: x1 and x2
## V = 17, p-value = 0.04614
## alternative hypothesis: true location shift is less than 0
wilcox.test(x1 - x2, alternative="less")
##
## Wilcoxon signed rank test
##
## data: x1 - x2
## V = 17, p-value = 0.04614
## alternative hypothesis: true location is less than 0
wilcox.test(x2 - x1, alternative="greater")
##
## Wilcoxon signed rank test
##
## data: x2 - x1
## V = 61, p-value = 0.04614
## alternative hypothesis: true location is greater than 0
H0 is rejected at the 5% significance level.
Q4.4. A physiological parameter was measured in 9 patients at the first (x) and second (y) visits after initiation of a therapy. Do we have enough evidence to reject H0 that the therapy, which was intended to lower this parameter, did not work?
x <- c(1.83, 0.50, 1.62, 2.48, 1.68, 1.88, 1.55, 3.06, 1.30)
y <- c(0.878, 0.647, 0.598, 2.05, 1.06, 1.29, 1.06, 3.14, 1.29)
df <- data.frame(cbind(x,y))
boxplot(df)
wilcox.test(y-x, alternative = "less")
##
## Wilcoxon signed rank test
##
## data: y - x
## V = 5, p-value = 0.01953
## alternative hypothesis: true location is less than 0
H0 is rejected at the 5% significance level (p-value < 0.02).
Wilcoxon rank sum test (Mann-Whitney test; wilcox.test runs with a continuity correction by default, which can be switched off with correct=FALSE). H0: the two samples come from the same population.
Q4.5. In two independent groups of pregnant women the level of some hormone was measured at two different periods of pregnancy (x and y). H0: the hormone level is the same; Ha: the hormone level is greater in the second group.
x <- c(0.80, 0.83, 1.89, 1.04, 1.45, 1.38, 1.91, 1.64, 0.73, 1.46)
y <- c(1.15, 0.88, 0.90, 0.74, 1.21)
df <- data.frame(cbind(x,y))
boxplot(df)
wilcox.test(x, y, alternative = "greater")
##
## Wilcoxon rank sum test
##
## data: x and y
## W = 35, p-value = 0.1272
## alternative hypothesis: true location shift is greater than 0
There is not enough evidence to reject H0 at the 5% significance level.