For \code{M = 1}, the results will be generally different,
mainly due to the different way the knots are selected.

The vector cubic smoothing spline which \code{s()} represents is
computationally demanding for large \eqn{M}.
The cost is approximately \eqn{O(M^3)}.

Yet to be done: return the \emph{unscaled} smoothing parameters.
nn = 20; x = 2 + 5*(nn:1)/nn
x[2:4] = x[5:7] # Allow duplication
y1 = sin(x) + rnorm(nn, sd = 0.13)
y2 = cos(x) + rnorm(nn, sd = 0.13)
y3 = 1 + sin(x) + rnorm(nn, sd = 0.13) # Run this for constraints
y = cbind(y1, y2, y3)
ww = cbind(rep(3,nn), 4, (1:nn)/nn)
mat = matrix(c(1,0,1, 0,1,0), 3, 2)
(fit2 = vsmooth.spline(x, y, w = ww, df = 5, iconstr = mat, xconstr = mat))
# The 1st and 3rd functions do differ by a constant:
mycols = c("orange", "blue", "orange")
\dontrun{ plot(fit2, lcol = mycols, pcol = mycols, las = 1) }
p = predict(fit, x = fit@x, deriv = 0)
max(abs(fit@y - with(p, y))) # Should be zero
ux = seq(1, 8, len = 100)
p = predict(fit, x = ux, deriv = d)
\dontrun{with(p, matplot(x, y, type = "l", main = paste("deriv =", d),
                         lwd = 2, ylab = "", cex.axis = 1.5,
                         cex.lab = 1.5, cex.main = 1.5)) }
\keyword{regression}