#
# This file is part of TensorToolbox.
#
# TensorToolbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# TensorToolbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with TensorToolbox.  If not, see <http://www.gnu.org/licenses/>.
#
# Copyright (C) 2014-2015 The Technical University of Denmark
# Scientific Computing Section
# Department of Applied Mathematics and Computer Science
#
# Author: Daniele Bigoni
import sys
import logging

from aux import bcolors, print_ok, print_fail, print_summary
def run(maxprocs, PLOTTING=False, loglev=logging.WARNING):
    """Run the TT matrix-vector product unit tests.

    Two checks are performed, each comparing a TT-format matrix-vector
    product (``TensorToolbox.multilinalg.dot``) against the equivalent
    full (dense) computation with NumPy:

      2.1  d-dimensional finite-difference Laplace operator applied to a
           separable CP-rank-d input vector.
      2.2  randomly generated TT matrix applied to a random TT vector.

    Parameters
    ----------
    maxprocs : int or None
        Number of processes requested on the command line.
        NOTE(review): not used in the visible portion of this (garbled)
        source — confirm against the original file whether it is
        forwarded to TensorToolbox.
    PLOTTING : bool
        If True (and d == 2), draw surface plots of input/output fields.
    loglev : int
        Logging level forwarded to ``logging.basicConfig``.

    Prints an OK/FAIL line per test and a final summary; returns None.

    NOTE(review): this block was reconstructed from a paste in which many
    original lines were lost.  All visible statements and message strings
    are kept verbatim; loop scaffolding and the constants marked below
    were re-derived from context and must be confirmed against the
    original file.
    """
    logging.basicConfig(level=loglev)

    import sys
    import itertools
    import numpy as np
    import numpy.linalg as npla

    import TensorToolbox as DT
    import TensorToolbox.multilinalg as mla

    if PLOTTING:
        from matplotlib import pyplot as plt

    # Success/failure counters for the final summary.
    nsucc = 0
    nfail = 0

    #####################################################################################
    # Test matrix-vector product by computing the matrix-vector product
    #####################################################################################
    span = np.array([0.,1.])
    # NOTE(review): N, d and h were lost in the paste; these are plausible
    # defaults for this test — confirm against the original file.
    d = 2
    N = 16
    h = (span[1] - span[0]) / float(N - 1)

    # sys.stdout.write("Matrix-vector: Laplace N=%4d , d=%3d [START] \n" % (N,d))

    # Construct 2D Laplace (with 2nd order finite diff)
    D = -1./h**2. * ( np.diag(np.ones((N-1)),-1) + np.diag(np.ones((N-1)),1) + np.diag(-2.*np.ones((N)),0) )
    # One-sided treatment of the first and last rows (boundary stencils).
    D[0,0:3] = np.array([1./(3.*h**2.),-2./(3.*h**2.),1./(3.*h**2.)])
    D[-1,-3:] = -np.array([1./(3.*h**2.),-2./(3.*h**2.),1./(3.*h**2.)])
    I = np.eye(N)

    # Full Kronecker-sum Laplacian: sum_i I x ... x D x ... x I
    FULL_LAP = np.zeros((N**d,N**d))
    for i in range(d):
        tmp = np.array([[1.]])
        for j in range(d):
            if i != j: tmp = np.kron(tmp,I)
            else: tmp = np.kron(tmp,D)
        FULL_LAP += tmp

    # Construction of TT Laplace operator
    # The Kronecker sum has canonical (CP) rank d: term alpha carries D in
    # dimension alpha and the identity everywhere else.
    # D = -1./h**2. * ( np.diag(np.ones((N-1)),-1) + np.diag(np.ones((N-1)),1) + np.diag(-2.*np.ones((N)),0) )
    D_flat = D.flatten()
    I_flat = I.flatten()
    CPtmp = []
    for i in range(d):
        CPi = np.empty((d,N**2))
        for alpha in range(d):
            if i != alpha: CPi[alpha,:] = I_flat
            else: CPi[alpha,:] = D_flat
        CPtmp.append(CPi)
    CP_lap = DT.Candecomp(CPtmp)
    TT_LAP = DT.TTmat(CP_lap,nrows=N,ncols=N)

    # Construct input vector
    X = np.linspace(span[0],span[1],N)
    # NOTE(review): the definition of SIN was lost in the paste; sin(pi*x)
    # is the natural choice for a Laplace test on [0,1] — confirm.
    SIN = np.sin(np.pi*X)
    I = np.ones(N)  # name reused: now the all-ones *vector* for CP terms

    # Full input f(x) = sum_i prod_{j != i} 1(x_j) * sin(x_i)
    FULL_SIN = np.zeros((N**d))
    for i in range(d):
        tmp = np.array([1.])
        for j in range(d):
            if i != j: tmp = np.kron(tmp,I)
            else: tmp = np.kron(tmp,SIN)
        FULL_SIN += tmp

    if PLOTTING and d == 2:
        from mpl_toolkits.mplot3d import Axes3D
        from matplotlib import cm
        (XX,YY) = np.meshgrid(X,X)
        fig = plt.figure()
        ax = fig.add_subplot(221,projection='3d')
        ax.plot_surface(XX,YY,FULL_SIN.reshape((N,N)),rstride=1, cstride=1, cmap=cm.coolwarm,
                        linewidth=0, antialiased=False)
        plt.show(block=False)

    # Construct TT input vector
    CPtmp = []
    for i in range(d):
        CPi = np.empty((d,N))
        for alpha in range(d):
            if i != alpha: CPi[alpha,:] = I
            else: CPi[alpha,:] = SIN
        CPtmp.append(CPi)
    CP_SIN = DT.Candecomp(CPtmp)
    # Uniform quadrature weights per dimension (used by the weighted TT vector).
    W = [np.ones(N,dtype=float)/float(N) for i in range(d)]
    TT_SIN = DT.WTTvec(CP_SIN,W)

    if PLOTTING and d == 2:
        ax = fig.add_subplot(222,projection='3d')
        ax.plot_surface(XX,YY,TT_SIN.to_tensor(),rstride=1, cstride=1, cmap=cm.coolwarm,
                        linewidth=0, antialiased=False)
        plt.show(block=False)

    # Apply full laplacian
    FULL_RES = np.dot(FULL_LAP,FULL_SIN)
    if PLOTTING and d == 2:
        ax = fig.add_subplot(223,projection='3d')
        ax.plot_surface(XX,YY,FULL_RES.reshape((N,N)),rstride=1, cstride=1, cmap=cm.coolwarm,
                        linewidth=0, antialiased=False)
        plt.show(block=False)

    # Apply TT laplacian
    TT_RES = mla.dot(TT_LAP,TT_SIN)
    if PLOTTING and d == 2:
        ax = fig.add_subplot(224,projection='3d')
        ax.plot_surface(XX,YY,TT_RES.to_tensor(),rstride=1, cstride=1, cmap=cm.coolwarm,
                        linewidth=0, antialiased=False)
        plt.show(block=False)

    if not np.allclose(FULL_RES,TT_RES.to_tensor().flatten()):
        nfail += 1
        print_fail("2.1 Matrix-vector: Laplace N=%4d , d=%3d" % (N,d))
    else:
        nsucc += 1
        print_ok("2.1 Matrix-vector: Laplace N=%4d , d=%3d" % (N,d))

    #####################################################################################
    # Test matrix-vector product by computing the matrix-vector product of randomly generated input
    #####################################################################################
    span = np.array([0.,1.])
    # NOTE(review): d, nrows and ncols were lost in the paste; plausible
    # defaults — confirm against the original file.
    d = 3
    nrows = 4
    ncols = 4
    if isinstance(nrows,int): nrows = [nrows for i in range(d)]
    if isinstance(ncols,int): ncols = [ncols for i in range(d)]

    # sys.stdout.write("Matrix-vector: Random\n  nrows=[%s],\n  ncols=[%s], d=%3d [START] \n" % (','.join(map(str,nrows)),','.join(map(str,ncols)),d))

    # Construction of TT random matrix
    TT_RAND = DT.randmat(d,nrows,ncols)

    # Construct FULL random tensor
    FULL_RAND = TT_RAND.to_tensor()
    # TT matrices store indices interleaved (r1,c1,r2,c2,...); permute to
    # (rows..., cols...) and flatten into an ordinary 2D matrix.
    rowcol = list(itertools.chain(*[[ri,ci] for (ri,ci) in zip(nrows,ncols)]))
    FULL_RAND = np.reshape(FULL_RAND,rowcol)
    # list(...) required on Python 3, where range is not a list (bug fix).
    idxswap = list(range(0,2*d,2))
    idxswap.extend(range(1,2*d,2))
    FULL_RAND = np.transpose(FULL_RAND,axes=idxswap)
    FULL_RAND = np.reshape(FULL_RAND,(np.prod(nrows),np.prod(ncols)))

    # Construct TT random vector
    TT_VEC = DT.randvec(d,ncols)

    # Construct FULL random vector
    FULL_VEC = TT_VEC.to_tensor().flatten()

    # Apply TT matrix-vector product
    TT_RES = mla.dot(TT_RAND,TT_VEC)

    # Apply FULL matrix-vector product
    FULL_RES = np.dot(FULL_RAND,FULL_VEC)

    if not np.allclose(FULL_RES,TT_RES.to_tensor().flatten()):
        nfail += 1
        print_fail("2.2 Matrix-vector: Random N=%4d , d=%3d" % (N,d),'')
    else:
        nsucc += 1
        print_ok("2.2 Matrix-vector: Random N=%4d , d=%3d" % (N,d))

    print_summary("TT Matrix-Vector", nsucc, nfail)
if __name__ == "__main__":
    # Number of processors to be used, defined as an additional argument
    #   $ python TestTT.py N
    # Mind that the program in this case will run slower than the non-parallel case
    # due to the overhead for the creation and deletion of threads.
    #
    # Default to serial execution; without this, running the script with no
    # argument would raise NameError at the run(...) call below.
    maxprocs = None
    if len(sys.argv) == 2:
        maxprocs = int(sys.argv[1])

    run(maxprocs,PLOTTING=True, loglev=logging.INFO)