FrontISTR  5.7.0
Large-scale structural analysis program with the finite element method
m_pds.f90
1 !-------------------------------------------------------------------------------
2 ! Copyright (c) 2019 FrontISTR Commons
3 ! This software is released under the MIT License, see LICENSE.txt
4 !-------------------------------------------------------------------------------
5 
6 ! module for Parallel Direct Solver
7 module m_pds
8 
9 use m_cclsmatrix
10 
11 implicit none
12 
13 private
14 
15 public sp_lineq ! entry point of Parallel Direct Solver
16 
17 
18 ! required global information for solver !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
19 
20 ! for trunk and leaf solve
21 integer :: m_pds_neqns ! size of the given left-hand side sparse matrix.
22 integer :: m_pds_ndeg ! degrees of freedom of each element in the sparse matrix.
23 
24 ! for trunksolve
25 integer :: m_pds_neqns1 ! size of divided sparse matrix a1.
26 integer :: m_pds_neqns2 ! size of divided sparse matrix a2.
27 integer :: m_pds_neqnsd ! number of columns of connection sparse matrix c. Given WITHOUT ndeg multiplied.
28 integer :: m_pds_nd ! size of connection dense matrix d. Given WITH ndeg already multiplied.
29 real(8), pointer :: m_pds_d(:,:) ! Connection dense matrix for trunksolve. Size is (nd,nd).
30 type(ccls_matrix), pointer :: m_pds_c1, m_pds_c2 ! Connection matrices. Sizes are (neqns1, neqnsd) and (neqns2, neqnsd).
31 integer, pointer :: m_pds_part(:) ! Right-hand side vector partition information. Size is (neqns).
32 logical :: m_pds_isready_dc = .false. ! set true after d is updated correctly.
33 
34 ! for leafsolve
35 integer, parameter :: m_pds_lenv=80000000 ! v is allocated with size lenv in leaf processes.
36 real(8), pointer :: m_pds_v(:) ! Communication vector for serial direct solver.
37 logical :: m_pds_isready_v = .false. ! set true after v is set correctly via nufct0().
38 
39 ! process position information on MPI processes binary tree. !!!!!!!!!!!!!!!!!!!!!!!!!!
40 
41 integer :: npe ! total number of processes
42 integer :: myid ! my process id
43 integer :: imp ! mother process id
44 integer :: ip1 ! left side child process id
45 integer :: ip2 ! right side child process id
46 logical :: isroot ! true if this process is root of binary tree
47 logical :: istrunk ! true if this process is trunk of binary tree (not root, not leaf)
48 logical :: isleaf ! true if this process is leaf of binary tree
49 
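! Added note (illustration, not part of the original source): the MPI ranks form a
! 0-based binary heap, so rank myid has children myid*2+1 and myid*2+2 and parent
! (myid-1)/2 (see setbt() below). For example, with npe = 7 ranks, rank 0 is the
! root, ranks 1-2 are trunks and ranks 3-6 are leaves; when npe is even the last
! rank is dropped from the tree.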
50 
51 contains !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
52 
53 subroutine sp_lineq(SP_MAT)
54 
55 use m_elap
56 implicit none
57 
58 
59 character(132),parameter::version='sp_LINEQ: $Revision: 1.2 $ $Date: 2007/11/21 02:13:45 $'
60 
61 integer :: ierr
62 type (sp_matrix) :: sp_mat
63 
64 ! start !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
65 
66 call elapinitmpi()
67 !write(20,*) 'sp_LINEQ: entered'; call flush(20)!DEBUG
68 
69 ! set location in process tree
70 call setbt()
71 
72 if (isroot) then
73 ! write(20,*) 'sp_LINEQ: entering sp_direct_root'; call flush(20)!DEBUG
74  call sp_direct_root(sp_mat)
75 else if (istrunk) then
76 ! write(20,*) 'sp_LINEQ: entering sp_direct_trunk'; call flush(20)!DEBUG
77  call sp_direct_trunk()
78 else if (isleaf) then
79 !write(20,*) 'sp_LINEQ: entering sp_direct_leaf'; call flush(20)!DEBUG
80  call sp_direct_leaf()
81 else
82 ! write(20,*) 'sp_LINEQ: never come here'; call flush(20)
83  stop
84 end if
85 
86 return
87 end subroutine sp_lineq
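! Added usage note (illustration, not part of the original source): all MPI ranks
! are expected to call sp_lineq(); only the root rank (myid == 0) actually reads
! and writes its SP_MAT argument, while trunk and leaf ranks obtain their share
! of the matrix from the parent inside sp_direct_trunk()/sp_direct_leaf().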
88 
89 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
90 
91 subroutine sp_direct_root(sp_mat)
92 
93 use m_irjcmatrix
94 use m_cclsmatrix
95 use m_elap
96 
97 implicit none
98 
99 character(132), parameter :: version='sp_direct_root: $Revision: 1.2 $ $Date: 2007/11/21 02:13:45 $'
100 
101 include 'mpif.h'
102 
103 !I/O !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
104 type (sp_matrix) :: sp_mat ! Interface to FEM code
105 
106 
107 !internal !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
108 
109 ! given A0 x = b0
110 type (irjc_matrix), target :: a0 ! given left side matrix assembled from sp_matrix
111 real(8), allocatable, dimension(:) :: r ! right hand side value vector of equation.
112 integer :: ll, loc
113 integer :: jj, ii
114 integer :: nprof, nprof2
115 
116 integer :: ndeg
117 
118 ! work variables for dividing and restoring the matrix
119 integer, allocatable, dimension(:) :: iperm ! relation of index of large matrix and small matrix
120 
121 ! for divided matrices (a1, a2)
122 type (irjc_matrix) :: a1, a2
123 type (ccls_matrix), target :: c1, c2
124 
125 ! for connection region
126 real(8), allocatable, dimension(:,:) :: d1, d2 ! to update D
127 
128 ! misc !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
129 integer :: nndeg
130 integer :: ierr
131 integer :: i,j,k,l,m,n
132 
133 ! for MPI
134 integer, dimension(MPI_STATUS_SIZE) :: istatus
135 logical :: do_calc
136 
137 ! for elapsed time
138 logical, parameter :: elap=.true.
139 real(8) :: t00s, t00e
140 real(8) :: t10s, t10e
141 real(8) :: t20s, t20e, t21s, t21e
142 real(8) :: t30s, t30e, t31s, t31e
143 real(8) :: t40s, t40e
144 real(8) :: t50s, t50e
145 real(8) :: t60s, t60e
146 
147 !test
148 integer, allocatable, dimension(:), target :: part
149 real(8), allocatable, dimension(:,:), target :: d
150 integer :: neqns1, neqns2, neqnsd, nd
151 real(8), allocatable, dimension(:) :: oldb
152 
153 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
154 !
155 ! STEP01: get a0 from FEM data format SP_MAT
156 !
157 
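! Added note (illustration, not part of the original source): the assembly loop
! below walks SP_MAT's linked-list storage (istat/irpt/irowno) and keeps only the
! diagonal and strictly lower-triangular (i > j) entries, so a0 holds one triangle
! of the symmetric matrix as irow/jcol pairs with an ndeg*ndeg block per pair in val.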
158 !call ptime(curt);write(20,'(a,1f15.5)') 'sp_direct_root: begin ', curt - epocht; call flush(20)!DEBUG
159 call elapout('sp_direct_root: begin')
160 
161 call ptime(t10s)!ELAP
162 
163 a0%neqns=sp_mat%n
164 a0%ndeg=sp_mat%ndeg
165 
166 ndeg=a0%ndeg
167 nndeg=ndeg*ndeg
168 
169 ll=0
170 nprof=sp_mat%ipoi(sp_mat%ncol+1)-1
171 nprof2=nprof/2+a0%neqns
172 allocate(a0%irow(nprof/2+a0%neqns),a0%jcol(nprof/2+a0%neqns),a0%val(nndeg,nprof/2+a0%neqns))
173 do j=1,a0%neqns
174  jj=sp_mat%iperm(j)
175  ll=ll+1
176  a0%irow(ll)=j
177  a0%jcol(ll)=j
178  a0%val(1:nndeg,ll)=sp_mat%d(jj,1:nndeg)
179  loc=sp_mat%istat(jj)
180  do while(loc.ne.0)
181  ii=sp_mat%irowno(loc)
182  i=sp_mat%invp(ii)
183  if(i.gt.j) then
184  ll=ll+1
185  a0%irow(ll)=i
186  a0%jcol(ll)=j
187  a0%val(1:nndeg,ll)=sp_mat%elm(loc,1:nndeg)
188  end if
189  loc=sp_mat%irpt(loc)
190  end do
191 end do
192 a0%nttbr=ll
193 
194 ! set right hand side vector (b)
195 allocate(r(a0%neqns*ndeg))
196 do i=1,a0%neqns
197  do j=1,ndeg
198  r(ndeg*(i-1)+j)=sp_mat%b(j,i)
199  end do
200 end do
201 
202 call ptime(t10e)!ELAP
203 call elapout('sp_direct_root: get matrix information done ')
204 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
205 !
206 ! STEP02: divide given large matrix A into a1 and a2
207 !
208 call elapout('sp_direct_root: begin divide matrix ')
209 call ptime(t20s)!ELAP
210 
211 allocate(part(a0%neqns))
212 allocate(iperm(a0%neqns))
213 part=0
214 iperm=0
215 call mkpart(a0%neqns, a0%nttbr, ndeg, a0%irow, a0%jcol, neqns1, neqns2, neqnsd, part, iperm) ! set matrix partition information
216 
217 nd = neqnsd*ndeg
218 
219 allocate(d(nd,nd))
220 d=0
221 call divmat(a0, part, iperm, neqns1, neqns2, neqnsd, ndeg, a1, a2, c1, c2, d)
222 call ptime(t20e)!ELAP
223 call elapout('sp_direct_root: end divide matrix')
224 
225 
226 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
227 !
228 ! STEP03: Send the divided left-hand side matrices to the child processes recursively
229 ! to prepare their solvers.
230 !
231 
232 call ptime(t30s)!ELAP
233 call elapout('sp_direct_root: begin send a1 ')
234 !send a1
235 call mpi_send(a1%neqns, 1, mpi_integer,ip1,1,mpi_comm_world,ierr)
236 call mpi_send(a1%nttbr, 1, mpi_integer,ip1,1,mpi_comm_world,ierr)
237 call mpi_send(a1%ndeg, 1, mpi_integer,ip1,1,mpi_comm_world,ierr)
238 
239 call mpi_send(a1%irow, a1%nttbr, mpi_integer, ip1,1,mpi_comm_world,ierr)
240 call mpi_send(a1%jcol, a1%nttbr, mpi_integer, ip1,1,mpi_comm_world,ierr)
241 call mpi_send(a1%val, a1%nttbr*nndeg, mpi_real8, ip1,1,mpi_comm_world,ierr)
242 call elapout('sp_direct_root: end send a1 ')
243 
244 call elapout('sp_direct_root: begin send a2 ')
245 !send a2
246 call mpi_send(a2%neqns, 1, mpi_integer,ip2,1,mpi_comm_world,ierr)
247 call mpi_send(a2%nttbr, 1, mpi_integer,ip2,1,mpi_comm_world,ierr)
248 call mpi_send(a2%ndeg, 1, mpi_integer,ip2,1,mpi_comm_world,ierr)
249 
250 call mpi_send(a2%irow, a2%nttbr, mpi_integer, ip2,1,mpi_comm_world,ierr)
251 call mpi_send(a2%jcol, a2%nttbr, mpi_integer, ip2,1,mpi_comm_world,ierr)
252 call mpi_send(a2%val, a2%nttbr*nndeg, mpi_real8, ip2,1,mpi_comm_world,ierr)
253 call elapout('sp_direct_root: end send a2 ')
254 call ptime(t30e)!ELAP
255 
256 
257 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
258 !
259 ! STEP04: Set up the trunk solver.
260 ! Send the C matrices to the child processes' calcCtAC().
261 ! Get the results D1, D2 and update D as D' = D - D1 - D2.
262 ! LDU-decompose D' and prepare the dense solve.
263 !
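! Added note (illustration, not part of the original source): with the partitioned
! system
!   | A1    0    C1 | |x1|   |b1|
!   | 0     A2   C2 | |x2| = |b2|
!   | C1^t  C2^t D  | |xd|   |bd|
! the children return D1 = C1^t A1^-1 C1 and D2 = C2^t A2^-1 C2 via calcctac(),
! so D' = D - D1 - D2 is the Schur complement that gets LDU-decomposed here.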
264 
265 call ptime(t40s)!ELAP
266 call elapout('sp_direct_root: begin send c1 ')
267 call mpi_send(c1%neqns, 1, mpi_integer, ip1,1,mpi_comm_world,ierr)
268 call mpi_send(c1%ncol, 1, mpi_integer, ip1,1,mpi_comm_world,ierr)
269 call mpi_send(c1%nttbr, 1, mpi_integer, ip1,1,mpi_comm_world,ierr)
270 call mpi_send(c1%ndeg, 1, mpi_integer, ip1,1,mpi_comm_world,ierr)
271 call mpi_send(c1%ia, c1%ncol+1, mpi_integer, ip1,1,mpi_comm_world,ierr)
272 call mpi_send(c1%ja, c1%nttbr, mpi_integer, ip1,1,mpi_comm_world,ierr)
273 call mpi_send(c1%val, c1%nttbr*nndeg, mpi_real8, ip1,1,mpi_comm_world,ierr)
274 call elapout('sp_direct_root: end send c1 ')
275 
276 call elapout('sp_direct_root: begin send c2')
277 call mpi_send(c2%neqns, 1, mpi_integer, ip2,1,mpi_comm_world,ierr)
278 call mpi_send(c2%ncol, 1, mpi_integer, ip2,1,mpi_comm_world,ierr)
279 call mpi_send(c2%nttbr, 1, mpi_integer, ip2,1,mpi_comm_world,ierr)
280 call mpi_send(c2%ndeg, 1, mpi_integer, ip2,1,mpi_comm_world,ierr)
281 call mpi_send(c2%ia, c2%ncol+1, mpi_integer, ip2,1,mpi_comm_world,ierr)
282 call mpi_send(c2%ja, c2%nttbr, mpi_integer, ip2,1,mpi_comm_world,ierr)
283 call mpi_send(c2%val, c2%nttbr*nndeg, mpi_real8, ip2,1,mpi_comm_world,ierr)
284 call elapout('sp_direct_root: end send c2')
285 
286 allocate(d1(nd,nd), d2(nd,nd))
287 d1=0
288 d2=0
289 
290 call elapout('sp_direct_root: wait until receive D1, D2')
291 call mpi_recv(d1, nd*nd, mpi_real8, ip1, 1,mpi_comm_world, istatus, ierr)
292 call mpi_recv(d2, nd*nd, mpi_real8, ip2, 1,mpi_comm_world, istatus, ierr)
293 call elapout('sp_direct_root: receive d1, d2 from children')
294 
295 
296 call elapout('sp_direct_root: modify D and LDU decompose it.')
297 d=d-d1
298 d=d-d2
299 
300 call densldu(d,nd)
301 
302 deallocate(d1, d2)
303 call elapout('sp_direct_root: LDU decompose for denssol done.')
304 
305 ! set solver information
306 m_pds_neqns = a0%neqns
307 m_pds_ndeg = a0%ndeg
308 m_pds_neqns1 = neqns1
309 m_pds_neqns2 = neqns2
310 m_pds_neqnsd = neqnsd
311 m_pds_nd = nd
312 
313 m_pds_part => part
314 m_pds_c1 => c1
315 m_pds_c2 => c2
316 m_pds_d => d
317 m_pds_isready_dc=.true. ! on m_pds
318 
319 call ptime(t40e)!ELAP
320 
321 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
322 !
323 ! STEP05: Solve Ax=b0 using child processes.
324 ! The result is returned in "r".
325 !
326 
327 call ptime(t50s)!ELAP
328 
329 allocate(oldb(a0%neqns*ndeg))
330 oldb=r
331 call elapout('sp_direct_root: entering trunksolve')
332 call trunksolve(r)
333 call elapout('sp_direct_root: end trunksolve')
334 
335 call verif0(a0%neqns, a0%ndeg, a0%nttbr, a0%irow, a0%jcol, a0%val, oldb, r) ! verify the result; oldb will be overwritten.
336 deallocate(oldb)
337 
338 do i=1,a0%neqns
339  do j=1,ndeg
340  sp_mat%x(j,i)=r(ndeg*(i-1)+j)
341  end do
342 end do
343 
344 call ptime(t50e)!ELAP
345 ! send stop flag
346 call elapout('sp_direct_root: send stop flag to children')
347 do_calc=.false.
348 call mpi_send(do_calc, 1, mpi_logical, ip1,1,mpi_comm_world,ierr)
349 call mpi_send(do_calc, 1, mpi_logical, ip2,1,mpi_comm_world,ierr)
350 
351 call elapout('sp_direct_root: end')
352 
353 ! write(20,'(a)') '# profile sp_direct_root ########################################'
354 ! write(20,*) 'Elaptime (sec)'
355 ! write(20,'(a,1f15.5)') 'STEP01: Make matrix from SP_MAT: ', t10e - t10s
356 ! write(20,'(a,1f15.5)') 'STEP02: Divide matrix: ', t20e - t20s
357 ! write(20,'(a,1f15.5)') 'STEP03: Send divided matrix to children: ', t30e - t30s
358 ! write(20,'(a,1f15.5)') 'STEP04: Set up trunksolver: ', t40e - t40s
359 ! write(20,'(a,1f15.5)') 'STEP05: Solve Ax=b: ', t50e - t50s
360 ! write(20,'(a)') '# End profile data######################################'
361 
362 return
363 end subroutine sp_direct_root
364 
365 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
366 
367 subroutine sp_direct_trunk()
368 
369 use m_irjcmatrix
370 use m_cclsmatrix
371 use m_elap
372 
373 implicit none
374 
375 character(132), parameter :: version='sp_direct_trunk: $Revision: 1.2 $ $Date: 2007/11/21 02:13:45 $'
376 
377 include 'mpif.h'
378 
379 !internal !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
380 
381 ! given A0 x = b0
382 type (irjc_matrix), target :: a0 ! given left-hand side matrix, received from the parent via MPI
383 
384 integer :: ndeg
385 
386 ! work variables for dividing and restoring the matrix
387 integer, allocatable, dimension(:) :: iperm ! relation of index of large matrix and small matrix
388 
389 ! for divided matrices (a1, a2, c1, c2)
390 type (irjc_matrix) :: a1, a2
391 type (ccls_matrix), target :: c1, c2
392 
393 ! for connection region
394 real(8), allocatable, dimension(:,:) :: d1, d2 ! to update D
395 
396 ! misc !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
397 integer :: nndeg
398 integer :: ierr
399 integer :: i,j,k,l,m,n
400 
401 ! for MPI
402 integer, dimension(MPI_STATUS_SIZE) :: istatus
403 
404 ! for elapsed time
405 logical, parameter :: elap=.true.
406 real(8) :: t00s, t00e
407 real(8) :: t10s, t10e
408 real(8) :: t20s, t20e, t21s, t21e
409 real(8) :: t30s, t30e, t31s, t31e
410 real(8) :: t40s, t40e
411 real(8) :: t50s, t50e
412 real(8) :: t60s, t60e
413 
414 !test
415 integer, allocatable, dimension(:), target :: part
416 real(8), allocatable, dimension(:,:), target :: d
417 integer :: neqns1, neqns2, neqnsd, nd
418 
419 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
420 !
421 ! STEP01 receive a0
422 !
423 
424 call elapout('sp_direct_trunk: begin')
425 call elapout('sp_direct_trunk: waiting matrix via MPI')
426 
427 call mpi_recv(a0%neqns, 1,mpi_integer,imp,1,mpi_comm_world,istatus,ierr)
428 call mpi_recv(a0%nttbr, 1,mpi_integer,imp,1,mpi_comm_world,istatus,ierr)
429 call mpi_recv(a0%ndeg, 1,mpi_integer,imp,1,mpi_comm_world,istatus,ierr)
430 allocate(a0%irow(a0%nttbr))
431 allocate(a0%jcol(a0%nttbr))
432 allocate(a0%val(a0%ndeg*a0%ndeg,a0%nttbr))
433 
434 call elapout('sp_direct_trunk: begin get matrix via MPI')
435 call mpi_recv(a0%irow, a0%nttbr, mpi_integer,imp,1,mpi_comm_world,istatus,ierr)
436 call mpi_recv(a0%jcol, a0%nttbr, mpi_integer,imp,1,mpi_comm_world,istatus,ierr)
437 call mpi_recv(a0%val, a0%nttbr*a0%ndeg*a0%ndeg, mpi_real8,imp,1,mpi_comm_world,istatus,ierr)
438 
439 call elapout('sp_direct_trunk: end get matrix via MPI')
440 
441 ndeg=a0%ndeg
442 nndeg=ndeg*ndeg
443 
444 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
445 !
446 ! STEP02: divide given large matrix A into a1 and a2
447 !
448 call elapout('sp_direct_trunk: begin divide matrix')
449 
450 allocate(part(a0%neqns))
451 allocate(iperm(a0%neqns))
452 part=0
453 iperm=0
454 call mkpart(a0%neqns, a0%nttbr, ndeg, a0%irow, a0%jcol, neqns1, neqns2, neqnsd, part, iperm) ! set matrix partition information
455 
456 nd = neqnsd*ndeg
457 
458 allocate(d(nd,nd))
459 d=0
460 call divmat(a0, part, iperm, neqns1, neqns2, neqnsd, ndeg, a1, a2, c1, c2, d)
461 call elapout('sp_direct_trunk: end divide matrix')
462 
463 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
464 !
465 ! STEP03: Send the divided left-hand side matrices to the child processes recursively
466 ! to prepare their solvers.
467 !
468 
469 call elapout('sp_direct_trunk: begin send a1 ')
470 !send a1
471 call mpi_send(a1%neqns, 1, mpi_integer,ip1,1,mpi_comm_world,ierr)
472 call mpi_send(a1%nttbr, 1, mpi_integer,ip1,1,mpi_comm_world,ierr)
473 call mpi_send(a1%ndeg, 1, mpi_integer,ip1,1,mpi_comm_world,ierr)
474 
475 call mpi_send(a1%irow, a1%nttbr, mpi_integer, ip1,1,mpi_comm_world,ierr)
476 call mpi_send(a1%jcol, a1%nttbr, mpi_integer, ip1,1,mpi_comm_world,ierr)
477 call mpi_send(a1%val, a1%nttbr*nndeg, mpi_real8, ip1,1,mpi_comm_world,ierr)
478 call elapout('sp_direct_trunk: end send a1 ')
479 
480 call elapout('sp_direct_trunk: begin send a2 ')
481 !send a2
482 call mpi_send(a2%neqns, 1, mpi_integer,ip2,1,mpi_comm_world,ierr)
483 call mpi_send(a2%nttbr, 1, mpi_integer,ip2,1,mpi_comm_world,ierr)
484 call mpi_send(a2%ndeg, 1, mpi_integer,ip2,1,mpi_comm_world,ierr)
485 
486 call mpi_send(a2%irow, a2%nttbr, mpi_integer, ip2,1,mpi_comm_world,ierr)
487 call mpi_send(a2%jcol, a2%nttbr, mpi_integer, ip2,1,mpi_comm_world,ierr)
488 call mpi_send(a2%val, a2%nttbr*nndeg, mpi_real8, ip2,1,mpi_comm_world,ierr)
489 call elapout('sp_direct_trunk: end send a2')
490 
491 
492 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
493 !
494 ! STEP04: Set up the trunk solver.
495 ! Send the C matrices to the child processes' calcCtAC().
496 ! Get the results D1, D2 and update D as D' = D - D1 - D2.
497 ! LDU-decompose D' and prepare the dense solve.
498 !
499 
500 call elapout('sp_direct_trunk: send c1')
501 call mpi_send(c1%neqns, 1, mpi_integer, ip1,1,mpi_comm_world,ierr)
502 call mpi_send(c1%ncol, 1, mpi_integer, ip1,1,mpi_comm_world,ierr)
503 call mpi_send(c1%nttbr, 1, mpi_integer, ip1,1,mpi_comm_world,ierr)
504 call mpi_send(c1%ndeg, 1, mpi_integer, ip1,1,mpi_comm_world,ierr)
505 call mpi_send(c1%ia, c1%ncol+1, mpi_integer, ip1,1,mpi_comm_world,ierr)
506 call mpi_send(c1%ja, c1%nttbr, mpi_integer, ip1,1,mpi_comm_world,ierr)
507 call mpi_send(c1%val, c1%nttbr*nndeg, mpi_real8, ip1,1,mpi_comm_world,ierr)
508 
509 call elapout('sp_direct_trunk: send c2')
510 call mpi_send(c2%neqns, 1, mpi_integer, ip2,1,mpi_comm_world,ierr)
511 call mpi_send(c2%ncol, 1, mpi_integer, ip2,1,mpi_comm_world,ierr)
512 call mpi_send(c2%nttbr, 1, mpi_integer, ip2,1,mpi_comm_world,ierr)
513 call mpi_send(c2%ndeg, 1, mpi_integer, ip2,1,mpi_comm_world,ierr)
514 call mpi_send(c2%ia, c2%ncol+1, mpi_integer, ip2,1,mpi_comm_world,ierr)
515 call mpi_send(c2%ja, c2%nttbr, mpi_integer, ip2,1,mpi_comm_world,ierr)
516 call mpi_send(c2%val, c2%nttbr*nndeg, mpi_real8, ip2,1,mpi_comm_world,ierr)
517 
518 allocate(d1(nd,nd), d2(nd,nd))
519 d1=0
520 d2=0
521 
522 call elapout('sp_direct_trunk: wait until receive D1, D2')
523 call mpi_recv(d1, nd*nd, mpi_real8, ip1, 1,mpi_comm_world, istatus, ierr)
524 call mpi_recv(d2, nd*nd, mpi_real8, ip2, 1,mpi_comm_world, istatus, ierr)
525 call elapout('sp_direct_trunk: receive d1, d2 from children')
526 
527 
528 call elapout('sp_direct_trunk: modify D and LDU decompose it.')
529 d=d-d1
530 d=d-d2
531 
532 call densldu(d,nd)
533 
534 deallocate(d1, d2)
535 call elapout('sp_direct_trunk: LDU decompose for denssol done.')
536 
537 ! set solver information
538 m_pds_neqns = a0%neqns
539 m_pds_ndeg = a0%ndeg
540 m_pds_neqns1 = neqns1
541 m_pds_neqns2 = neqns2
542 m_pds_neqnsd = neqnsd
543 m_pds_nd = nd
544 
545 m_pds_part => part
546 m_pds_c1 => c1
547 m_pds_c2 => c2
548 m_pds_d => d
549 m_pds_isready_dc=.true. ! on m_pds
550 
551 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
552 !
553 ! STEP05: Receive the C matrix and return its contribution to D' to the parent
554 !
555 
556 call elapout('sp_direct_trunk: begin calcCtAC()')
557 call calcctac()
558 call elapout('sp_direct_trunk: end calcCtAC()')
559 
560 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
561 !
562 ! STEP06: Start the solver and wait for right-hand side vectors
563 !
564 
565 call elapout('sp_direct_trunk: begin recsolve()')
566 call recsolve()
567 call elapout('sp_direct_trunk: end recsolve()')
568 
569 return
570 
571 end subroutine sp_direct_trunk
572 
573 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
574 
575 subroutine sp_direct_leaf()
576 
577 use m_irjcmatrix
578 use m_elap
579 
580 implicit none
581 
582 include 'mpif.h'
583 
584 !internal variables !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
585 
586 type (irjc_matrix) :: a
587 
588 ! for MPI
589 integer, dimension(MPI_STATUS_SIZE) :: istatus
590 integer :: ierr
591 
592 !misc
593 integer :: i,j,k,l
594 
595 ! for elapsed time
596 real(8) :: t00s, t00e
597 real(8) :: t10s, t10e
598 real(8) :: t20s, t20e, t21s, t21e
599 real(8) :: t30s, t30e, t31s, t31e
600 real(8) :: t40s, t40e
601 real(8) :: t50s, t50e
602 real(8) :: t60s, t60e
603 
604 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
605 !
606 ! STEP01 receive a0
607 !
608 
609 call elapout('sp_direct_leaf: begin')
610 call elapout('sp_direct_leaf: waiting matrix via MPI')
611 
612 call mpi_recv(a%neqns, 1,mpi_integer,imp,1,mpi_comm_world,istatus,ierr)
613 call mpi_recv(a%nttbr, 1,mpi_integer,imp,1,mpi_comm_world,istatus,ierr)
614 call mpi_recv(a%ndeg, 1,mpi_integer,imp,1,mpi_comm_world,istatus,ierr)
615 allocate(a%irow(a%nttbr))
616 allocate(a%jcol(a%nttbr))
617 allocate(a%val(a%ndeg*a%ndeg,a%nttbr))
618 
619 call elapout('sp_direct_leaf: begin receive matrix')
620 call ptime(t10s)!ELAP
621 call mpi_recv(a%irow, a%nttbr, mpi_integer,imp,1,mpi_comm_world,istatus,ierr)
622 call mpi_recv(a%jcol, a%nttbr, mpi_integer,imp,1,mpi_comm_world,istatus,ierr)
623 call mpi_recv(a%val, a%nttbr*a%ndeg*a%ndeg, mpi_real8,imp,1,mpi_comm_world,istatus,ierr)
624 call ptime(t10e)!ELAP
625 call elapout('sp_direct_leaf: end get matrix via MPI')
626 
627 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
628 !
629 ! STEP02: Prepare serial solver
630 !
631 
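! Added note (assumption, not part of the original source): matini()/staij1()/nufct0()
! belong to the external serial direct solver; the sequence below appears to perform
! symbolic initialization (matini), entry-by-entry assembly of the ndeg*ndeg blocks
! (staij1) and numeric factorization (nufct0), all working on the array m_pds_v.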
632 call ptime(t20s)!ELAP
633 allocate(m_pds_v(m_pds_lenv)) ! communication vector held in module m_pds
634 
635 call elapout('sp_direct_leaf: begin matini()')
636 call matini(a%neqns, a%nttbr, a%irow, a%jcol, m_pds_lenv, m_pds_v, ierr)
637 if (ierr .ne. 0) call errtrp('sp_direct_leaf: matini')
638 call elapout('sp_direct_leaf: end matini()')
639 
640 call elapout('sp_direct_leaf: begin staij1()')
641 do i=1,a%nttbr
642  call staij1(0, a%irow(i), a%jcol(i), a%val(1,i), m_pds_v, a%ndeg, ierr)
643  if (ierr .ne. 0) call errtrp('sp_direct_leaf: staij1')
644 end do
645 call elapout('sp_direct_leaf: end staij1()')
646 
647 call elapout('sp_direct_leaf: begin nufct0()')
648 call nufct0(m_pds_v, ierr)
649 if (ierr .ne. 0) call errtrp('sp_direct_leaf: nufct0')
650 call elapout('sp_direct_leaf: end nufct0()')
651 
652 ! set solver information in module m_pds
653 m_pds_neqns = a%neqns
654 m_pds_ndeg = a%ndeg
655 m_pds_isready_v=.true.
656 
657 call ptime(t20e)!ELAP
658 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
659 !
660 ! STEP03: Receive the C matrix and return its contribution to D' to the parent
661 !
662 
663 call elapout('sp_direct_leaf: entering calcCtAC')
664 call ptime(t30s)!ELAP
665 call calcctac()
666 call ptime(t30e)!ELAP
667 call elapout('sp_direct_leaf: exit calcCtAC')
668 
669 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
670 !
671 ! STEP04: Start the solver and wait for right-hand side vectors
672 !
673 
674 call elapout('sp_direct_leaf: entering recsolve')
675 call ptime(t40s)!ELAP
676 call recsolve()
677 call ptime(t40e)!ELAP
678 call elapout('sp_direct_leaf: exit recsolve')
679 
680 
681 ! write(20,'(a)') '# profile sp_direct_leaf ########################################'
682 ! write(20,*) 'Elaptime (sec)'
683 ! write(20,'(a,1f15.5)') 'STEP01: Receive a from parent: ', t10e - t10s
684 ! write(20,'(a,1f15.5)') 'STEP02: Prepare serial solver: ', t20e - t20s
685 ! write(20,'(a,1f15.5)') 'STEP03: calcCtAC: ', t30e - t30s
686 ! write(20,'(a,1f15.5)') 'STEP04: recsolve: ', t40e - t40s
687 ! write(20,'(a)') '# End profile data######################################'
688 
689 return
690 
691 end subroutine sp_direct_leaf
692 
693 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
694 
695 
696 subroutine recsolve()
697 ! Recursive solver for Ax=b.
698 ! Before calling this routine, the trunk or leaf solver setup must be finished in m_pds.
699 ! Wait for the right-hand side vector b via MPI_RECV,
700 ! solve the equation using leafsolve() or trunksolve(),
701 ! send the result to the parent, and repeat.
702 implicit none
703 include 'mpif.h'
704 
705 real(8), dimension(m_pds_ndeg*m_pds_neqns) :: b
706 logical :: do_calc
707 
708 ! for MPI
709 integer, dimension(MPI_STATUS_SIZE) :: istatus
710 integer :: ierr
711 
712 do
713 !write(20,*)'recsolve: do_calc flag'; call flush(20)!DEBUG
714  call mpi_recv(do_calc, 1, mpi_logical, imp, 1,mpi_comm_world, istatus, ierr)
715  if (.false.==do_calc) then
716  if (.true. == istrunk) then
717  do_calc=.false.
718  call mpi_send(do_calc, 1, mpi_logical, ip1,1,mpi_comm_world,ierr)
719  call mpi_send(do_calc, 1, mpi_logical, ip2,1,mpi_comm_world,ierr)
720  end if
721  return
722  end if
723 
724 !write(20,*)'recsolve: waiting right hand side vector'; call flush(20)!DEBUG
725 !write(20,*)'recsolve: m_pds_ndeg',m_pds_ndeg; call flush(20)!DEBUG
726 !write(20,*)'recsolve: m_pds_neqns',m_pds_neqns; call flush(20)!DEBUG
727 !write(20,*)'recsolve: total MPI length',m_pds_ndeg*m_pds_neqns; call flush(20)!DEBUG
728 
729  call mpi_recv(b, m_pds_ndeg*m_pds_neqns, mpi_real8, imp, 1,mpi_comm_world, istatus, ierr)
730 
731  if (.true. == istrunk) then
732  call trunksolve(b)
733  else if(.true. == isleaf) then
734  call leafsolve(b)
735  else
736  call errtrp('recsolve: never come here')
737  end if
738  call mpi_send(b, m_pds_ndeg*m_pds_neqns, mpi_real8, imp, 1,mpi_comm_world, ierr)
739 end do
740 
741 return
742 
743 end subroutine recsolve
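! Added note (illustration, not part of the original source): the parent side of this
! protocol lives in trunksolve() and sp_direct_root(): the parent sends do_calc=.true.,
! then an ndeg*neqns right-hand side vector, and receives the solution back; a
! do_calc=.false. flag shuts the child down, and a trunk forwards that flag to its
! own children before returning.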
744 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
745 
746 
747 subroutine trunksolve(b)
748 ! Do the recursive calculation to solve A x = b0.
749 ! Before starting this routine, m_pds_d, c1 and c2 must be set correctly in module m_pds.
750 !
751 ! Get the right-hand side vector b0 and divide it into b1, b2, bd.
752 ! Send b1, b2 to the child processes and receive the results of A1 xab1 = b1, A2 xab2 = b2.
753 ! Update the right-hand side vector as bd' = bd - C1^t xab1 - C2^t xab2.
754 ! Using the LDU-decomposed D' in module m_pds, solve D' x_d = bd' locally.
755 ! Calc v1 = C1 x_d, v2 = C2 x_d.
756 ! Send v1, v2 to the child processes and receive the results of A1 w1 = v1, A2 w2 = v2.
757 ! Calc x1 = -w1 + xab1, x2 = -w2 + xab2.
758 ! Reorder x1, x2, xd and return the result in b.
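! Added note (illustration, not part of the original source): in matrix form the
! steps below are block Gaussian elimination on the partitioned system:
!   xab1 = A1^-1 b1,  xab2 = A2^-1 b2                       (children, 1st solve)
!   xd   = D'^-1 (bd - C1^t xab1 - C2^t xab2)               (denssolve)
!   x1   = xab1 - A1^-1 C1 xd,  x2 = xab2 - A2^-1 C2 xd     (children, 2nd solve)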
759 use m_cclsmatrix
760 implicit none
761 include 'mpif.h'
762 
763 ! I/O !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
764 real(8), dimension(:), intent(inout) :: b
765 
766 ! internal
767 real(8), allocatable, dimension(:) :: b1, x1, v1, w1, xab1, vtmp1
768 real(8), allocatable, dimension(:) :: b2, x2, v2, w2, xab2, vtmp2
769 real(8), allocatable, dimension(:) :: bd, xd
770 type (ccls_matrix) :: c1, c2
771 
772 integer :: neqns, ndeg, neqns1, neqns2, neqnsd, nd
773 logical :: do_calc
774 
775 ! misc !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
776 integer :: i,j,k,l,m,n
777 
778 ! for MPI
779 integer, dimension(MPI_STATUS_SIZE) :: istatus
780 integer :: ierr
781 
782 ! start !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
783 
784 if (.false.==m_pds_isready_dc) then
785  call errtrp('trunksolve is not ready.')
786  stop
787 end if
788 
789 ndeg = m_pds_ndeg
790 neqns = m_pds_neqns
791 neqns1 = m_pds_neqns1
792 neqns2 = m_pds_neqns2
793 neqnsd = m_pds_neqnsd
794 nd = m_pds_nd
795 
796 c1 = m_pds_c1
797 c2 = m_pds_c2
798 
799 allocate(b1(neqns1*ndeg), x1(neqns1*ndeg), v1(neqns1*ndeg), w1(neqns1*ndeg), xab1(neqns1*ndeg), vtmp1(neqns1*ndeg))
800 allocate(b2(neqns2*ndeg), x2(neqns2*ndeg), v2(neqns2*ndeg), w2(neqns2*ndeg), xab2(neqns2*ndeg), vtmp2(neqns2*ndeg))
801 allocate(bd(nd), xd(nd))
802 
803 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
804 !
805 ! Update right hand side vector as bd' = bd - C1^t xab1 - C2^t xab2
806 !
807 
808 !write(20,*) 'trunksolve: begin modify bd'; call flush(20)!DEBUG
809 
810 call divvec(neqns, ndeg, m_pds_part, b, b1, b2, bd)
811 
812 !write(20,*) 'trunksolve: send right hand side vector to children'; call flush(20)!DEBUG
813 ! send to children and get result for A1 xab1 = b1, A2 xab2 = b2
814 do_calc=.true.
815 call mpi_send(do_calc, 1, mpi_logical, ip1,1,mpi_comm_world,ierr)
816 call mpi_send(do_calc, 1, mpi_logical, ip2,1,mpi_comm_world,ierr)
817 
818 call mpi_send(b1, ndeg*neqns1, mpi_real8, ip1,1,mpi_comm_world,ierr)
819 call mpi_send(b2, ndeg*neqns2, mpi_real8, ip2,1,mpi_comm_world,ierr)
820 
821 call mpi_recv(xab1, ndeg*neqns1, mpi_real8, ip1, 1,mpi_comm_world, istatus, ierr)
822 call mpi_recv(xab2, ndeg*neqns2, mpi_real8, ip2, 1,mpi_comm_world, istatus, ierr)
823 
824 do k=1,nd
825  call m_cclsmatrix_getvec(c1, k, vtmp1)
826  do i=1,neqns1*ndeg
827  bd(k) = bd(k) -(vtmp1(i) * xab1(i))
828  end do
829  call m_cclsmatrix_getvec(c2, k, vtmp2)
830  do i=1,neqns2*ndeg
831  bd(k) = bd(k) -(vtmp2(i) * xab2(i))
832  end do
833 end do
834 !write(20,*) 'trunksolve: end modify bd'; call flush(20)!DEBUG
835 
836 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
837 !
838 ! calc D'x=bd'
839 ! D' is already LDU decomposed.
840 !
841 
842 call denssolve(m_pds_d, nd, bd)
843 
844 xd=bd
845 
846 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
847 !
848 ! Calc v1=C1 x_d, v2=C2 x_d.
849 !
850 
851 v1=0
852 do k=1, nd
853  call m_cclsmatrix_getvec(c1, k, vtmp1)
854  do i=1, neqns1*ndeg
855  v1(i) = v1(i) + vtmp1(i)*xd(k)
856  end do
857 end do
858 
859 v2=0
860 do k=1, nd
861  call m_cclsmatrix_getvec(c2, k, vtmp2)
862  do i=1, neqns2*ndeg
863  v2(i) = v2(i) + vtmp2(i)*xd(k)
864  end do
865 end do
866 
867 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
868 !
869 ! calc A1 w1 = v1, a2 w2 = v2
870 !
871 do_calc=.true.
872 call mpi_send(do_calc, 1, mpi_logical, ip1,1,mpi_comm_world,ierr)
873 call mpi_send(do_calc, 1, mpi_logical, ip2,1,mpi_comm_world,ierr)
874 
875 call mpi_send(v1, ndeg*neqns1, mpi_real8, ip1,1,mpi_comm_world,ierr)
876 call mpi_send(v2, ndeg*neqns2, mpi_real8, ip2,1,mpi_comm_world,ierr)
877 
878 call mpi_recv(w1, ndeg*neqns1, mpi_real8, ip1, 1,mpi_comm_world, istatus, ierr)
879 call mpi_recv(w2, ndeg*neqns2, mpi_real8, ip2, 1,mpi_comm_world, istatus, ierr)
880 
881 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
882 !
883 ! Calc x1=-w1+xab1, x2=-w2+xab2.
884 !
885 
886 x1 = -w1 + xab1
887 x2 = -w2 + xab2
888 
889 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
890 !
891 ! reorder x1, x2, xd and store the final result (x1, x2, xd) in b
892 !
893 
894 call reovec(neqns, ndeg, m_pds_part, b, x1, x2, xd)
895 
896 return
897 
898 end subroutine trunksolve
899 
900 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
901 
902 
903 subroutine leafsolve(b)
904 
905 implicit none
906 
907 real(8), dimension(:), intent(inout) :: b
908 integer ierr
909 
910 ! start !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
911 
912 if (.false.==m_pds_isready_v) then
913  call errtrp('leafsolve is not ready.')
914 end if
915 
916 call nusol0(b, m_pds_v, ierr)
917 if (ierr .ne. 0) call errtrp('leafsolve: nusol0')
918 return
919 
920 end subroutine leafsolve
921 
922 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
923 
924 
925 subroutine calcctac()
926 
927 use m_cclsmatrix
928 use m_elap
929 implicit none
930 include 'mpif.h'
931 
932 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
933 
934 type (ccls_matrix) :: pc ! parent C
935 real(8), allocatable, dimension(:,:) :: pd ! parent D' for update
936 integer :: npd ! size of parent dense D'
937 
938 real(8), allocatable, dimension(:) :: vtmp, vpc
939 integer :: ndeg, nndeg
940 integer :: jcol, iofset, idx
941 integer :: i,j,k,l
942 
943 ! for MPI
944 integer, dimension(MPI_STATUS_SIZE) :: istatus
945 integer :: ierr
946 
947 ! for elapsed time
948 real(8) :: t10s, t10e, t20s, t20e, t30s, t30e
949 
950 ! start !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
951 
952 ! receive parent C
953 call elapout('calcCtAC: waiting c matrix via MPI')
954 call mpi_recv(pc%neqns, 1, mpi_integer, imp, 1,mpi_comm_world, istatus, ierr)
955 call mpi_recv(pc%ncol, 1, mpi_integer, imp, 1,mpi_comm_world, istatus, ierr)
956 call mpi_recv(pc%nttbr, 1, mpi_integer, imp, 1,mpi_comm_world, istatus, ierr)
957 call mpi_recv(pc%ndeg, 1, mpi_integer, imp, 1,mpi_comm_world, istatus, ierr)
958 ndeg=pc%ndeg
959 nndeg=ndeg*ndeg
960 npd=pc%ncol*pc%ndeg
961 call cclsallocation(pc)
962 
963 call elapout('calcCtAC: begin receive C matrix')
964 call ptime(t10s)!ELAP
965 call mpi_recv(pc%ia, pc%ncol+1, mpi_integer, imp, 1,mpi_comm_world, istatus, ierr)
966 call mpi_recv(pc%ja, pc%nttbr, mpi_integer, imp, 1,mpi_comm_world, istatus, ierr)
967 call mpi_recv(pc%val, pc%nttbr*nndeg, mpi_real8, imp, 1,mpi_comm_world, istatus, ierr)
968 call ptime(t10e)!ELAP
969 call elapout('calcCtAC: end receive C matrix')
970 
971 allocate(vtmp(pc%neqns*pc%ndeg))
972 allocate(vpc(pc%neqns*pc%ndeg))
973 allocate(pd(npd,npd))
974 
975 ! Calc Ct A^-1 C
976 ! Note that C belongs to the parent.
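! Added note (illustration, not part of the original source): for each of the
! npd = ncol*ndeg columns of the parent's C, the dense column is extracted with
! m_cclsmatrix_getvec(), A^-1 C(:,i) is computed by the local solver (trunksolve
! or leafsolve), and the sparse loop over pc%ia/pc%ja then accumulates
! pd(:,i) = C^t (A^-1 C(:,i)), so that pd = C^t A^-1 C on exit.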
977 call elapout('calcCtAC: start solve CtAC')
978 call ptime(t20s)!ELAP
979 pd=0
980 do i=1, npd
981  call m_cclsmatrix_getvec(pc, i, vtmp)
982 
983 !write(20,*) 'before solve vtmp',vtmp!DEBUG
984 
985  if (.true. == istrunk) then
986  call trunksolve(vtmp)
987  else if(.true. == isleaf) then
988  call leafsolve(vtmp)
989  else
990  call errtrp('calcCtAC: never come here')
991  end if
992 
993 !write(20,*) 'after solve vtmp',vtmp!DEBUG
994 
995  do j=1, npd
996  jcol = (j+ndeg-1) / ndeg ! column number in sparse matrix
997  iofset = mod(j+ndeg-1, ndeg) ! 0-based offset within the ndeg block of val
998  do k=pc%ia(jcol),pc%ia(jcol+1)-1
999  idx=pc%ja(k) ! row number in sparse matrix
1000  do l=1,ndeg
1001  pd(j,i)=pd(j,i)+pc%val(ndeg*iofset + l,k) * vtmp(ndeg*(idx-1)+l)
1002  end do
1003  end do
1004  end do
1005 end do
1006 
1007 !write(20,*)'pd',pd!DEBUG
1008 
1009 call ptime(t20e)!ELAP
1010 call elapout('calcCtAC: end solve CtAC')
1011 
1012 call elapout('calcCtAC: start send CtAC')
1013 call ptime(t30s)!ELAP
1014 call mpi_send(pd, npd*npd, mpi_real8, imp, 1,mpi_comm_world, ierr)
1015 call ptime(t30e)!ELAP
1016 deallocate(vtmp,pd,vpc)
1017 call elapout('calcCtAC: end send CtAC')
1018 
1019 ! write(20,'(a)') '# profile calcCtAC ########################################'
1020 ! write(20,*) 'Elaptime (sec)'
1021 ! write(20,'(a,1f15.5)') 'STEP01: Receive C from parent: ', t10e - t10s
1022 ! write(20,'(a,1f15.5)') 'STEP02: calc CtAC: ', t20e - t20s
1023 ! write(20,'(a,1f15.5)') 'STEP03: send CtAC to parent: ', t30e - t30s
1024 ! write(20,'(a)') '# End profile data######################################'
1025 
1026 return
1027 
1028 end subroutine
1029 
1030 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1031 
1032 subroutine mkpart(neqns, nttbr, ndeg, irow, jcol, neqns1, neqns2, neqnsd, part, iperm)
1033 ! Make partitioning information for the given matrix,
1034 ! which is specified by neqns, nttbr, ndeg, irow, jcol.
1035 !
1036 ! The sizes of the partitioned arrays a1, a2, d (neqns1, neqns2, neqnsd) and
1037 ! the partition information arrays part, iperm are returned.
1038 
1039 implicit none
1040 
1041 integer, intent(in) :: neqns, nttbr, ndeg
1042 integer, dimension(:), intent(in) :: irow
1043 integer, dimension(:), intent(in) :: jcol
1044 
1045 integer, intent(out) :: neqns1, neqns2 ! dimension of each divided matrix (sparse)
1046 integer, intent(out) :: neqnsd ! dimension of connection matrix D (ndeg is not multiplied)
1047 integer, dimension(:), intent(out) :: part ! region to which each element of the large matrix belongs
1048 integer, dimension(:), intent(out) :: iperm ! index of each element within its small matrix
1049 
1050 integer :: ipart1(neqns),ipart2(neqns),isp(neqns)
1051 integer :: loc1, loc2, loc3
1052 integer :: i,j,k,l,m,n
1053 
1054 ! make partitioning array
1055 call bi_part_directive(neqns, nttbr, irow, jcol, neqns1, neqns2, neqnsd)
1056 call get_part_result(neqns1, ipart1, neqns2, ipart2, neqnsd, isp)
1057 
1058 do i=1,neqns1
1059  part(ipart1(i))=1
1060 enddo
1061 do i=1,neqns2
1062  part(ipart2(i))=2
1063 enddo
1064 do i=1,neqnsd
1065  part(isp(i))=3
1066 enddo
1067 loc1=0
1068 loc2=0
1069 loc3=0
1070 do i=1,neqns
1071  if(part(i).eq.1) then
1072  loc1=loc1+1
1073  iperm(i)=loc1
1074  endif
1075  if(part(i).eq.2) then
1076  loc2=loc2+1
1077  iperm(i)=loc2
1078  endif
1079  if(part(i).eq.3) then
1080  loc3=loc3+1
1081  iperm(i)=loc3
1082  endif
1083 enddo
1084 
1085 return
1086 end subroutine mkpart
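! Added note (illustration, not part of the original source): part(i) records the
! region of node i (1, 2, or 3 for the separator) and iperm(i) is its index inside
! that region. For instance, if a hypothetical partitioner returned
! part = (1,3,2,1,2) for neqns = 5, the loop above would give iperm = (1,1,1,2,2).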
1087 
1088 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1089 
1090 
1091 subroutine divmat(a0, part, iperm, neqns1, neqns2, neqnsd, ndeg, a1, a2, c1, c2, d)
1092 
1093 use m_irjcmatrix
1094 use m_cclsmatrix
1095 
1096 implicit none
1097 
1098 ! Divide the given matrix a0 into a1 and a2
1099 ! according to the partitioning information arrays "part" and "iperm" set by mkpart().
1100 ! This subroutine sets the following variables:
1101 !
1102 ! a1, a2 : divided small matrices
1103 ! c1, c2 : connection regions
1104 ! d : connecting dense matrix
1105 
1106 ! I/O !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1107 type (irjc_matrix), intent(in) :: a0
1108 integer, dimension(:), intent(in) :: part ! region to which each element of the large matrix belongs
1109 integer, dimension(:), intent(in) :: iperm ! index of each element within its small matrix
1110 integer, intent(in) :: neqns1 ! size of a1
1111 integer, intent(in) :: neqns2 ! size of a2
1112 integer, intent(in) :: neqnsd ! size of D (ndeg is not multiplied. so actual size of d is neqnsd*ndeg)
1113 integer, intent(in) :: ndeg ! degree of freedom of each element in sparse matrix
1114 
1115 type (irjc_matrix), intent(out) :: a1, a2
1116 type (ccls_matrix), intent(out) :: c1, c2
1117 real(8), dimension(:,:), intent(out) :: d
1118 
1119 ! internal !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1120 integer, allocatable :: jstat1(:), irpt1(:), irowno1(:)
1121 integer, allocatable :: jstat2(:), irpt2(:), irowno2(:)
1122 integer :: nttbr1, nttbr2, ntt1, ntt2
1123 
1124 integer :: ipass, nndeg
1125 integer :: i,j,k,l,m,n
1126 
1127 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1128 
1129 allocate(jstat1(neqnsd), jstat2(neqnsd))
1130 allocate(irpt1(a0%nttbr), irpt2(a0%nttbr))
1131 allocate(irowno1(a0%nttbr), irowno2(a0%nttbr))
1132 
1133 jstat1=0
1134 jstat2=0
1135 irpt1=0
1136 irpt2=0
1137 irowno1=0
1138 irowno2=0
1139 ntt1=0
1140 ntt2=0
1141 
1142 nndeg=ndeg*ndeg
1143 
1144 !write(20,*) 'divmat: begin divide matrix'; call flush(20)!DEBUG
1145 do ipass=1,2
1146  nttbr1=0
1147  nttbr2=0
1148  do l=1,a0%nttbr
1149  i=a0%irow(l)
1150  j=a0%jcol(l)
1151 
1152  if(part(i).eq.1.and.part(j).eq.1) then
1153  nttbr1=nttbr1+1
1154  if(ipass.eq.2) then
1155  a1%irow(nttbr1)=iperm(i)
1156  a1%jcol(nttbr1)=iperm(j)
1157  do k=1,nndeg
1158  a1%val(k,nttbr1)=a0%val(k,l)
1159  end do
1160  end if
1161  end if
1162 
1163  if(part(i).eq.2.and.part(j).eq.2) then
1164  nttbr2=nttbr2+1
1165  if(ipass.eq.2) then
1166  a2%irow(nttbr2)=iperm(i)
1167  a2%jcol(nttbr2)=iperm(j)
1168  do k=1,nndeg
1169  a2%val(k,nttbr2)=a0%val(k,l)
1170  end do
1171  end if
1172  end if
1173 
1174  if(part(i).eq.1.and.part(j).eq.3) then
1175  if(ipass.eq.1)then
1176  call reserv(iperm(i),iperm(j),jstat1,irpt1,irowno1,ntt1)
1177  endif
1178  if(ipass.eq.2)then
1179  call stval(c1,iperm(i),iperm(j),a0%val(1,l),0)
1180  endif
1181  end if
1182 
1183  if(part(i).eq.2.and.part(j).eq.3) then
1184  if(ipass.eq.1)then
1185  call reserv(iperm(i),iperm(j),jstat2,irpt2,irowno2,ntt2)
1186  endif
1187  if(ipass.eq.2)then
1188  call stval(c2,iperm(i),iperm(j),a0%val(1,l),0)
1189  endif
1190  end if
1191 
1192  if(part(i).eq.3.and.part(j).eq.1) then
1193  if(ipass.eq.1)then
1194  call reserv(iperm(j),iperm(i),jstat1,irpt1,irowno1,ntt1)
1195  endif
1196  if(ipass.eq.2)then
1197  call stval(c1,iperm(j),iperm(i),a0%val(1,l),1)
1198  endif
1199  end if
1200 
1201  if(part(i).eq.3.and.part(j).eq.2) then
1202  if(ipass.eq.1)then
1203  call reserv(iperm(j),iperm(i),jstat2,irpt2,irowno2,ntt2)
1204  endif
1205  if(ipass.eq.2)then
1206  call stval(c2,iperm(j),iperm(i),a0%val(1,l),1)
1207  endif
1208  end if
1209 
1210  if(part(i).eq.3.and.part(j).eq.3) then
1211  if(ipass.eq.2) then
1212  do m=1,ndeg
1213  do k=1,ndeg
1214  d(ndeg*(iperm(i)-1)+k,ndeg*(iperm(j)-1)+m)=a0%val(ndeg*(m-1)+k,l)
1215  if(i.ne.j) then
1216  d(ndeg*(iperm(j)-1)+k,ndeg*(iperm(i)-1)+m)=a0%val(ndeg*(k-1)+m,l)
1217  end if
1218  end do
1219  end do
1220  end if
1221  end if
1222 
1223  if(part(i).eq.1.and.part(j).eq.2)then
1224  write(20,*) "divmat: wrong",i,j
1225  stop
1226  endif
1227  if(part(i).eq.2.and.part(j).eq.1)then
1228  write(20,*) "divmat: wrong",i,j
1229  stop
1230  endif
1231  end do
1232 
1233  if(ipass.eq.1)then
1234  a1%neqns=neqns1
1235  a1%ndeg=ndeg
1236  a1%nttbr=nttbr1
1237 
1238  a2%neqns=neqns2
1239  a2%ndeg=ndeg
1240  a2%nttbr=nttbr2
1241 
1242  allocate(a1%irow(nttbr1), a1%jcol(nttbr1), a1%val(ndeg*ndeg,nttbr1))
1243  allocate(a2%irow(nttbr2), a2%jcol(nttbr2), a2%val(ndeg*ndeg,nttbr2))
1244 
1245  c1%neqns=neqns1
1246  c1%ncol=neqnsd
1247  c1%nttbr=ntt1
1248  c1%ndeg=ndeg
1249  allocate(c1%ia(c1%ncol+1))
1250  allocate(c1%ja(c1%nttbr))
1251  allocate(c1%val(ndeg*ndeg,c1%nttbr))
1252 
1253  c2%neqns=neqns2
1254  c2%ncol=neqnsd
1255  c2%nttbr=ntt2
1256  c2%ndeg=ndeg
1257  allocate(c2%ia(c2%ncol+1))
1258  allocate(c2%ja(c2%nttbr))
1259  allocate(c2%val(ndeg*ndeg,c2%nttbr))
1260 
1261 
1262 ! call cclsallocation(c1)
1263 ! call cclsallocation(c2)
1264 
1265  call stiajac(c1,jstat1,irpt1,irowno1)
1266  call stiajac(c2,jstat2,irpt2,irowno2)
1267  deallocate(jstat1,irpt1,irowno1)
1268  deallocate(jstat2,irpt2,irowno2)
1269  endif
1270 end do
1271 
1272 
1273 !write(20,*) 'divmat: end divide matrix'; call flush(20)!DEBUG
1274 
1275 return
1276 
1277 end subroutine divmat
1278 
1279 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1280 
1281 
1282 subroutine divvec(neqns, ndeg, part, r, r1, r2, rd)
1284 implicit none
1285 
1286 integer, intent(in) :: neqns, ndeg
1287 real(8), dimension(:), intent(in) :: r ! original right hand side vector
1288 integer, dimension(:), intent(in) :: part ! belonging of each element in large matrix
1289 
1290 real(8), dimension(:), intent(out) :: r1
1291 real(8), dimension(:), intent(out) :: r2
1292 real(8), dimension(:), intent(out) :: rd
1293 
1294 integer :: idx1, idx2, idxd, idxr
1295 integer :: i
1296 
1297 idx1=1
1298 idx2=1
1299 idxd=1
1300 idxr=1
1301 do i=1,neqns
1302  if(part(i).eq.1) then
1303  r1(idx1:idx1+ndeg-1)=r(idxr:idxr+ndeg-1)
1304  idx1=idx1+ndeg
1305  idxr=idxr+ndeg
1306  endif
1307  if(part(i).eq.2) then
1308  r2(idx2:idx2+ndeg-1)=r(idxr:idxr+ndeg-1)
1309  idx2=idx2+ndeg
1310  idxr=idxr+ndeg
1311  endif
1312  if(part(i).eq.3) then
1313  rd(idxd:idxd+ndeg-1)=r(idxr:idxr+ndeg-1)
1314  idxd=idxd+ndeg
1315  idxr=idxr+ndeg
1316  endif
1317 end do
1318 
1319 end subroutine divvec
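! Added note (illustration, not part of the original source): divvec() scatters the
! global right-hand side into the region vectors and reovec() below is its exact
! inverse. For example, with ndeg = 2 and part = (1,3,2), r = (p,q, s,t, u,v) is
! split into r1 = (p,q), rd = (s,t) and r2 = (u,v).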
1320 
1321 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1322 
1323 
1324 subroutine reovec(neqns, ndeg, part, r, r1, r2, rd)
1325 
1326 implicit none
1327 
1328 integer, intent(in) :: neqns, ndeg
1329 integer, dimension(:), intent(in) :: part ! belonging of each element in large matrix
1330 real(8), dimension(:), intent(in) :: r1
1331 real(8), dimension(:), intent(in) :: r2
1332 real(8), dimension(:), intent(in) :: rd
1333 
1334 real(8), dimension(:), intent(out) :: r ! reordered right hand side vector
1335 
1336 integer :: idx1, idx2, idxd, idxr
1337 integer :: i
1338 
1339 idx1=1
1340 idx2=1
1341 idxd=1
1342 idxr=1
1343 
1344 do i=1,neqns
1345  if(part(i).eq.1) then
1346  r(idxr:idxr+ndeg-1)=r1(idx1:idx1+ndeg-1)
1347  idx1=idx1+ndeg
1348  idxr=idxr+ndeg
1349  endif
1350  if(part(i).eq.2) then
1351  r(idxr:idxr+ndeg-1)=r2(idx2:idx2+ndeg-1)
1352  idx2=idx2+ndeg
1353  idxr=idxr+ndeg
1354  endif
1355  if(part(i).eq.3) then
1356  r(idxr:idxr+ndeg-1)=rd(idxd:idxd+ndeg-1)
1357  idxd=idxd+ndeg
1358  idxr=idxr+ndeg
1359  endif
1360 enddo
1361 
1362 end subroutine reovec
1363 
1364 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1365 
1366 subroutine densldu(a,n)
1367 ! Do LDU decomposition of the given matrix a.
1368 ! After this routine, a is returned as below:
1369 !
1370 ! DUUUUU
1371 ! LDUUUU
1372 ! LLDUUU
1373 ! LLLDUU
1374 ! LLLLDU
1375 ! LLLLLD
1376 !
1377 ! in the notation of
1378 !
1379 ! a = l*d*u
1380 !
1381 ! l = 1000000   d = D000000   u = 1UUUUUU
1382 !     L100000       0D00000       01UUUUU
1383 !     LL10000       00D0000       001UUUU
1384 !     LLL1000       000D000       0001UUU
1385 !     LLLL100       0000D00       00001UU
1386 !     LLLLL10       00000D0       000001U
1387 !     LLLLLL1       000000D       0000001
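!
! Added worked example (illustration, not part of the original source), for a
! 2x2 symmetric matrix:
!   a = | 4  2 |   is overwritten with   | 4.0  0.5 |
!       | 2  3 |                         | 0.5  2.0 |
! i.e. d = diag(4,2) and l(2,1) = u(1,2) = 0.5, and indeed
!   | 1    0 | * | 4  0 | * | 1  0.5 | = | 4  2 |
!   | 0.5  1 |   | 0  2 |   | 0  1   |   | 2  3 |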
1388 
1389 !$Id: m_pds.f90,v 1.2 2007/11/21 02:13:45 kitayama Exp $
1390 implicit none
1391 integer :: i,j,k,l,m,n
1392 real(8), dimension(n,n) :: a
1393 
1394 
1395 !write(*,*) 'in ldu: a'
1396 !do i=1,n
1397 ! do j=1,n
1398 !write(*,*) a(i,j)
1399 ! end do
1400 !end do
1401 
1402 do j=2, n
1403  a(1,j) = a(1,j)/a(1,1)
1404 end do
1405 
1406 do i=2,n
1407  do k=1,i-1
1408  a(i,i) = a(i,i) - a(i,k)*a(k,i)
1409  end do
1410 
1411  do j=i+1,n
1412  do k=1,i-1
1413  a(j,i)=a(j,i)-a(j,k)*a(k,i)
1414  end do
1415 
1416  do k=1,i-1
1417  a(i,j)=a(i,j)-a(i,k)*a(k,j)
1418  end do
1419  a(i,j) = a(i,j)/a(i,i)
1420  end do
1421 end do
1422 
1423 ! treat a as L D U
1424 do j=1,n
1425  do i=j+1, n
1426  a(i,j) = a(i,j)/a(j,j)
1427  end do
1428 end do
1429 
1430 !write(*,*) "in ldu: LDU decomposed a"
1431 !do i=1,n
1432 ! do j=1,n
1433 !write(*,*) a(i,j)
1434 ! end do
1435 !end do
1436 
1437 
1438 end subroutine densldu
1439 
1440 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1441 
1442 
1443 subroutine denssolve(a,ndim,r)
1444 !$Id: m_pds.f90,v 1.2 2007/11/21 02:13:45 kitayama Exp $
1445 ! solve Ax=b for the given right-hand side vector r
1446 ! the result is returned in r
1447 integer :: ndim ! dimension of matrix a
1448 real(8), dimension(:,:) :: a ! coefficient matrix, already LDU-decomposed by densldu()
1449 real(8), dimension(:) :: r ! right-hand side vector
1450 
1451 !is it needed?
1452 !real(8), dimension(ndim, nrhs) :: x !TMP
1453 
1454 integer :: i,j,k,l,m,n
1455 
1456 ! solve Ly=b (y=DUx)
1457 do i=1,ndim
1458  do j=1,i-1
1459  r(i)=r(i)-a(i,j)*r(j)
1460  end do
1461 end do
1462 
1463 !solve Dz=y (z=Ux)
1464 do i=1, ndim
1465  r(i)=r(i)/a(i,i)
1466 end do
1467 
1468 !solve Ux=z
1469 do i=ndim-1, 1, -1
1470  do j=i+1, ndim
1471  r(i)=r(i)-a(i,j)*r(j)
1472  end do
1473 end do
1474 
1475 end subroutine denssolve
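! Added illustrative example (not part of the original solver): a minimal,
! self-contained check of densldu() and denssolve() on the 2x2 system from the
! worked example above. The subroutine name densldu_example and its data are
! assumptions introduced here for demonstration only; it is not called anywhere.
subroutine densldu_example()
  implicit none
  real(8) :: a(2,2), b(2)
  a = reshape((/4.0d0, 2.0d0, 2.0d0, 3.0d0/), (/2,2/)) ! symmetric test matrix
  b = (/8.0d0, 7.0d0/)                                 ! right-hand side
  call densldu(a, 2)                                   ! in-place LDU factorization
  call denssolve(a, 2, b)                              ! forward/diagonal/back substitution
  write(6,*) 'densldu_example: x =', b                 ! expected: 1.25, 1.5
end subroutine densldu_example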
1476 
1477 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1478 
1479 subroutine setbt()
1480 implicit none
1481 
1482 include 'mpif.h'
1483 
1484 integer :: ierr
1485 
1486 isroot=.false.
1487 istrunk=.false.
1488 isleaf=.false.
1489 
1490 ! get the process rank and the total number of processes
1491 call mpi_comm_size(mpi_comm_world, npe, ierr)
1492 call mpi_comm_rank(mpi_comm_world, myid, ierr)
1493 
1494 if ((mod(npe,2)==0) .and. (myid == npe-1)) then
1495 ! write(20,*) 'I am not a member of tree.'
1496  call fin()
1497  stop
1498 end if
1499 
1500 if (myid==0) then
1501  isroot=.true.
1502  ip1=myid*2+1
1503  ip2=myid*2+2
1504 ! write(20,*)'I am root.'
1505 else
1506  imp=(myid-1)/2
1507 end if
1508 
1509 ! trunk or leaf
1510 if (myid*2+1 < npe-1) then
1511  istrunk=.true.
1512  ip1=myid*2+1
1513  ip2=myid*2+2
1514 ! write(20,*)'I am trunk.'
1515 else
1516  isleaf=.true.
1517 ! write(20,*)'I am leaf.'
1518 end if
1519 return
1520 end subroutine
1521 
1522 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1523 
1524 subroutine errtrp(mes)
1525 character(*) mes
1526 write(6,*) 'Error in : process ', myid
1527 write(20,*) mes
1528 
1529 call finalize()
1530 stop
1531 end subroutine errtrp
1532 
1533 !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
1534 
1535 end module m_pds