Merge lp:~garsua/siesta/trunk-elsi-dm into lp:~albertog/siesta/trunk-elsi-dm
Proposed by: Alberto Garcia
Status: Needs review
Proposed branch: lp:~garsua/siesta/trunk-elsi-dm
Merge into: lp:~albertog/siesta/trunk-elsi-dm
Diff against target: 470 lines (+218/-220), 1 file modified: Src/m_elsi_interface.F90 (+218/-220)
To merge this branch: bzr merge lp:~garsua/siesta/trunk-elsi-dm
Related bugs:

| Reviewer | Review Type | Date Requested | Status |
|---|---|---|---|
| Alberto Garcia | | | Pending |

Review via email: mp+361869@code.launchpad.net
Commit message
Cosmetic changes in m_elsi_interface:
- Removed "use fdf, only: fdf_get" from the elsi_real_solver and elsi_complex_solver subroutines
- Unindented lines 401 to 510 (in elsi_real_solver) and 1351 to 1460 (in elsi_complex_solver)
Description of the change
Trying to resubmit.
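
For background (the change below is purely cosmetic, but this is what the re-indented blocks do): both solvers build an off-by-one, 1-based row pointer from SIESTA's 0-based row starts before handing the sparsity pattern to elsi_set_csc. A minimal standalone sketch of just that conversion, with made-up sizes and no ELSI calls, is:

    ! Hypothetical standalone demo, not the actual SIESTA code:
    ! build the 1-based row pointer with the trailing "nnz+1" sentinel
    ! that elsi_set_csc expects.
    program row_ptr_demo
      implicit none
      integer, parameter :: n_basis_l = 4, nnz_l = 7
      ! 0-based row starts, as the sparse pattern provides them
      integer :: row_ptr(n_basis_l) = (/ 0, 2, 3, 5 /)
      integer :: row_ptr2(n_basis_l+1)

      ! Shift to 1-based indexing and append the terminating pointer
      row_ptr2(1:n_basis_l) = row_ptr(1:n_basis_l) + 1
      row_ptr2(n_basis_l+1) = nnz_l + 1

      print *, row_ptr2   ! expected: 1 3 4 6 8
    end program row_ptr_demo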
Preview Diff
1 | === modified file 'Src/m_elsi_interface.F90' |
2 | --- Src/m_elsi_interface.F90 2019-01-15 14:46:49 +0000 |
3 | +++ Src/m_elsi_interface.F90 2019-01-17 09:06:38 +0000 |
4 | @@ -271,7 +271,6 @@ |
5 | subroutine elsi_real_solver(iscf, n_basis, n_basis_l, n_spin, nnz_l, row_ptr, & |
6 | col_idx, qtot, temp, ham, ovlp, dm, edm, ef, ets, Get_EDM_Only) |
7 | |
8 | - use fdf, only: fdf_get |
9 | use m_mpi_utils, only: globalize_sum |
10 | use parallel, only: BlockSize |
11 | #ifdef MPI |
12 | @@ -399,116 +398,116 @@ |
13 | |
14 | endif ! iscf == 1 |
15 | |
16 | - if (n_spin == 1) then |
17 | - |
18 | - ! Sparsity pattern |
19 | - call globalize_sum(nnz_l, nnz_g, comm=elsi_global_comm) |
20 | - |
21 | - allocate(row_ptr2(n_basis_l+1)) |
22 | - row_ptr2(1:n_basis_l) = row_ptr(1:n_basis_l)+1 |
23 | - row_ptr2(n_basis_l+1) = nnz_l+1 |
24 | - |
25 | - call elsi_set_csc(elsi_h, nnz_g, nnz_l, n_basis_l, col_idx, row_ptr2) |
26 | - deallocate(row_ptr2) |
27 | - |
28 | - call elsi_set_csc_blk(elsi_h, BlockSize) |
29 | - call elsi_set_mpi(elsi_h, elsi_global_comm) |
30 | - |
31 | - else |
32 | - |
33 | - ! MPI logic for spin polarization |
34 | - |
35 | - ! Re-create numh, as we use it in the transfer |
36 | - allocate(numh(n_basis_l)) |
37 | - numh(1) = row_ptr(2) |
38 | - do i = 2, n_basis_l-1 |
39 | - numh(i) = row_ptr(i+1)-row_ptr(i) |
40 | - enddo |
41 | - numh(n_basis_l) = nnz_l - row_ptr(n_basis_l) |
42 | - |
43 | - ! Split the communicator in spins and get distribution objects |
44 | - ! for the data redistribution needed |
45 | - ! Note that dist_spin is an array |
46 | - call get_spin_comms_and_dists(elsi_global_comm,elsi_global_comm, & |
47 | - blocksize, n_spin, & |
48 | - dist_global,dist_spin, elsi_spatial_comm, elsi_spin_comm) |
49 | - |
50 | - ! Find out which spin team we are in, and tag the spin we work on |
51 | - call mpi_comm_rank( elsi_Spin_Comm, spin_rank, ierr ) |
52 | - my_spin = spin_rank+1 ! {1,2} |
53 | - |
54 | - |
55 | - ! This is done serially, each time filling one spin set |
56 | - ! Note that **all processes** need to have the same pkg_global |
57 | - |
58 | - do ispin = 1, n_spin |
59 | - |
60 | - ! Load pkg_global data package |
61 | - pkg_global%norbs = n_basis |
62 | - pkg_global%no_l = n_basis_l |
63 | - pkg_global%nnzl = nnz_l |
64 | - pkg_global%numcols => numh |
65 | - pkg_global%cols => col_idx |
66 | - |
67 | - allocate(pkg_global%vals(2)) |
68 | - ! Link the vals items to the appropriate arrays (no extra memory here) |
69 | - pkg_global%vals(1)%data => ovlp(:) |
70 | - ! Note that we *cannot* say => ham(:,my_spin) |
71 | - ! and avoid the sequential loop, as then half the processors will send |
72 | - ! the information for 'spin up' and the other half the information for 'spin down', |
73 | - ! which is *not* what we want. |
74 | - pkg_global%vals(2)%data => ham(:,ispin) |
75 | - |
76 | - call timer("redist_orbs_fwd", 1) |
77 | - |
78 | - ! We are doing the transfers sequentially. One spin team is |
79 | - ! 'idle' (in the receiving side) in each pass, as the dist_spin(ispin) distribution |
80 | - ! does not involve them. |
81 | - |
82 | - call redistribute_spmatrix(n_basis,pkg_global,dist_global, & |
83 | - pkg_spin,dist_spin(ispin),elsi_global_Comm) |
84 | - |
85 | - call timer("redist_orbs_fwd", 2) |
86 | - |
87 | - if (my_spin == ispin) then ! Each team gets their own data |
88 | - |
89 | - !nrows = pkg_spin%norbs ! or simply 'norbs' |
90 | - my_no_l = pkg_spin%no_l |
91 | - my_nnz_l = pkg_spin%nnzl |
92 | - call MPI_AllReduce(my_nnz_l,my_nnz,1,MPI_integer,MPI_sum,elsi_Spatial_Comm,ierr) |
93 | - ! generate off-by-one row pointer |
94 | - call re_alloc(my_row_ptr2,1,my_no_l+1,"my_row_ptr2","elsi_solver") |
95 | - my_row_ptr2(1) = 1 |
96 | - do ih = 1,my_no_l |
97 | - my_row_ptr2(ih+1) = my_row_ptr2(ih) + pkg_spin%numcols(ih) |
98 | - enddo |
99 | - |
100 | - my_col_idx => pkg_spin%cols |
101 | - my_S => pkg_spin%vals(1)%data |
102 | - my_H => pkg_spin%vals(2)%data |
103 | - |
104 | - call re_alloc(my_DM,1,my_nnz_l,"my_DM","elsi_solver") |
105 | - call re_alloc(my_EDM,1,my_nnz_l,"my_EDM","elsi_solver") |
106 | - endif |
107 | - |
108 | - ! Clean pkg_global |
109 | - nullify(pkg_global%vals(1)%data) |
110 | - nullify(pkg_global%vals(2)%data) |
111 | - deallocate(pkg_global%vals) |
112 | - nullify(pkg_global%numcols) |
113 | - nullify(pkg_global%cols) |
114 | - |
115 | - enddo |
116 | - |
117 | - call elsi_set_csc(elsi_h, my_nnz, my_nnz_l, my_no_l, my_col_idx, my_row_ptr2) |
118 | - call de_alloc(my_row_ptr2,"my_row_ptr2","elsi_solver") |
119 | - |
120 | - call elsi_set_csc_blk(elsi_h, BlockSize) |
121 | - call elsi_set_spin(elsi_h, n_spin, my_spin) |
122 | - call elsi_set_mpi(elsi_h, elsi_Spatial_comm) |
123 | - call elsi_set_mpi_global(elsi_h, elsi_global_comm) |
124 | - |
125 | - endif ! n_spin |
126 | + if (n_spin == 1) then |
127 | + |
128 | + ! Sparsity pattern |
129 | + call globalize_sum(nnz_l, nnz_g, comm=elsi_global_comm) |
130 | + |
131 | + allocate(row_ptr2(n_basis_l+1)) |
132 | + row_ptr2(1:n_basis_l) = row_ptr(1:n_basis_l)+1 |
133 | + row_ptr2(n_basis_l+1) = nnz_l+1 |
134 | + |
135 | + call elsi_set_csc(elsi_h, nnz_g, nnz_l, n_basis_l, col_idx, row_ptr2) |
136 | + deallocate(row_ptr2) |
137 | + |
138 | + call elsi_set_csc_blk(elsi_h, BlockSize) |
139 | + call elsi_set_mpi(elsi_h, elsi_global_comm) |
140 | + |
141 | + else |
142 | + |
143 | + ! MPI logic for spin polarization |
144 | + |
145 | + ! Re-create numh, as we use it in the transfer |
146 | + allocate(numh(n_basis_l)) |
147 | + numh(1) = row_ptr(2) |
148 | + do i = 2, n_basis_l-1 |
149 | + numh(i) = row_ptr(i+1)-row_ptr(i) |
150 | + enddo |
151 | + numh(n_basis_l) = nnz_l - row_ptr(n_basis_l) |
152 | + |
153 | + ! Split the communicator in spins and get distribution objects |
154 | + ! for the data redistribution needed |
155 | + ! Note that dist_spin is an array |
156 | + call get_spin_comms_and_dists(elsi_global_comm,elsi_global_comm, & |
157 | + blocksize, n_spin, & |
158 | + dist_global,dist_spin, elsi_spatial_comm, elsi_spin_comm) |
159 | + |
160 | + ! Find out which spin team we are in, and tag the spin we work on |
161 | + call mpi_comm_rank( elsi_Spin_Comm, spin_rank, ierr ) |
162 | + my_spin = spin_rank+1 ! {1,2} |
163 | + |
164 | + |
165 | + ! This is done serially, each time filling one spin set |
166 | + ! Note that **all processes** need to have the same pkg_global |
167 | + |
168 | + do ispin = 1, n_spin |
169 | + |
170 | + ! Load pkg_global data package |
171 | + pkg_global%norbs = n_basis |
172 | + pkg_global%no_l = n_basis_l |
173 | + pkg_global%nnzl = nnz_l |
174 | + pkg_global%numcols => numh |
175 | + pkg_global%cols => col_idx |
176 | + |
177 | + allocate(pkg_global%vals(2)) |
178 | + ! Link the vals items to the appropriate arrays (no extra memory here) |
179 | + pkg_global%vals(1)%data => ovlp(:) |
180 | + ! Note that we *cannot* say => ham(:,my_spin) |
181 | + ! and avoid the sequential loop, as then half the processors will send |
182 | + ! the information for 'spin up' and the other half the information for 'spin down', |
183 | + ! which is *not* what we want. |
184 | + pkg_global%vals(2)%data => ham(:,ispin) |
185 | + |
186 | + call timer("redist_orbs_fwd", 1) |
187 | + |
188 | + ! We are doing the transfers sequentially. One spin team is |
189 | + ! 'idle' (in the receiving side) in each pass, as the dist_spin(ispin) distribution |
190 | + ! does not involve them. |
191 | + |
192 | + call redistribute_spmatrix(n_basis,pkg_global,dist_global, & |
193 | + pkg_spin,dist_spin(ispin),elsi_global_Comm) |
194 | + |
195 | + call timer("redist_orbs_fwd", 2) |
196 | + |
197 | + if (my_spin == ispin) then ! Each team gets their own data |
198 | + |
199 | + !nrows = pkg_spin%norbs ! or simply 'norbs' |
200 | + my_no_l = pkg_spin%no_l |
201 | + my_nnz_l = pkg_spin%nnzl |
202 | + call MPI_AllReduce(my_nnz_l,my_nnz,1,MPI_integer,MPI_sum,elsi_Spatial_Comm,ierr) |
203 | + ! generate off-by-one row pointer |
204 | + call re_alloc(my_row_ptr2,1,my_no_l+1,"my_row_ptr2","elsi_solver") |
205 | + my_row_ptr2(1) = 1 |
206 | + do ih = 1,my_no_l |
207 | + my_row_ptr2(ih+1) = my_row_ptr2(ih) + pkg_spin%numcols(ih) |
208 | + enddo |
209 | + |
210 | + my_col_idx => pkg_spin%cols |
211 | + my_S => pkg_spin%vals(1)%data |
212 | + my_H => pkg_spin%vals(2)%data |
213 | + |
214 | + call re_alloc(my_DM,1,my_nnz_l,"my_DM","elsi_solver") |
215 | + call re_alloc(my_EDM,1,my_nnz_l,"my_EDM","elsi_solver") |
216 | + endif |
217 | + |
218 | + ! Clean pkg_global |
219 | + nullify(pkg_global%vals(1)%data) |
220 | + nullify(pkg_global%vals(2)%data) |
221 | + deallocate(pkg_global%vals) |
222 | + nullify(pkg_global%numcols) |
223 | + nullify(pkg_global%cols) |
224 | + |
225 | + enddo |
226 | + |
227 | + call elsi_set_csc(elsi_h, my_nnz, my_nnz_l, my_no_l, my_col_idx, my_row_ptr2) |
228 | + call de_alloc(my_row_ptr2,"my_row_ptr2","elsi_solver") |
229 | + |
230 | + call elsi_set_csc_blk(elsi_h, BlockSize) |
231 | + call elsi_set_spin(elsi_h, n_spin, my_spin) |
232 | + call elsi_set_mpi(elsi_h, elsi_Spatial_comm) |
233 | + call elsi_set_mpi_global(elsi_h, elsi_global_comm) |
234 | + |
235 | + endif ! n_spin |
236 | |
237 | call timer("elsi-solver", 1) |
238 | |
239 | @@ -1217,7 +1216,6 @@ |
240 | col_idx, qtot, temp, ham, ovlp, dm, edm, ef, ets, & |
241 | nkpnt, kpt_n, kpt, weight, kpt_comm, Get_EDM_Only) |
242 | |
243 | - use fdf, only: fdf_get |
244 | use m_mpi_utils, only: globalize_sum |
245 | use parallel, only: BlockSize |
246 | #ifdef MPI |
247 | @@ -1350,116 +1348,116 @@ |
248 | |
249 | !print *, global_rank, "| ", " Entering elsi_complex_solver" |
250 | |
251 | - if (n_spin == 1) then |
252 | - |
253 | - ! Sparsity pattern |
254 | - call globalize_sum(nnz_l, nnz_g, comm=kpt_comm) |
255 | - |
256 | - allocate(row_ptr2(n_basis_l+1)) |
257 | - row_ptr2(1:n_basis_l) = row_ptr(1:n_basis_l)+1 |
258 | - row_ptr2(n_basis_l+1) = nnz_l+1 |
259 | - |
260 | - call elsi_set_csc(elsi_h, nnz_g, nnz_l, n_basis_l, col_idx, row_ptr2) |
261 | - deallocate(row_ptr2) |
262 | - |
263 | - call elsi_set_csc_blk(elsi_h, BlockSize) |
264 | - call elsi_set_kpoint(elsi_h, nkpnt, kpt_n, weight) |
265 | - call elsi_set_mpi(elsi_h, kpt_comm) |
266 | - call elsi_set_mpi_global(elsi_h, elsi_global_comm) |
267 | - |
268 | - else |
269 | - |
270 | - call mpi_comm_rank( elsi_global_Comm, global_rank, ierr ) |
271 | - |
272 | - ! MPI logic for spin polarization |
273 | - |
274 | - ! Split the communicator in spins and get distribution objects |
275 | - ! for the data redistribution needed |
276 | - ! Note that dist_spin is an array |
277 | - call get_spin_comms_and_dists(kpt_comm,kpt_comm, & !! **** kpt_comm as global? |
278 | - blocksize, n_spin, & |
279 | - dist_global,dist_spin, elsi_spatial_comm, elsi_spin_comm) |
280 | - |
281 | - ! Find out which spin team we are in, and tag the spin we work on |
282 | - call mpi_comm_rank( elsi_Spin_Comm, spin_rank, ierr ) |
283 | - my_spin = spin_rank+1 ! {1,2} |
284 | - |
285 | - !print *, global_rank, "| ", "spin ", my_spin, " After spin splitting" |
286 | - |
287 | - ! This is done serially, each time filling one spin set |
288 | - ! Note that **all processes** need to have the same pkg_global |
289 | - |
290 | - do ispin = 1, n_spin |
291 | - |
292 | - ! Load pkg_global data package |
293 | - pkg_global%norbs = n_basis |
294 | - pkg_global%no_l = n_basis_l |
295 | - pkg_global%nnzl = nnz_l |
296 | - pkg_global%numcols => numh |
297 | - pkg_global%cols => col_idx |
298 | - |
299 | - allocate(pkg_global%complex_vals(2)) |
300 | - ! Link the vals items to the appropriate arrays (no extra memory here) |
301 | - pkg_global%complex_vals(1)%data => ovlp(:) |
302 | - ! Note that we *cannot* say => ham(:,my_spin) |
303 | - ! and avoid the sequential loop, as then half the processors will send |
304 | - ! the information for 'spin up' and the other half the information for 'spin down', |
305 | - ! which is *not* what we want. |
306 | - pkg_global%complex_vals(2)%data => ham(:,ispin) |
307 | - |
308 | - call timer("redist_orbs_fwd", 1) |
309 | - |
310 | - ! We are doing the transfers sequentially. One spin team is |
311 | - ! 'idle' (in the receiving side) in each pass, as the dist_spin(ispin) distribution |
312 | - ! does not involve them. |
313 | - |
314 | - call redistribute_spmatrix(n_basis,pkg_global,dist_global, & |
315 | + if (n_spin == 1) then |
316 | + |
317 | + ! Sparsity pattern |
318 | + call globalize_sum(nnz_l, nnz_g, comm=kpt_comm) |
319 | + |
320 | + allocate(row_ptr2(n_basis_l+1)) |
321 | + row_ptr2(1:n_basis_l) = row_ptr(1:n_basis_l)+1 |
322 | + row_ptr2(n_basis_l+1) = nnz_l+1 |
323 | + |
324 | + call elsi_set_csc(elsi_h, nnz_g, nnz_l, n_basis_l, col_idx, row_ptr2) |
325 | + deallocate(row_ptr2) |
326 | + |
327 | + call elsi_set_csc_blk(elsi_h, BlockSize) |
328 | + call elsi_set_kpoint(elsi_h, nkpnt, kpt_n, weight) |
329 | + call elsi_set_mpi(elsi_h, kpt_comm) |
330 | + call elsi_set_mpi_global(elsi_h, elsi_global_comm) |
331 | + |
332 | + else |
333 | + |
334 | + call mpi_comm_rank( elsi_global_Comm, global_rank, ierr ) |
335 | + |
336 | + ! MPI logic for spin polarization |
337 | + |
338 | + ! Split the communicator in spins and get distribution objects |
339 | + ! for the data redistribution needed |
340 | + ! Note that dist_spin is an array |
341 | + call get_spin_comms_and_dists(kpt_comm,kpt_comm, & !! **** kpt_comm as global? |
342 | + blocksize, n_spin, & |
343 | + dist_global,dist_spin, elsi_spatial_comm, elsi_spin_comm) |
344 | + |
345 | + ! Find out which spin team we are in, and tag the spin we work on |
346 | + call mpi_comm_rank( elsi_Spin_Comm, spin_rank, ierr ) |
347 | + my_spin = spin_rank+1 ! {1,2} |
348 | + |
349 | + !print *, global_rank, "| ", "spin ", my_spin, " After spin splitting" |
350 | + |
351 | + ! This is done serially, each time filling one spin set |
352 | + ! Note that **all processes** need to have the same pkg_global |
353 | + |
354 | + do ispin = 1, n_spin |
355 | + |
356 | + ! Load pkg_global data package |
357 | + pkg_global%norbs = n_basis |
358 | + pkg_global%no_l = n_basis_l |
359 | + pkg_global%nnzl = nnz_l |
360 | + pkg_global%numcols => numh |
361 | + pkg_global%cols => col_idx |
362 | + |
363 | + allocate(pkg_global%complex_vals(2)) |
364 | + ! Link the vals items to the appropriate arrays (no extra memory here) |
365 | + pkg_global%complex_vals(1)%data => ovlp(:) |
366 | + ! Note that we *cannot* say => ham(:,my_spin) |
367 | + ! and avoid the sequential loop, as then half the processors will send |
368 | + ! the information for 'spin up' and the other half the information for 'spin down', |
369 | + ! which is *not* what we want. |
370 | + pkg_global%complex_vals(2)%data => ham(:,ispin) |
371 | + |
372 | + call timer("redist_orbs_fwd", 1) |
373 | + |
374 | + ! We are doing the transfers sequentially. One spin team is |
375 | + ! 'idle' (in the receiving side) in each pass, as the dist_spin(ispin) distribution |
376 | + ! does not involve them. |
377 | + |
378 | + call redistribute_spmatrix(n_basis,pkg_global,dist_global, & |
379 | pkg_spin,dist_spin(ispin),kpt_Comm) |
380 | |
381 | - call timer("redist_orbs_fwd", 2) |
382 | - |
383 | - if (my_spin == ispin) then ! Each team gets their own data |
384 | - |
385 | - !nrows = pkg_spin%norbs ! or simply 'norbs' |
386 | - my_no_l = pkg_spin%no_l |
387 | - my_nnz_l = pkg_spin%nnzl |
388 | - call MPI_AllReduce(my_nnz_l,my_nnz,1,MPI_integer,MPI_sum,elsi_Spatial_Comm,ierr) |
389 | - ! generate off-by-one row pointer |
390 | - call re_alloc(my_row_ptr2,1,my_no_l+1,"my_row_ptr2","elsi_solver") |
391 | - my_row_ptr2(1) = 1 |
392 | - do ih = 1,my_no_l |
393 | - my_row_ptr2(ih+1) = my_row_ptr2(ih) + pkg_spin%numcols(ih) |
394 | - enddo |
395 | - |
396 | - my_col_idx => pkg_spin%cols |
397 | - my_S => pkg_spin%complex_vals(1)%data |
398 | - my_H => pkg_spin%complex_vals(2)%data |
399 | - |
400 | - call re_alloc(my_DM,1,my_nnz_l,"my_DM","elsi_solver") |
401 | - call re_alloc(my_EDM,1,my_nnz_l,"my_EDM","elsi_solver") |
402 | - endif |
403 | - |
404 | - ! Clean pkg_global |
405 | - nullify(pkg_global%complex_vals(1)%data) |
406 | - nullify(pkg_global%complex_vals(2)%data) |
407 | - deallocate(pkg_global%complex_vals) |
408 | - nullify(pkg_global%numcols) |
409 | - nullify(pkg_global%cols) |
410 | - |
411 | - enddo |
412 | - |
413 | - !print *, global_rank, "| ", "spin ", my_spin, "Done spin transfers" |
414 | - |
415 | - call elsi_set_csc(elsi_h, my_nnz, my_nnz_l, my_no_l, my_col_idx, my_row_ptr2) |
416 | - call de_alloc(my_row_ptr2,"my_row_ptr2","elsi_solver") |
417 | - |
418 | - call elsi_set_csc_blk(elsi_h, BlockSize) |
419 | - call elsi_set_spin(elsi_h, n_spin, my_spin) |
420 | - call elsi_set_kpoint(elsi_h, nkpnt, kpt_n, weight) |
421 | - call elsi_set_mpi(elsi_h, elsi_Spatial_comm) |
422 | - call elsi_set_mpi_global(elsi_h, elsi_global_comm) |
423 | - |
424 | - endif ! n_spin |
425 | + call timer("redist_orbs_fwd", 2) |
426 | + |
427 | + if (my_spin == ispin) then ! Each team gets their own data |
428 | + |
429 | + !nrows = pkg_spin%norbs ! or simply 'norbs' |
430 | + my_no_l = pkg_spin%no_l |
431 | + my_nnz_l = pkg_spin%nnzl |
432 | + call MPI_AllReduce(my_nnz_l,my_nnz,1,MPI_integer,MPI_sum,elsi_Spatial_Comm,ierr) |
433 | + ! generate off-by-one row pointer |
434 | + call re_alloc(my_row_ptr2,1,my_no_l+1,"my_row_ptr2","elsi_solver") |
435 | + my_row_ptr2(1) = 1 |
436 | + do ih = 1,my_no_l |
437 | + my_row_ptr2(ih+1) = my_row_ptr2(ih) + pkg_spin%numcols(ih) |
438 | + enddo |
439 | + |
440 | + my_col_idx => pkg_spin%cols |
441 | + my_S => pkg_spin%complex_vals(1)%data |
442 | + my_H => pkg_spin%complex_vals(2)%data |
443 | + |
444 | + call re_alloc(my_DM,1,my_nnz_l,"my_DM","elsi_solver") |
445 | + call re_alloc(my_EDM,1,my_nnz_l,"my_EDM","elsi_solver") |
446 | + endif |
447 | + |
448 | + ! Clean pkg_global |
449 | + nullify(pkg_global%complex_vals(1)%data) |
450 | + nullify(pkg_global%complex_vals(2)%data) |
451 | + deallocate(pkg_global%complex_vals) |
452 | + nullify(pkg_global%numcols) |
453 | + nullify(pkg_global%cols) |
454 | + |
455 | + enddo |
456 | + |
457 | + !print *, global_rank, "| ", "spin ", my_spin, "Done spin transfers" |
458 | + |
459 | + call elsi_set_csc(elsi_h, my_nnz, my_nnz_l, my_no_l, my_col_idx, my_row_ptr2) |
460 | + call de_alloc(my_row_ptr2,"my_row_ptr2","elsi_solver") |
461 | + |
462 | + call elsi_set_csc_blk(elsi_h, BlockSize) |
463 | + call elsi_set_spin(elsi_h, n_spin, my_spin) |
464 | + call elsi_set_kpoint(elsi_h, nkpnt, kpt_n, weight) |
465 | + call elsi_set_mpi(elsi_h, elsi_Spatial_comm) |
466 | + call elsi_set_mpi_global(elsi_h, elsi_global_comm) |
467 | + |
468 | + endif ! n_spin |
469 | |
470 | call timer("elsi-solver", 1) |
471 |
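
A side note on the spin-polarized branch above: according to the comments in the diff, get_spin_comms_and_dists splits the incoming communicator into per-spin "spatial" teams plus a companion "spin" communicator whose rank tags the spin each team works on. A rough standalone sketch of that idea (hypothetical names, not SIESTA's routine, assuming an even number of MPI ranks) is:

    ! Hypothetical demo of splitting a communicator into two spin teams.
    program spin_split_demo
      use mpi
      implicit none
      integer :: ierr, nprocs, rank, color
      integer :: spatial_comm, spin_comm, spin_rank, my_spin

      call MPI_Init(ierr)
      call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)

      ! First half of the ranks handles spin 1, second half spin 2
      color = merge(1, 2, rank < nprocs/2)
      call MPI_Comm_split(MPI_COMM_WORLD, color, rank, spatial_comm, ierr)

      ! Companion communicator linking ranks at the same position across
      ! the two teams; its rank tags the spin index, as in the diff
      call MPI_Comm_split(MPI_COMM_WORLD, mod(rank, nprocs/2), rank, spin_comm, ierr)
      call MPI_Comm_rank(spin_comm, spin_rank, ierr)
      my_spin = spin_rank + 1   ! {1,2}

      call MPI_Finalize(ierr)
    end program spin_split_demo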