Merge lp:~garsua/siesta/trunk-elsi-dm into lp:~albertog/siesta/trunk-elsi-dm
Proposed by: Alberto Garcia
Status: Needs review
Proposed branch: lp:~garsua/siesta/trunk-elsi-dm
Merge into: lp:~albertog/siesta/trunk-elsi-dm
Diff against target: 470 lines (+218/-220), 1 file modified (Src/m_elsi_interface.F90)
To merge this branch: bzr merge lp:~garsua/siesta/trunk-elsi-dm
Related bugs: none

Reviewer | Review Type | Date Requested | Status
---|---|---|---
Alberto Garcia | | | Pending

Review via email: mp+361869@code.launchpad.net
Commit message
Cosmetic changes in m_elsi_interface:
- Removed "use fdf, only: fdf_get" from the elsi_real_solver and elsi_complex_solver subroutines
- Unindented lines 401 to 510 (in elsi_real_solver) and 1351 to 1460 (in elsi_complex_solver)
Description of the change
Trying to resubmit.
Preview Diff
```diff
=== modified file 'Src/m_elsi_interface.F90'
--- Src/m_elsi_interface.F90	2019-01-15 14:46:49 +0000
+++ Src/m_elsi_interface.F90	2019-01-17 09:06:38 +0000
@@ -271,7 +271,6 @@
   subroutine elsi_real_solver(iscf, n_basis, n_basis_l, n_spin, nnz_l, row_ptr, &
       col_idx, qtot, temp, ham, ovlp, dm, edm, ef, ets, Get_EDM_Only)
 
-    use fdf, only: fdf_get
     use m_mpi_utils, only: globalize_sum
     use parallel, only: BlockSize
 #ifdef MPI
@@ -399,116 +398,116 @@
 
    endif ! iscf == 1
 
    if (n_spin == 1) then
 
       ! Sparsity pattern
       call globalize_sum(nnz_l, nnz_g, comm=elsi_global_comm)
 
       allocate(row_ptr2(n_basis_l+1))
       row_ptr2(1:n_basis_l) = row_ptr(1:n_basis_l)+1
       row_ptr2(n_basis_l+1) = nnz_l+1
 
       call elsi_set_csc(elsi_h, nnz_g, nnz_l, n_basis_l, col_idx, row_ptr2)
       deallocate(row_ptr2)
 
       call elsi_set_csc_blk(elsi_h, BlockSize)
       call elsi_set_mpi(elsi_h, elsi_global_comm)
 
    else
 
       ! MPI logic for spin polarization
 
       ! Re-create numh, as we use it in the transfer
       allocate(numh(n_basis_l))
       numh(1) = row_ptr(2)
       do i = 2, n_basis_l-1
          numh(i) = row_ptr(i+1)-row_ptr(i)
       enddo
       numh(n_basis_l) = nnz_l - row_ptr(n_basis_l)
 
       ! Split the communicator in spins and get distribution objects
       ! for the data redistribution needed
       ! Note that dist_spin is an array
       call get_spin_comms_and_dists(elsi_global_comm,elsi_global_comm, &
                                     blocksize, n_spin, &
                                     dist_global,dist_spin, elsi_spatial_comm, elsi_spin_comm)
 
       ! Find out which spin team we are in, and tag the spin we work on
       call mpi_comm_rank( elsi_Spin_Comm, spin_rank, ierr )
       my_spin = spin_rank+1   ! {1,2}
 
 
       ! This is done serially, each time filling one spin set
       ! Note that **all processes** need to have the same pkg_global
 
       do ispin = 1, n_spin
 
          ! Load pkg_global data package
          pkg_global%norbs = n_basis
          pkg_global%no_l = n_basis_l
          pkg_global%nnzl = nnz_l
          pkg_global%numcols => numh
          pkg_global%cols => col_idx
 
          allocate(pkg_global%vals(2))
          ! Link the vals items to the appropriate arrays (no extra memory here)
          pkg_global%vals(1)%data => ovlp(:)
          ! Note that we *cannot* say => ham(:,my_spin)
          ! and avoid the sequential loop, as then half the processors will send
          ! the information for 'spin up' and the other half the information for 'spin down',
          ! which is *not* what we want.
          pkg_global%vals(2)%data => ham(:,ispin)
 
          call timer("redist_orbs_fwd", 1)
 
          ! We are doing the transfers sequentially. One spin team is
          ! 'idle' (in the receiving side) in each pass, as the dist_spin(ispin) distribution
          ! does not involve them.
 
          call redistribute_spmatrix(n_basis,pkg_global,dist_global, &
                                     pkg_spin,dist_spin(ispin),elsi_global_Comm)
 
          call timer("redist_orbs_fwd", 2)
 
          if (my_spin == ispin) then ! Each team gets their own data
 
             !nrows = pkg_spin%norbs   ! or simply 'norbs'
             my_no_l = pkg_spin%no_l
             my_nnz_l = pkg_spin%nnzl
             call MPI_AllReduce(my_nnz_l,my_nnz,1,MPI_integer,MPI_sum,elsi_Spatial_Comm,ierr)
             ! generate off-by-one row pointer
             call re_alloc(my_row_ptr2,1,my_no_l+1,"my_row_ptr2","elsi_solver")
             my_row_ptr2(1) = 1
             do ih = 1,my_no_l
                my_row_ptr2(ih+1) = my_row_ptr2(ih) + pkg_spin%numcols(ih)
             enddo
 
             my_col_idx => pkg_spin%cols
             my_S => pkg_spin%vals(1)%data
             my_H => pkg_spin%vals(2)%data
 
             call re_alloc(my_DM,1,my_nnz_l,"my_DM","elsi_solver")
             call re_alloc(my_EDM,1,my_nnz_l,"my_EDM","elsi_solver")
          endif
 
          ! Clean pkg_global
          nullify(pkg_global%vals(1)%data)
          nullify(pkg_global%vals(2)%data)
          deallocate(pkg_global%vals)
          nullify(pkg_global%numcols)
          nullify(pkg_global%cols)
 
       enddo
 
       call elsi_set_csc(elsi_h, my_nnz, my_nnz_l, my_no_l, my_col_idx, my_row_ptr2)
       call de_alloc(my_row_ptr2,"my_row_ptr2","elsi_solver")
 
       call elsi_set_csc_blk(elsi_h, BlockSize)
       call elsi_set_spin(elsi_h, n_spin, my_spin)
       call elsi_set_mpi(elsi_h, elsi_Spatial_comm)
       call elsi_set_mpi_global(elsi_h, elsi_global_comm)
 
    endif ! n_spin
 
    call timer("elsi-solver", 1)
 
```
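The two row-pointer constructions in the hunk above (row_ptr2 in the n_spin == 1 branch, my_row_ptr2 after the spin redistribution) build the same kind of 1-based pointer array that elsi_set_csc expects. Here is a minimal standalone sketch of that arithmetic, assuming (as the numh reconstruction in the hunk implies) that SIESTA's row_ptr holds 0-based row offsets; the program and the toy data are illustrative only, not part of the branch:

```fortran
! Toy illustration (not part of this diff) of the pointer arithmetic above.
program row_ptr_demo
  implicit none
  integer, parameter :: n_basis_l = 3, nnz_l = 6
  integer :: row_ptr(n_basis_l)        ! 0-based row offsets, SIESTA style
  integer :: numh(n_basis_l)           ! nonzeros per row (cf. pkg_spin%numcols)
  integer :: row_ptr2(n_basis_l+1), my_row_ptr2(n_basis_l+1)
  integer :: i, ih

  row_ptr = [0, 2, 3]                  ! rows with 2, 1, 3 nonzeros

  ! Re-create numh from row_ptr, as in the spin branch of the hunk
  numh(1) = row_ptr(2)
  do i = 2, n_basis_l-1
     numh(i) = row_ptr(i+1) - row_ptr(i)
  enddo
  numh(n_basis_l) = nnz_l - row_ptr(n_basis_l)   ! -> [2, 1, 3]

  ! n_spin == 1 path: shift to 1-based and append the closing entry
  row_ptr2(1:n_basis_l) = row_ptr(1:n_basis_l) + 1
  row_ptr2(n_basis_l+1) = nnz_l + 1

  ! Spin path: rebuild the same pointer by a prefix sum over the counts
  my_row_ptr2(1) = 1
  do ih = 1, n_basis_l
     my_row_ptr2(ih+1) = my_row_ptr2(ih) + numh(ih)
  enddo

  print *, row_ptr2      ! 1 3 4 7
  print *, my_row_ptr2   ! 1 3 4 7 (same pointer, built two ways)
end program row_ptr_demo
```

Both routes yield the identical pointer for the toy pattern, which is why the redistributed spin data can be registered with elsi_set_csc exactly like the unpolarized case.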
```diff
@@ -1217,7 +1216,6 @@
       col_idx, qtot, temp, ham, ovlp, dm, edm, ef, ets, &
       nkpnt, kpt_n, kpt, weight, kpt_comm, Get_EDM_Only)
 
-    use fdf, only: fdf_get
     use m_mpi_utils, only: globalize_sum
     use parallel, only: BlockSize
 #ifdef MPI
@@ -1350,116 +1348,116 @@
 
    !print *, global_rank, "| ", " Entering elsi_complex_solver"
 
    if (n_spin == 1) then
 
       ! Sparsity pattern
       call globalize_sum(nnz_l, nnz_g, comm=kpt_comm)
 
       allocate(row_ptr2(n_basis_l+1))
       row_ptr2(1:n_basis_l) = row_ptr(1:n_basis_l)+1
       row_ptr2(n_basis_l+1) = nnz_l+1
 
       call elsi_set_csc(elsi_h, nnz_g, nnz_l, n_basis_l, col_idx, row_ptr2)
       deallocate(row_ptr2)
 
       call elsi_set_csc_blk(elsi_h, BlockSize)
       call elsi_set_kpoint(elsi_h, nkpnt, kpt_n, weight)
       call elsi_set_mpi(elsi_h, kpt_comm)
       call elsi_set_mpi_global(elsi_h, elsi_global_comm)
 
    else
 
       call mpi_comm_rank( elsi_global_Comm, global_rank, ierr )
 
       ! MPI logic for spin polarization
 
       ! Split the communicator in spins and get distribution objects
       ! for the data redistribution needed
       ! Note that dist_spin is an array
       call get_spin_comms_and_dists(kpt_comm,kpt_comm, &   !! **** kpt_comm as global?
                                     blocksize, n_spin, &
                                     dist_global,dist_spin, elsi_spatial_comm, elsi_spin_comm)
 
       ! Find out which spin team we are in, and tag the spin we work on
       call mpi_comm_rank( elsi_Spin_Comm, spin_rank, ierr )
       my_spin = spin_rank+1   ! {1,2}
 
       !print *, global_rank, "| ", "spin ", my_spin, " After spin splitting"
 
       ! This is done serially, each time filling one spin set
       ! Note that **all processes** need to have the same pkg_global
 
       do ispin = 1, n_spin
 
          ! Load pkg_global data package
          pkg_global%norbs = n_basis
          pkg_global%no_l = n_basis_l
          pkg_global%nnzl = nnz_l
          pkg_global%numcols => numh
          pkg_global%cols => col_idx
 
          allocate(pkg_global%complex_vals(2))
          ! Link the vals items to the appropriate arrays (no extra memory here)
          pkg_global%complex_vals(1)%data => ovlp(:)
          ! Note that we *cannot* say => ham(:,my_spin)
          ! and avoid the sequential loop, as then half the processors will send
          ! the information for 'spin up' and the other half the information for 'spin down',
          ! which is *not* what we want.
          pkg_global%complex_vals(2)%data => ham(:,ispin)
 
          call timer("redist_orbs_fwd", 1)
 
          ! We are doing the transfers sequentially. One spin team is
          ! 'idle' (in the receiving side) in each pass, as the dist_spin(ispin) distribution
          ! does not involve them.
 
          call redistribute_spmatrix(n_basis,pkg_global,dist_global, &
                                     pkg_spin,dist_spin(ispin),kpt_Comm)
 
          call timer("redist_orbs_fwd", 2)
 
          if (my_spin == ispin) then ! Each team gets their own data
 
             !nrows = pkg_spin%norbs   ! or simply 'norbs'
             my_no_l = pkg_spin%no_l
             my_nnz_l = pkg_spin%nnzl
             call MPI_AllReduce(my_nnz_l,my_nnz,1,MPI_integer,MPI_sum,elsi_Spatial_Comm,ierr)
             ! generate off-by-one row pointer
             call re_alloc(my_row_ptr2,1,my_no_l+1,"my_row_ptr2","elsi_solver")
             my_row_ptr2(1) = 1
             do ih = 1,my_no_l
                my_row_ptr2(ih+1) = my_row_ptr2(ih) + pkg_spin%numcols(ih)
             enddo
 
             my_col_idx => pkg_spin%cols
             my_S => pkg_spin%complex_vals(1)%data
             my_H => pkg_spin%complex_vals(2)%data
 
             call re_alloc(my_DM,1,my_nnz_l,"my_DM","elsi_solver")
             call re_alloc(my_EDM,1,my_nnz_l,"my_EDM","elsi_solver")
          endif
 
          ! Clean pkg_global
          nullify(pkg_global%complex_vals(1)%data)
          nullify(pkg_global%complex_vals(2)%data)
          deallocate(pkg_global%complex_vals)
          nullify(pkg_global%numcols)
          nullify(pkg_global%cols)
 
       enddo
 
       !print *, global_rank, "| ", "spin ", my_spin, "Done spin transfers"
 
       call elsi_set_csc(elsi_h, my_nnz, my_nnz_l, my_no_l, my_col_idx, my_row_ptr2)
       call de_alloc(my_row_ptr2,"my_row_ptr2","elsi_solver")
 
       call elsi_set_csc_blk(elsi_h, BlockSize)
       call elsi_set_spin(elsi_h, n_spin, my_spin)
       call elsi_set_kpoint(elsi_h, nkpnt, kpt_n, weight)
       call elsi_set_mpi(elsi_h, elsi_Spatial_comm)
       call elsi_set_mpi_global(elsi_h, elsi_global_comm)
 
    endif ! n_spin
 
    call timer("elsi-solver", 1)
 
```
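The body of get_spin_comms_and_dists is not part of this diff, so the following is only a plausible standalone sketch of how the two communicators used in both hunks can be obtained with MPI_Comm_split. It assumes an even number of ranks, n_spin = 2, and that the first half of the ranks forms the spin-1 team and the second half the spin-2 team; all names here are hypothetical:

```fortran
! Illustrative guess (not the branch's actual code) at the communicator split.
program spin_split_demo
  use mpi
  implicit none
  integer :: ierr, nprocs, rank, team_size
  integer :: spatial_color, spin_color
  integer :: spatial_comm, spin_comm   ! cf. elsi_spatial_comm / elsi_spin_comm
  integer :: spin_rank, my_spin

  call MPI_Init(ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, nprocs, ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)

  team_size = nprocs / 2               ! two equal spin teams (nprocs assumed even)
  spatial_color = rank / team_size     ! 0 or 1: which team this rank belongs to
  spin_color = mod(rank, team_size)    ! pairs one rank from each team

  ! Ranks sharing a color end up in the same sub-communicator.
  call MPI_Comm_split(MPI_COMM_WORLD, spatial_color, rank, spatial_comm, ierr)
  call MPI_Comm_split(MPI_COMM_WORLD, spin_color, rank, spin_comm, ierr)

  ! As in both hunks: the rank inside the size-2 spin communicator
  ! tags the spin this team works on.
  call MPI_Comm_rank(spin_comm, spin_rank, ierr)
  my_spin = spin_rank + 1              ! {1,2}

  call MPI_Finalize(ierr)
end program spin_split_demo
```

With a split of this shape, each spatial communicator holds one spin team, while the spin communicator pairs one rank from each team, so its rank directly yields my_spin, which is exactly the property the solver code relies on when it calls mpi_comm_rank on elsi_Spin_Comm.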