36#define REAL_TYPE float
37#define FFT_TYPE_COMPLEX cufftComplex
38#define FFT_FORW_FFT cufftExecR2C
39#define FFT_BACK_FFT cufftExecC2R
40#define FFT_PLAN_FORW_FLAG CUFFT_R2C
41#define FFT_PLAN_BACK_FLAG CUFFT_C2R
44#ifdef P3M_GPU_REAL_DOUBLE
45#define REAL_TYPE double
46#define FFT_TYPE_COMPLEX cufftDoubleComplex
47#define FFT_FORW_FFT cufftExecD2Z
48#define FFT_BACK_FFT cufftExecZ2D
49#define FFT_PLAN_FORW_FLAG CUFFT_D2Z
50#define FFT_PLAN_BACK_FLAG CUFFT_Z2D
56#include "system/System.hpp"
70#if defined(OMPI_MPI_H) || defined(_MPI_H)
71#error CU-file includes mpi.h! This should not happen!
121 auto const free_device_pointer = [](
auto *&ptr) {
122 if (ptr !=
nullptr) {
150 for (
int i = 0; i < 3; ++i) {
151 Leni[i] = 1.0f / p.
box[i];
155 Zaehler[0] = Zaehler[1] = Zaehler[2] = *Nenner = 0.0;
160 S1 = int_pow<2 * cao>(
Utils::sinc(Meshi[0] * NMX));
163 ((NY > p.
mesh[1] / 2) ? NY - p.
mesh[1] : NY) + p.
mesh[1] * MY);
164 S2 = S1 * int_pow<2 * cao>(
Utils::sinc(Meshi[1] * NMY));
167 ((NZ > p.
mesh[2] / 2) ? NZ - p.
mesh[2] : NZ) + p.
mesh[2] * MZ);
168 S3 = S2 * int_pow<2 * cao>(
Utils::sinc(Meshi[2] * NMZ));
170 NM2 = sqr(NMX * Leni[0]) + sqr(NMY * Leni[1]) + sqr(NMZ * Leni[2]);
173 TE = exp(-sqr(Utils::pi<REAL_TYPE>() / (p.
alpha)) * NM2);
175 Zaehler[0] += NMX * zwi * Leni[0];
176 Zaehler[1] += NMY * zwi * Leni[1];
177 Zaehler[2] += NMZ * zwi * Leni[2];
187 const auto NX =
static_cast<int>(blockDim.x * blockIdx.x + threadIdx.x);
188 const auto NY =
static_cast<int>(blockDim.y * blockIdx.y + threadIdx.y);
189 const auto NZ =
static_cast<int>(blockDim.z * blockIdx.z + threadIdx.z);
191 REAL_TYPE Zaehler[3] = {0.0, 0.0, 0.0}, Nenner = 0.0;
195 for (
int i = 0; i < 3; ++i) {
196 Leni[i] = 1.0f / p.
box[i];
199 if ((NX >= p.
mesh[0]) || (NY >= p.
mesh[1]) || (NZ >= (p.
mesh[2] / 2 + 1)))
202 index = NX * p.
mesh[1] * (p.
mesh[2] / 2 + 1) + NY * (p.
mesh[2] / 2 + 1) + NZ;
204 if (((NX == 0) && (NY == 0) && (NZ == 0)) ||
205 ((NX % (p.
mesh[0] / 2) == 0) && (NY % (p.
mesh[1] / 2) == 0) &&
206 (NZ % (p.
mesh[2] / 2) == 0))) {
209 Aliasing_sums_ik<cao>(p, NX, NY, NZ, Zaehler, &Nenner);
215 zwi = Dnx * Zaehler[0] * Leni[0] + Dny * Zaehler[1] * Leni[1] +
216 Dnz * Zaehler[2] * Leni[2];
217 zwi /= ((sqr(Dnx * Leni[0]) + sqr(Dny * Leni[1]) + sqr(Dnz * Leni[2])) *
219 p.
G_hat[index] = 2 * zwi / Utils::pi<REAL_TYPE>();
223#ifdef P3M_GPU_REAL_DOUBLE
/** Software fallback for double-precision atomicAdd().
 *
 * Devices of compute capability < 6.0 have no native atomicAdd() overload
 * for double. This emulates it with the canonical atomicCAS() retry loop
 * from the CUDA C++ Programming Guide: reinterpret the accumulator as a
 * 64-bit integer, compute the new sum from the last observed bit pattern,
 * and attempt to swap it in; retry until no other thread has modified the
 * address in between.
 *
 * The loop condition compares the raw 64-bit patterns (not doubles), which
 * per the Programming Guide avoids a hang when the accumulator holds NaN
 * (NaN != NaN would never terminate a floating-point comparison).
 *
 * The `__CUDA_ARCH__` guard is required: on compute capability >= 6.0 the
 * compiler provides a native atomicAdd(double*, double), and defining this
 * overload as well makes the call ambiguous / a redefinition error. The
 * `!defined(__CUDA_ARCH__)` branch keeps the declaration visible during the
 * host compilation pass.
 *
 * @param address  device pointer to the accumulator
 * @param val      value to add
 * @return the value at @p address immediately before the addition
 *         (standard atomicAdd convention)
 */
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ < 600
__device__ double atomicAdd(double *address, double val) {
  auto *address_as_ull = reinterpret_cast<unsigned long long int *>(address);
  unsigned long long int old = *address_as_ull, assumed;
  do {
    assumed = old;
    old = atomicCAS(address_as_ull, assumed,
                    __double_as_longlong(val + __longlong_as_double(assumed)));
  } while (assumed != old);
  return __longlong_as_double(old);
}
#endif
245 return static_cast<unsigned int>(p.
mesh[1] * (p.
mesh[2] / 2 + 1) * i +
246 (p.
mesh[2] / 2 + 1) * j + k);
251 auto const linear_index =
linear_index_k(p,
static_cast<int>(blockIdx.x),
252 static_cast<int>(blockIdx.y),
253 static_cast<int>(threadIdx.x));
255 auto const bidx =
static_cast<int>(blockIdx.x);
256 auto const bidy =
static_cast<int>(blockIdx.y);
257 auto const nx = (bidx > p.
mesh[0] / 2) ? bidx - p.
mesh[0] : bidx;
258 auto const ny = (bidy > p.
mesh[1] / 2) ? bidy - p.
mesh[1] : bidy;
259 auto const nz =
static_cast<int>(threadIdx.x);
263 buf.x = -2.0f * Utils::pi<float>() * meshw.y;
264 buf.y = 2.0f * Utils::pi<float>() * meshw.x;
267 static_cast<decltype(FFT_TYPE_COMPLEX::x)
>(nx) * buf.x / p.
box[0];
269 static_cast<decltype(FFT_TYPE_COMPLEX::x)
>(nx) * buf.y / p.
box[0];
272 static_cast<decltype(FFT_TYPE_COMPLEX::x)
>(ny) * buf.x / p.
box[1];
274 static_cast<decltype(FFT_TYPE_COMPLEX::x)
>(ny) * buf.y / p.
box[1];
277 static_cast<decltype(FFT_TYPE_COMPLEX::x)
>(nz) * buf.x / p.
box[2];
279 static_cast<decltype(FFT_TYPE_COMPLEX::x)
>(nz) * buf.y / p.
box[2];
282__device__
inline int wrap_index(
const int ind,
const int mesh) {
291 auto const linear_index =
linear_index_k(p,
static_cast<int>(blockIdx.x),
292 static_cast<int>(blockIdx.y),
293 static_cast<int>(threadIdx.x));
299template <
int cao,
bool shared>
301 float const *
const __restrict__ part_pos,
302 float const *
const __restrict__ part_q,
303 unsigned int const parts_per_block) {
304 auto const part_in_block = threadIdx.x /
static_cast<unsigned int>(cao);
305 auto const cao_id_x =
306 threadIdx.x - part_in_block *
static_cast<unsigned int>(cao);
309 parts_per_block * (blockIdx.x * gridDim.y + blockIdx.y) + part_in_block;
315 int nmp_x, nmp_y, nmp_z;
319 m_pos[0] = part_pos[3 *
id + 0] *
params.hi[0] -
params.pos_shift;
320 m_pos[1] = part_pos[3 *
id + 1] *
params.hi[1] -
params.pos_shift;
321 m_pos[2] = part_pos[3 *
id + 2] *
params.hi[2] -
params.pos_shift;
323 nmp_x =
static_cast<int>(floorf(m_pos[0] + 0.5f));
324 nmp_y =
static_cast<int>(floorf(m_pos[1] + 0.5f));
325 nmp_z =
static_cast<int>(floorf(m_pos[2] + 0.5f));
327 m_pos[0] -=
static_cast<REAL_TYPE>(nmp_x);
328 m_pos[1] -=
static_cast<REAL_TYPE>(nmp_y);
329 m_pos[2] -=
static_cast<REAL_TYPE>(nmp_z);
337 extern __shared__
float weights[];
340 auto const offset =
static_cast<unsigned int>(cao) * part_in_block;
341 if ((threadIdx.y < 3u) && (threadIdx.z == 0
u)) {
342 weights[3u * offset + 3u * cao_id_x + threadIdx.y] =
343 Utils::bspline<cao>(
static_cast<int>(cao_id_x), m_pos[threadIdx.y]);
348 auto const c = weights[3u * offset + 3u * cao_id_x] *
349 weights[3u * offset + 3u * threadIdx.y + 1u] *
350 weights[3u * offset + 3u * threadIdx.z + 2u] * part_q[id];
351 atomicAdd(&(charge_mesh[index]), c);
355 Utils::bspline<cao>(
static_cast<int>(cao_id_x), m_pos[0]) * part_q[id] *
356 Utils::bspline<cao>(
static_cast<int>(threadIdx.y), m_pos[1]) *
357 Utils::bspline<cao>(
static_cast<int>(threadIdx.z), m_pos[2]);
358 atomicAdd(&(charge_mesh[index]), c);
363 float const *
const __restrict__ part_pos,
364 float const *
const __restrict__ part_q) {
365 auto const cao =
static_cast<unsigned int>(
params.cao);
366 auto const cao3 = int_pow<3>(cao);
367 unsigned int parts_per_block = 1u, n_blocks = 1u;
369 while ((parts_per_block + 1u) * cao3 <= 1024u) {
372 if ((
params.n_part % parts_per_block) == 0
u)
373 n_blocks = std::max<unsigned>(1u,
params.n_part / parts_per_block);
375 n_blocks =
params.n_part / parts_per_block + 1u;
377 dim3
block(parts_per_block * cao, cao, cao);
378 dim3 grid(n_blocks, 1u, 1u);
379 while (grid.x > 65536u) {
381 if ((n_blocks % grid.y) == 0
u)
382 grid.x = std::max<unsigned>(1u, n_blocks / grid.y);
384 grid.x = n_blocks / grid.y + 1u;
387 auto const data_length =
388 3 *
static_cast<std::size_t
>(parts_per_block * cao) *
sizeof(
REAL_TYPE);
391 (assign_charge_kernel<1, false>)<<<grid,
block, 0,
nullptr>>>(
392 params, part_pos, part_q, parts_per_block);
395 (assign_charge_kernel<2, false>)<<<grid,
block, 0,
nullptr>>>(
396 params, part_pos, part_q, parts_per_block);
399 (assign_charge_kernel<3, true>)<<<grid,
block, data_length,
nullptr>>>(
400 params, part_pos, part_q, parts_per_block);
403 (assign_charge_kernel<4, true>)<<<grid,
block, data_length,
nullptr>>>(
404 params, part_pos, part_q, parts_per_block);
407 (assign_charge_kernel<5, true>)<<<grid,
block, data_length,
nullptr>>>(
408 params, part_pos, part_q, parts_per_block);
411 (assign_charge_kernel<6, true>)<<<grid,
block, data_length,
nullptr>>>(
412 params, part_pos, part_q, parts_per_block);
415 (assign_charge_kernel<7, true>)<<<grid,
block, data_length,
nullptr>>>(
416 params, part_pos, part_q, parts_per_block);
424template <
int cao,
bool shared>
426 float const *
const __restrict__ part_pos,
427 float const *
const __restrict__ part_q,
428 float *
const __restrict__ part_f,
430 unsigned int const parts_per_block) {
431 auto const part_in_block = threadIdx.x /
static_cast<unsigned int>(cao);
432 auto const cao_id_x =
433 threadIdx.x - part_in_block *
static_cast<unsigned int>(cao);
436 parts_per_block * (blockIdx.x * gridDim.y + blockIdx.y) + part_in_block;
442 int nmp_x, nmp_y, nmp_z;
444 m_pos[0] = part_pos[3 *
id + 0] *
params.hi[0] -
params.pos_shift;
445 m_pos[1] = part_pos[3 *
id + 1] *
params.hi[1] -
params.pos_shift;
446 m_pos[2] = part_pos[3 *
id + 2] *
params.hi[2] -
params.pos_shift;
448 nmp_x =
static_cast<int>(floorf(m_pos[0] +
REAL_TYPE{0.5}));
449 nmp_y =
static_cast<int>(floorf(m_pos[1] +
REAL_TYPE{0.5}));
450 nmp_z =
static_cast<int>(floorf(m_pos[2] +
REAL_TYPE{0.5}));
452 m_pos[0] -=
static_cast<REAL_TYPE>(nmp_x);
453 m_pos[1] -=
static_cast<REAL_TYPE>(nmp_y);
454 m_pos[2] -=
static_cast<REAL_TYPE>(nmp_z);
462 extern __shared__
float weights[];
466 auto const offset =
static_cast<unsigned int>(cao) * part_in_block;
467 if ((threadIdx.y < 3u) && (threadIdx.z == 0
u)) {
468 weights[3u * offset + 3u * cao_id_x + threadIdx.y] =
469 Utils::bspline<cao>(
static_cast<int>(cao_id_x), m_pos[threadIdx.y]);
474 c = -prefactor * weights[3u * offset + 3u * cao_id_x] *
475 weights[3u * offset + 3u * threadIdx.y + 1u] *
476 weights[3u * offset + 3u * threadIdx.z + 2u] * part_q[id];
478 c = -prefactor * part_q[id] *
479 Utils::bspline<cao>(
static_cast<int>(cao_id_x), m_pos[0]) *
480 Utils::bspline<cao>(
static_cast<int>(threadIdx.y), m_pos[1]) *
481 Utils::bspline<cao>(
static_cast<int>(threadIdx.z), m_pos[2]);
488 atomicAdd(&(part_f[3u *
id + 0
u]), c * force_mesh_x[index]);
489 atomicAdd(&(part_f[3u *
id + 1u]), c * force_mesh_y[index]);
490 atomicAdd(&(part_f[3u *
id + 2u]), c * force_mesh_z[index]);
494 float const *
const __restrict__ part_pos,
495 float const *
const __restrict__ part_q,
496 float *
const __restrict__ part_f,
498 auto const cao =
params.cao;
499 auto const cao3 = int_pow<3>(cao);
500 unsigned int parts_per_block = 1u, n_blocks = 1u;
502 while ((parts_per_block + 1u) *
static_cast<unsigned int>(cao3) <= 1024u) {
506 if ((
params.n_part % parts_per_block) == 0
u)
507 n_blocks = std::max<unsigned>(1u,
params.n_part / parts_per_block);
509 n_blocks =
params.n_part / parts_per_block + 1u;
511 dim3
block(parts_per_block *
static_cast<unsigned int>(cao),
512 static_cast<unsigned int>(cao),
static_cast<unsigned int>(cao));
513 dim3 grid(n_blocks, 1u, 1u);
514 while (grid.x > 65536u) {
516 if (n_blocks % grid.y == 0
u)
517 grid.x = std::max<unsigned>(1u, n_blocks / grid.y);
519 grid.x = n_blocks / grid.y + 1u;
524 auto const data_length =
526 static_cast<std::size_t
>(parts_per_block *
527 static_cast<unsigned int>(cao)) *
531 (assign_forces_kernel<1, false>)<<<grid,
block, 0,
nullptr>>>(
532 params, part_pos, part_q, part_f, prefactor, parts_per_block);
535 (assign_forces_kernel<2, false>)<<<grid,
block, 0,
nullptr>>>(
536 params, part_pos, part_q, part_f, prefactor, parts_per_block);
539 (assign_forces_kernel<3, true>)<<<grid,
block, data_length,
nullptr>>>(
540 params, part_pos, part_q, part_f, prefactor, parts_per_block);
543 (assign_forces_kernel<4, true>)<<<grid,
block, data_length,
nullptr>>>(
544 params, part_pos, part_q, part_f, prefactor, parts_per_block);
547 (assign_forces_kernel<5, true>)<<<grid,
block, data_length,
nullptr>>>(
548 params, part_pos, part_q, part_f, prefactor, parts_per_block);
551 (assign_forces_kernel<6, true>)<<<grid,
block, data_length,
nullptr>>>(
552 params, part_pos, part_q, part_f, prefactor, parts_per_block);
555 (assign_forces_kernel<7, true>)<<<grid,
block, data_length,
nullptr>>>(
556 params, part_pos, part_q, part_f, prefactor, parts_per_block);
573 if (mesh[0] == -1 && mesh[1] == -1 && mesh[2] == -1)
574 throw std::runtime_error(
"P3M: invalid mesh size");
577 data = std::make_shared<P3MGpuParams>();
580 auto &p3m_gpu_data = data->p3m_gpu_data;
581 bool do_reinit =
false, mesh_changed =
false;
582 p3m_gpu_data.n_part = n_part;
584 if (not data->is_initialized or p3m_gpu_data.alpha != alpha) {
585 p3m_gpu_data.alpha =
static_cast<REAL_TYPE>(alpha);
589 if (not data->is_initialized or p3m_gpu_data.cao != cao) {
590 p3m_gpu_data.cao = cao;
592 p3m_gpu_data.pos_shift =
static_cast<REAL_TYPE>((p3m_gpu_data.cao - 1) / 2);
596 if (not data->is_initialized or (p3m_gpu_data.mesh[0] != mesh[0]) or
597 (p3m_gpu_data.mesh[1] != mesh[1]) or (p3m_gpu_data.mesh[2] != mesh[2])) {
598 std::copy(mesh, mesh + 3, p3m_gpu_data.mesh);
603 if (not data->is_initialized or (p3m_gpu_data.box[0] != box_l[0]) or
604 (p3m_gpu_data.box[1] != box_l[1]) or (p3m_gpu_data.box[2] != box_l[2])) {
605 std::copy(box_l.
begin(), box_l.
end(), p3m_gpu_data.box);
609 p3m_gpu_data.mesh_z_padded = (mesh[2] / 2 + 1) * 2;
610 p3m_gpu_data.mesh_size = mesh[0] * mesh[1] * p3m_gpu_data.mesh_z_padded;
612 for (
int i = 0; i < 3; i++) {
614 static_cast<REAL_TYPE>(p3m_gpu_data.mesh[i]) / p3m_gpu_data.box[i];
617 if (data->is_initialized and mesh_changed) {
618 data->free_device_memory();
619 data->is_initialized =
false;
622 if (not data->is_initialized and p3m_gpu_data.mesh_size > 0) {
624 auto const cmesh_size =
625 static_cast<std::size_t
>(p3m_gpu_data.mesh[0]) *
626 static_cast<std::size_t
>(p3m_gpu_data.mesh[1]) *
627 static_cast<std::size_t
>(p3m_gpu_data.mesh[2] / 2 + 1);
629 cuda_safe_mem(cudaMalloc((
void **)&(p3m_gpu_data.charge_mesh), mesh_len));
630 cuda_safe_mem(cudaMalloc((
void **)&(p3m_gpu_data.force_mesh_x), mesh_len));
631 cuda_safe_mem(cudaMalloc((
void **)&(p3m_gpu_data.force_mesh_y), mesh_len));
632 cuda_safe_mem(cudaMalloc((
void **)&(p3m_gpu_data.force_mesh_z), mesh_len));
636 if (cufftPlan3d(&(data->p3m_fft.forw_plan), mesh[0], mesh[1], mesh[2],
638 cufftPlan3d(&(data->p3m_fft.back_plan), mesh[0], mesh[1], mesh[2],
640 throw std::runtime_error(
"Unable to create fft plan");
644 if ((do_reinit or not data->is_initialized) and p3m_gpu_data.mesh_size > 0) {
647 block.x =
static_cast<unsigned>(512 / mesh[0] + 1);
648 block.y =
static_cast<unsigned>(mesh[1]);
650 grid.x =
static_cast<unsigned>(mesh[0]) /
block.x + 1;
651 grid.z =
static_cast<unsigned>(mesh[2]) / 2 + 1;
653 switch (p3m_gpu_data.cao) {
684 if (p3m_gpu_data.mesh_size > 0)
685 data->is_initialized =
true;
692 double prefactor,
unsigned n_part) {
694 p3m_gpu_data.
n_part = n_part;
696 if (p3m_gpu_data.n_part == 0
u)
703 dim3 gridConv(
static_cast<unsigned>(p3m_gpu_data.mesh[0]),
704 static_cast<unsigned>(p3m_gpu_data.mesh[1]), 1u);
705 dim3 threadsConv(
static_cast<unsigned>(p3m_gpu_data.mesh[2] / 2 + 1), 1u, 1u);
712 static_cast<std::size_t
>(p3m_gpu_data.mesh_size) *
721 p3m_gpu_data.charge_mesh) != CUFFT_SUCCESS) {
722 fprintf(stderr,
"CUFFT error: Forward FFT failed\n");
741 assign_forces(p3m_gpu_data, positions_device, charges_device, forces_device,
Particle data communication manager for the GPU.
float * get_particle_charges_device() const
float * get_particle_forces_device() const
float * get_particle_positions_device() const
void cuda_check_errors_exit(const dim3 &block, const dim3 &grid, const char *function, const char *file, unsigned int line)
In case of error during a CUDA operation, print the error message and exit.
This file contains the defaults for ESPResSo.
#define P3M_BRILLOUIN
P3M: Number of Brillouin zones taken into account in the calculation of the optimal influence function.
static double * block(double *p, std::size_t index, std::size_t size)
T product(Vector< T, N > const &v)
DEVICE_QUALIFIER T sinc(T d)
Calculates the sinc-function as sin(PI*x)/(PI*x).
DEVICE_QUALIFIER constexpr T sqr(T x)
Calculates the SQuaRe of x.
DEVICE_QUALIFIER constexpr T int_pow(T x)
Calculate integer powers.
__device__ auto linear_index_k(P3MGpuData const &p, int i, int j, int k)
__device__ auto linear_index_r(P3MGpuData const &p, int i, int j, int k)
__device__ static void Aliasing_sums_ik(const P3MGpuData p, int NX, int NY, int NZ, REAL_TYPE *Zaehler, REAL_TYPE *Nenner)
void p3m_gpu_init(std::shared_ptr< P3MGpuParams > &data, int cao, const int mesh[3], double alpha, Utils::Vector3d const &box_l, unsigned n_part)
__global__ void assign_charge_kernel(P3MGpuData const params, float const *const __restrict__ part_pos, float const *const __restrict__ part_q, unsigned int const parts_per_block)
__global__ void apply_influence_function(const P3MGpuData p)
void assign_charges(P3MGpuData const ¶ms, float const *const __restrict__ part_pos, float const *const __restrict__ part_q)
#define FFT_PLAN_FORW_FLAG
__global__ void apply_diff_op(const P3MGpuData p)
void p3m_gpu_add_farfield_force(P3MGpuParams &data, GpuParticleData &gpu, double prefactor, unsigned n_part)
The long-range part of the P3M algorithm.
void assign_forces(P3MGpuData const ¶ms, float const *const __restrict__ part_pos, float const *const __restrict__ part_q, float *const __restrict__ part_f, REAL_TYPE const prefactor)
__global__ void calculate_influence_function_device(const P3MGpuData p)
__device__ int wrap_index(const int ind, const int mesh)
#define FFT_PLAN_BACK_FLAG
__global__ void assign_forces_kernel(P3MGpuData const params, float const *const __restrict__ part_pos, float const *const __restrict__ part_q, float *const __restrict__ part_f, REAL_TYPE prefactor, unsigned int const parts_per_block)
static SteepestDescentParameters params
Currently active steepest descent instance.
int mesh[3]
Mesh dimensions.
REAL_TYPE pos_shift
Position shift.
REAL_TYPE * G_hat
Influence Function.
FFT_TYPE_COMPLEX * force_mesh_x
Force meshes.
int mesh_z_padded
Padded size.
int mesh_size
Total number of mesh points (including padding)
int cao
Charge assignment order.
unsigned int n_part
Number of particles.
FFT_TYPE_COMPLEX * force_mesh_z
REAL_TYPE hi[3]
Inverse mesh spacing.
FFT_TYPE_COMPLEX * charge_mesh
Charge mesh.
REAL_TYPE box[3]
Box size.
FFT_TYPE_COMPLEX * force_mesh_y
REAL_TYPE alpha
Ewald parameter.
cufftHandle forw_plan
Forward FFT plan.
cufftHandle back_plan
Backward FFT plan.
void free_device_memory()
DEVICE_QUALIFIER constexpr iterator begin() noexcept
DEVICE_QUALIFIER constexpr iterator end() noexcept
#define KERNELCALL(_function, _grid, _block,...)