/***************************************************************************
iPIC3D.cpp - Main file for 3D simulation
-------------------
************************************************************************** */
// MPI
#include <mpi.h>
#include "mpidata/MPIdata.h"
// Topology
#include "processtopology/VirtualTopology3D.h"
#include "processtopology/VCtopology3D.h"
// input
#include "inputoutput/CollectiveIO.h"
#include "inputoutput/Collective.h"
// grid
#include "grids/Grid.h"
#include "grids/Grid3DCU.h"
// fields
#include "fields/Field.h"
#include "fields/EMfields3D.h"
// particles
#include "particles/Particles.h"
#include "particles/Particles3Dcomm.h"
#include "particles/Particles3D.h"
// output
#include "PSKOutput3D/PSKOutput.h"
#include "PSKOutput3D/PSKhdf5adaptor.h"
#include "inputoutput/Restart3D.h"
// performance
#include "performances/Timing.h"
// wave
// #include "perturbation/Planewave.h"
// serial ASCII output
// #include "inputoutput/SerialIO.h"
#include <iostream>
#include <fstream>
#include <string>
#include <sstream>
using namespace std;
using std::cerr;
using std::endl;
using std::ofstream;
int main(int argc, char **argv) {
// initialize MPI environment
// nprocs = number of processors
// myrank = rank of the process
int nprocs, myrank, mem_avail = 0; // mem_avail reports the particle mover's memory status; initialized to the success value so the final check is well defined even if the time loop never runs
MPIdata *mpi = new MPIdata(&argc, &argv);
nprocs = mpi->nprocs;
myrank = mpi->rank;
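// MPIdata wraps the MPI setup: judging from its use here, the constructor
// forwards argc/argv to MPI_Init and caches the communicator size and rank,
// and finalize_mpi() is the matching MPI_Finalize call at the end of main().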
Collective *col = new Collective(argc, argv); // every process loads the simulation parameters through the Collective class
bool verbose = col->getVerbose();
int restart_cycle = col->getRestartOutputCycle();
string SaveDirName = col->getSaveDirName();
string RestartDirName = col->getRestartDirName();
const int restart = col->getRestart_status();
const int ns = col->getNs(); // get the number of particle species involved in simulation
const int first_cycle = col->getLast_cycle() + 1; // get the last cycle from the restart
// initialize the virtual cartesian topology
VCtopology3D *vct = new VCtopology3D();
// Check that the number of processes matches the XLEN x YLEN x ZLEN virtual topology
if (nprocs != vct->getNprocs()) {
if (myrank == 0)
cerr << "Error: " << nprocs << " processes cannot be mapped onto a " << vct->getXLEN() << "x" << vct->getYLEN() << "x" << vct->getZLEN() << " process grid: change XLEN, YLEN, ZLEN in method VCtopology3D.init()" << endl;
mpi->finalize_mpi(); // finalize on every rank, not only rank 0, so no process continues with a mismatched topology
return (1);
}
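// Example: with XLEN = 2, YLEN = 2, ZLEN = 1 the run must be launched with
// exactly 4 MPI processes (e.g. mpirun -np 4 ./iPIC3D inputfile); any other
// count trips the check above and the run is aborted.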
// We create a new communicator with a 3D virtual Cartesian topology
vct->setup_vctopology(MPI_COMM_WORLD);
// local subdomain size: number of cells per process in each direction
const int nx0 = col->getNxc() / vct->getXLEN(); // get the number of cells in x for each processor
const int ny0 = col->getNyc() / vct->getYLEN(); // get the number of cells in y for each processor
const int nz0 = col->getNzc() / vct->getZLEN(); // get the number of cells in z for each processor
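// Note: these are integer divisions, so the global cell counts are assumed to
// divide evenly by the topology; e.g. Nxc = 64 with XLEN = 4 gives nx0 = 16,
// while Nxc = 65 would silently drop the remainder. In this file nx0/ny0/nz0
// are used only to place the virtual satellite probes.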
// Print the initial settings to stdout and a file
if (myrank == 0) {
mpi->Print();
vct->Print();
col->Print();
col->save();
}
// Create the local grid
MPI_Barrier(MPI_COMM_WORLD);
Grid3DCU *grid = new Grid3DCU(col, vct); // Create the local grid
EMfields3D *EMf = new EMfields3D(col, grid); // Create Electromagnetic Fields Object
// EMf->initGEMnoPert(vct,grid);
// EMf->initForceFree(vct,grid);
EMf->initGEM(vct, grid);
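// initGEM sets up the GEM magnetic-reconnection challenge. In the standard
// GEM configuration (Birn et al. 2001) this is a Harris current sheet,
//   Bx(y) = B0 * tanh(y / delta),   n(y) = n0 / cosh^2(y / delta) + n_bg,
// plus a small magnetic perturbation that triggers reconnection. The actual
// parameters come from the input file; this note describes the usual setup,
// not necessarily every detail of this build.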
// Allocation of particles
Particles3D *part = new Particles3D[ns];
for (int i = 0; i < ns; i++)
part[i].allocate(i, col, vct, grid);
// Initial Condition for PARTICLES if you are not starting from RESTART
if (restart == 0) {
// Planewave *wave = new Planewave(col, EMf, grid, vct);
// wave->Wave_Rotated(part); // Single Plane Wave
for (int i = 0; i < ns; i++)
part[i].maxwellian(grid, EMf, vct); // every species starts from a Maxwellian velocity distribution
// part[i].force_free(grid,EMf,vct); // force free
}
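// maxwellian() loads each species with a (possibly drifting) Maxwellian
// velocity distribution. A minimal sketch of sampling one velocity component
// via Box-Muller (illustrative; not necessarily the exact scheme inside
// maxwellian()):
//   u = u_drift + u_th * sqrt(-2.0 * log(1.0 - r1)) * cos(2.0 * M_PI * r2);
// with r1, r2 uniform in [0,1) and u_th the thermal speed from the input file.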
// Initialize the output (simulation results and restart file)
PSK::OutputManager < PSK::OutputAdaptor > output_mgr; // Create an Output Manager
myOutputAgent < PSK::HDF5OutputAdaptor > hdf5_agent; // Create an Output Agent for HDF5 output
hdf5_agent.set_simulation_pointers(EMf, grid, vct, mpi, col);
for (int i = 0; i < ns; ++i)
hdf5_agent.set_simulation_pointers_part(&part[i]);
output_mgr.push_back(&hdf5_agent); // Add the HDF5 output agent to the Output Manager's list
if (myrank == 0 && restart < 2) {
hdf5_agent.open(SaveDirName + "/settings.hdf");
output_mgr.output("collective + total_topology + proc_topology", 0);
hdf5_agent.close();
hdf5_agent.open(RestartDirName + "/settings.hdf");
output_mgr.output("collective + total_topology + proc_topology", 0);
hdf5_agent.close();
}
// Restart
stringstream num_proc;
num_proc << myrank;
if (restart == 0) { // new simulation from input file
hdf5_agent.open(SaveDirName + "/proc" + num_proc.str() + ".hdf");
output_mgr.output("proc_topology ", 0);
hdf5_agent.close();
}
else { // on restart, append the results to the previous simulation's files
hdf5_agent.open_append(SaveDirName + "/proc" + num_proc.str() + ".hdf");
output_mgr.output("proc_topology ", 0);
hdf5_agent.close();
}
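// Restart-status semantics as used in this file: restart == 0 starts a fresh
// run and creates new per-process files; any nonzero value appends to the
// existing proc<rank>.hdf files; and only restart < 2 rewrites settings.hdf,
// so restart == 2 apparently means "continue without touching stored settings".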
MPI_Barrier(MPI_COMM_WORLD);
double Eenergy, Benergy, TOTenergy = 0.0, TOTmomentum = 0.0;
double *Ke = new double[ns];
double *momentum = new double[ns];
string cq = SaveDirName + "/ConservedQuantities.txt";
if (myrank == 0) {
ofstream my_file(cq.c_str());
my_file.close();
}
// Distribution functions
int nDistributionBins = 1000;
double maxVel = 0.;
unsigned long *VelocityDist = NULL; // filled by getVelocityDistribution() each diagnostic cycle; allocating here would leak, since the pointer is reassigned below
string ds = SaveDirName + "/DistributionFunctions.txt";
if (myrank==0) {
ofstream my_file(ds.c_str());
my_file.close();
}
string cqsat = SaveDirName + "/VirtualSatelliteTraces" + num_proc.str() + ".txt";
// if(myrank==0){
ofstream my_file(cqsat.c_str(), fstream::binary);
int nsat = 3;
for (int isat = 0; isat < nsat; isat++) {
for (int jsat = 0; jsat < nsat; jsat++) {
for (int ksat = 0; ksat < nsat; ksat++) {
int index1 = 1 + isat * nx0 / nsat + nx0 / nsat / 2;
int index2 = 1 + jsat * ny0 / nsat + ny0 / nsat / 2;
int index3 = 1 + ksat * nz0 / nsat + nz0 / nsat / 2;
my_file << grid->getXC(index1, index2, index3) << "\t" << grid->getYC(index1, index2, index3) << "\t" << grid->getZC(index1, index2, index3) << endl;
}
}
}
my_file.close();
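// The index formula places an nsat x nsat x nsat (here 3x3x3 = 27) array of
// virtual satellites per subdomain, one at the centre of each of nsat equal
// slices along every axis; the leading 1 presumably skips the ghost-cell
// layer. Example: nx0 = 30, nsat = 3 gives nx0/nsat = 10, so
// index1 = 1 + isat*10 + 5, i.e. cell indices 6, 16 and 26.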
// *******************************************//
// **** Start the Simulation! ***//
// *******************************************//
Timing *my_clock = new Timing(myrank);
for (int cycle = first_cycle; cycle < (col->getNcycles() + first_cycle); cycle++) {
if (myrank == 0 && verbose) {
cout << "***********************" << endl;
cout << "* cycle = " << cycle + 1 << " *" << endl;
cout << "***********************" << endl;
}
// interpolation
EMf->setZeroDensities(); // set to zero the densities
for (int i = 0; i < ns; i++)
part[i].interpP2G(EMf, grid, vct); // interpolate Particles to Grid(Nodes)
EMf->sumOverSpecies(vct); // sum the moments over all species
MPI_Barrier(MPI_COMM_WORLD);
EMf->interpDensitiesN2C(vct, grid); // calculate densities on centers from nodes
EMf->calculateHatFunctions(grid, vct); // calculate the hat quantities for the implicit method
MPI_Barrier(MPI_COMM_WORLD);
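// In the implicit-moment method the moments gathered above (charge density
// rho_s and current J_s per species, deposited with trilinear cloud-in-cell
// weights in iPIC3D) feed the "hat" quantities computed by
// calculateHatFunctions(), which carry the implicit particle response into
// the field solve below.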
// OUTPUT to the large per-process file proc<rank>.hdf
if (cycle % (col->getFieldOutputCycle()) == 0 || cycle == first_cycle) {
hdf5_agent.open_append(SaveDirName + "/proc" + num_proc.str() + ".hdf");
output_mgr.output("Eall + Ball + rhos + Jsall + pressure", cycle);
// Pressure tensor is available
hdf5_agent.close();
}
if (cycle % (col->getParticlesOutputCycle()) == 0 && col->getParticlesOutputCycle() != 1) { // a ParticlesOutputCycle of 1 disables particle output here
hdf5_agent.open_append(SaveDirName + "/proc" + num_proc.str() + ".hdf");
output_mgr.output("position + velocity + q ", cycle, 1);
hdf5_agent.close();
}
// write the virtual satellite traces
if (ns > 2) { // the sums below read species 0..3 (electrons 0,2 and ions 1,3 in the usual GEM setup), so this effectively assumes ns >= 4
ofstream my_file(cqsat.c_str(), fstream::app);
for (int isat = 0; isat < nsat; isat++) {
for (int jsat = 0; jsat < nsat; jsat++) {
for (int ksat = 0; ksat < nsat; ksat++) {
int index1 = 1 + isat * nx0 / nsat + nx0 / nsat / 2;
int index2 = 1 + jsat * ny0 / nsat + ny0 / nsat / 2;
int index3 = 1 + ksat * nz0 / nsat + nz0 / nsat / 2;
my_file << EMf->getBx(index1, index2, index3) << "\t" << EMf->getBy(index1, index2, index3) << "\t" << EMf->getBz(index1, index2, index3) << "\t";
my_file << EMf->getEx(index1, index2, index3) << "\t" << EMf->getEy(index1, index2, index3) << "\t" << EMf->getEz(index1, index2, index3) << "\t";
my_file << EMf->getJxs(index1, index2, index3, 0) + EMf->getJxs(index1, index2, index3, 2) << "\t" << EMf->getJys(index1, index2, index3, 0) + EMf->getJys(index1, index2, index3, 2) << "\t" << EMf->getJzs(index1, index2, index3, 0) + EMf->getJzs(index1, index2, index3, 2) << "\t";
my_file << EMf->getJxs(index1, index2, index3, 1) + EMf->getJxs(index1, index2, index3, 3) << "\t" << EMf->getJys(index1, index2, index3, 1) + EMf->getJys(index1, index2, index3, 3) << "\t" << EMf->getJzs(index1, index2, index3, 1) + EMf->getJzs(index1, index2, index3, 3) << "\t";
my_file << EMf->getRHOns(index1, index2, index3, 0) + EMf->getRHOns(index1, index2, index3, 2) << "\t";
my_file << EMf->getRHOns(index1, index2, index3, 1) + EMf->getRHOns(index1, index2, index3, 3) << "\t";
}
}
}
my_file << endl;
my_file.close();
}
// done with output
// MAXWELL'S SOLVER
EMf->calculateE(grid, vct); // calculate the E field
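// calculateE solves the implicit field equation of the theta scheme: the
// electric field is advanced at the intermediate level E^{n+theta} from a
// curl-curl equation solved iteratively (GMRES in iPIC3D), with the hat
// moments supplying the implicit susceptibility; the new-time field then
// follows from E^{n+1} = (E^{n+theta} - (1 - theta) * E^n) / theta.
// (A summary of the published scheme, not a line-by-line description.)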
// PARTICLE MOVER
for (int i = 0; i < ns; i++) // move each species
mem_avail = part[i].mover_PC(grid, vct, EMf); // use the Predictor Corrector scheme
if (mem_avail < 0) { // not enough memory space allocated for particles: stop the simulation
if (myrank == 0) {
cout << "*************************************************************" << endl;
cout << "Simulation stopped. Not enough memory allocated for particles" << endl;
cout << "*************************************************************" << endl;
}
cycle = col->getNcycles() + first_cycle; // force the loop condition to fail, ending the run after this iteration
}
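// mover_PC is the implicit predictor-corrector (midpoint) mover: it iterates
// the average velocity v_bar with fields evaluated at the midpoint
// x_bar = x^n + v_bar * dt / 2, then sets x^{n+1} = x^n + v_bar * dt and
// v^{n+1} = 2 * v_bar - v^n (the usual iPIC3D scheme, stated as a summary).
// Note that mem_avail keeps only the last species' return code, so an
// allocation failure in an earlier species can be masked by a later success.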
EMf->calculateB(grid, vct); // calculate the B field
// write the conserved quantities
if (cycle % col->getDiagnosticsOutputCycle() == 0) {
Eenergy = EMf->getEenergy();
Benergy = EMf->getBenergy();
TOTenergy = 0.0;
TOTmomentum = 0.0;
for (int is = 0; is < ns; is++) {
Ke[is] = part[is].getKe();
TOTenergy += Ke[is];
momentum[is] = part[is].getP();
TOTmomentum += momentum[is];
}
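// ConservedQuantities.txt columns: cycle, total energy (E + B + kinetic),
// total momentum, electric field energy, magnetic field energy, total
// kinetic energy. The implicit-moment method does not conserve energy
// exactly, so this file is the standard check on numerical damping/heating.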
if (myrank == 0) {
ofstream my_file(cq.c_str(), fstream::app);
my_file << cycle << "\t" << (Eenergy + Benergy + TOTenergy) << "\t" << TOTmomentum << "\t" << Eenergy << "\t" << Benergy << "\t" << TOTenergy << endl;
my_file.close();
}
// Velocity distribution
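// Each record in DistributionFunctions.txt is: cycle, species index, the
// species' maximum speed, then nDistributionBins (1000) histogram counts,
// presumably binned over [0, maxVel]; maxVel is recomputed per species and
// per diagnostic cycle, so bin widths differ between records.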
for (int is=0; is < ns; is++) {
maxVel = part[is].getMaxVelocity();
VelocityDist = part[is].getVelocityDistribution(nDistributionBins, maxVel);
if (myrank == 0){
ofstream my_file(ds.c_str(), fstream::app);
my_file << cycle << "\t" << is << "\t" << maxVel;
for (int i=0; i < nDistributionBins; i++)
my_file << "\t" << VelocityDist[i];
my_file << endl;
my_file.close();
}
}
}
// write the RESTART file
if (cycle % restart_cycle == 0 && cycle != first_cycle)
writeRESTART(RestartDirName, myrank, cycle, ns, mpi, vct, col, grid, EMf, part, 0); // the trailing 0 overwrites the restart file; without it the data would be appended
}
if (mem_avail == 0) // write the final restart only if the simulation finished successfully
writeRESTART(RestartDirName, myrank, (col->getNcycles() + first_cycle) - 1, ns, mpi, vct, col, grid, EMf, part, 0);
// stop profiling
my_clock->stopTiming();
// deallocate
delete [] Ke;
delete [] momentum;
// close MPI
mpi->finalize_mpi();
return (0);
}