Skip to content

Commit

Permalink
Merge pull request #311 from LLNL/bugfix/cerrs_to_couts
Browse files Browse the repository at this point in the history
Changed cerrs to couts in Distributed source files
  • Loading branch information
ldowen authored Nov 6, 2024
2 parents 6210f6d + 06a7e14 commit 50eec5f
Show file tree
Hide file tree
Showing 9 changed files with 27 additions and 27 deletions.
4 changes: 2 additions & 2 deletions src/Distributed/NestedGridRedistributeNodes.cc
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ redistributeNodes(DataBase<Dimension>& dataBase,

// Output the initial load distribution statistics.
const string initialLoadStats = this->gatherDomainDistributionStatistics(work);
if (procID == 0) cerr << "NestedGridRedistributeNodes::redistributeNodes initial load balancing:" << endl
if (procID == 0) cout << "NestedGridRedistributeNodes::redistributeNodes initial load balancing:" << endl
<< initialLoadStats << endl << endl;

// Compute the total work, and the target work per processor.
Expand Down Expand Up @@ -240,7 +240,7 @@ redistributeNodes(DataBase<Dimension>& dataBase,

// Output the final load distribution statistics.
const string finalLoadStats = this->gatherDomainDistributionStatistics(work);
if (procID == 0) cerr << "NestedGridRedistributeNodes::redistributeNodes final load balancing:" << endl
if (procID == 0) cout << "NestedGridRedistributeNodes::redistributeNodes final load balancing:" << endl
<< finalLoadStats << endl << endl;
MPI_Barrier(Communicator::communicator());

Expand Down
2 changes: 1 addition & 1 deletion src/Distributed/ParmetisRedistributeNodes.cc
Original file line number Diff line number Diff line change
Expand Up @@ -669,7 +669,7 @@ printConnectivityStatistics(const map<int, vector<pair<int, double> > >& neighbo

CHECK(navgNeighbor > 0);
avgNeighbor /= navgNeighbor;
cerr << "ParmetisRedistributeNodes:: min connections = "
cout << "ParmetisRedistributeNodes:: min connections = "
<< minNeighbor << endl
<< " max connections = "
<< maxNeighbor << endl
Expand Down
2 changes: 1 addition & 1 deletion src/Distributed/RedistributeNodes.cc
Original file line number Diff line number Diff line change
Expand Up @@ -736,7 +736,7 @@ workPerNode(const DataBase<Dimension>& dataBase,
// Output some statistics.
const Scalar minWeight = result.min();
const Scalar maxWeight = result.max();
if (Process::getRank() == 0) cerr << "RedistributeNodes::workPerNode: min/max work : "
if (Process::getRank() == 0) cout << "RedistributeNodes::workPerNode: min/max work : "
<< minWeight << " "
<< maxWeight << endl;

Expand Down
4 changes: 2 additions & 2 deletions src/Distributed/SortAndDivideRedistributeNodes1d.cc
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ redistributeNodes(DataBase<Dim<1> >& dataBase,

// Output the initial load distribution statistics.
const string initialLoadStats = this->gatherDomainDistributionStatistics(work);
if (procID == 0) cerr << "SortAndDivideRedistributeNodes::redistributeNodes initial load balancing:" << endl
if (procID == 0) cout << "SortAndDivideRedistributeNodes::redistributeNodes initial load balancing:" << endl
<< initialLoadStats << endl << endl;

// Compute the total work, and the target work per processor.
Expand Down Expand Up @@ -183,7 +183,7 @@ redistributeNodes(DataBase<Dim<1> >& dataBase,

// Output the final load distribution statistics.
const string finalLoadStats = this->gatherDomainDistributionStatistics(work);
if (procID == 0) cerr << "SortAndDivideRedistributeNodes::redistributeNodes final load balancing:" << endl
if (procID == 0) cout << "SortAndDivideRedistributeNodes::redistributeNodes final load balancing:" << endl
<< finalLoadStats << endl << endl;
MPI_Barrier(Communicator::communicator());

Expand Down
8 changes: 4 additions & 4 deletions src/Distributed/SortAndDivideRedistributeNodes2d.cc
Original file line number Diff line number Diff line change
Expand Up @@ -125,14 +125,14 @@ redistributeNodes(DataBase<Dim<2> >& dataBase,
// Output the initial load distribution statistics.
const string initialLoadStats = this->gatherDomainDistributionStatistics(work);
if (procID == 0) {
cerr << "SortAndDivideRedistributeNodes::redistributeNodes initial load balancing:" << endl
cout << "SortAndDivideRedistributeNodes::redistributeNodes initial load balancing:" << endl
<< initialLoadStats << endl
<< " Domain distribution shape tensor: " << shapeTensor.eigenValues << endl
<< " Number of domains per work chunk: ";
for (vector<int>::const_iterator itr = domainsPerStep.begin();
itr != domainsPerStep.end();
++itr) cerr << " " << *itr;
cerr << endl;
++itr) cout << " " << *itr;
cout << endl;
}

// Compute the total work, and the target work per processor.
Expand Down Expand Up @@ -232,7 +232,7 @@ redistributeNodes(DataBase<Dim<2> >& dataBase,

// Output the final load distribution statistics.
const string finalLoadStats = this->gatherDomainDistributionStatistics(work);
if (procID == 0) cerr << "SortAndDivideRedistributeNodes::redistributeNodes final load balancing:" << endl
if (procID == 0) cout << "SortAndDivideRedistributeNodes::redistributeNodes final load balancing:" << endl
<< finalLoadStats << endl << endl;
MPI_Barrier(Communicator::communicator());

Expand Down
10 changes: 5 additions & 5 deletions src/Distributed/SortAndDivideRedistributeNodes3d.cc
Original file line number Diff line number Diff line change
Expand Up @@ -123,11 +123,11 @@ redistributeNodes(DataBase<Dim<3> >& dataBase,
// Output the initial load distribution statistics.
const string initialLoadStats = this->gatherDomainDistributionStatistics(work);
if (procID == 0) {
cerr << "SortAndDivideRedistributeNodes::redistributeNodes initial load balancing:" << endl
cout << "SortAndDivideRedistributeNodes::redistributeNodes initial load balancing:" << endl
<< initialLoadStats << endl
<< " Domain distribution shape tensor: " << shapeTensor.eigenValues << endl;
for (int i = 0; i != Dimension::nDim; ++i) {
cerr << " " << shapeTensor.eigenVectors.getColumn(i) << endl;
cout << " " << shapeTensor.eigenVectors.getColumn(i) << endl;
}
}

Expand Down Expand Up @@ -205,7 +205,7 @@ redistributeNodes(DataBase<Dim<3> >& dataBase,
// Iterate over the number of z domains we'll be assigning.
for (int iz = 0; iz != numZChunks; ++iz) {

if (procID == 0) cerr << "Assigning domain " << assignDomainID
if (procID == 0) cout << "Assigning domain " << assignDomainID
<< " of " << numProcs << "...";

// Peel off nodes from the front of the unassigned nodes, until the desired work
Expand All @@ -231,7 +231,7 @@ redistributeNodes(DataBase<Dim<3> >& dataBase,

// Increment the domain we're assigning to.
++assignDomainID;
if (procID == 0) cerr << "Done." << endl;
if (procID == 0) cout << "Done." << endl;

}

Expand Down Expand Up @@ -272,7 +272,7 @@ redistributeNodes(DataBase<Dim<3> >& dataBase,

// Output the final load distribution statistics.
const string finalLoadStats = this->gatherDomainDistributionStatistics(work);
if (procID == 0) cerr << "SortAndDivideRedistributeNodes::redistributeNodes final load balancing:" << endl
if (procID == 0) cout << "SortAndDivideRedistributeNodes::redistributeNodes final load balancing:" << endl
<< finalLoadStats << endl << endl;
MPI_Barrier(Communicator::communicator());

Expand Down
12 changes: 6 additions & 6 deletions src/Distributed/SpaceFillingCurveRedistributeNodes.cc
Original file line number Diff line number Diff line change
Expand Up @@ -157,27 +157,27 @@ redistributeNodes(DataBase<Dimension>& dataBase,

// Compute the target work per domain.
const Scalar targetWork = workField.sumElements()/numProcs;
if (procID == 0) cerr << "SpaceFillingCurveRedistributeNodes: Target work per process " << targetWork << endl;
if (procID == 0) cout << "SpaceFillingCurveRedistributeNodes: Target work per process " << targetWork << endl;

// Compute the Key indices for each point on this processor.
if (procID == 0) cerr << "SpaceFillingCurveRedistributeNodes: Hashing indices" << endl;
if (procID == 0) cout << "SpaceFillingCurveRedistributeNodes: Hashing indices" << endl;
FieldList<Dimension, Key> indices = computeHashedIndices(dataBase);

// Find the range of hashed indices.
const Key indexMin = indices.min();
const Key indexMax = indices.max();
CHECK(indexMax < indexMax + indexMax);
if (procID == 0) cerr << "SpaceFillingCurveRedistributeNodes: Index min/max : " << indexMin << " " << indexMax << endl;
if (procID == 0) cout << "SpaceFillingCurveRedistributeNodes: Index min/max : " << indexMin << " " << indexMax << endl;

// Build the array of (hashed index, DomainNode) pairs.
// Note this comes back locally sorted.
if (procID == 0) cerr << "SpaceFillingCurveRedistributeNodes: sorting indices" << endl;
if (procID == 0) cout << "SpaceFillingCurveRedistributeNodes: sorting indices" << endl;
vector<pair<Key, DomainNode<Dimension> > > sortedIndices = buildIndex2IDPairs(indices,
nodeDistribution);
const int numLocalNodes = nodeDistribution.size();

// Build our set of unique indices and their count.
if (procID == 0) cerr << "SpaceFillingCurveRedistributeNodes: Counting uniques and such" << endl;
if (procID == 0) cout << "SpaceFillingCurveRedistributeNodes: Counting uniques and such" << endl;
vector<Key> uniqueIndices;
vector<int> count;
vector<Scalar> work;
Expand Down Expand Up @@ -209,7 +209,7 @@ redistributeNodes(DataBase<Dimension>& dataBase,
CHECK(work.size() == uniqueIndices.size());
}
maxCount = allReduce(maxCount, SPHERAL_OP_MAX);
if (procID == 0) cerr << "SpaceFillingCurveRedistributeNodes: max redundancy is " << maxCount << endl;
if (procID == 0) cout << "SpaceFillingCurveRedistributeNodes: max redundancy is " << maxCount << endl;

// // DEBUG
// {
Expand Down
8 changes: 4 additions & 4 deletions src/Distributed/VoronoiRedistributeNodes.cc
Original file line number Diff line number Diff line change
Expand Up @@ -404,7 +404,7 @@ redistributeNodes(DataBase<Dimension>& dataBase,

// Define the length scale we use to determine when the generator positions have converged.
const double tol = (xmax - xmin).minElement() * mTolerance;
if (procID == 0) cerr << "VoronoiRedistributeNodes: Found bounding box of " << xmin << " " << xmax << endl
if (procID == 0) cout << "VoronoiRedistributeNodes: Found bounding box of " << xmin << " " << xmax << endl
<< " yielding generator convergence tolerance of " << tol << endl;

// Determine the average work per generator.
Expand Down Expand Up @@ -531,7 +531,7 @@ redistributeNodes(DataBase<Dimension>& dataBase,
CHECK(newGeneratorsInParents.size() == newParentCells.size());
generatorsInParents = newGeneratorsInParents;
parentCells = newParentCells;
if (procID == 0) cerr << " Generation " << level << " : "
if (procID == 0) cout << " Generation " << level << " : "
<< numRemainingGenerators << " generators remaining in "
<< generatorsInParents.size() << " cells."
<< endl;
Expand All @@ -540,7 +540,7 @@ redistributeNodes(DataBase<Dimension>& dataBase,

// // Are there still remaining degeneracies in the generator positions?
// if (numRemainingGenerators > 0) {
// if (procID == 0) cerr << " --> Breaking up " << numRemainingGenerators
// if (procID == 0) cout << " --> Breaking up " << numRemainingGenerators
// << " degeneracies in intial generator positions."
// << endl;
// for (vector<vector<size_t> >::const_iterator cellItr = generatorsInParents.begin();
Expand Down Expand Up @@ -648,7 +648,7 @@ redistributeNodes(DataBase<Dimension>& dataBase,
workRatio = maxWork*safeInv(minWork);

// Report this iterations statistics.
if (procID == 0) cerr << "VoronoiRedistributeNodes: Lloyds iteration " << iteration << endl
if (procID == 0) cout << "VoronoiRedistributeNodes: Lloyds iteration " << iteration << endl
<< " max change: " << maxDeltaGenerator << endl
<< " work ratio change: " << workRatio << " " << oldWorkRatio << " " << abs(workRatio*safeInv(oldWorkRatio) - 1.0) << endl
<< " [min, max, avg] work [" << minWork << ", " << maxWork << ", " << avgWork << "]" << endl
Expand Down
4 changes: 2 additions & 2 deletions src/Utilities/iterateIdealH.cc
Original file line number Diff line number Diff line change
Expand Up @@ -218,7 +218,7 @@ iterateIdealH(DataBase<Dimension>& dataBase,

// Output the statistics.
if (Process::getRank() == 0 && maxIterations > 1)
cerr << "iterateIdealH: (iteration, deltaH) = ("
cout << "iterateIdealH: (iteration, deltaH) = ("
<< itr << ", "
<< maxDeltaH << ")"
<< endl;
Expand Down Expand Up @@ -275,7 +275,7 @@ iterateIdealH(DataBase<Dimension>& dataBase,
// Report the final timing.
const auto t1 = clock();
if (Process::getRank() == 0 && maxIterations > 1)
cerr << "iterateIdealH: required a total of "
cout << "iterateIdealH: required a total of "
<< (t1 - t0)/CLOCKS_PER_SEC
<< " seconds."
<< endl;
Expand Down

0 comments on commit 50eec5f

Please sign in to comment.