Commit ea2ae6b7 authored by Alexis SALZMAN

[xGraph] add dist BFS+nodeTo API chg+use xExportStringDist

nodeAndConnectedEdgeDistInternal.h
==================================
Replace the I/O done directly with MPI-IO by a call to xExportStringDist.
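
For context, judging from the MPI-IO code removed in the diff below, the
helper concatenates each process's string into a single file in rank
order; the real call is just xtool::xExportStringDist(f_name.c_str(), os,
world). A minimal sketch of the pattern it encapsulates (not the actual
xTool implementation):

    #include <mpi.h>
    #include <sstream>
    #include <string>

    // Sketch only: each rank writes its chunk at an offset equal to the
    // sum of the chunk sizes of all lower ranks, obtained via an
    // inclusive prefix sum (MPI_Scan).
    void exportStringDistSketch(const char *fname, const std::ostringstream &os, MPI_Comm world)
    {
       const std::string s = os.str();
       const MPI_Offset nb = s.size();  // one char considered 1 byte long
       MPI_Offset end = 0;
       MPI_Scan(&nb, &end, 1, MPI_OFFSET, MPI_SUM, world);
       MPI_File file;
       MPI_File_open(world, fname, MPI_MODE_WRONLY | MPI_MODE_CREATE, MPI_INFO_NULL, &file);
       MPI_File_set_size(file, 0);  // truncate any previous content
       MPI_Status status;
       if (nb) MPI_File_write_at(file, end - nb, s.c_str(), static_cast<int>(nb), MPI_CHAR, &status);
       MPI_File_close(&file);
    }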

nodeAndConnectedEdge.h
======================
Add xxVisited methods to class nodeTo. The dirty friend functions are no
longer friends, as they now use this new public API.
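
As an illustration only (the exact xxVisited signatures are not spelled
out here; isVisited/setVisited below are hypothetical placeholders), a
formerly-friend helper can now go through the public API instead of
touching nodeTo internals:

    // Hypothetical sketch: isVisited()/setVisited() stand in for the
    // new xxVisited accessors, whatever their real names are.
    template <typename NODE>
    void resetVisitedFlag(NODE &node)
    {
       if (node.isVisited()) node.setVisited(false);  // no friendship needed
    }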

nodeAndConnectedEdgeDist.h
==========================
Add a distributed breadth-first search (BFS) function with one unique
source across a distributed oriented graph. This function is templated
on the graph description (but mainly uses the nodeTo graph API), and the
exchanger/functor it runs has call-backs to do the appropriate
communication/work on each node/edge traversal. It aims to be rather
generic; see the sketch below.
From a scaling point of view, performance will certainly remain poor if
the chosen source does not span the part of the graph shared by all the
processes. The next move on this topic will be to implement a
many-source BFS strategy, which will probably cover more of the graph
and thus more processes. Communication will be higher, but every process
will then have something to do, and the communication will be amortized
(larger messages).
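
As an illustration of the intended traversal pattern only, here is a
generic level-synchronous sketch of such a single-source distributed
BFS. All names (distBFSSketch, the exchanger call-backs
onNode/onEdge/exchangeFrontier, the isVisited/setVisited accessors) are
hypothetical; this is not the actual xGraph implementation.

    #include <mpi.h>
    #include <queue>

    // Sketch: local BFS rounds alternate with a user-defined frontier
    // exchange until no process has work left (global MPI_Allreduce test).
    template <typename NODE, typename EXCHANGER>
    void distBFSSketch(NODE *source, EXCHANGER &exch, MPI_Comm world)
    {
       std::queue<NODE *> frontier;
       if (source) frontier.push(source);  // the unique source lives on one process
       int global_work = 1;
       while (global_work)
       {
          // traverse everything reachable locally from the current frontier
          while (!frontier.empty())
          {
             NODE *n = frontier.front();
             frontier.pop();
             if (n->isVisited()) continue;  // hypothetical visited accessors
             n->setVisited(true);
             exch.onNode(n);  // user call-back: per-node work
             for (auto &pair : n->rangeChild())  // assumed (child, weight) pairs
             {
                NODE *child = pair.first;
                exch.onEdge(n, child, pair.second);  // user call-back: per-edge work
                if (!child->isVisited()) frontier.push(child);
             }
          }
          // user call-back: send frontier nodes sitting on process
          // boundaries, collect those discovered remotely into the frontier
          exch.exchangeFrontier(frontier);
          // stop only when every process ran out of work
          int local_work = frontier.empty() ? 0 : 1;
          MPI_Allreduce(&local_work, &global_work, 1, MPI_INT, MPI_MAX, world);
       }
    }

With a single source, only the processes whose subgraph is actually
reached ever leave the idle state, which is the imbalance the
many-source variant mentioned above is meant to reduce.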

This new function has been successfully tested with graphs created by
FM. Nevertheless, a dedicated test case should be added (TODO).

Xfiles_dependence.dot
=====================
Update with the FM->xGraph->xTool dependency chain.
parent 6ec84b69
@@ -64,8 +64,10 @@ digraph X {
   xTLS->xGeom [color=darkgreen]
   xTLS->xLegacySimpleCut [color=darkgreen]
   xTLS->xDoubleCut [color=darkgreen]
-  xFastMarching->Trellis
-  xFastMarching->xTool
+  xFastMarching->xGraph [color=darkslategray arrowhead=icurve]
+  xFastMarching->Trellis [color=darkslategray]
+  xFastMarching->xTool [color=darkslategray]
+  xGraph->xTool [color=firebrick]
   xDomainDecomp->xTool [color=darkorchid1]
   xDomainDecomp->xFEM [color=darkorchid1]
   xDomainDecomp->xInterfaceParMetis [color=darkorchid1]
@@ -7,98 +7,79 @@
 #ifndef _NODEANDCONNECTEDEDGEDIST_H
 #error "This header must not be included directly"
 #endif
+// xTool
+#include "xExportStringDist.h"
 namespace xgraph
 {
 namespace internal
 {
-template < typename NODE, class DATAMANAGER,typename ID, typename L, typename W >
-void exportDot(const DATAMANAGER & graph, std::string f_name,MPI_Comm world,const ID &fid, L *fl, W *fw)
+template <typename NODE, class DATAMANAGER, typename ID, typename L, typename W>
+void exportDot(const DATAMANAGER &graph, std::string f_name, MPI_Comm world, const ID &fid, L *fl, W *fw)
 {
-   // local
-   MPI_File file;
-   MPI_Status status;
-   int proc_id,nb_proc;
-   std::ostringstream os;
-   // open file an clean it
-   MPI_File_open(world, f_name.c_str(), MPI_MODE_WRONLY|MPI_MODE_CREATE, MPI_INFO_NULL, &file);
-   MPI_File_set_size(file,0);
-   MPI_Comm_rank(world,&proc_id);
-   MPI_Comm_size(world,&nb_proc);
-   auto node_range = xtool::make_range(graph.beginKey(),graph.endKey());
-   size_t nb_node = node_range.size();
-   if (!proc_id)
-      os<<"digraph X {"<<std::endl;
-   if (nb_proc>1 && nb_node)
-   {
-      os<<" subgraph cluster_"<<proc_id<<" {"<<std::endl;
-      os<<" label=\"p"<<proc_id<<"\";"<<std::endl;
-   }
-   if (fl)
-   {
-      for (auto & key : node_range)
-      {
-         unsigned int parent = fid(key);
-         os<< parent<<"[label=\""<<(*fl)(key)<<"\"];"<<std::endl;
-      }
-   }
-   else
-   {
-      for (auto & key : node_range)
-      {
-         unsigned int parent = fid(key);
-         os<< parent<<";"<<std::endl;
-      }
-   }
-   if (nb_proc>1 && nb_node)
-      os<<" }"<<std::endl;
-   if (fw)
-   {
-      for (auto & key : node_range)
-      {
-         const NODE *node = graph.getData(*key);
-         assert(node);
-         unsigned int parent = fid(key);
-         for (auto pair : node->rangeChild())
-         {
-            os<<parent<<" -> "<<fid(pair.first)<<" [label="<<(*fw)(pair.second)<<"];"<<std::endl;
-         }
-      }
-   }
-   else
-   {
-      for (auto & key : node_range)
-      {
-         const NODE *node = graph.getData(*key);
-         assert(node);
-         unsigned int parent = fid(key);
-         for (auto pair : node->rangeChild())
-         {
-            os<<parent<<" -> "<<fid(pair.first)<<";"<<std::endl;
-         }
-      }
-   }
-   if (proc_id == nb_proc-1)
-      os<<"}"<<std::endl;
-   // nb_info stor nb of char and char considerer here to be 1 Byte long
-   const MPI_Offset nb_info = os.str().size();
-   // compute offset
-   MPI_Offset offsets = 0;
-   MPI_Scan(&nb_info,&offsets,1,MPI_OFFSET,MPI_SUM,world);
-   MPI_Offset dist;
-   dist = offsets-nb_info;
+   // local
+   int proc_id, nb_proc;
+   std::ostringstream os;
-   // write info
-   if (nb_info)
-      MPI_File_write_at(file,dist,os.str().c_str(),nb_info,MPI_CHAR,&status);
+   MPI_Comm_rank(world, &proc_id);
+   MPI_Comm_size(world, &nb_proc);
+   auto node_range = xtool::make_range(graph.beginKey(), graph.endKey());
+   size_t nb_node = node_range.size();
-   // close file
-   MPI_File_close(&file);
+   if (!proc_id) os << "digraph X {" << std::endl;
+   if (nb_proc > 1 && nb_node)
+   {
+      os << " subgraph cluster_" << proc_id << " {" << std::endl;
+      os << " label=\"p" << proc_id << "\";" << std::endl;
+   }
+   if (fl)
+   {
+      for (auto &key : node_range)
+      {
+         unsigned int parent = fid(key);
+         os << parent << "[label=\"" << (*fl)(key) << "\"];" << std::endl;
+      }
+   }
+   else
+   {
+      for (auto &key : node_range)
+      {
+         unsigned int parent = fid(key);
+         os << parent << ";" << std::endl;
+      }
+   }
+   if (nb_proc > 1 && nb_node) os << " }" << std::endl;
+   if (fw)
+   {
+      for (auto &key : node_range)
+      {
+         const NODE *node = graph.getData(*key);
+         assert(node);
+         unsigned int parent = fid(key);
+         for (auto pair : node->rangeChild())
+         {
+            os << parent << " -> " << fid(pair.first) << " [label=" << (*fw)(pair.second) << "];" << std::endl;
+         }
+      }
+   }
+   else
+   {
+      for (auto &key : node_range)
+      {
+         const NODE *node = graph.getData(*key);
+         assert(node);
+         unsigned int parent = fid(key);
+         for (auto pair : node->rangeChild())
+         {
+            os << parent << " -> " << fid(pair.first) << ";" << std::endl;
+         }
+      }
+   }
+   if (proc_id == nb_proc - 1) os << "}" << std::endl;
+   // export string to file
+   xtool::xExportStringDist(f_name.c_str(), os, world);
 }
-} //end namespace
-} //end namespace
+} // end namespace
+} // end namespace