annotate DEPENDENCIES/generic/include/boost/graph/distributed/page_rank.hpp @ 125:34e428693f5d vext

Vext -> Repoint
author Chris Cannam
date Thu, 14 Jun 2018 11:15:39 +0100
parents 2665513ce2d3
children
rev   line source
Chris@16 1 // Copyright (C) 2004-2006 The Trustees of Indiana University.
Chris@16 2 // Copyright (C) 2002 Brad King and Douglas Gregor
Chris@16 3
Chris@16 4 // Use, modification and distribution is subject to the Boost Software
Chris@16 5 // License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
Chris@16 6 // http://www.boost.org/LICENSE_1_0.txt)
Chris@16 7
Chris@16 8 // Authors: Douglas Gregor
Chris@16 9 // Andrew Lumsdaine
Chris@16 10 // Brian Barrett
Chris@16 11 #ifndef BOOST_PARALLEL_GRAPH_PAGE_RANK_HPP
Chris@16 12 #define BOOST_PARALLEL_GRAPH_PAGE_RANK_HPP
Chris@16 13
Chris@16 14 #ifndef BOOST_GRAPH_USE_MPI
Chris@16 15 #error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
Chris@16 16 #endif
Chris@16 17
Chris@16 18 #include <boost/assert.hpp>
Chris@16 19 #include <boost/graph/overloading.hpp>
Chris@16 20 #include <boost/graph/page_rank.hpp>
Chris@16 21 #include <boost/graph/distributed/concepts.hpp>
Chris@16 22 #include <boost/property_map/parallel/distributed_property_map.hpp>
Chris@16 23 #include <boost/property_map/parallel/caching_property_map.hpp>
Chris@16 24 #include <boost/graph/parallel/algorithm.hpp>
Chris@16 25 #include <boost/graph/parallel/container_traits.hpp>
Chris@16 26
Chris@16 27 // #define WANT_MPI_ONESIDED 1
Chris@16 28
Chris@16 29 namespace boost { namespace graph { namespace distributed {
Chris@16 30
Chris@16 31 namespace detail {
Chris@16 32 #ifdef WANT_MPI_ONESIDED
// One PageRank iteration using MPI-2 one-sided communication (only
// compiled when WANT_MPI_ONESIDED is defined).  For every local vertex u,
// scatters damping * rank(u) / out_degree(u) into the rank window of each
// adjacent vertex v on v's owning process via MPI_Accumulate(MPI_SUM),
// closes the access epoch with MPI_Win_fence, then resets from_rank to
// (1 - damping) everywhere so it can serve as the accumulation target of
// the *next* step.
//
// Parameters:
//   g         - the (distributed) graph
//   from_rank - rank map read this step; clobbered as described above
//   to_win    - MPI window over the target rank map's local storage
//   damping   - PageRank damping factor
//   owner     - property map: vertex -> owning process rank
template<typename Graph, typename RankMap, typename owner_map_t>
void page_rank_step(const Graph& g, RankMap from_rank, MPI_Win to_win,
                    typename property_traits<RankMap>::value_type damping,
                    owner_map_t owner)
{
  typedef typename property_traits<RankMap>::value_type rank_type;
  // NOTE(review): `me` is set but not otherwise used here -- presumably
  // kept for debugging; confirm before removing.
  int me, ret;
  MPI_Comm_rank(MPI_COMM_WORLD, &me);

  // MPI_Accumulate is not required to store the value of the data
  // being sent, only the address. The value of the memory location
  // must not change until the end of the access epoch, meaning the
  // call to MPI_Fence. We therefore store the updated value back
  // into the from_rank map before the accumulate rather than using
  // a temporary. We're going to reset the values in the from_rank
  // before the end of page_rank_step() anyway, so this isn't a huge
  // deal. But MPI-2 One-sided is an abomination.
  BGL_FORALL_VERTICES_T(u, g, Graph) {
    put(from_rank, u, (damping * get(from_rank, u) / out_degree(u, g)));
    BGL_FORALL_ADJ_T(u, v, g, Graph) {
      // Remote add into v's slot (local index on its owner) in to_win.
      // MPI_DOUBLE is valid because the caller asserts
      // sizeof(rank_type) == sizeof(double).
      ret = MPI_Accumulate(&(from_rank[u]),
                           1, MPI_DOUBLE,
                           get(owner, v), local(v),
                           1, MPI_DOUBLE, MPI_SUM, to_win);
      BOOST_ASSERT(MPI_SUCCESS == ret);
    }
  }
  MPI_Win_fence(0, to_win);

  // Set new rank maps for the other map. Do this now to get around
  // the stupid synchronization rules of MPI-2 One-sided
  BGL_FORALL_VERTICES_T(v, g, Graph) put(from_rank, v, rank_type(1 - damping));
}
Chris@16 66 #endif
Chris@16 67
Chris@16 68 template<typename T>
Chris@16 69 struct rank_accumulate_reducer {
Chris@16 70 BOOST_STATIC_CONSTANT(bool, non_default_resolver = true);
Chris@16 71
Chris@16 72 template<typename K>
Chris@16 73 T operator()(const K&) const { return T(0); }
Chris@16 74
Chris@16 75 template<typename K>
Chris@16 76 T operator()(const K&, const T& x, const T& y) const { return x + y; }
Chris@16 77 };
Chris@16 78 } // end namespace detail
Chris@16 79
Chris@16 80 template<typename Graph, typename RankMap, typename Done, typename RankMap2>
Chris@16 81 void
Chris@16 82 page_rank_impl(const Graph& g, RankMap rank_map, Done done,
Chris@16 83 typename property_traits<RankMap>::value_type damping,
Chris@16 84 typename graph_traits<Graph>::vertices_size_type n,
Chris@16 85 RankMap2 rank_map2)
Chris@16 86 {
Chris@16 87 typedef typename property_traits<RankMap>::value_type rank_type;
Chris@16 88
Chris@16 89 int me;
Chris@16 90 MPI_Comm_rank(MPI_COMM_WORLD, &me);
Chris@16 91
Chris@16 92 typename property_map<Graph, vertex_owner_t>::const_type
Chris@16 93 owner = get(vertex_owner, g);
Chris@16 94 (void)owner;
Chris@16 95
Chris@16 96 typedef typename boost::graph::parallel::process_group_type<Graph>
Chris@16 97 ::type process_group_type;
Chris@16 98 typedef typename process_group_type::process_id_type process_id_type;
Chris@16 99
Chris@16 100 process_group_type pg = process_group(g);
Chris@16 101 process_id_type id = process_id(pg);
Chris@16 102
Chris@16 103 BOOST_ASSERT(me == id);
Chris@16 104
Chris@16 105 rank_type initial_rank = rank_type(rank_type(1) / n);
Chris@16 106 BGL_FORALL_VERTICES_T(v, g, Graph) put(rank_map, v, initial_rank);
Chris@16 107
Chris@16 108 #ifdef WANT_MPI_ONESIDED
Chris@16 109
Chris@16 110 BOOST_ASSERT(sizeof(rank_type) == sizeof(double));
Chris@16 111
Chris@16 112 bool to_map_2 = true;
Chris@16 113 MPI_Win win, win2;
Chris@16 114
Chris@16 115 MPI_Win_create(&(rank_map[*(vertices(g).first)]),
Chris@16 116 sizeof(double) * num_vertices(g),
Chris@16 117 sizeof(double),
Chris@16 118 MPI_INFO_NULL, MPI_COMM_WORLD, &win);
Chris@16 119 MPI_Win_set_name(win, "rank_map_win");
Chris@16 120 MPI_Win_create(&(rank_map2[*(vertices(g).first)]),
Chris@16 121 sizeof(double) * num_vertices(g),
Chris@16 122 sizeof(double),
Chris@16 123 MPI_INFO_NULL, MPI_COMM_WORLD, &win2);
Chris@16 124 MPI_Win_set_name(win, "rank_map2_win");
Chris@16 125
Chris@16 126 // set initial rank maps for the first iteration...
Chris@16 127 BGL_FORALL_VERTICES_T(v, g, Graph) put(rank_map2, v, rank_type(1 - damping));
Chris@16 128
Chris@16 129 MPI_Win_fence(0, win);
Chris@16 130 MPI_Win_fence(0, win2);
Chris@16 131
Chris@16 132 while ((to_map_2 && !done(rank_map, g)) ||
Chris@16 133 (!to_map_2 && !done(rank_map2, g))) {
Chris@16 134 if (to_map_2) {
Chris@16 135 graph::distributed::detail::page_rank_step(g, rank_map, win2, damping, owner);
Chris@16 136 to_map_2 = false;
Chris@16 137 } else {
Chris@16 138 graph::distributed::detail::page_rank_step(g, rank_map2, win, damping, owner);
Chris@16 139 to_map_2 = true;
Chris@16 140 }
Chris@16 141 }
Chris@16 142 synchronize(boost::graph::parallel::process_group(g));
Chris@16 143
Chris@16 144 MPI_Win_free(&win);
Chris@16 145 MPI_Win_free(&win2);
Chris@16 146
Chris@16 147 #else
Chris@16 148 // The ranks accumulate after each step.
Chris@16 149 rank_map.set_reduce(detail::rank_accumulate_reducer<rank_type>());
Chris@16 150 rank_map2.set_reduce(detail::rank_accumulate_reducer<rank_type>());
Chris@16 151 rank_map.set_consistency_model(boost::parallel::cm_flush | boost::parallel::cm_reset);
Chris@16 152 rank_map2.set_consistency_model(boost::parallel::cm_flush | boost::parallel::cm_reset);
Chris@16 153
Chris@16 154 bool to_map_2 = true;
Chris@16 155 while ((to_map_2 && !done(rank_map, g)) ||
Chris@16 156 (!to_map_2 && !done(rank_map2, g))) {
Chris@16 157 /**
Chris@16 158 * PageRank can implemented slightly more efficiently on a
Chris@16 159 * bidirectional graph than on an incidence graph. However,
Chris@16 160 * distributed PageRank requires that we have the rank of the
Chris@16 161 * source vertex available locally, so we force the incidence
Chris@16 162 * graph implementation, which pushes rank from source to
Chris@16 163 * target.
Chris@16 164 */
Chris@16 165 typedef incidence_graph_tag category;
Chris@16 166 if (to_map_2) {
Chris@16 167 graph::detail::page_rank_step(g, rank_map, rank_map2, damping,
Chris@16 168 category());
Chris@16 169 to_map_2 = false;
Chris@16 170 } else {
Chris@16 171 graph::detail::page_rank_step(g, rank_map2, rank_map, damping,
Chris@16 172 category());
Chris@16 173 to_map_2 = true;
Chris@16 174 }
Chris@16 175 using boost::graph::parallel::process_group;
Chris@16 176 synchronize(process_group(g));
Chris@16 177 }
Chris@16 178
Chris@16 179 rank_map.reset();
Chris@16 180 #endif
Chris@16 181
Chris@16 182 if (!to_map_2)
Chris@16 183 BGL_FORALL_VERTICES_T(v, g, Graph) put(rank_map, v, get(rank_map2, v));
Chris@16 184 }
Chris@16 185
// Public distributed PageRank entry point.  The trailing
// BOOST_GRAPH_ENABLE_IF_MODELS_PARM restricts this overload to graph
// types modeling distributed_graph_tag, so it coexists with the
// sequential page_rank from <boost/graph/page_rank.hpp>.
template<typename Graph, typename RankMap, typename Done, typename RankMap2>
void
page_rank(const Graph& g, RankMap rank_map, Done done,
          typename property_traits<RankMap>::value_type damping,
          typename graph_traits<Graph>::vertices_size_type n,
          RankMap2 rank_map2
          BOOST_GRAPH_ENABLE_IF_MODELS_PARM(Graph, distributed_graph_tag))
{
  // Parenthesizing the callee suppresses argument-dependent lookup,
  // pinning the call to this namespace's page_rank_impl.
  (page_rank_impl)(g, rank_map, done, damping, n, rank_map2);
}
Chris@16 196
Chris@16 197 template<typename MutableGraph>
Chris@16 198 void
Chris@16 199 remove_dangling_links(MutableGraph& g
Chris@16 200 BOOST_GRAPH_ENABLE_IF_MODELS_PARM(MutableGraph,
Chris@16 201 distributed_graph_tag))
Chris@16 202 {
Chris@16 203 typename graph_traits<MutableGraph>::vertices_size_type old_n;
Chris@16 204 do {
Chris@16 205 old_n = num_vertices(g);
Chris@16 206
Chris@16 207 typename graph_traits<MutableGraph>::vertex_iterator vi, vi_end;
Chris@16 208 for (boost::tie(vi, vi_end) = vertices(g); vi != vi_end; /* in loop */) {
Chris@16 209 typename graph_traits<MutableGraph>::vertex_descriptor v = *vi++;
Chris@16 210 if (out_degree(v, g) == 0) {
Chris@16 211 clear_vertex(v, g);
Chris@16 212 remove_vertex(v, g);
Chris@16 213 }
Chris@16 214 }
Chris@16 215 } while (num_vertices(g) < old_n);
Chris@16 216 }
Chris@16 217
Chris@16 218 } // end namespace distributed
Chris@16 219
Chris@16 220 using distributed::page_rank;
Chris@16 221 using distributed::remove_dangling_links;
Chris@16 222
Chris@16 223 } } // end namespace boost::graph
Chris@16 224
Chris@16 225 #endif // BOOST_PARALLEL_GRAPH_PAGE_RANK_HPP