// Copyright (C) 2004-2006 The Trustees of Indiana University.
// Copyright (C) 2002 Brad King and Douglas Gregor

// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

//  Authors: Douglas Gregor
//           Andrew Lumsdaine
//           Brian Barrett
#ifndef BOOST_PARALLEL_GRAPH_PAGE_RANK_HPP
#define BOOST_PARALLEL_GRAPH_PAGE_RANK_HPP

#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif

#include <boost/assert.hpp>
#include <boost/graph/overloading.hpp>
#include <boost/graph/page_rank.hpp>
#include <boost/graph/distributed/concepts.hpp>
#include <boost/property_map/parallel/distributed_property_map.hpp>
#include <boost/property_map/parallel/caching_property_map.hpp>
#include <boost/graph/parallel/algorithm.hpp>
#include <boost/graph/parallel/container_traits.hpp>

// #define WANT_MPI_ONESIDED 1

namespace boost { namespace graph { namespace distributed {

namespace detail {
#ifdef WANT_MPI_ONESIDED
  template<typename Graph, typename RankMap, typename owner_map_t>
  void page_rank_step(const Graph& g, RankMap from_rank, MPI_Win to_win,
                      typename property_traits<RankMap>::value_type damping,
                      owner_map_t owner)
  {
    typedef typename property_traits<RankMap>::value_type rank_type;
    int me, ret;
    MPI_Comm_rank(MPI_COMM_WORLD, &me);

    // MPI_Accumulate is not required to store the value of the data
    // being sent, only the address.  The value of the memory location
    // must not change until the end of the access epoch, meaning the
    // call to MPI_Fence.  We therefore store the updated value back
    // into the from_rank map before the accumulate rather than using
    // a temporary.  We're going to reset the values in from_rank
    // before the end of page_rank_step() anyway, so this isn't a huge
    // deal.  But MPI-2 One-sided is an abomination.
    BGL_FORALL_VERTICES_T(u, g, Graph) {
      put(from_rank, u, (damping * get(from_rank, u) / out_degree(u, g)));
      BGL_FORALL_ADJ_T(u, v, g, Graph) {
        ret = MPI_Accumulate(&(from_rank[u]),
                             1, MPI_DOUBLE,
                             get(owner, v), local(v),
                             1, MPI_DOUBLE, MPI_SUM, to_win);
        BOOST_ASSERT(MPI_SUCCESS == ret);
      }
    }
    MPI_Win_fence(0, to_win);

    // Set new rank maps for the other map.  Do this now to get around
    // the stupid synchronization rules of MPI-2 One-sided
    BGL_FORALL_VERTICES_T(v, g, Graph) put(from_rank, v, rank_type(1 - damping));
  }
#endif

  template<typename T>
  struct rank_accumulate_reducer {
    BOOST_STATIC_CONSTANT(bool, non_default_resolver = true);

    template<typename K>
    T operator()(const K&) const { return T(0); }

    template<typename K>
    T operator()(const K&, const T& x, const T& y) const { return x + y; }
  };
} // end namespace detail

template<typename Graph, typename RankMap, typename Done, typename RankMap2>
void
page_rank_impl(const Graph& g, RankMap rank_map, Done done,
               typename property_traits<RankMap>::value_type damping,
               typename graph_traits<Graph>::vertices_size_type n,
               RankMap2 rank_map2)
{
  typedef typename property_traits<RankMap>::value_type rank_type;

  int me;
  MPI_Comm_rank(MPI_COMM_WORLD, &me);

  typename property_map<Graph, vertex_owner_t>::const_type
    owner = get(vertex_owner, g);
  (void)owner;

  typedef typename boost::graph::parallel::process_group_type<Graph>
    ::type process_group_type;
  typedef typename process_group_type::process_id_type process_id_type;

  process_group_type pg = process_group(g);
  process_id_type id = process_id(pg);

  BOOST_ASSERT(me == id);

  rank_type initial_rank = rank_type(rank_type(1) / n);
  BGL_FORALL_VERTICES_T(v, g, Graph) put(rank_map, v, initial_rank);

#ifdef WANT_MPI_ONESIDED

  BOOST_ASSERT(sizeof(rank_type) == sizeof(double));

  bool to_map_2 = true;
  MPI_Win win, win2;

  MPI_Win_create(&(rank_map[*(vertices(g).first)]),
                 sizeof(double) * num_vertices(g),
                 sizeof(double),
                 MPI_INFO_NULL, MPI_COMM_WORLD, &win);
  MPI_Win_set_name(win, "rank_map_win");
  MPI_Win_create(&(rank_map2[*(vertices(g).first)]),
                 sizeof(double) * num_vertices(g),
                 sizeof(double),
                 MPI_INFO_NULL, MPI_COMM_WORLD, &win2);
  MPI_Win_set_name(win2, "rank_map2_win");

  // set initial rank maps for the first iteration...
  BGL_FORALL_VERTICES_T(v, g, Graph) put(rank_map2, v, rank_type(1 - damping));

  MPI_Win_fence(0, win);
  MPI_Win_fence(0, win2);

  while ((to_map_2 && !done(rank_map, g)) ||
         (!to_map_2 && !done(rank_map2, g))) {
    if (to_map_2) {
      graph::distributed::detail::page_rank_step(g, rank_map, win2, damping, owner);
      to_map_2 = false;
    } else {
      graph::distributed::detail::page_rank_step(g, rank_map2, win, damping, owner);
      to_map_2 = true;
    }
  }
  synchronize(boost::graph::parallel::process_group(g));

  MPI_Win_free(&win);
  MPI_Win_free(&win2);

#else
  // The ranks accumulate after each step.
  rank_map.set_reduce(detail::rank_accumulate_reducer<rank_type>());
  rank_map2.set_reduce(detail::rank_accumulate_reducer<rank_type>());
  rank_map.set_consistency_model(boost::parallel::cm_flush | boost::parallel::cm_reset);
  rank_map2.set_consistency_model(boost::parallel::cm_flush | boost::parallel::cm_reset);

  bool to_map_2 = true;
  while ((to_map_2 && !done(rank_map, g)) ||
         (!to_map_2 && !done(rank_map2, g))) {
    /**
     * PageRank can be implemented slightly more efficiently on a
     * bidirectional graph than on an incidence graph. However,
     * distributed PageRank requires that we have the rank of the
     * source vertex available locally, so we force the incidence
     * graph implementation, which pushes rank from source to
     * target.
     */
    typedef incidence_graph_tag category;
    if (to_map_2) {
      graph::detail::page_rank_step(g, rank_map, rank_map2, damping,
                                    category());
      to_map_2 = false;
    } else {
      graph::detail::page_rank_step(g, rank_map2, rank_map, damping,
                                    category());
      to_map_2 = true;
    }
    using boost::graph::parallel::process_group;
    synchronize(process_group(g));
  }

  rank_map.reset();
#endif

  if (!to_map_2)
    BGL_FORALL_VERTICES_T(v, g, Graph) put(rank_map, v, get(rank_map2, v));
}

template<typename Graph, typename RankMap, typename Done, typename RankMap2>
void
page_rank(const Graph& g, RankMap rank_map, Done done,
          typename property_traits<RankMap>::value_type damping,
          typename graph_traits<Graph>::vertices_size_type n,
          RankMap2 rank_map2
          BOOST_GRAPH_ENABLE_IF_MODELS_PARM(Graph, distributed_graph_tag))
{
  (page_rank_impl)(g, rank_map, done, damping, n, rank_map2);
}

template<typename MutableGraph>
void
remove_dangling_links(MutableGraph& g
                      BOOST_GRAPH_ENABLE_IF_MODELS_PARM(MutableGraph,
                                                        distributed_graph_tag))
{
  typename graph_traits<MutableGraph>::vertices_size_type old_n;
  do {
    old_n = num_vertices(g);

    typename graph_traits<MutableGraph>::vertex_iterator vi, vi_end;
    for (boost::tie(vi, vi_end) = vertices(g); vi != vi_end; /* in loop */) {
      typename graph_traits<MutableGraph>::vertex_descriptor v = *vi++;
      if (out_degree(v, g) == 0) {
        clear_vertex(v, g);
        remove_vertex(v, g);
      }
    }
  } while (num_vertices(g) < old_n);
}

} // end namespace distributed

using distributed::page_rank;
using distributed::remove_dangling_links;

} } // end namespace boost::graph

#endif // BOOST_PARALLEL_GRAPH_PAGE_RANK_HPP
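
// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the Parallel BGL API and excluded from
// compilation.  The function below mirrors, on a single process and a plain
// adjacency structure, the rank update that page_rank_step() performs above:
// each vertex pushes damping * rank / out_degree to every target on top of a
// (1 - damping) base value, and two rank arrays are ping-ponged between
// iterations just like rank_map / rank_map2.  The names toy_page_rank, adj,
// ranks, and next are hypothetical and exist only for this example.
#if 0
#include <algorithm>
#include <cstddef>
#include <vector>

std::vector<double>
toy_page_rank(const std::vector<std::vector<std::size_t> >& adj,
              double damping, int iterations)
{
  std::size_t n = adj.size();
  std::vector<double> ranks(n, 1.0 / n), next(n);
  for (int step = 0; step < iterations; ++step) {
    // Every vertex starts each iteration with the (1 - damping) base rank.
    std::fill(next.begin(), next.end(), 1.0 - damping);
    for (std::size_t u = 0; u < n; ++u) {
      if (adj[u].empty()) continue;            // dangling vertex: nothing to push
      double share = damping * ranks[u] / adj[u].size();
      for (std::size_t i = 0; i < adj[u].size(); ++i)
        next[adj[u][i]] += share;              // accumulate rank at the target
    }
    ranks.swap(next);                          // ping-pong the two rank maps
  }
  return ranks;
}
#endif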