// Copyright (C) 2004-2008 The Trustees of Indiana University.

// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

//  Authors: Douglas Gregor
//           Nick Edmonds
//           Andrew Lumsdaine

// The placement of this #include probably looks very odd relative to
// the #ifndef/#define pair below. However, this placement is
// extremely important to allow the various property map headers to be
// included in any order.
#include <boost/property_map/property_map.hpp>

#ifndef BOOST_PARALLEL_DISTRIBUTED_PROPERTY_MAP_HPP
#define BOOST_PARALLEL_DISTRIBUTED_PROPERTY_MAP_HPP

#include <boost/assert.hpp>
#include <boost/type_traits/is_base_and_derived.hpp>
#include <boost/type_traits/is_same.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/weak_ptr.hpp>
#include <boost/optional.hpp>
#include <boost/function/function1.hpp>
#include <boost/property_map/parallel/process_group.hpp>
#include <boost/property_map/parallel/local_property_map.hpp>
#include <boost/property_map/parallel/basic_reduce.hpp>
#include <boost/property_map/parallel/detail/untracked_pair.hpp>
#include <boost/property_map/parallel/simple_trigger.hpp>
#include <boost/multi_index_container.hpp>
#include <boost/multi_index/hashed_index.hpp>
#include <boost/multi_index/sequenced_index.hpp>
#include <boost/multi_index/member.hpp>
#include <vector>
#include <utility>

// Serialization functions for constructs we use
#include <boost/serialization/utility.hpp>

namespace boost { namespace parallel {

namespace detail {
  /**************************************************************************
   * Metafunction that degrades an Lvalue Property Map category tag to
   * a Read Write Property Map category tag.
   **************************************************************************/
  template<bool IsLvaluePropertyMap>
  struct make_nonlvalue_property_map
  {
    template<typename T> struct apply { typedef T type; };
  };

  template<>
  struct make_nonlvalue_property_map<true>
  {
    template<typename>
    struct apply
    {
      typedef read_write_property_map_tag type;
    };
  };

  /**************************************************************************
   * Performs a "put" on a property map so long as the property map is
   * a Writable Property Map or a mutable Lvalue Property Map. This
   * is required because the distributed property map's message
   * handler handles "put" messages even for a const property map,
   * although receipt of a "put" message is ill-formed.
   **************************************************************************/
  template<bool IsLvaluePropertyMap>
  struct maybe_put_in_lvalue_pm
  {
    template<typename PropertyMap, typename Key, typename Value>
    static inline void
    do_put(PropertyMap, const Key&, const Value&)
    { BOOST_ASSERT(false); }
  };

  template<>
  struct maybe_put_in_lvalue_pm<true>
  {
    template<typename PropertyMap, typename Key, typename Value>
    static inline void
    do_put(PropertyMap pm, const Key& key, const Value& value)
    {
      using boost::put;

      put(pm, key, value);
    }
  };

  template<typename PropertyMap, typename Key, typename Value>
  inline void
  maybe_put_impl(PropertyMap pm, const Key& key, const Value& value,
                 writable_property_map_tag)
  {
    using boost::put;

    put(pm, key, value);
  }

  template<typename PropertyMap, typename Key, typename Value>
  inline void
  maybe_put_impl(PropertyMap pm, const Key& key, const Value& value,
                 lvalue_property_map_tag)
  {
    typedef typename property_traits<PropertyMap>::value_type value_type;
    typedef typename property_traits<PropertyMap>::reference reference;
    // DPG TBD: Some property maps are improperly characterized as
    // lvalue_property_maps, when in fact they do not provide true
    // references. The most typical example is those property maps
    // built from vector<bool> and its iterators, which deal with
    // proxies. We don't want to mischaracterize these as not having a
    // "put" operation, so we only consider an lvalue_property_map as
    // constant if its reference is const value_type&. In fact, this
    // isn't even quite correct (think of a
    // vector<bool>::const_iterator), but at present C++ doesn't
    // provide us with any alternatives.
    typedef is_same<const value_type&, reference> is_constant;

    maybe_put_in_lvalue_pm<(!is_constant::value)>::do_put(pm, key, value);
  }

  template<typename PropertyMap, typename Key, typename Value>
  inline void
  maybe_put_impl(PropertyMap, const Key&, const Value&, ...)
  { BOOST_ASSERT(false); }

  template<typename PropertyMap, typename Key, typename Value>
  inline void
  maybe_put(PropertyMap pm, const Key& key, const Value& value)
  {
    maybe_put_impl(pm, key, value,
                   typename property_traits<PropertyMap>::category());
  }
} // end namespace detail

/** The consistency model used by the distributed property map. */
enum consistency_model {
  cm_forward = 1 << 0,
  cm_backward = 1 << 1,
  cm_bidirectional = cm_forward | cm_backward,
  cm_flush = 1 << 2,
  cm_reset = 1 << 3,
  cm_clear = 1 << 4
};

/** Distributed property map adaptor.
 *
 * The distributed property map adaptor is a property map whose
 * stored values are distributed across multiple non-overlapping
 * memory spaces on different processes. Values local to the current
 * process are stored within a local property map and may be
 * immediately accessed via @c get and @c put. Values stored on
 * remote processes may also be accessed via @c get and @c put, but
 * the behavior differs slightly:
 *
 *  - @c put operations update a local ghost cell and send a "put"
 *    message to the process that owns the value.
 *    The owner is free to update its own "official" value or may
 *    ignore the put request.
 *
 *  - @c get operations return the contents of the local ghost
 *    cell. If no ghost cell is available, one is created using the
 *    default value provided by the "reduce" operation. See, e.g.,
 *    @ref basic_reduce and @ref property_reduce.
 *
 * Using distributed property maps requires a bit more care than using
 * local, sequential property maps. While the syntax and semantics are
 * similar, distributed property maps may contain out-of-date
 * information that can only be guaranteed to be synchronized by
 * calling the @ref synchronize function in all processes.
 *
 * To address the issue of out-of-date values, distributed property
 * maps are supplied with a reduction operation. The reduction
 * operation has two roles:
 *
 *   -# When a value is needed for a remote key but no value is
 *      immediately available, the reduction operation provides a
 *      suitable default. For instance, a distributed property map
 *      storing distances may have a reduction operation that returns
 *      an infinite value as the default, whereas a distributed
 *      property map for vertex colors may return white as the
 *      default.
 *
 *   -# When a value is received from a remote process, the process
 *      owning the key associated with that value must determine which
 *      value---the locally stored value, the value received from a
 *      remote process, or some combination of the two---will be
 *      stored as the "official" value in the property map. The
 *      reduction operation transforms the local and remote values
 *      into the "official" value to be stored.
 *
 * (A brief usage sketch accompanies @ref make_distributed_property_map,
 * below.)
 *
 * @tparam ProcessGroup the type of the process group over which the
 * property map is distributed and is also the medium for
 * communication.
 *
 * @tparam GlobalMap a property map that maps each key to a pair
 * consisting of the process that owns the key and the key's local
 * descriptor on that process.
 *
 * @tparam StorageMap the type of the property map that will
 * store values for keys local to this processor. The @c value_type of
 * this property map will become the @c value_type of the distributed
 * property map. The distributed property map models the same property
 * map concepts as the @c StorageMap, with one exception: a
 * distributed property map cannot be an LvaluePropertyMap (because
 * remote values are not addressable), and is therefore limited to
 * ReadWritePropertyMap.
 */
template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
class distributed_property_map
{
 public:
  /// The key type of the property map.
  typedef typename property_traits<GlobalMap>::key_type key_type;

  /// The value type of the property map.
  typedef typename property_traits<StorageMap>::value_type value_type;
  typedef typename property_traits<StorageMap>::reference  reference;
  typedef ProcessGroup                                      process_group_type;

 private:
  typedef distributed_property_map                          self_type;
  typedef typename property_traits<StorageMap>::category    local_category;
  typedef typename property_traits<StorageMap>::key_type    local_key_type;
  typedef typename property_traits<GlobalMap>::value_type   owner_local_pair;
  typedef typename ProcessGroup::process_id_type            process_id_type;

  enum property_map_messages {
    /** A request to store a value in a property map. The message
     * contains a key/value std::pair.
     */
    property_map_put,

    /** A request to retrieve a particular value in a property
     * map. The message contains a key. The owner of that key will
     * reply with a value.
     */
    property_map_get,

    /** A request to update values stored on a remote processor. The
     * message contains a vector of keys for which the source
     * requests updated values. This message will only be transmitted
     * during synchronization.
     */
    property_map_multiget,

    /** A request to store values in a ghost cell. This message
     * contains a vector of key/value pairs corresponding to the
     * sequence of keys sent to the source processor.
     */
    property_map_multiget_reply,

    /** The payload containing a vector of local key-value pairs to be
     * put into the remote property map. A key-value std::pair will be
     * used to store each local key-value pair.
     */
    property_map_multiput
  };

  // Code from Joaquín M López Muñoz to work around unusual implementation of
  // std::pair in VC++ 10:
  template<typename First, typename Second>
  class pair_first_extractor {
    typedef std::pair<First, Second> value_type;

  public:
    typedef First result_type;
    const result_type& operator()(const value_type& x) const {
      return x.first;
    }

    result_type& operator()(value_type& x) const {
      return x.first;
    }
  };

 public:
  /// The type of the ghost cells
  typedef multi_index::multi_index_container<
            std::pair<key_type, value_type>,
            multi_index::indexed_by<
              multi_index::sequenced<>,
              multi_index::hashed_unique<
                pair_first_extractor<key_type, value_type>
              >
            >
          > ghost_cells_type;

  /// Iterator into the ghost cells
  typedef typename ghost_cells_type::iterator iterator;

  /// Key-based index into the ghost cells
  typedef typename ghost_cells_type::template nth_index<1>::type
    ghost_cells_key_index_type;

  /// Iterator into the ghost cells (by key)
  typedef typename ghost_cells_key_index_type::iterator key_iterator;

  /** The property map category. A distributed property map cannot be
   * an Lvalue Property Map, because values on remote processes cannot
   * be addressed.
   */
  typedef typename detail::make_nonlvalue_property_map<
            (is_base_and_derived<lvalue_property_map_tag, local_category>::value
             || is_same<lvalue_property_map_tag, local_category>::value)>
    ::template apply<local_category>::type category;

  /** Default-construct a distributed property map.
   * This function creates an uninitialized property map that must be
   * assigned a valid value before being used. It is only provided
   * here because property maps must be Default Constructible.
   */
  distributed_property_map() {}

  /** Construct a distributed property map. Builds a distributed
   * property map communicating over the given process group and using
   * the given local property map for storage. Since no reduction
   * operation is provided, the default reduction operation @c
   * basic_reduce is used.
   */
  distributed_property_map(const ProcessGroup& pg, const GlobalMap& global,
                           const StorageMap& pm)
    : data(new data_t(pg, global, pm, basic_reduce<value_type>(), false))
  {
    typedef handle_message<basic_reduce<value_type> > Handler;

    data->ghost_cells.reset(new ghost_cells_type());
    Handler handler(data);
    data->process_group.replace_handler(handler, true);
    data->process_group.template get_receiver<Handler>()
      ->setup_triggers(data->process_group);
  }

  /** Construct a distributed property map. Builds a distributed
   * property map communicating over the given process group and using
   * the given local property map for storage. The given @p reduce
   * parameter is used as the reduction operation.
   */
  template<typename Reduce>
  distributed_property_map(const ProcessGroup& pg, const GlobalMap& global,
                           const StorageMap& pm,
                           const Reduce& reduce);

  ~distributed_property_map();

  /// Set the reduce operation of the distributed property map.
  template<typename Reduce>
  void set_reduce(const Reduce& reduce);

  // Set the consistency model for the distributed property map
  void set_consistency_model(int model);

  // Get the consistency model
  int get_consistency_model() const { return data->model; }

  // Set the maximum number of ghost cells that we are allowed to
  // maintain. If 0, all ghost cells will be retained.
  void set_max_ghost_cells(std::size_t max_ghost_cells);

  // Clear out all ghost cells
  void clear();

  // Reset the values in all ghost cells to the default value
  void reset();

  // Flush all values destined for remote processors
  void flush();

  reference operator[](const key_type& key) const
  {
    owner_local_pair p = get(data->global, key);

    if (p.first == process_id(data->process_group)) {
      return data->storage[p.second];
    } else {
      return cell(key);
    }
  }

  process_group_type process_group() const
  {
    return data->process_group.base();
  }

  StorageMap&       base()       { return data->storage; }
  const StorageMap& base() const { return data->storage; }

  /** Sends a "put" request.
   * \internal
   */
  void
  request_put(process_id_type p, const key_type& k, const value_type& v) const
  {
    send(data->process_group, p, property_map_put,
         boost::parallel::detail::make_untracked_pair(k, v));
  }

  /** Access the ghost cell for the given key.
   * \internal
   */
  value_type& cell(const key_type& k, bool request_if_missing = true) const;

  /** Perform synchronization
   * \internal
   */
  void do_synchronize();

  const GlobalMap& global() const { return data->global; }
  GlobalMap&       global()       { return data->global; }

  struct data_t
  {
    data_t(const ProcessGroup& pg, const GlobalMap& global,
           const StorageMap& pm, const function1<value_type, key_type>& dv,
           bool has_default_resolver)
      : process_group(pg), global(global), storage(pm),
        ghost_cells(), max_ghost_cells(1000000), get_default_value(dv),
        has_default_resolver(has_default_resolver), model(cm_forward) { }

    /// The process group
    ProcessGroup process_group;

    /// A mapping from the keys of this property map to the global
    /// descriptor.
    GlobalMap global;

    /// Local property map
    StorageMap storage;

    /// The ghost cells
    shared_ptr<ghost_cells_type> ghost_cells;

    /// The maximum number of ghost cells we are permitted to hold. If
    /// zero, we are permitted to have an infinite number of ghost
    /// cells.
    std::size_t max_ghost_cells;

    /// Default value for remote ghost cells, as defined by the
    /// reduction operation.
    function1<value_type, key_type> get_default_value;

    /// True if this resolver is the "default" resolver, meaning that
    /// we should not be able to get() a default value; it needs to be
    /// request()ed first.
    bool has_default_resolver;

    // Current consistency model
    int model;

    // Function that resets all of the ghost cells to their default
    // values. It knows the type of the resolver, so we can eliminate
    // a large number of calls through function pointers.
    void (data_t::*reset)();

    // Clear out all ghost cells
    void clear();

    // Flush all values destined for remote processors
    void flush();

    // Send out requests to "refresh" the values of ghost cells that
    // we're holding.
    void refresh_ghost_cells();

  private:
    template<typename Resolver> void do_reset();

    friend class distributed_property_map;
  };
  friend struct data_t;

  shared_ptr<data_t> data;

 private:
  // Prunes the least recently used ghost cells until we have @c
  // max_ghost_cells or fewer ghost cells.
  void prune_ghost_cells() const;

  /** Handles incoming messages.
   *
   * This function object is responsible for handling all incoming
   * messages for the distributed property map.
   */
  template<typename Reduce>
  struct handle_message
  {
    explicit handle_message(const shared_ptr<data_t>& data,
                            const Reduce& reduce = Reduce())
      : data_ptr(data), reduce(reduce) { }

    void operator()(process_id_type source, int tag);

    /// Individual message handlers
    void
    handle_put(int source, int tag,
               const boost::parallel::detail::untracked_pair<key_type, value_type>& data,
               trigger_receive_context);

    value_type
    handle_get(int source, int tag, const key_type& data,
               trigger_receive_context);

    void
    handle_multiget(int source, int tag,
                    const std::vector<key_type>& data,
                    trigger_receive_context);

    void
    handle_multiget_reply
      (int source, int tag,
       const std::vector<boost::parallel::detail::untracked_pair<key_type, value_type> >& msg,
       trigger_receive_context);

    void
    handle_multiput
      (int source, int tag,
       const std::vector<std::pair<local_key_type, value_type> >& data,
       trigger_receive_context);

    void setup_triggers(process_group_type& pg);

  private:
    weak_ptr<data_t> data_ptr;
    Reduce reduce;
  };

  /* Sets up the next stage in a multi-stage synchronization, for
     bidirectional consistency. */
  struct on_synchronize
  {
    explicit on_synchronize(const shared_ptr<data_t>& data) : data_ptr(data) { }

    void operator()();

  private:
    weak_ptr<data_t> data_ptr;
  };
};

/* An implementation helper macro for the common case of naming
   distributed property maps with all of the normal template
   parameters. */
#define PBGL_DISTRIB_PMAP                                       \
  distributed_property_map<ProcessGroup, GlobalMap, StorageMap>

/* Request that the value for the given remote key be retrieved in
   the next synchronization round. */
template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
inline void
request(const PBGL_DISTRIB_PMAP& pm,
        typename PBGL_DISTRIB_PMAP::key_type const& key)
{
  if (get(pm.data->global, key).first != process_id(pm.data->process_group))
    pm.cell(key, false);
}

/** Get the value associated with a particular key. Retrieves the
 * value associated with the given key. If the key denotes a
 * locally-owned object, it returns the value from the local property
 * map; if the key denotes a remotely-owned object, retrieves the
 * value of the ghost cell for that key, which may be the default
 * value provided by the reduce operation.
 *
 * Complexity: For a local key, O(1) get operations on the underlying
 * property map. For a non-local key, O(1) accesses to the ghost cells.
 */
template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
inline
typename PBGL_DISTRIB_PMAP::value_type
get(const PBGL_DISTRIB_PMAP& pm,
    typename PBGL_DISTRIB_PMAP::key_type const& key)
{
  using boost::get;

  typename property_traits<GlobalMap>::value_type p =
    get(pm.data->global, key);

  if (p.first == process_id(pm.data->process_group)) {
    return get(pm.data->storage, p.second);
  } else {
    return pm.cell(key);
  }
}

/** Put a value associated with the given key into the property map.
 * When the key denotes a locally-owned object, this operation updates
 * the underlying local property map. Otherwise, the local ghost cell
 * is updated and a "put" message is sent to the processor owning this
 * key.
 *
 * Complexity: For a local key, O(1) put operations on the underlying
 * property map. For a nonlocal key, O(1) accesses to the ghost cells,
 * plus O(1) messages of size O(sizeof(key) + sizeof(value)).
 */
template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
void
put(const PBGL_DISTRIB_PMAP& pm,
    typename PBGL_DISTRIB_PMAP::key_type const & key,
    typename PBGL_DISTRIB_PMAP::value_type const & value)
{
  using boost::put;

  typename property_traits<GlobalMap>::value_type p =
    get(pm.data->global, key);

  if (p.first == process_id(pm.data->process_group)) {
    put(pm.data->storage, p.second, value);
  } else {
    if (pm.data->model & cm_forward)
      pm.request_put(p.first, key, value);

    pm.cell(key, false) = value;
  }
}

/** Put a value associated with a given key into the local view of the
 * property map. This operation is equivalent to @c put, but with one
 * exception: no message will be sent to the owning processor in the
 * case of a remote update. The effect is that any value written via
 * @c local_put for a remote key may be overwritten in the next
 * synchronization round.
 */
template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
void
local_put(const PBGL_DISTRIB_PMAP& pm,
          typename PBGL_DISTRIB_PMAP::key_type const & key,
          typename PBGL_DISTRIB_PMAP::value_type const & value)
{
  using boost::put;

  typename property_traits<GlobalMap>::value_type p =
    get(pm.data->global, key);

  if (p.first == process_id(pm.data->process_group))
    put(pm.data->storage, p.second, value);
  else pm.cell(key, false) = value;
}

/** Cache the value associated with the given remote key. If the key
 *  is local, ignore the operation. */
template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
inline void
cache(const PBGL_DISTRIB_PMAP& pm,
      typename PBGL_DISTRIB_PMAP::key_type const & key,
      typename PBGL_DISTRIB_PMAP::value_type const & value)
{
  typename ProcessGroup::process_id_type id = get(pm.data->global, key).first;

  if (id != process_id(pm.data->process_group)) pm.cell(key, false) = value;
}

/// Synchronize the property map.
template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
void
synchronize(PBGL_DISTRIB_PMAP& pm)
{
  pm.do_synchronize();
}

/// Create a distributed property map.
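///
/// A minimal usage sketch (illustrative only: @c my_process_group,
/// @c global_index, @c local_storage, @c k, and @c v are hypothetical
/// stand-ins for a process group, a GlobalMap that maps each key to an
/// (owner, local key) pair, a local StorageMap, a key, and a value):
///
/// @code
/// using boost::parallel::make_distributed_property_map;
///
/// // Build the distributed map; the default basic_reduce resolver is used.
/// boost::parallel::distributed_property_map<ProcessGroup, GlobalMap, StorageMap>
///   pm = make_distributed_property_map(my_process_group, global_index,
///                                      local_storage);
///
/// put(pm, k, v);   // local key: writes storage; remote key: writes the ghost
///                  // cell and, under cm_forward, sends a "put" message
/// get(pm, k);      // remote reads come from the (possibly default) ghost cell
///
/// pm.set_consistency_model(boost::parallel::cm_bidirectional);
/// synchronize(pm); // must be called in all processes to exchange updates
/// @endcode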
template<typename ProcessGroup, typename GlobalMap, typename StorageMap>
inline distributed_property_map<ProcessGroup, GlobalMap, StorageMap>
make_distributed_property_map(const ProcessGroup& pg, GlobalMap global,
                              StorageMap storage)
{
  typedef distributed_property_map<ProcessGroup, GlobalMap, StorageMap>
    result_type;
  return result_type(pg, global, storage);
}

/**
 * \overload
 */
template<typename ProcessGroup, typename GlobalMap, typename StorageMap,
         typename Reduce>
inline distributed_property_map<ProcessGroup, GlobalMap, StorageMap>
make_distributed_property_map(const ProcessGroup& pg, GlobalMap global,
                              StorageMap storage, Reduce reduce)
{
  typedef distributed_property_map<ProcessGroup, GlobalMap, StorageMap>
    result_type;
  return result_type(pg, global, storage, reduce);
}

} } // end namespace boost::parallel

#include <boost/property_map/parallel/impl/distributed_property_map.ipp>

#undef PBGL_DISTRIB_PMAP

#endif // BOOST_PARALLEL_DISTRIBUTED_PROPERTY_MAP_HPP