comparison DEPENDENCIES/generic/include/boost/interprocess/mapped_region.hpp @ 16:2665513ce2d3

Add boost headers
author Chris Cannam
date Tue, 05 Aug 2014 11:11:38 +0100
parents
children c530137014c0
1 //////////////////////////////////////////////////////////////////////////////
2 //
3 // (C) Copyright Ion Gaztanaga 2005-2012. Distributed under the Boost
4 // Software License, Version 1.0. (See accompanying file
5 // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
6 //
7 // See http://www.boost.org/libs/interprocess for documentation.
8 //
9 //////////////////////////////////////////////////////////////////////////////
10
11 #ifndef BOOST_INTERPROCESS_MAPPED_REGION_HPP
12 #define BOOST_INTERPROCESS_MAPPED_REGION_HPP
13
14 #include <boost/interprocess/detail/config_begin.hpp>
15 #include <boost/interprocess/detail/workaround.hpp>
16
17 #include <boost/interprocess/interprocess_fwd.hpp>
18 #include <boost/interprocess/exceptions.hpp>
19 #include <boost/move/move.hpp>
20 #include <boost/interprocess/detail/utilities.hpp>
21 #include <boost/interprocess/detail/os_file_functions.hpp>
22 #include <string>
23 #include <boost/cstdint.hpp>
24 #include <boost/assert.hpp>
25 //Some Unixes use caddr_t instead of void * in madvise
26 // SunOS Tru64 HP-UX AIX
27 #if defined(sun) || defined(__sun) || defined(__osf__) || defined(__osf) || defined(_hpux) || defined(hpux) || defined(_AIX)
28 #define BOOST_INTERPROCESS_MADVISE_USES_CADDR_T
29 #include <sys/types.h>
30 #endif
31
32 //Many UNIXes have destructive semantics for MADV_DONTNEED, so
33 //we have to be careful about when we allow it.
34 #if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__) || defined(__APPLE__)
35 #define BOOST_INTERPROCESS_MADV_DONTNEED_HAS_NONDESTRUCTIVE_SEMANTICS
36 #endif
37
38 #if defined (BOOST_INTERPROCESS_WINDOWS)
39 # include <boost/interprocess/detail/win32_api.hpp>
40 # include <boost/interprocess/sync/windows/sync_utils.hpp>
41 #else
42 # ifdef BOOST_HAS_UNISTD_H
43 # include <fcntl.h>
44 # include <sys/mman.h> //mmap
45 # include <unistd.h>
46 # include <sys/stat.h>
47 # include <sys/types.h>
48 # if defined(BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS)
49 # include <sys/shm.h> //System V shared memory...
50 # endif
51 # include <boost/assert.hpp>
52 # else
53 # error Unknown platform
54 # endif
55
56 #endif //#if (defined BOOST_INTERPROCESS_WINDOWS)
57
58 //!\file
59 //!Describes mapped region class
60
61 namespace boost {
62 namespace interprocess {
63
64 /// @cond
65
66 //Solaris declares madvise only in some configurations but defines the MADV_XXX constants,
67 //which is a bit confusing. Predeclare it here to avoid any compilation error
68 #if (defined(sun) || defined(__sun)) && defined(MADV_NORMAL)
69 extern "C" int madvise(caddr_t, size_t, int);
70 #endif
71
72 namespace ipcdetail{ class interprocess_tester; }
73 namespace ipcdetail{ class raw_mapped_region_creator; }
74
75 /// @endcond
76
77 //!The mapped_region class represents a portion or region created from a
78 //!memory_mappable object.
79 //!
80 //!The OS can map a region bigger than the requested one, as the region must
81 //!be a multiple of the page size, but mapped_region will always refer to
82 //!the region specified by the user.
83 class mapped_region
84 {
85 /// @cond
86 //Non-copyable
87 BOOST_MOVABLE_BUT_NOT_COPYABLE(mapped_region)
88 /// @endcond
89
90 public:
91
92 //!Creates a mapping region of the mapped memory "mapping", starting at
93 //!offset "offset", and the mapping's size will be "size". The mapping
94 //!can be opened for read-only, read-write or copy-on-write access.
95 //!
96 //!If an address is specified, both the offset and the address must be
97 //!multiples of the page size.
98 //!
99 //!The map is created using "default_map_options". This flag is OS
100 //!dependent and it should not be changed unless the user needs to
101 //!specify special options.
102 //!
103 //!In Windows systems "map_options" is a DWORD value passed as
104 //!"dwDesiredAccess" to "MapViewOfFileEx". If "default_map_options" is passed
105 //!it's initialized to zero. "map_options" is ORed with FILE_MAP_[COPY|READ|WRITE].
106 //!
107 //!In UNIX systems and POSIX mappings "map_options" is an int value passed as "flags"
108 //!to "mmap". If "default_map_options" is specified it's initialized to MAP_NOSYNC
109 //!if that option exists and to zero otherwise. "map_options" is ORed with MAP_PRIVATE or MAP_SHARED.
110 //!
111 //!In UNIX systems and XSI mappings "map_options" is an int value passed as "shmflg"
112 //!to "shmat". If "default_map_options" is specified it's initialized to zero.
113 //!"map_options" is ORed with SHM_RDONLY if needed.
114 //!
115 //!The OS could allocate more pages than size/page_size(), but get_address()
116 //!will always return the address passed to this function (if not null) and
117 //!get_size() will return the specified size.
118 template<class MemoryMappable>
119 mapped_region(const MemoryMappable& mapping
120 ,mode_t mode
121 ,offset_t offset = 0
122 ,std::size_t size = 0
123 ,const void *address = 0
124 ,map_options_t map_options = default_map_options);
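//A minimal usage sketch of this constructor, assuming Boost.Interprocess is
//available: it creates a shared_memory_object, sizes it and maps the whole
//object read-write. The name "MySharedMemory" and the 1000-byte size are
//placeholders; error handling and removal of pre-existing objects are omitted.
//
//   #include <boost/interprocess/shared_memory_object.hpp>
//   #include <boost/interprocess/mapped_region.hpp>
//   #include <cstring>
//
//   int main()
//   {
//      using namespace boost::interprocess;
//      shared_memory_object shm(create_only, "MySharedMemory", read_write);
//      shm.truncate(1000);                      //give the object a size
//      mapped_region region(shm, read_write);   //map the whole object
//      std::memset(region.get_address(), 0, region.get_size());
//      shared_memory_object::remove("MySharedMemory");
//      return 0;
//   }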
125
126 //!Default constructor. Address will be 0 (nullptr).
127 //!Size will be 0.
128 //!Does not throw
129 mapped_region();
130
131 //!Move constructor. *this will be constructed taking ownership of "other"'s
132 //!region and "other" will be left in a default-constructed state.
133 mapped_region(BOOST_RV_REF(mapped_region) other)
134 #if defined (BOOST_INTERPROCESS_WINDOWS)
135 : m_base(0), m_size(0)
136 , m_page_offset(0)
137 , m_mode(read_only)
138 , m_file_or_mapping_hnd(ipcdetail::invalid_file())
139 #else
140 : m_base(0), m_size(0), m_page_offset(0), m_mode(read_only), m_is_xsi(false)
141 #endif
142 { this->swap(other); }
143
144 //!Destroys the mapped region.
145 //!Does not throw
146 ~mapped_region();
147
148 //!Move assignment. If *this owns a memory mapped region, it will be
149 //!destroyed and it will take ownership of "other"'s memory mapped region.
150 mapped_region &operator=(BOOST_RV_REF(mapped_region) other)
151 {
152 mapped_region tmp(boost::move(other));
153 this->swap(tmp);
154 return *this;
155 }
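//A brief sketch of the move operations declared above, assuming a previously
//created shared_memory_object "shm": ownership of the OS mapping is transferred
//and the moved-from object is left in a default-constructed state.
//
//   mapped_region r1(shm, read_write);
//   mapped_region r2(boost::move(r1));   //r1 now behaves as default-constructed
//   r1 = boost::move(r2);                //ownership moves back to r1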
156
157 //!Swaps the mapped_region with another
158 //!mapped region
159 void swap(mapped_region &other);
160
161 //!Returns the size of the mapping. Never throws.
162 std::size_t get_size() const;
163
164 //!Returns the base address of the mapping.
165 //!Never throws.
166 void* get_address() const;
167
168 //!Returns the mode of the mapping used to construct the mapped region.
169 //!Never throws.
170 mode_t get_mode() const;
171
172 //!Flushes to the disk a byte range within the mapped memory.
173 //!If 'async' is true, the function will return before the flushing operation is completed.
174 //!If 'async' is false, the function will return once data has been written into the underlying
175 //!device (i.e., in mapped files OS cached information is written to disk).
176 //!Never throws. Returns false if the operation could not be performed.
177 bool flush(std::size_t mapping_offset = 0, std::size_t numbytes = 0, bool async = true);
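//Usage sketch, assuming an already constructed read-write "region": the default
//call flushes asynchronously, while passing async == false waits until the data
//has reached the underlying device.
//
//   region.flush();              //flush the whole region, return immediately
//   region.flush(0, 0, false);   //flush the whole region, wait for completion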
178
179 //!Shrinks the current mapped region. If after shrinking a previously mapped memory page is
180 //!no longer needed, accessing that page can trigger a segmentation fault.
181 //!Depending on the OS, this operation might fail (XSI shared memory), it can decommit storage
182 //!and free a portion of the virtual address space (e.g. POSIX) or this
183 //!function can release some physical memory without freeing any virtual address space (Windows).
184 //!Returns true on success. Never throws.
185 bool shrink_by(std::size_t bytes, bool from_back = true);
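//Usage sketch, assuming an already constructed "region" larger than 4096 bytes
//(the 4096 figure is an arbitrary example): drop everything after the first
//4096 bytes. On POSIX the tail pages are unmapped; on Windows only physical
//memory may be released.
//
//   if(region.get_size() > 4096){
//      region.shrink_by(region.get_size() - 4096, true);
//   }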
186
187 //!This enum specifies region usage behaviors that an application can specify
188 //!to the mapped region implementation.
189 enum advice_types{
190 //!Specifies that the application has no advice to give on its behavior with respect to
191 //!the region. It is the default characteristic if no advice is given for a range of memory.
192 advice_normal,
193 //!Specifies that the application expects to access the region sequentially from
194 //!lower addresses to higher addresses. The implementation can lower the priority of
195 //!preceding pages within the region once a page has been accessed.
196 advice_sequential,
197 //!Specifies that the application expects to access the region in a random order,
198 //!and prefetching is likely not advantageous.
199 advice_random,
200 //!Specifies that the application expects to access the region in the near future.
201 //!The implementation can prefetch pages of the region.
202 advice_willneed,
203 //!Specifies that the application expects that it will not access the region in the near future.
204 //!The implementation can unload pages within the range to save system resources.
205 advice_dontneed
206 };
207
208 //!Advises the implementation on the expected behavior of the application with respect to the data
209 //!in the region. The implementation may use this information to optimize handling of the region data.
210 //!This function has no effect on the semantics of access to memory in the region, although it may affect
211 //!the performance of access.
212 //!If the advice type is not known to the implementation, the function returns false; true otherwise.
213 bool advise(advice_types advise);
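//Usage sketch, assuming an already constructed "region": hint that the region
//will be read sequentially. The return value only reports whether the hint was
//understood by the platform, not whether it had any effect.
//
//   if(!region.advise(mapped_region::advice_sequential)){
//      //No madvise/posix_madvise equivalent is available for this hint
//   }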
214
215 //!Returns the size of the page. This size is the minimum amount of memory that
216 //!the system will use when mapping a memory mappable source and it
217 //!restricts the valid values for the mapping address and offset.
218 static std::size_t get_page_size();
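//Usage sketch: a caller that wants to pass an explicit mapping address can round
//an arbitrary file offset down to a page boundary first ("raw_offset" is a
//placeholder for a user-supplied value).
//
//   const offset_t page_size = static_cast<offset_t>(mapped_region::get_page_size());
//   const offset_t aligned_offset = (raw_offset / page_size) * page_size;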
219
220 /// @cond
221 private:
222 //!Closes a previously opened memory mapping. Never throws
223 void priv_close();
224
225 void* priv_map_address() const;
226 std::size_t priv_map_size() const;
227 bool priv_flush_param_check(std::size_t mapping_offset, void *&addr, std::size_t &numbytes) const;
228 bool priv_shrink_param_check(std::size_t bytes, bool from_back, void *&shrink_page_start, std::size_t &shrink_page_bytes);
229 static void priv_size_from_mapping_size
230 (offset_t mapping_size, offset_t offset, offset_t page_offset, std::size_t &size);
231 static offset_t priv_page_offset_addr_fixup(offset_t page_offset, const void *&addr);
232
233 template<int dummy>
234 struct page_size_holder
235 {
236 static const std::size_t PageSize;
237 static std::size_t get_page_size();
238 };
239
240 void* m_base;
241 std::size_t m_size;
242 std::size_t m_page_offset;
243 mode_t m_mode;
244 #if defined(BOOST_INTERPROCESS_WINDOWS)
245 file_handle_t m_file_or_mapping_hnd;
246 #else
247 bool m_is_xsi;
248 #endif
249
250 friend class ipcdetail::interprocess_tester;
251 friend class ipcdetail::raw_mapped_region_creator;
252 void dont_close_on_destruction();
253 #if defined(BOOST_INTERPROCESS_WINDOWS) && !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
254 template<int Dummy>
255 static void destroy_syncs_in_range(const void *addr, std::size_t size);
256 #endif
257 /// @endcond
258 };
259
260 ///@cond
261
262 inline void swap(mapped_region &x, mapped_region &y)
263 { x.swap(y); }
264
265 inline mapped_region::~mapped_region()
266 { this->priv_close(); }
267
268 inline std::size_t mapped_region::get_size() const
269 { return m_size; }
270
271 inline mode_t mapped_region::get_mode() const
272 { return m_mode; }
273
274 inline void* mapped_region::get_address() const
275 { return m_base; }
276
277 inline void* mapped_region::priv_map_address() const
278 { return static_cast<char*>(m_base) - m_page_offset; }
279
280 inline std::size_t mapped_region::priv_map_size() const
281 { return m_size + m_page_offset; }
282
283 inline bool mapped_region::priv_flush_param_check
284 (std::size_t mapping_offset, void *&addr, std::size_t &numbytes) const
285 {
286 //Check some errors
287 if(m_base == 0)
288 return false;
289
290 if(mapping_offset >= m_size || (mapping_offset + numbytes) > m_size){
291 return false;
292 }
293
294 //Update flush size if the user does not provide it
295 if(numbytes == 0){
296 numbytes = m_size - mapping_offset;
297 }
298 addr = (char*)this->priv_map_address() + mapping_offset;
299 numbytes += m_page_offset;
300 return true;
301 }
302
303 inline bool mapped_region::priv_shrink_param_check
304 (std::size_t bytes, bool from_back, void *&shrink_page_start, std::size_t &shrink_page_bytes)
305 {
306 //Check some errors
307 if(m_base == 0 || bytes > m_size){
308 return false;
309 }
310 else if(bytes == m_size){
311 this->priv_close();
312 return true;
313 }
314 else{
315 const std::size_t page_size = mapped_region::get_page_size();
316 if(from_back){
317 const std::size_t new_pages = (m_size + m_page_offset - bytes - 1)/page_size + 1;
318 shrink_page_start = static_cast<char*>(this->priv_map_address()) + new_pages*page_size;
319 shrink_page_bytes = m_page_offset + m_size - new_pages*page_size;
320 m_size -= bytes;
321 }
322 else{
323 shrink_page_start = this->priv_map_address();
324 m_page_offset += bytes;
325 shrink_page_bytes = (m_page_offset/page_size)*page_size;
326 m_page_offset = m_page_offset % page_size;
327 m_size -= bytes;
328 m_base = static_cast<char *>(m_base) + bytes;
329 BOOST_ASSERT(shrink_page_bytes%page_size == 0);
330 }
331 return true;
332 }
333 }
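//Worked example of the shrink arithmetic above, assuming a 4096-byte page size,
//m_page_offset == 0, m_size == 10000 and a from_back shrink of 3000 bytes:
//new_pages == (10000 - 3000 - 1)/4096 + 1 == 2, so the range to release starts
//8192 bytes into the mapping and spans 10000 - 8192 == 1808 bytes, while m_size
//becomes 7000 (the two remaining pages still cover every surviving byte).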
334
335 inline void mapped_region::priv_size_from_mapping_size
336 (offset_t mapping_size, offset_t offset, offset_t page_offset, std::size_t &size)
337 {
338 //Check if the mapping size fits in the user address space,
339 //as offset_t represents the maximum file size and it is signed.
340 if(mapping_size < offset ||
341 boost::uintmax_t(mapping_size - (offset - page_offset)) >
342 boost::uintmax_t(std::size_t(-1))){
343 error_info err(size_error);
344 throw interprocess_exception(err);
345 }
346 size = static_cast<std::size_t>(mapping_size - (offset - page_offset));
347 }
348
349 inline offset_t mapped_region::priv_page_offset_addr_fixup(offset_t offset, const void *&address)
350 {
351 //We can't map at an arbitrary offset, so we have to obtain the system's
352 //memory granularity
353 const std::size_t page_size = mapped_region::get_page_size();
354
355 //We calculate the difference between the demanded and the valid offset
356 //(always less than a page, and thus representable by std::size_t)
357 const std::size_t page_offset =
358 static_cast<std::size_t>(offset - (offset / page_size) * page_size);
359 //Update the mapping address
360 if(address){
361 address = static_cast<const char*>(address) - page_offset;
362 }
363 return page_offset;
364 }
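//Worked example of the fixup above, assuming a 4096-byte page size and a
//requested offset of 10000: page_offset == 10000 - 2*4096 == 1808, the actual
//mapping starts at file offset 8192, and a user-supplied address is moved back
//by 1808 bytes so that the returned base address still corresponds to file
//offset 10000.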
365
366 #if defined (BOOST_INTERPROCESS_WINDOWS)
367
368 inline mapped_region::mapped_region()
369 : m_base(0), m_size(0), m_page_offset(0), m_mode(read_only)
370 , m_file_or_mapping_hnd(ipcdetail::invalid_file())
371 {}
372
373 template<int dummy>
374 inline std::size_t mapped_region::page_size_holder<dummy>::get_page_size()
375 {
376 winapi::system_info info;
377 get_system_info(&info);
378 return std::size_t(info.dwAllocationGranularity);
379 }
380
381 template<class MemoryMappable>
382 inline mapped_region::mapped_region
383 (const MemoryMappable &mapping
384 ,mode_t mode
385 ,offset_t offset
386 ,std::size_t size
387 ,const void *address
388 ,map_options_t map_options)
389 : m_base(0), m_size(0), m_page_offset(0), m_mode(mode)
390 , m_file_or_mapping_hnd(ipcdetail::invalid_file())
391 {
392 mapping_handle_t mhandle = mapping.get_mapping_handle();
393 {
394 file_handle_t native_mapping_handle = 0;
395
396 //Set accesses
397 //For "create_file_mapping"
398 unsigned long protection = 0;
399 //For "mapviewoffile"
400 unsigned long map_access = map_options == default_map_options ? 0 : map_options;
401
402 switch(mode)
403 {
404 case read_only:
405 case read_private:
406 protection |= winapi::page_readonly;
407 map_access |= winapi::file_map_read;
408 break;
409 case read_write:
410 protection |= winapi::page_readwrite;
411 map_access |= winapi::file_map_write;
412 break;
413 case copy_on_write:
414 protection |= winapi::page_writecopy;
415 map_access |= winapi::file_map_copy;
416 break;
417 default:
418 {
419 error_info err(mode_error);
420 throw interprocess_exception(err);
421 }
422 break;
423 }
424
425 //For file mappings (including emulated shared memory through temporary files),
426 //the device is a file handle, so we need to obtain the file's size and call
427 //create_file_mapping to obtain the mapping handle.
428 //For files we don't need the mapping handle after mapping the memory, as the file
429 //stays open, so we schedule the mapping handle to be closed.
430 void * handle_to_close = winapi::invalid_handle_value;
431 if(!mhandle.is_shm){
432 //Create mapping handle
433 native_mapping_handle = winapi::create_file_mapping
434 ( ipcdetail::file_handle_from_mapping_handle(mapping.get_mapping_handle())
435 , protection, 0, 0, 0);
436
437 //Check if all is correct
438 if(!native_mapping_handle){
439 error_info err = winapi::get_last_error();
440 throw interprocess_exception(err);
441 }
442 handle_to_close = native_mapping_handle;
443 }
444 else{
445 //For windows_shared_memory the device handle is already a mapping handle
446 //and we need to maintain it
447 native_mapping_handle = mhandle.handle;
448 }
449 //RAII handle close on scope exit
450 const winapi::handle_closer close_handle(handle_to_close);
451 (void)close_handle;
452
453 const offset_t page_offset = priv_page_offset_addr_fixup(offset, address);
454
455 //Obtain mapping size if user provides 0 size
456 if(size == 0){
457 offset_t mapping_size;
458 if(!winapi::get_file_mapping_size(native_mapping_handle, mapping_size)){
459 error_info err = winapi::get_last_error();
460 throw interprocess_exception(err);
461 }
462 //This can throw
463 priv_size_from_mapping_size(mapping_size, offset, page_offset, size);
464 }
465
466 //Map with new offsets and size
467 void *base = winapi::map_view_of_file_ex
468 (native_mapping_handle,
469 map_access,
470 offset - page_offset,
471 static_cast<std::size_t>(page_offset + size),
472 const_cast<void*>(address));
473 //Check error
474 if(!base){
475 error_info err = winapi::get_last_error();
476 throw interprocess_exception(err);
477 }
478
479 //Calculate new base for the user
480 m_base = static_cast<char*>(base) + page_offset;
481 m_page_offset = page_offset;
482 m_size = size;
483 }
484 //Windows shared memory needs the handle to be duplicated if we want to
485 //make mapped_region independent from the mappable device
486 //
487 //For mapped files, we duplicate the file handle to be able to call FlushFileBuffers
488 if(!winapi::duplicate_current_process_handle(mhandle.handle, &m_file_or_mapping_hnd)){
489 error_info err = winapi::get_last_error();
490 this->priv_close();
491 throw interprocess_exception(err);
492 }
493 }
494
495 inline bool mapped_region::flush(std::size_t mapping_offset, std::size_t numbytes, bool async)
496 {
497 void *addr;
498 if(!this->priv_flush_param_check(mapping_offset, addr, numbytes)){
499 return false;
500 }
501 //Flush it all
502 if(!winapi::flush_view_of_file(addr, numbytes)){
503 return false;
504 }
505 //m_file_or_mapping_hnd can be a file handle or a mapping handle,
506 //so flushing file buffers only makes sense for files...
507 else if(!async && m_file_or_mapping_hnd != winapi::invalid_handle_value &&
508 winapi::get_file_type(m_file_or_mapping_hnd) == winapi::file_type_disk){
509 return winapi::flush_file_buffers(m_file_or_mapping_hnd);
510 }
511 return true;
512 }
513
514 inline bool mapped_region::shrink_by(std::size_t bytes, bool from_back)
515 {
516 void *shrink_page_start;
517 std::size_t shrink_page_bytes;
518 if(!this->priv_shrink_param_check(bytes, from_back, shrink_page_start, shrink_page_bytes)){
519 return false;
520 }
521 else if(shrink_page_bytes){
522 //In Windows, we can't decommit the storage or release the virtual address space,
523 //the best we can do is try to remove some memory from the process working set.
524 //With a bit of luck we can free some physical memory.
525 unsigned long old_protect_ignored;
526 bool b_ret = winapi::virtual_unlock(shrink_page_start, shrink_page_bytes)
527 || (winapi::get_last_error() == winapi::error_not_locked);
528 (void)old_protect_ignored;
529 //Change page protection to forbid any further access
530 b_ret = b_ret && winapi::virtual_protect
531 (shrink_page_start, shrink_page_bytes, winapi::page_noaccess, old_protect_ignored);
532 return b_ret;
533 }
534 else{
535 return true;
536 }
537 }
538
539 inline bool mapped_region::advise(advice_types)
540 {
541 //Windows has no madvise/posix_madvise equivalent
542 return false;
543 }
544
545 inline void mapped_region::priv_close()
546 {
547 if(m_base){
548 void *addr = this->priv_map_address();
549 #if !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
550 mapped_region::destroy_syncs_in_range<0>(addr, m_size);
551 #endif
552 winapi::unmap_view_of_file(addr);
553 m_base = 0;
554 }
555 if(m_file_or_mapping_hnd != ipcdetail::invalid_file()){
556 winapi::close_handle(m_file_or_mapping_hnd);
557 m_file_or_mapping_hnd = ipcdetail::invalid_file();
558 }
559 }
560
561 inline void mapped_region::dont_close_on_destruction()
562 {}
563
564 #else //#if (defined BOOST_INTERPROCESS_WINDOWS)
565
566 inline mapped_region::mapped_region()
567 : m_base(0), m_size(0), m_page_offset(0), m_mode(read_only), m_is_xsi(false)
568 {}
569
570 template<int dummy>
571 inline std::size_t mapped_region::page_size_holder<dummy>::get_page_size()
572 { return std::size_t(sysconf(_SC_PAGESIZE)); }
573
574 template<class MemoryMappable>
575 inline mapped_region::mapped_region
576 ( const MemoryMappable &mapping
577 , mode_t mode
578 , offset_t offset
579 , std::size_t size
580 , const void *address
581 , map_options_t map_options)
582 : m_base(0), m_size(0), m_page_offset(0), m_mode(mode), m_is_xsi(false)
583 {
584 mapping_handle_t map_hnd = mapping.get_mapping_handle();
585
586 //Some systems don't support XSI shared memory
587 #ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
588 if(map_hnd.is_xsi){
589 //Get the size
590 ::shmid_ds xsi_ds;
591 int ret = ::shmctl(map_hnd.handle, IPC_STAT, &xsi_ds);
592 if(ret == -1){
593 error_info err(system_error_code());
594 throw interprocess_exception(err);
595 }
596 //Compare sizes
597 if(size == 0){
598 size = (std::size_t)xsi_ds.shm_segsz;
599 }
600 else if(size != (std::size_t)xsi_ds.shm_segsz){
601 error_info err(size_error);
602 throw interprocess_exception(err);
603 }
604 //Calculate flag
605 int flag = map_options == default_map_options ? 0 : map_options;
606 if(m_mode == read_only){
607 flag |= SHM_RDONLY;
608 }
609 else if(m_mode != read_write){
610 error_info err(mode_error);
611 throw interprocess_exception(err);
612 }
613 //Attach memory
614 void *base = ::shmat(map_hnd.handle, (void*)address, flag);
615 if(base == (void*)-1){
616 error_info err(system_error_code());
617 throw interprocess_exception(err);
618 }
619 //Update members
620 m_base = base;
621 m_size = size;
622 m_mode = mode;
623 m_page_offset = 0;
624 m_is_xsi = true;
625 return;
626 }
627 #endif //ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
628
629 //We calculate the difference between demanded and valid offset
630 const offset_t page_offset = priv_page_offset_addr_fixup(offset, address);
631
632 if(size == 0){
633 struct ::stat buf;
634 if(0 != fstat(map_hnd.handle, &buf)){
635 error_info err(system_error_code());
636 throw interprocess_exception(err);
637 }
638 //This can throw
639 priv_size_from_mapping_size(buf.st_size, offset, page_offset, size);
640 }
641
642 #ifdef MAP_NOSYNC
643 #define BOOST_INTERPROCESS_MAP_NOSYNC MAP_NOSYNC
644 #else
645 #define BOOST_INTERPROCESS_MAP_NOSYNC 0
646 #endif //MAP_NOSYNC
647
648 //Create new mapping
649 int prot = 0;
650 int flags = map_options == default_map_options ? BOOST_INTERPROCESS_MAP_NOSYNC : map_options;
651
652 #undef BOOST_INTERPROCESS_MAP_NOSYNC
653
654 switch(mode)
655 {
656 case read_only:
657 prot |= PROT_READ;
658 flags |= MAP_SHARED;
659 break;
660
661 case read_private:
662 prot |= (PROT_READ);
663 flags |= MAP_PRIVATE;
664 break;
665
666 case read_write:
667 prot |= (PROT_WRITE | PROT_READ);
668 flags |= MAP_SHARED;
669 break;
670
671 case copy_on_write:
672 prot |= (PROT_WRITE | PROT_READ);
673 flags |= MAP_PRIVATE;
674 break;
675
676 default:
677 {
678 error_info err(mode_error);
679 throw interprocess_exception(err);
680 }
681 break;
682 }
683
684 //Map it to the address space
685 void* base = mmap ( const_cast<void*>(address)
686 , static_cast<std::size_t>(page_offset + size)
687 , prot
688 , flags
689 , mapping.get_mapping_handle().handle
690 , offset - page_offset);
691
692 //Check if mapping was successful
693 if(base == MAP_FAILED){
694 error_info err = system_error_code();
695 throw interprocess_exception(err);
696 }
697
698 //Calculate new base for the user
699 m_base = static_cast<char*>(base) + page_offset;
700 m_page_offset = page_offset;
701 m_size = size;
702
703 //Check for fixed mapping error
704 if(address && (base != address)){
705 error_info err(busy_error);
706 this->priv_close();
707 throw interprocess_exception(err);
708 }
709 }
710
711 inline bool mapped_region::shrink_by(std::size_t bytes, bool from_back)
712 {
713 void *shrink_page_start = 0;
714 std::size_t shrink_page_bytes = 0;
715 if(m_is_xsi || !this->priv_shrink_param_check(bytes, from_back, shrink_page_start, shrink_page_bytes)){
716 return false;
717 }
718 else if(shrink_page_bytes){
719 //In UNIX we can decommit and free virtual address space.
720 return 0 == munmap(shrink_page_start, shrink_page_bytes);
721 }
722 else{
723 return true;
724 }
725 }
726
727 inline bool mapped_region::flush(std::size_t mapping_offset, std::size_t numbytes, bool async)
728 {
729 void *addr;
730 if(m_is_xsi || !this->priv_flush_param_check(mapping_offset, addr, numbytes)){
731 return false;
732 }
733 //Flush it all
734 return msync(addr, numbytes, async ? MS_ASYNC : MS_SYNC) == 0;
735 }
736
737 inline bool mapped_region::advise(advice_types advice)
738 {
739 int unix_advice = 0;
740 //Modes: 0: none, 1: posix_madvise, 2: madvise
741 const unsigned int mode_none = 0;
742 const unsigned int mode_padv = 1;
743 const unsigned int mode_madv = 2;
744 unsigned int mode = mode_none;
745 //Choose advice either from POSIX (preferred) or native Unix
746 switch(advice){
747 case advice_normal:
748 #if defined(POSIX_MADV_NORMAL)
749 unix_advice = POSIX_MADV_NORMAL;
750 mode = mode_padv;
751 #elif defined(MADV_NORMAL)
752 unix_advice = MADV_NORMAL;
753 mode = mode_madv;
754 #endif
755 break;
756 case advice_sequential:
757 #if defined(POSIX_MADV_SEQUENTIAL)
758 unix_advice = POSIX_MADV_SEQUENTIAL;
759 mode = mode_padv;
760 #elif defined(MADV_SEQUENTIAL)
761 unix_advice = MADV_SEQUENTIAL;
762 mode = mode_madv;
763 #endif
764 break;
765 case advice_random:
766 #if defined(POSIX_MADV_RANDOM)
767 unix_advice = POSIX_MADV_RANDOM;
768 mode = mode_padv;
769 #elif defined(MADV_RANDOM)
770 unix_advice = MADV_RANDOM;
771 mode = mode_madv;
772 #endif
773 break;
774 case advice_willneed:
775 #if defined(POSIX_MADV_WILLNEED)
776 unix_advice = POSIX_MADV_WILLNEED;
777 mode = mode_padv;
778 #elif defined(MADV_WILLNEED)
779 unix_advice = MADV_WILLNEED;
780 mode = mode_madv;
781 #endif
782 break;
783 case advice_dontneed:
784 #if defined(POSIX_MADV_DONTNEED)
785 unix_advice = POSIX_MADV_DONTNEED;
786 mode = mode_padv;
787 #elif defined(MADV_DONTNEED) && defined(BOOST_INTERPROCESS_MADV_DONTNEED_HAS_NONDESTRUCTIVE_SEMANTICS)
788 unix_advice = MADV_DONTNEED;
789 mode = mode_madv;
790 #endif
791 break;
792 default:
793 return false;
794 }
795 switch(mode){
796 #if defined(POSIX_MADV_NORMAL)
797 case mode_padv:
798 return 0 == posix_madvise(this->priv_map_address(), this->priv_map_size(), unix_advice);
799 #endif
800 #if defined(MADV_NORMAL)
801 case mode_madv:
802 return 0 == madvise(
803 #if defined(BOOST_INTERPROCESS_MADVISE_USES_CADDR_T)
804 (caddr_t)
805 #endif
806 this->priv_map_address(), this->priv_map_size(), unix_advice);
807 #endif
808 default:
809 return false;
810
811 }
812 }
813
814 inline void mapped_region::priv_close()
815 {
816 if(m_base != 0){
817 #ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
818 if(m_is_xsi){
819 int ret = ::shmdt(m_base);
820 BOOST_ASSERT(ret == 0);
821 (void)ret;
822 return;
823 }
824 #endif //#ifdef BOOST_INTERPROCESS_XSI_SHARED_MEMORY_OBJECTS
825 munmap(this->priv_map_address(), this->priv_map_size());
826 m_base = 0;
827 }
828 }
829
830 inline void mapped_region::dont_close_on_destruction()
831 { m_base = 0; }
832
833 #endif //#if (defined BOOST_INTERPROCESS_WINDOWS)
834
835 template<int dummy>
836 const std::size_t mapped_region::page_size_holder<dummy>::PageSize
837 = mapped_region::page_size_holder<dummy>::get_page_size();
838
839 inline std::size_t mapped_region::get_page_size()
840 {
841 if(!page_size_holder<0>::PageSize)
842 return page_size_holder<0>::get_page_size();
843 else
844 return page_size_holder<0>::PageSize;
845 }
846
847 inline void mapped_region::swap(mapped_region &other)
848 {
849 ipcdetail::do_swap(this->m_base, other.m_base);
850 ipcdetail::do_swap(this->m_size, other.m_size);
851 ipcdetail::do_swap(this->m_page_offset, other.m_page_offset);
852 ipcdetail::do_swap(this->m_mode, other.m_mode);
853 #if (defined BOOST_INTERPROCESS_WINDOWS)
854 ipcdetail::do_swap(this->m_file_or_mapping_hnd, other.m_file_or_mapping_hnd);
855 #else
856 ipcdetail::do_swap(this->m_is_xsi, other.m_is_xsi);
857 #endif
858 }
859
860 //!No-op functor
861 struct null_mapped_region_function
862 {
863 bool operator()(void *, std::size_t , bool) const
864 { return true; }
865
866 std::size_t get_min_size() const
867 { return 0; }
868 };
869
870 /// @endcond
871
872 } //namespace interprocess {
873 } //namespace boost {
874
875 #include <boost/interprocess/detail/config_end.hpp>
876
877 #endif //BOOST_INTERPROCESS_MAPPED_REGION_HPP
878
879 #if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
880
881 #ifndef BOOST_INTERPROCESS_MAPPED_REGION_EXT_HPP
882 #define BOOST_INTERPROCESS_MAPPED_REGION_EXT_HPP
883
884 #if defined(BOOST_INTERPROCESS_WINDOWS) && !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
885 # include <boost/interprocess/sync/windows/sync_utils.hpp>
886 # include <boost/interprocess/detail/windows_intermodule_singleton.hpp>
887
888 namespace boost {
889 namespace interprocess {
890
891 template<int Dummy>
892 inline void mapped_region::destroy_syncs_in_range(const void *addr, std::size_t size)
893 {
894 ipcdetail::sync_handles &handles =
895 ipcdetail::windows_intermodule_singleton<ipcdetail::sync_handles>::get();
896 handles.destroy_syncs_in_range(addr, size);
897 }
898
899 } //namespace interprocess {
900 } //namespace boost {
901
902 #endif //defined(BOOST_INTERPROCESS_WINDOWS) && !defined(BOOST_INTERPROCESS_FORCE_GENERIC_EMULATION)
903
904 #endif //#ifndef BOOST_INTERPROCESS_MAPPED_REGION_EXT_HPP
905
906 #endif //#if !defined(BOOST_INTERPROCESS_DOXYGEN_INVOKED)
907