// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef GOOGLE_PROTOBUF_STUBS_PORT_H_
#define GOOGLE_PROTOBUF_STUBS_PORT_H_

#include <assert.h>
#include <cstdint>
#include <stdlib.h>
#include <cstddef>
#include <string>
#include <string.h>

#include <google/protobuf/stubs/platform_macros.h>

#include <google/protobuf/port_def.inc>

#undef PROTOBUF_LITTLE_ENDIAN
#ifdef _WIN32
// Assuming windows is always little-endian.
// TODO(xiaofeng): PROTOBUF_LITTLE_ENDIAN is not only used for
// optimization but also for correctness. We should define a different
// macro to test the big-endian code path in coded_stream.
#if !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
#define PROTOBUF_LITTLE_ENDIAN 1
#endif
#if defined(_MSC_VER) && _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)
// If MSVC has "/RTCc" set, it will complain about truncating casts at
// runtime.  This file contains some intentional truncating casts.
#pragma runtime_checks("c", off)
#endif
#else
#ifdef __APPLE__
#include <machine/endian.h>  // __BYTE_ORDER
#elif defined(__FreeBSD__)
#include <sys/endian.h>  // __BYTE_ORDER
#elif (defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__))
#include <sys/isa_defs.h>  // __BYTE_ORDER
#elif defined(_AIX) || defined(__TOS_AIX__)
#include <sys/machine.h>  // BYTE_ORDER
#else
#if !defined(__QNX__)
#include <endian.h>  // __BYTE_ORDER
#endif
#endif
#if ((defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)) ||   \
     (defined(__BYTE_ORDER) && __BYTE_ORDER == __LITTLE_ENDIAN) || \
     (defined(BYTE_ORDER) && BYTE_ORDER == LITTLE_ENDIAN)) &&      \
    !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
#define PROTOBUF_LITTLE_ENDIAN 1
#endif
#endif
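
// Illustrative note (not part of the original header): because the block above
// either defines PROTOBUF_LITTLE_ENDIAN as 1 or leaves it undefined, callers
// test it with #ifdef rather than #if.  A minimal sketch of the intended usage
// pattern (the variable names here are hypothetical):
//
//   #ifdef PROTOBUF_LITTLE_ENDIAN
//     // Host byte order already matches the wire format; copy bytes directly.
//     memcpy(out, &value, sizeof(value));
//   #else
//     // Big-endian host: emit the bytes explicitly, least-significant first.
//     for (size_t i = 0; i < sizeof(value); ++i) out[i] = (value >> (8 * i)) & 0xFF;
//   #endif
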
// These #includes are for the byte swap functions declared later on.
#ifdef _MSC_VER
#include <stdlib.h>  // NOLINT(build/include)
#include <intrin.h>
#elif defined(__APPLE__)
#include <libkern/OSByteOrder.h>
#elif defined(__linux__) || defined(__ANDROID__) || defined(__CYGWIN__)
#include <byteswap.h>  // IWYU pragma: export
#endif

// Legacy: some users reference these (internal-only) macros even though we
// don't need them any more.
#if defined(_MSC_VER) && defined(PROTOBUF_USE_DLLS)
#ifdef LIBPROTOBUF_EXPORTS
#define LIBPROTOBUF_EXPORT __declspec(dllexport)
#else
#define LIBPROTOBUF_EXPORT __declspec(dllimport)
#endif
#ifdef LIBPROTOC_EXPORTS
#define LIBPROTOC_EXPORT __declspec(dllexport)
#else
#define LIBPROTOC_EXPORT __declspec(dllimport)
#endif
#else
#define LIBPROTOBUF_EXPORT
#define LIBPROTOC_EXPORT
#endif

#define PROTOBUF_RUNTIME_DEPRECATED(message) PROTOBUF_DEPRECATED_MSG(message)
#define GOOGLE_PROTOBUF_RUNTIME_DEPRECATED(message) \
  PROTOBUF_DEPRECATED_MSG(message)

// ===================================================================
// from google3/base/port.h

#if (defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L || \
     (defined(_MSC_VER) && _MSC_VER >= 1900))
// Define this to 1 if the code is compiled in C++11 mode; leave it
// undefined otherwise.  Do NOT define it to 0 -- that causes
// '#ifdef LANG_CXX11' to behave differently from '#if LANG_CXX11'.
#define LANG_CXX11 1
#else
#error "Protobuf requires at least C++11."
#endif

namespace google {
namespace protobuf {

using ConstStringParam = const std::string &;

typedef unsigned int uint;

typedef int8_t int8;
typedef int16_t int16;
typedef int32_t int32;
typedef int64_t int64;

typedef uint8_t uint8;
typedef uint16_t uint16;
typedef uint32_t uint32;
typedef uint64_t uint64;

static const int32 kint32max = 0x7FFFFFFF;
static const int32 kint32min = -kint32max - 1;
static const int64 kint64max = int64_t{0x7FFFFFFFFFFFFFFF};
static const int64 kint64min = -kint64max - 1;
static const uint32 kuint32max = 0xFFFFFFFFu;
static const uint64 kuint64max = uint64_t{0xFFFFFFFFFFFFFFFFu};

#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || \
    defined(MEMORY_SANITIZER)

#ifdef __cplusplus
extern "C" {
#endif  // __cplusplus
uint16_t __sanitizer_unaligned_load16(const void *p);
uint32_t __sanitizer_unaligned_load32(const void *p);
uint64_t __sanitizer_unaligned_load64(const void *p);
void __sanitizer_unaligned_store16(void *p, uint16_t v);
void __sanitizer_unaligned_store32(void *p, uint32_t v);
void __sanitizer_unaligned_store64(void *p, uint64_t v);
#ifdef __cplusplus
}  // extern "C"
#endif  // __cplusplus

inline uint16 GOOGLE_UNALIGNED_LOAD16(const void *p) {
  return __sanitizer_unaligned_load16(p);
}

inline uint32 GOOGLE_UNALIGNED_LOAD32(const void *p) {
  return __sanitizer_unaligned_load32(p);
}

inline uint64 GOOGLE_UNALIGNED_LOAD64(const void *p) {
  return __sanitizer_unaligned_load64(p);
}

inline void GOOGLE_UNALIGNED_STORE16(void *p, uint16 v) {
  __sanitizer_unaligned_store16(p, v);
}

inline void GOOGLE_UNALIGNED_STORE32(void *p, uint32 v) {
  __sanitizer_unaligned_store32(p, v);
}

inline void GOOGLE_UNALIGNED_STORE64(void *p, uint64 v) {
  __sanitizer_unaligned_store64(p, v);
}

#elif defined(GOOGLE_PROTOBUF_USE_UNALIGNED) && GOOGLE_PROTOBUF_USE_UNALIGNED

#define GOOGLE_UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16 *>(_p))
#define GOOGLE_UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32 *>(_p))
#define GOOGLE_UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64 *>(_p))

#define GOOGLE_UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16 *>(_p) = (_val))
#define GOOGLE_UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32 *>(_p) = (_val))
#define GOOGLE_UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64 *>(_p) = (_val))

#else
inline uint16 GOOGLE_UNALIGNED_LOAD16(const void *p) {
  uint16 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline uint32 GOOGLE_UNALIGNED_LOAD32(const void *p) {
  uint32 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline uint64 GOOGLE_UNALIGNED_LOAD64(const void *p) {
  uint64 t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline void GOOGLE_UNALIGNED_STORE16(void *p, uint16 v) {
  memcpy(p, &v, sizeof v);
}

inline void GOOGLE_UNALIGNED_STORE32(void *p, uint32 v) {
  memcpy(p, &v, sizeof v);
}

inline void GOOGLE_UNALIGNED_STORE64(void *p, uint64 v) {
  memcpy(p, &v, sizeof v);
}
#endif
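
// Illustrative example (not part of the original header): all three branches
// above expose the same interface, and the memcpy fallback is well defined for
// any pointer alignment.  A minimal sketch, assuming a little-endian host:
//
//   unsigned char buf[8] = {0x78, 0x56, 0x34, 0x12};
//   uint32 v = GOOGLE_UNALIGNED_LOAD32(buf);   // v == 0x12345678 on little-endian hosts
//   GOOGLE_UNALIGNED_STORE32(buf + 1, v);      // an unaligned destination is fine
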
#if defined(GOOGLE_PROTOBUF_OS_NACL) \
    || (defined(__ANDROID__) && defined(__clang__) \
        && (__clang_major__ == 3 && __clang_minor__ == 8) \
        && (__clang_patchlevel__ < 275480))
# define GOOGLE_PROTOBUF_USE_PORTABLE_LOG2
#endif

// The following guarantees declaration of the byte swap functions.
#ifdef _MSC_VER
#define bswap_16(x) _byteswap_ushort(x)
#define bswap_32(x) _byteswap_ulong(x)
#define bswap_64(x) _byteswap_uint64(x)

#elif defined(__APPLE__)
// Mac OS X / Darwin features
#define bswap_16(x) OSSwapInt16(x)
#define bswap_32(x) OSSwapInt32(x)
#define bswap_64(x) OSSwapInt64(x)

#elif !defined(__linux__) && !defined(__ANDROID__) && !defined(__CYGWIN__)

#ifndef bswap_16
static inline uint16 bswap_16(uint16 x) {
  return static_cast<uint16>(((x & 0xFF) << 8) | ((x & 0xFF00) >> 8));
}
#define bswap_16(x) bswap_16(x)
#endif

#ifndef bswap_32
static inline uint32 bswap_32(uint32 x) {
  return (((x & 0xFF) << 24) |
          ((x & 0xFF00) << 8) |
          ((x & 0xFF0000) >> 8) |
          ((x & 0xFF000000) >> 24));
}
#define bswap_32(x) bswap_32(x)
#endif

#ifndef bswap_64
static inline uint64 bswap_64(uint64 x) {
  return (((x & uint64_t{0xFFu}) << 56) | ((x & uint64_t{0xFF00u}) << 40) |
          ((x & uint64_t{0xFF0000u}) << 24) |
          ((x & uint64_t{0xFF000000u}) << 8) |
          ((x & uint64_t{0xFF00000000u}) >> 8) |
          ((x & uint64_t{0xFF0000000000u}) >> 24) |
          ((x & uint64_t{0xFF000000000000u}) >> 40) |
          ((x & uint64_t{0xFF00000000000000u}) >> 56));
}
#define bswap_64(x) bswap_64(x)
#endif

#endif
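
// Illustrative example (not part of the original header): whichever branch
// above supplied the definitions, the byte swap functions simply reverse the
// byte order of their argument, e.g.
//
//   bswap_16(0x1234)             == 0x3412
//   bswap_32(0x12345678)         == 0x78563412
//   bswap_64(0x0102030405060708) == 0x0807060504030201
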
// ===================================================================
// from google3/util/bits/bits.h

class Bits {
 public:
  static uint32 Log2FloorNonZero(uint32 n) {
#if defined(__GNUC__)
    return 31 ^ static_cast<uint32>(__builtin_clz(n));
#elif defined(_MSC_VER)
    unsigned long where;
    _BitScanReverse(&where, n);
    return where;
#else
    return Log2FloorNonZero_Portable(n);
#endif
  }

  static uint32 Log2FloorNonZero64(uint64 n) {
    // Older versions of clang run into an instruction-selection failure when
    // they encounter __builtin_clzll:
    //   https://bugs.chromium.org/p/nativeclient/issues/detail?id=4395
    // This includes arm-nacl-clang and clang in older Android NDK versions.
    // To work around this, when we build with those we use the portable
    // implementation instead.
#if defined(__GNUC__) && !defined(GOOGLE_PROTOBUF_USE_PORTABLE_LOG2)
    return 63 ^ static_cast<uint32>(__builtin_clzll(n));
#elif defined(_MSC_VER) && defined(_M_X64)
    unsigned long where;
    _BitScanReverse64(&where, n);
    return where;
#else
    return Log2FloorNonZero64_Portable(n);
#endif
  }

 private:
  static int Log2FloorNonZero_Portable(uint32 n) {
    if (n == 0)
      return -1;
    int log = 0;
    uint32 value = n;
    for (int i = 4; i >= 0; --i) {
      int shift = (1 << i);
      uint32 x = value >> shift;
      if (x != 0) {
        value = x;
        log += shift;
      }
    }
    assert(value == 1);
    return log;
  }

  static int Log2FloorNonZero64_Portable(uint64 n) {
    const uint32 topbits = static_cast<uint32>(n >> 32);
    if (topbits == 0) {
      // Top bits are zero, so scan in bottom bits
      return static_cast<int>(Log2FloorNonZero(static_cast<uint32>(n)));
    } else {
      return 32 + static_cast<int>(Log2FloorNonZero(topbits));
    }
  }
};
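
// Illustrative examples (not part of the original header): Log2FloorNonZero
// returns the index of the highest set bit, i.e. floor(log2(n)) for n != 0:
//
//   Bits::Log2FloorNonZero(1)           == 0
//   Bits::Log2FloorNonZero(16)          == 4
//   Bits::Log2FloorNonZero(0x80000000u) == 31
//
// The "31 ^ __builtin_clz(n)" form equals "31 - __builtin_clz(n)" because
// __builtin_clz(n) <= 31 whenever n != 0.  Passing 0 is not supported by the
// intrinsic branches; only the private portable fallback returns -1 for it.
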
// ===================================================================
// from google3/util/endian/endian.h

PROTOBUF_EXPORT uint32 ghtonl(uint32 x);

class BigEndian {
 public:
#ifdef PROTOBUF_LITTLE_ENDIAN

  static uint16 FromHost16(uint16 x) { return bswap_16(x); }
  static uint16 ToHost16(uint16 x) { return bswap_16(x); }

  static uint32 FromHost32(uint32 x) { return bswap_32(x); }
  static uint32 ToHost32(uint32 x) { return bswap_32(x); }

  static uint64 FromHost64(uint64 x) { return bswap_64(x); }
  static uint64 ToHost64(uint64 x) { return bswap_64(x); }

  static bool IsLittleEndian() { return true; }

#else

  static uint16 FromHost16(uint16 x) { return x; }
  static uint16 ToHost16(uint16 x) { return x; }

  static uint32 FromHost32(uint32 x) { return x; }
  static uint32 ToHost32(uint32 x) { return x; }

  static uint64 FromHost64(uint64 x) { return x; }
  static uint64 ToHost64(uint64 x) { return x; }

  static bool IsLittleEndian() { return false; }

#endif /* ENDIAN */

  // Functions to do unaligned loads and stores in big-endian order.
  static uint16 Load16(const void *p) {
    return ToHost16(GOOGLE_UNALIGNED_LOAD16(p));
  }

  static void Store16(void *p, uint16 v) {
    GOOGLE_UNALIGNED_STORE16(p, FromHost16(v));
  }

  static uint32 Load32(const void *p) {
    return ToHost32(GOOGLE_UNALIGNED_LOAD32(p));
  }

  static void Store32(void *p, uint32 v) {
    GOOGLE_UNALIGNED_STORE32(p, FromHost32(v));
  }

  static uint64 Load64(const void *p) {
    return ToHost64(GOOGLE_UNALIGNED_LOAD64(p));
  }

  static void Store64(void *p, uint64 v) {
    GOOGLE_UNALIGNED_STORE64(p, FromHost64(v));
  }
};
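
// Illustrative sketch (not part of the original header): BigEndian::Store32
// and Load32 give a portable big-endian (network-order) framing regardless of
// host endianness, e.g. for a hypothetical length-prefixed buffer:
//
//   char buf[4];
//   BigEndian::Store32(buf, 0x01020304u);  // buf holds {0x01, 0x02, 0x03, 0x04}
//   uint32 len = BigEndian::Load32(buf);   // len == 0x01020304 on any host
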
}  // namespace protobuf
}  // namespace google

#include <google/protobuf/port_undef.inc>

#endif  // GOOGLE_PROTOBUF_STUBS_PORT_H_