/**
 * \file alignment.h
 *
 * \brief Utility code for dealing with unaligned memory accesses
 */
/*
 * Copyright The Mbed TLS Contributors
 * SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
 */

#ifndef MBEDTLS_LIBRARY_ALIGNMENT_H
#define MBEDTLS_LIBRARY_ALIGNMENT_H

#include <stdint.h>
#include <string.h>
#include <stdlib.h>

/*
 * Define MBEDTLS_EFFICIENT_UNALIGNED_ACCESS for architectures where unaligned memory
 * accesses are known to be efficient.
 *
 * All functions defined here will behave correctly regardless, but might be less
 * efficient when this is not defined.
 */
#if defined(__ARM_FEATURE_UNALIGNED) \
    || defined(MBEDTLS_ARCH_IS_X86) || defined(MBEDTLS_ARCH_IS_X64) \
    || defined(MBEDTLS_PLATFORM_IS_WINDOWS_ON_ARM64)
/*
 * __ARM_FEATURE_UNALIGNED is defined where appropriate by armcc, gcc 7, clang 9
 * (and later versions) for Arm v7 and later; all x86 platforms should have
 * efficient unaligned access.
 *
 * https://learn.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions?view=msvc-170#alignment
 * specifies that on Windows-on-Arm64, unaligned access is safe (except for uncached
 * device memory).
 */
#define MBEDTLS_EFFICIENT_UNALIGNED_ACCESS
#endif

#if defined(__IAR_SYSTEMS_ICC__) && \
    (defined(MBEDTLS_ARCH_IS_ARM64) || defined(MBEDTLS_ARCH_IS_ARM32) \
     || defined(__ICCRX__) || defined(__ICCRL78__) || defined(__ICCRISCV__))
#pragma language=save
#pragma language=extended
#define MBEDTLS_POP_IAR_LANGUAGE_PRAGMA
/* IAR recommend this technique for accessing unaligned data in
 * https://www.iar.com/knowledge/support/technical-notes/compiler/accessing-unaligned-data
 * This results in a single load / store instruction (if unaligned access is supported).
 * According to that document, this is only supported on certain architectures.
 */
 #define UINT_UNALIGNED
typedef uint16_t __packed mbedtls_uint16_unaligned_t;
typedef uint32_t __packed mbedtls_uint32_unaligned_t;
typedef uint64_t __packed mbedtls_uint64_unaligned_t;
#elif defined(MBEDTLS_COMPILER_IS_GCC) && (MBEDTLS_GCC_VERSION >= 40504) && \
    ((MBEDTLS_GCC_VERSION < 60300) || (!defined(MBEDTLS_EFFICIENT_UNALIGNED_ACCESS)))
/*
 * gcc may generate a branch to memcpy for calls like `memcpy(dest, src, 4)` rather than
 * generating some LDR or LDRB instructions (similar for stores).
 *
 * This is architecture-dependent: x86-64 seems fine even with old gcc; 32-bit Arm
 * is affected. To keep it simple, we enable this for all architectures.
 *
 * For versions of gcc < 5.4.0 this issue always happens.
 * For gcc < 6.3.0, this issue happens at -O0.
 * For all versions, this issue happens iff unaligned access is not supported.
 *
 * For gcc 4.x, this implementation will generate byte-by-byte loads even if unaligned access is
 * supported, which is correct but not optimal.
 *
 * For performance (and code size, in some cases), we want to avoid the branch and just generate
 * some inline load/store instructions since the access is small and constant-size.
 *
 * The manual states:
 * "The packed attribute specifies that a variable or structure field should have the smallest
 * possible alignment—one byte for a variable"
 * https://gcc.gnu.org/onlinedocs/gcc-4.5.4/gcc/Variable-Attributes.html
 *
 * Previous implementations used __attribute__((__aligned__(1))), but had issues with a gcc bug:
 * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94662
 *
 * Tested with several versions of GCC from 4.5.0 up to 13.2.0.
 * We don't enable this for versions older than 4.5.0, as this has not been tested.
 */
 #define UINT_UNALIGNED_STRUCT
typedef struct {
    uint16_t x;
} __attribute__((packed)) mbedtls_uint16_unaligned_t;
typedef struct {
    uint32_t x;
} __attribute__((packed)) mbedtls_uint32_unaligned_t;
typedef struct {
    uint64_t x;
} __attribute__((packed)) mbedtls_uint64_unaligned_t;
 #endif

/*
 * We try to force mbedtls_(get|put)_unaligned_uintXX to always be inlined, because this results
 * in code that is both smaller and faster. IAR and gcc both benefit from this when optimising
 * for size.
 */

/**
 * Read the unsigned 16 bits integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 2 bytes of data
 * \return Data at the given address
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline uint16_t mbedtls_get_unaligned_uint16(const void *p)
{
    uint16_t r;
#if defined(UINT_UNALIGNED)
    mbedtls_uint16_unaligned_t *p16 = (mbedtls_uint16_unaligned_t *) p;
    r = *p16;
#elif defined(UINT_UNALIGNED_STRUCT)
    mbedtls_uint16_unaligned_t *p16 = (mbedtls_uint16_unaligned_t *) p;
    r = p16->x;
#else
    memcpy(&r, p, sizeof(r));
#endif
    return r;
}

/**
 * Write the unsigned 16 bits integer to the given address, which need not
 * be aligned.
 *
 * \param p pointer to 2 bytes of data
 * \param x data to write
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline void mbedtls_put_unaligned_uint16(void *p, uint16_t x)
{
#if defined(UINT_UNALIGNED)
    mbedtls_uint16_unaligned_t *p16 = (mbedtls_uint16_unaligned_t *) p;
    *p16 = x;
#elif defined(UINT_UNALIGNED_STRUCT)
    mbedtls_uint16_unaligned_t *p16 = (mbedtls_uint16_unaligned_t *) p;
    p16->x = x;
#else
    memcpy(p, &x, sizeof(x));
#endif
}
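
/*
 * Example (illustrative only, not part of the library API): reading a 16-bit
 * value from an odd (unaligned) offset inside a buffer. The buffer contents
 * and variable names are hypothetical; note that the result is in host byte
 * order, so use the MBEDTLS_GET_UINT16_LE/BE macros further below when a
 * defined byte order is needed.
 *
 *     unsigned char buf[3] = { 0xAA, 0x34, 0x12 };
 *     uint16_t v = mbedtls_get_unaligned_uint16(buf + 1); // 0x1234 on little-endian hosts
 *     mbedtls_put_unaligned_uint16(buf + 1, v);           // writes the same bytes back
 */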

/**
 * Read the unsigned 32 bits integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 4 bytes of data
 * \return Data at the given address
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline uint32_t mbedtls_get_unaligned_uint32(const void *p)
{
    uint32_t r;
#if defined(UINT_UNALIGNED)
    mbedtls_uint32_unaligned_t *p32 = (mbedtls_uint32_unaligned_t *) p;
    r = *p32;
#elif defined(UINT_UNALIGNED_STRUCT)
    mbedtls_uint32_unaligned_t *p32 = (mbedtls_uint32_unaligned_t *) p;
    r = p32->x;
#else
    memcpy(&r, p, sizeof(r));
#endif
    return r;
}

/**
 * Write the unsigned 32 bits integer to the given address, which need not
 * be aligned.
 *
 * \param p pointer to 4 bytes of data
 * \param x data to write
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline void mbedtls_put_unaligned_uint32(void *p, uint32_t x)
{
#if defined(UINT_UNALIGNED)
    mbedtls_uint32_unaligned_t *p32 = (mbedtls_uint32_unaligned_t *) p;
    *p32 = x;
#elif defined(UINT_UNALIGNED_STRUCT)
    mbedtls_uint32_unaligned_t *p32 = (mbedtls_uint32_unaligned_t *) p;
    p32->x = x;
#else
    memcpy(p, &x, sizeof(x));
#endif
}

/**
 * Read the unsigned 64 bits integer from the given address, which need not
 * be aligned.
 *
 * \param p pointer to 8 bytes of data
 * \return Data at the given address
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline uint64_t mbedtls_get_unaligned_uint64(const void *p)
{
    uint64_t r;
#if defined(UINT_UNALIGNED)
    mbedtls_uint64_unaligned_t *p64 = (mbedtls_uint64_unaligned_t *) p;
    r = *p64;
#elif defined(UINT_UNALIGNED_STRUCT)
    mbedtls_uint64_unaligned_t *p64 = (mbedtls_uint64_unaligned_t *) p;
    r = p64->x;
#else
    memcpy(&r, p, sizeof(r));
#endif
    return r;
}

/**
 * Write the unsigned 64 bits integer to the given address, which need not
 * be aligned.
 *
 * \param p pointer to 8 bytes of data
 * \param x data to write
 */
#if defined(__IAR_SYSTEMS_ICC__)
#pragma inline = forced
#elif defined(__GNUC__)
__attribute__((always_inline))
#endif
static inline void mbedtls_put_unaligned_uint64(void *p, uint64_t x)
{
#if defined(UINT_UNALIGNED)
    mbedtls_uint64_unaligned_t *p64 = (mbedtls_uint64_unaligned_t *) p;
    *p64 = x;
#elif defined(UINT_UNALIGNED_STRUCT)
    mbedtls_uint64_unaligned_t *p64 = (mbedtls_uint64_unaligned_t *) p;
    p64->x = x;
#else
    memcpy(p, &x, sizeof(x));
#endif
}

#if defined(MBEDTLS_POP_IAR_LANGUAGE_PRAGMA)
#pragma language=restore
#endif

/** Byte Reading Macros
 *
 * Given a multi-byte integer \p x, MBEDTLS_BYTE_n retrieves the n-th
 * byte from x, where byte 0 is the least significant byte.
 */
#define MBEDTLS_BYTE_0(x) ((uint8_t) ((x) & 0xff))
#define MBEDTLS_BYTE_1(x) ((uint8_t) (((x) >> 8) & 0xff))
#define MBEDTLS_BYTE_2(x) ((uint8_t) (((x) >> 16) & 0xff))
#define MBEDTLS_BYTE_3(x) ((uint8_t) (((x) >> 24) & 0xff))
#define MBEDTLS_BYTE_4(x) ((uint8_t) (((x) >> 32) & 0xff))
#define MBEDTLS_BYTE_5(x) ((uint8_t) (((x) >> 40) & 0xff))
#define MBEDTLS_BYTE_6(x) ((uint8_t) (((x) >> 48) & 0xff))
#define MBEDTLS_BYTE_7(x) ((uint8_t) (((x) >> 56) & 0xff))
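
/*
 * Example (illustrative only): extracting individual bytes from a 32-bit
 * value with the macros above. The value shown is hypothetical.
 *
 *     uint32_t v = 0x11223344;
 *     uint8_t b0 = MBEDTLS_BYTE_0(v); // 0x44 (least significant byte)
 *     uint8_t b3 = MBEDTLS_BYTE_3(v); // 0x11 (most significant byte)
 */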

/*
 * Detect GCC built-in byteswap routines
 */
#if defined(__GNUC__) && defined(__GNUC_PREREQ)
#if __GNUC_PREREQ(4, 8)
#define MBEDTLS_BSWAP16 __builtin_bswap16
#endif /* __GNUC_PREREQ(4,8) */
#if __GNUC_PREREQ(4, 3)
#define MBEDTLS_BSWAP32 __builtin_bswap32
#define MBEDTLS_BSWAP64 __builtin_bswap64
#endif /* __GNUC_PREREQ(4,3) */
#endif /* defined(__GNUC__) && defined(__GNUC_PREREQ) */

/*
 * Detect Clang built-in byteswap routines
 */
#if defined(__clang__) && defined(__has_builtin)
#if __has_builtin(__builtin_bswap16) && !defined(MBEDTLS_BSWAP16)
#define MBEDTLS_BSWAP16 __builtin_bswap16
#endif /* __has_builtin(__builtin_bswap16) */
#if __has_builtin(__builtin_bswap32) && !defined(MBEDTLS_BSWAP32)
#define MBEDTLS_BSWAP32 __builtin_bswap32
#endif /* __has_builtin(__builtin_bswap32) */
#if __has_builtin(__builtin_bswap64) && !defined(MBEDTLS_BSWAP64)
#define MBEDTLS_BSWAP64 __builtin_bswap64
#endif /* __has_builtin(__builtin_bswap64) */
#endif /* defined(__clang__) && defined(__has_builtin) */

/*
 * Detect MSVC built-in byteswap routines
 */
#if defined(_MSC_VER)
#if !defined(MBEDTLS_BSWAP16)
#define MBEDTLS_BSWAP16 _byteswap_ushort
#endif
#if !defined(MBEDTLS_BSWAP32)
#define MBEDTLS_BSWAP32 _byteswap_ulong
#endif
#if !defined(MBEDTLS_BSWAP64)
#define MBEDTLS_BSWAP64 _byteswap_uint64
#endif
#endif /* defined(_MSC_VER) */

/* Detect armcc built-in byteswap routine */
#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 410000) && !defined(MBEDTLS_BSWAP32)
#if defined(__ARM_ACLE) /* ARM Compiler 6 - earlier versions don't need a header */
#include <arm_acle.h>
#endif
#define MBEDTLS_BSWAP32 __rev
#endif

/* Detect IAR built-in byteswap routine */
#if defined(__IAR_SYSTEMS_ICC__)
#if defined(__ARM_ACLE)
#include <arm_acle.h>
#define MBEDTLS_BSWAP16(x) ((uint16_t) __rev16((uint32_t) (x)))
#define MBEDTLS_BSWAP32 __rev
#define MBEDTLS_BSWAP64 __revll
#endif
#endif

/*
 * Where compiler built-ins are not present, fall back to C code that the
 * compiler may be able to detect and transform into the relevant bswap or
 * similar instruction.
 */
#if !defined(MBEDTLS_BSWAP16)
static inline uint16_t mbedtls_bswap16(uint16_t x)
{
    return
        (x & 0x00ff) << 8 |
        (x & 0xff00) >> 8;
}
#define MBEDTLS_BSWAP16 mbedtls_bswap16
#endif /* !defined(MBEDTLS_BSWAP16) */

#if !defined(MBEDTLS_BSWAP32)
static inline uint32_t mbedtls_bswap32(uint32_t x)
{
    return
        (x & 0x000000ff) << 24 |
        (x & 0x0000ff00) << 8 |
        (x & 0x00ff0000) >> 8 |
        (x & 0xff000000) >> 24;
}
#define MBEDTLS_BSWAP32 mbedtls_bswap32
#endif /* !defined(MBEDTLS_BSWAP32) */

#if !defined(MBEDTLS_BSWAP64)
static inline uint64_t mbedtls_bswap64(uint64_t x)
{
    return
        (x & 0x00000000000000ffULL) << 56 |
        (x & 0x000000000000ff00ULL) << 40 |
        (x & 0x0000000000ff0000ULL) << 24 |
        (x & 0x00000000ff000000ULL) << 8 |
        (x & 0x000000ff00000000ULL) >> 8 |
        (x & 0x0000ff0000000000ULL) >> 24 |
        (x & 0x00ff000000000000ULL) >> 40 |
        (x & 0xff00000000000000ULL) >> 56;
}
#define MBEDTLS_BSWAP64 mbedtls_bswap64
#endif /* !defined(MBEDTLS_BSWAP64) */
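
/*
 * Example (illustrative only): MBEDTLS_BSWAP32 reverses the byte order of a
 * 32-bit value, whichever implementation (compiler built-in or C fallback)
 * was selected above. The value shown is hypothetical.
 *
 *     uint32_t v = MBEDTLS_BSWAP32(0x12345678); // v == 0x78563412
 */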

#if !defined(__BYTE_ORDER__)

#if defined(__LITTLE_ENDIAN__)
/* IAR defines __xxx_ENDIAN__, but not __BYTE_ORDER__ */
#define MBEDTLS_IS_BIG_ENDIAN 0
#elif defined(__BIG_ENDIAN__)
#define MBEDTLS_IS_BIG_ENDIAN 1
#else
static const uint16_t mbedtls_byte_order_detector = { 0x100 };
#define MBEDTLS_IS_BIG_ENDIAN (*((unsigned char *) (&mbedtls_byte_order_detector)) == 0x01)
#endif

#else

#if (__BYTE_ORDER__) == (__ORDER_BIG_ENDIAN__)
#define MBEDTLS_IS_BIG_ENDIAN 1
#else
#define MBEDTLS_IS_BIG_ENDIAN 0
#endif

#endif /* !defined(__BYTE_ORDER__) */
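
/*
 * Illustrative note: on a little-endian host MBEDTLS_IS_BIG_ENDIAN evaluates
 * to 0, so the *_BE accessors below byte-swap via MBEDTLS_BSWAP* while the
 * *_LE accessors are plain unaligned loads/stores; on a big-endian host the
 * roles are reversed.
 */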

/**
 * Get the unsigned 32 bits integer corresponding to four bytes in
 * big-endian order (MSB first).
 *
 * \param data   Base address of the memory to get the four bytes from.
 * \param offset Offset from \p data of the first and most significant
 *               byte of the four bytes to build the 32 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT32_BE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? mbedtls_get_unaligned_uint32((data) + (offset)) \
        : MBEDTLS_BSWAP32(mbedtls_get_unaligned_uint32((data) + (offset))) \
    )

/**
 * Put in memory a 32 bits unsigned integer in big-endian order.
 *
 * \param n      32 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 32
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the most significant
 *               byte of the 32 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT32_BE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint32((data) + (offset), (uint32_t) (n)); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint32((data) + (offset), MBEDTLS_BSWAP32((uint32_t) (n))); \
        } \
    }
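
/*
 * Example (illustrative only): serialising and parsing a 32-bit length field
 * in network (big-endian) byte order. The buffer and values are hypothetical.
 *
 *     unsigned char buf[4];
 *     MBEDTLS_PUT_UINT32_BE(300, buf, 0);           // buf = { 0x00, 0x00, 0x01, 0x2c }
 *     uint32_t len = MBEDTLS_GET_UINT32_BE(buf, 0); // len == 300
 */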

/**
 * Get the unsigned 32 bits integer corresponding to four bytes in
 * little-endian order (LSB first).
 *
 * \param data   Base address of the memory to get the four bytes from.
 * \param offset Offset from \p data of the first and least significant
 *               byte of the four bytes to build the 32 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT32_LE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? MBEDTLS_BSWAP32(mbedtls_get_unaligned_uint32((data) + (offset))) \
        : mbedtls_get_unaligned_uint32((data) + (offset)) \
    )


/**
 * Put in memory a 32 bits unsigned integer in little-endian order.
 *
 * \param n      32 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 32
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the least significant
 *               byte of the 32 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT32_LE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint32((data) + (offset), MBEDTLS_BSWAP32((uint32_t) (n))); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint32((data) + (offset), ((uint32_t) (n))); \
        } \
    }

/**
 * Get the unsigned 16 bits integer corresponding to two bytes in
 * little-endian order (LSB first).
 *
 * \param data   Base address of the memory to get the two bytes from.
 * \param offset Offset from \p data of the first and least significant
 *               byte of the two bytes to build the 16 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT16_LE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? MBEDTLS_BSWAP16(mbedtls_get_unaligned_uint16((data) + (offset))) \
        : mbedtls_get_unaligned_uint16((data) + (offset)) \
    )

/**
 * Put in memory a 16 bits unsigned integer in little-endian order.
 *
 * \param n      16 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 16
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the least significant
 *               byte of the 16 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT16_LE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint16((data) + (offset), MBEDTLS_BSWAP16((uint16_t) (n))); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint16((data) + (offset), (uint16_t) (n)); \
        } \
    }

/**
 * Get the unsigned 16 bits integer corresponding to two bytes in
 * big-endian order (MSB first).
 *
 * \param data   Base address of the memory to get the two bytes from.
 * \param offset Offset from \p data of the first and most significant
 *               byte of the two bytes to build the 16 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT16_BE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? mbedtls_get_unaligned_uint16((data) + (offset)) \
        : MBEDTLS_BSWAP16(mbedtls_get_unaligned_uint16((data) + (offset))) \
    )

/**
 * Put in memory a 16 bits unsigned integer in big-endian order.
 *
 * \param n      16 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 16
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the most significant
 *               byte of the 16 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT16_BE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint16((data) + (offset), (uint16_t) (n)); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint16((data) + (offset), MBEDTLS_BSWAP16((uint16_t) (n))); \
        } \
    }
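
/*
 * Example (illustrative only): the 16-bit accessors work the same way as the
 * 32-bit ones. The buffer and value are hypothetical.
 *
 *     unsigned char buf[2];
 *     MBEDTLS_PUT_UINT16_BE(0x1234, buf, 0);      // buf = { 0x12, 0x34 }
 *     uint16_t v = MBEDTLS_GET_UINT16_LE(buf, 0); // v == 0x3412 (same bytes read LSB first)
 */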

/**
 * Get the unsigned 24 bits integer corresponding to three bytes in
 * big-endian order (MSB first).
 *
 * \param data   Base address of the memory to get the three bytes from.
 * \param offset Offset from \p data of the first and most significant
 *               byte of the three bytes to build the 24 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT24_BE(data, offset) \
    ( \
        ((uint32_t) (data)[(offset)] << 16) \
        | ((uint32_t) (data)[(offset) + 1] << 8) \
        | ((uint32_t) (data)[(offset) + 2]) \
    )

/**
 * Put in memory a 24 bits unsigned integer in big-endian order.
 *
 * \param n      24 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 24
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the most significant
 *               byte of the 24 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT24_BE(n, data, offset) \
    { \
        (data)[(offset)] = MBEDTLS_BYTE_2(n); \
        (data)[(offset) + 1] = MBEDTLS_BYTE_1(n); \
        (data)[(offset) + 2] = MBEDTLS_BYTE_0(n); \
    }

/**
 * Get the unsigned 24 bits integer corresponding to three bytes in
 * little-endian order (LSB first).
 *
 * \param data   Base address of the memory to get the three bytes from.
 * \param offset Offset from \p data of the first and least significant
 *               byte of the three bytes to build the 24 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT24_LE(data, offset) \
    ( \
        ((uint32_t) (data)[(offset)]) \
        | ((uint32_t) (data)[(offset) + 1] << 8) \
        | ((uint32_t) (data)[(offset) + 2] << 16) \
    )

/**
 * Put in memory a 24 bits unsigned integer in little-endian order.
 *
 * \param n      24 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 24
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the least significant
 *               byte of the 24 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT24_LE(n, data, offset) \
    { \
        (data)[(offset)] = MBEDTLS_BYTE_0(n); \
        (data)[(offset) + 1] = MBEDTLS_BYTE_1(n); \
        (data)[(offset) + 2] = MBEDTLS_BYTE_2(n); \
    }
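
/*
 * Example (illustrative only): the 24-bit accessors operate on three bytes,
 * e.g. for length fields that occupy 3 octets. The buffer and value are
 * hypothetical.
 *
 *     unsigned char buf[3];
 *     MBEDTLS_PUT_UINT24_BE(0x000102, buf, 0);    // buf = { 0x00, 0x01, 0x02 }
 *     uint32_t n = MBEDTLS_GET_UINT24_BE(buf, 0); // n == 0x000102
 */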

/**
 * Get the unsigned 64 bits integer corresponding to eight bytes in
 * big-endian order (MSB first).
 *
 * \param data   Base address of the memory to get the eight bytes from.
 * \param offset Offset from \p data of the first and most significant
 *               byte of the eight bytes to build the 64 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT64_BE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? mbedtls_get_unaligned_uint64((data) + (offset)) \
        : MBEDTLS_BSWAP64(mbedtls_get_unaligned_uint64((data) + (offset))) \
    )

/**
 * Put in memory a 64 bits unsigned integer in big-endian order.
 *
 * \param n      64 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 64
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the most significant
 *               byte of the 64 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT64_BE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint64((data) + (offset), (uint64_t) (n)); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint64((data) + (offset), MBEDTLS_BSWAP64((uint64_t) (n))); \
        } \
    }

/**
 * Get the unsigned 64 bits integer corresponding to eight bytes in
 * little-endian order (LSB first).
 *
 * \param data   Base address of the memory to get the eight bytes from.
 * \param offset Offset from \p data of the first and least significant
 *               byte of the eight bytes to build the 64 bits unsigned
 *               integer from.
 */
#define MBEDTLS_GET_UINT64_LE(data, offset) \
    ((MBEDTLS_IS_BIG_ENDIAN) \
        ? MBEDTLS_BSWAP64(mbedtls_get_unaligned_uint64((data) + (offset))) \
        : mbedtls_get_unaligned_uint64((data) + (offset)) \
    )

/**
 * Put in memory a 64 bits unsigned integer in little-endian order.
 *
 * \param n      64 bits unsigned integer to put in memory.
 * \param data   Base address of the memory where to put the 64
 *               bits unsigned integer in.
 * \param offset Offset from \p data where to put the least significant
 *               byte of the 64 bits unsigned integer \p n.
 */
#define MBEDTLS_PUT_UINT64_LE(n, data, offset) \
    { \
        if (MBEDTLS_IS_BIG_ENDIAN) \
        { \
            mbedtls_put_unaligned_uint64((data) + (offset), MBEDTLS_BSWAP64((uint64_t) (n))); \
        } \
        else \
        { \
            mbedtls_put_unaligned_uint64((data) + (offset), (uint64_t) (n)); \
        } \
    }
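
/*
 * Example (illustrative only): 64-bit accessors, e.g. for an 8-byte
 * big-endian counter. The buffer and value are hypothetical.
 *
 *     unsigned char buf[8];
 *     MBEDTLS_PUT_UINT64_BE(1, buf, 0);                 // buf[7] == 0x01, buf[0..6] == 0x00
 *     uint64_t ctr = MBEDTLS_GET_UINT64_BE(buf, 0) + 1; // ctr == 2
 */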

#endif /* MBEDTLS_LIBRARY_ALIGNMENT_H */