path: root/src/libdw/memory-access.h
blob: a53f79127a941a757c314ba639cffeff47dffafe
/* Unaligned memory access functionality.
   Copyright (C) 2000-2014 Red Hat, Inc.
   This file is part of elfutils.
   Written by Ulrich Drepper <drepper@redhat.com>, 2001.

   This file is free software; you can redistribute it and/or modify
   it under the terms of either

     * the GNU Lesser General Public License as published by the Free
       Software Foundation; either version 3 of the License, or (at
       your option) any later version

   or

     * the GNU General Public License as published by the Free
       Software Foundation; either version 2 of the License, or (at
       your option) any later version

   or both in parallel, as here.

   elfutils is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received copies of the GNU General Public License and
   the GNU Lesser General Public License along with this program.  If
   not, see <http://www.gnu.org/licenses/>.  */

#ifndef _MEMORY_ACCESS_H
#define _MEMORY_ACCESS_H 1

#include <assert.h>
#include <byteswap.h>
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>


/* Number decoding macros.  See DWARF section 7.6, Variable Length Data.  */

#define len_leb128(var) ((8 * sizeof (var) + 6) / 7)

static inline size_t
__libdw_max_len_leb128 (const unsigned char *addr, const unsigned char *end)
{
  const size_t type_len = len_leb128 (uint64_t);
  const size_t pointer_len = likely (addr < end) ? end - addr : 0;
  return likely (type_len <= pointer_len) ? type_len : pointer_len;
}
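
/* Illustrative note (worked example, not from the original header):
   for a 64-bit result type, len_leb128 (uint64_t) == (8 * 8 + 6) / 7
   == 10, i.e. at most ten 7-bit groups are ever needed.
   __libdw_max_len_leb128 clamps that bound to the bytes actually left
   in [ADDR, END), so the decoders below never read past END:

     const unsigned char *addr = buf;          // hypothetical buffer
     const unsigned char *end = buf + size;
     size_t max = __libdw_max_len_leb128 (addr, end);
     // max == 10 when at least 10 bytes remain, otherwise end - addr.  */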

#define get_uleb128_step(var, addr, nth)				      \
  do {									      \
    unsigned char __b = *(addr)++;					      \
    (var) |= (typeof (var)) (__b & 0x7f) << ((nth) * 7);		      \
    if (likely ((__b & 0x80) == 0))					      \
      return (var);							      \
  } while (0)

static inline uint64_t
__libdw_get_uleb128 (const unsigned char **addrp, const unsigned char *end)
{
  uint64_t acc = 0;

  /* Unroll the first step to help the compiler optimize
     for the common single-byte case.  */
  get_uleb128_step (acc, *addrp, 0);

  const size_t max = __libdw_max_len_leb128 (*addrp - 1, end);
  for (size_t i = 1; i < max; ++i)
    get_uleb128_step (acc, *addrp, i);
  /* The encoding did not terminate within the allowed length (it is
     either longer than a uint64_t can hold or runs past END).  Other
     implementations return UINT_MAX in this case, so we do the same
     and return UINT64_MAX.  */
  return UINT64_MAX;
}

/* Note: ADDR must be strictly smaller than END, i.e. at least one byte
   must be readable.  */
#define get_uleb128(var, addr, end) ((var) = __libdw_get_uleb128 (&(addr), end))
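
/* Usage sketch (BUF and SIZE are hypothetical, not part of this header):

     const unsigned char *p = buf;
     const unsigned char *end = buf + size;
     uint64_t value;
     get_uleb128 (value, p, end);   // decodes one ULEB128 and advances
                                    // P past the bytes it consumed.

   The caller must ensure P < END before the call; on an overlong or
   unterminated encoding the result is UINT64_MAX and P is left after
   the bytes that were examined.  */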

/* The signed case is similar, but we sign-extend the result.  */

#define get_sleb128_step(var, addr, nth)				      \
  do {									      \
    unsigned char __b = *(addr)++;					      \
    if (likely ((__b & 0x80) == 0))					      \
      {									      \
	struct { signed int i:7; } __s = { .i = __b };			      \
	(var) |= (typeof (var)) __s.i * ((typeof (var)) 1 << ((nth) * 7));    \
	return (var);							      \
      }									      \
    (var) |= (typeof (var)) (__b & 0x7f) << ((nth) * 7);		      \
  } while (0)
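
/* Sign-extension note (illustrative): the final byte, with its
   continuation bit clear, is pushed through a 7-bit signed bitfield,
   so e.g. the single byte 0x7f decodes to -1 and 0x3f decodes to +63.
   Multiplying by ((typeof (var)) 1 << (nth * 7)) instead of shifting
   keeps the possibly negative value out of a left shift, which would
   be undefined behaviour in C.  */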

static inline int64_t
__libdw_get_sleb128 (const unsigned char **addrp, const unsigned char *end)
{
  int64_t acc = 0;

  /* Unroll the first step to help the compiler optimize
     for the common single-byte case.  */
  get_sleb128_step (acc, *addrp, 0);

  const size_t max = __libdw_max_len_leb128 (*addrp - 1, end);
  for (size_t i = 1; i < max; ++i)
    get_sleb128_step (acc, *addrp, i);
  /* The encoding did not terminate within the allowed length.  Other
     implementations return INT_MAX in this case, so we do the same
     and return INT64_MAX.  */
  return INT64_MAX;
}

#define get_sleb128(var, addr, end) ((var) = __libdw_get_sleb128 (&(addr), end))
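
/* Usage sketch (names are hypothetical):

     const unsigned char *p = buf;
     int64_t sval;
     get_sleb128 (sval, p, buf + size);   // advances P as above

   The same caveats as get_uleb128 apply: at least one byte must be
   readable, and overlong input yields INT64_MAX.  */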


/* We use plain (possibly unaligned) loads when the hardware allows it.
   The caller has to make sure there are no aliasing problems.  */
#if ALLOW_UNALIGNED

# define read_2ubyte_unaligned(Dbg, Addr) \
  (unlikely ((Dbg)->other_byte_order)					      \
   ? bswap_16 (*((const uint16_t *) (Addr)))				      \
   : *((const uint16_t *) (Addr)))
# define read_2sbyte_unaligned(Dbg, Addr) \
  (unlikely ((Dbg)->other_byte_order)					      \
   ? (int16_t) bswap_16 (*((const int16_t *) (Addr)))			      \
   : *((const int16_t *) (Addr)))

# define read_4ubyte_unaligned_noncvt(Addr) \
   *((const uint32_t *) (Addr))
# define read_4ubyte_unaligned(Dbg, Addr) \
  (unlikely ((Dbg)->other_byte_order)					      \
   ? bswap_32 (*((const uint32_t *) (Addr)))				      \
   : *((const uint32_t *) (Addr)))
# define read_4sbyte_unaligned(Dbg, Addr) \
  (unlikely ((Dbg)->other_byte_order)					      \
   ? (int32_t) bswap_32 (*((const int32_t *) (Addr)))			      \
   : *((const int32_t *) (Addr)))

# define read_8ubyte_unaligned_noncvt(Addr) \
   *((const uint64_t *) (Addr))
# define read_8ubyte_unaligned(Dbg, Addr) \
  (unlikely ((Dbg)->other_byte_order)					      \
   ? bswap_64 (*((const uint64_t *) (Addr)))				      \
   : *((const uint64_t *) (Addr)))
# define read_8sbyte_unaligned(Dbg, Addr) \
  (unlikely ((Dbg)->other_byte_order)					      \
   ? (int64_t) bswap_64 (*((const int64_t *) (Addr)))			      \
   : *((const int64_t *) (Addr)))

#else

union unaligned
  {
    void *p;
    uint16_t u2;
    uint32_t u4;
    uint64_t u8;
    int16_t s2;
    int32_t s4;
    int64_t s8;
  } __attribute__ ((packed));
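
/* The packed attribute lowers the alignment requirement of every
   member to 1, so reads through this union are valid for arbitrarily
   aligned pointers; the compiler typically emits byte-wise loads on
   targets that trap on misaligned access and ordinary loads where the
   hardware tolerates it.  */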

# define read_2ubyte_unaligned(Dbg, Addr) \
  read_2ubyte_unaligned_1 ((Dbg)->other_byte_order, (Addr))
# define read_2sbyte_unaligned(Dbg, Addr) \
  read_2sbyte_unaligned_1 ((Dbg)->other_byte_order, (Addr))
# define read_4ubyte_unaligned(Dbg, Addr) \
  read_4ubyte_unaligned_1 ((Dbg)->other_byte_order, (Addr))
# define read_4sbyte_unaligned(Dbg, Addr) \
  read_4sbyte_unaligned_1 ((Dbg)->other_byte_order, (Addr))
# define read_8ubyte_unaligned(Dbg, Addr) \
  read_8ubyte_unaligned_1 ((Dbg)->other_byte_order, (Addr))
# define read_8sbyte_unaligned(Dbg, Addr) \
  read_8sbyte_unaligned_1 ((Dbg)->other_byte_order, (Addr))

static inline uint16_t
read_2ubyte_unaligned_1 (bool other_byte_order, const void *p)
{
  const union unaligned *up = p;
  if (unlikely (other_byte_order))
    return bswap_16 (up->u2);
  return up->u2;
}
static inline int16_t
read_2sbyte_unaligned_1 (bool other_byte_order, const void *p)
{
  const union unaligned *up = p;
  if (unlikely (other_byte_order))
    return (int16_t) bswap_16 (up->u2);
  return up->s2;
}

static inline uint32_t
read_4ubyte_unaligned_noncvt (const void *p)
{
  const union unaligned *up = p;
  return up->u4;
}
static inline uint32_t
read_4ubyte_unaligned_1 (bool other_byte_order, const void *p)
{
  const union unaligned *up = p;
  if (unlikely (other_byte_order))
    return bswap_32 (up->u4);
  return up->u4;
}
static inline int32_t
read_4sbyte_unaligned_1 (bool other_byte_order, const void *p)
{
  const union unaligned *up = p;
  if (unlikely (other_byte_order))
    return (int32_t) bswap_32 (up->u4);
  return up->s4;
}

static inline uint64_t
read_8ubyte_unaligned_noncvt (const void *p)
{
  const union unaligned *up = p;
  return up->u8;
}
static inline uint64_t
read_8ubyte_unaligned_1 (bool other_byte_order, const void *p)
{
  const union unaligned *up = p;
  if (unlikely (other_byte_order))
    return bswap_64 (up->u8);
  return up->u8;
}
static inline int64_t
read_8sbyte_unaligned_1 (bool other_byte_order, const void *p)
{
  const union unaligned *up = p;
  if (unlikely (other_byte_order))
    return (int64_t) bswap_64 (up->u8);
  return up->s8;
}

#endif	/* allow unaligned */


#define read_2ubyte_unaligned_inc(Dbg, Addr) \
  ({ uint16_t t_ = read_2ubyte_unaligned (Dbg, Addr);			      \
     Addr = (__typeof (Addr)) (((uintptr_t) (Addr)) + 2);		      \
     t_; })
#define read_2sbyte_unaligned_inc(Dbg, Addr) \
  ({ int16_t t_ = read_2sbyte_unaligned (Dbg, Addr);			      \
     Addr = (__typeof (Addr)) (((uintptr_t) (Addr)) + 2);		      \
     t_; })

#define read_4ubyte_unaligned_inc(Dbg, Addr) \
  ({ uint32_t t_ = read_4ubyte_unaligned (Dbg, Addr);			      \
     Addr = (__typeof (Addr)) (((uintptr_t) (Addr)) + 4);		      \
     t_; })
#define read_4sbyte_unaligned_inc(Dbg, Addr) \
  ({ int32_t t_ = read_4sbyte_unaligned (Dbg, Addr);			      \
     Addr = (__typeof (Addr)) (((uintptr_t) (Addr)) + 4);		      \
     t_; })

#define read_8ubyte_unaligned_inc(Dbg, Addr) \
  ({ uint64_t t_ = read_8ubyte_unaligned (Dbg, Addr);			      \
     Addr = (__typeof (Addr)) (((uintptr_t) (Addr)) + 8);		      \
     t_; })
#define read_8sbyte_unaligned_inc(Dbg, Addr) \
  ({ int64_t t_ = read_8sbyte_unaligned (Dbg, Addr);			      \
     Addr = (__typeof (Addr)) (((uintptr_t) (Addr)) + 8);		      \
     t_; })


#define read_addr_unaligned_inc(Nbytes, Dbg, Addr)			\
  (assert ((Nbytes) == 4 || (Nbytes) == 8),				\
    ((Nbytes) == 4 ? read_4ubyte_unaligned_inc (Dbg, Addr)		\
     : read_8ubyte_unaligned_inc (Dbg, Addr)))
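
/* Usage sketch (DBG and READP are hypothetical locals): reading the
   initial length of a DWARF unit header and detecting the 64-bit
   format escape value defined by the DWARF standard:

     const unsigned char *readp = data;
     uint64_t length = read_4ubyte_unaligned_inc (dbg, readp);
     if (length == 0xffffffffu)     // DWARF 64-bit format escape
       length = read_8ubyte_unaligned_inc (dbg, readp);

   READP advances by 4 or 12 bytes in total, and the value is
   byte-swapped when DBG->other_byte_order is set.  */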

#endif	/* memory-access.h */