/usr/include/sphde/sasatom.h is in libsphde-dev 1.3.0-1+b1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

/*
 * Copyright (c) 1995-2014 IBM Corporation.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Public License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/legal/epl-v10.html
 * 
 * Contributors:
 *     IBM Corporation, Steven Munroe - initial API and implementation
 */

#ifndef _SASATOMIC_H
#define _SASATOMIC_H

#include <sched.h> // for sched_yield

/*!
 * \file   sasatom.h
 * \brief  Type and functions for SAS atomic operations.
 *
 * This file contains generic SAS atomic functions. Architecture specific code
 * is in its corresponding header ('sasatom_powerpc.h' for PowerPC32 and
 * PowerPC64, 'sasatom_i386.h' for i386, and 'sasatom_x86_64.h' for X86_64).
 * The 'sasatom_generic.h' header is provided as a reference implementation
 * and is not guaranteed to work correctly.
 */

/// @cond HIDE_FROM_DOXYGEN
#define GCC_VERSION \
        (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
/// @endcond

/*! Spinlock type used on sas_spin_xxx functions */
typedef unsigned int  sas_spin_lock_t;
/*! Pointer to sas_spin_lock_t type */
typedef void*         sas_lock_ptr_t;

#if defined(__powerpc64__) || defined (__powerpc__)
#include "sasatom_powerpc.h"
#elif defined(__x86_64__)
#include "sasatom_x86_64.h"
#elif defined(__i386__)
#include "sasatom_i386.h"
#else
#include "sasatom_generic.h"
#endif

/*!
 * Memory barrier for store operations.
 */
#define sas_write_barrier() __arch_sas_write_barrier()

/*!
 * Memory barrier for load operations.
 */
#define sas_read_barrier()  __arch_sas_read_barrier()

/*!
 * Memory barrier for both load/store operations.
 */
#define sas_full_barrier()  __arch_sas_full_barrier()

/*!
 * Memory barrier for compiler code motion.
 */
#define sas_code_barrier()  __asm ("" ::: "memory")
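
/*
 * Editorial sketch (not from the original header): a typical pairing of
 * the write/read barriers above when publishing data to another thread.
 * 'shared_data', 'data_ready', 'compute', and 'use' are hypothetical names.
 *
 *   // producer: fill the payload, then publish the flag
 *   shared_data = compute ();
 *   sas_write_barrier ();   // order the payload store before the flag store
 *   data_ready = 1;
 *
 *   // consumer: observe the flag, then read the payload
 *   while (!data_ready)
 *     ;
 *   sas_read_barrier ();    // order the flag load before the payload load
 *   use (shared_data);
 */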

/*!
 * Atomic fetch and add operation on memory referenced by \a pointer.
 *
 * Performs the atomic operation:
 * { tmp = *pointer; *pointer = tmp + delta; return tmp }
 *
 * Returns the value of \a *pointer before the update.
 */
static inline void *
sas_fetch_and_add_ptr(void **pointer, long int delta)
{
  return __arch_fetch_and_add_ptr(pointer, delta);
}

/*!
 * Atomic fetch and add operation on memory referenced by \a pointer.
 *
 * Performs the atomic operation:
 * { tmp = *pointer; *pointer = tmp + delta; return tmp }
 *
 * Returns the value of \a *pointer before the update.
 */
static inline long int
sas_fetch_and_add(long *pointer, long int delta)
{
  return __arch_fetch_and_add(pointer, delta);
}
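
/*
 * Editorial sketch (not from the original header): because the value
 * before the update is returned, sas_fetch_and_add can hand out unique
 * sequence numbers under contention. 'next_id' and 'take_id' are
 * hypothetical names.
 *
 *   static long next_id;
 *
 *   long take_id (void)
 *   {
 *     return sas_fetch_and_add (&next_id, 1);  // each caller sees a distinct value
 *   }
 */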

/*!
 * Atomic fetch and AND operation on the unsigned int referenced by
 * \a pointer.
 *
 * Performs the atomic operation:
 * { tmp = *pointer; *pointer = tmp & delta; return tmp }
 *
 * Returns the value of \a *pointer before the update.
 */
static inline long int
sas_fetch_and_and(unsigned int *pointer, int delta)
{
#if GCC_VERSION >= 40700
  return __atomic_fetch_and(pointer, delta, __ATOMIC_ACQ_REL);
#else
  return __sync_fetch_and_and(pointer, delta);
#endif
}

/*!
 * Atomic fetch and AND operation on the unsigned long referenced by
 * \a pointer.
 *
 * Performs the atomic operation:
 * { tmp = *pointer; *pointer = tmp & delta; return tmp }
 *
 * Returns the value of \a *pointer before the update.
 */
static inline long int
sas_fetch_and_and_long(unsigned long *pointer, long int delta)
{
#if GCC_VERSION >= 40700
  return __atomic_fetch_and(pointer, delta, __ATOMIC_ACQ_REL);
#else
  return __sync_fetch_and_and(pointer, delta);
#endif
}

/*!
 * Atomic fetch and OR operation on the unsigned int referenced by
 * \a pointer.
 *
 * Performs the atomic operation:
 * { tmp = *pointer; *pointer = tmp | delta; return tmp }
 *
 * Returns the value of \a *pointer before the update.
 */
static inline long int
sas_fetch_and_or(unsigned int *pointer, int delta)
{
#if GCC_VERSION >= 40700
  return __atomic_fetch_or(pointer, delta, __ATOMIC_ACQ_REL);
#else
  return __sync_fetch_and_or(pointer, delta);
#endif
}

/*!
 * Atomic fetch and OR operation on the unsigned long referenced by
 * \a pointer.
 *
 * Performs the atomic operation:
 * { tmp = *pointer; *pointer = tmp | delta; return tmp }
 *
 * Returns the value of \a *pointer before the update.
 */
static inline long int
sas_fetch_and_or_long(unsigned long *pointer, long int delta)
{
#if GCC_VERSION >= 40700
  return __atomic_fetch_or(pointer, delta, __ATOMIC_ACQ_REL);
#else
  return __sync_fetch_and_or(pointer, delta);
#endif
}
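
/*
 * Editorial sketch (not from the original header): the fetch-and-AND /
 * fetch-and-OR pairs are typically used to clear and set bits in a
 * shared mask. 'flags' and 'FLAG_DIRTY' are hypothetical names.
 *
 *   #define FLAG_DIRTY 0x1u
 *   static unsigned int flags;
 *
 *   long before = sas_fetch_and_or (&flags, FLAG_DIRTY);   // set the bit
 *   sas_fetch_and_and (&flags, ~FLAG_DIRTY);               // clear it again
 */
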
/*!
 * Atomic compare and swap operation.
 *
 * Performs the atomic operation:
 * { tmp = *p; if (tmp == oldval) *p = newval; return (tmp == oldval) }
 *
 * Returns nonzero if the swap was performed, 0 otherwise.
 */
static inline int
sas_compare_and_swap (volatile long int *p, long int oldval, long int newval)
{
  return __arch_compare_and_swap(p, oldval, newval);
}
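
/*
 * Editorial sketch (not from the original header): the usual
 * compare-and-swap retry loop, the same pattern the lock-pointer
 * helpers below are built on. 'total' and 'add_sample' are hypothetical
 * names.
 *
 *   static volatile long total;
 *
 *   void add_sample (long v)
 *   {
 *     long oldv, newv;
 *     do {
 *       oldv = total;
 *       newv = oldv + v;
 *     } while (!sas_compare_and_swap (&total, oldv, newv));
 *   }
 */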

/*!
 * Atomic swap operation.
 *
 * Performs the atomic operation:
 * { tmp = *p; *p = replace; return tmp }
 */
static inline long int
sas_atomic_swap (long int *p, long int replace)
{
  return __arch_atomic_swap(p, replace);
}
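
/*
 * Editorial sketch (not from the original header): atomic swap is the
 * test-and-set primitive behind the spin locks below. 'word' is a
 * hypothetical lock word, 0 meaning free and 1 meaning held.
 *
 *   static long word;
 *
 *   void acquire (void)
 *   {
 *     while (sas_atomic_swap (&word, 1) != 0)
 *       ;   // previous value was 1, so another thread still holds it
 *   }
 */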

/*!
 * Atomic increment, performing { (*p)++ }
 */
static inline void
sas_atomic_inc(long int *p)
{
  __arch_atomic_inc(p);
}

/*!
 * Atomic decrement, performing { (*p)-- }
 */
static inline void
sas_atomic_dec(long int *p)
{
  __arch_atomic_dec(p);
}

/*!
 * Initializes the spinlock \a lock so it can be used with lock and 
 * unlock functions.
 */
static inline void
sas_spin_lock_init (volatile sas_spin_lock_t *lock)
{
  sas_read_barrier();
  *lock = 0;
}

/*!
 * Locks the spinlock \a lock.
 *
 * The function busy-waits with an atomic test-and-set loop of the form:
 * do { tmp = atomic_swap (lock, 1); } while (tmp != 0);
 */
static inline void
sas_spin_lock (volatile sas_spin_lock_t *lock)
{
  __arch_sas_spin_lock(lock);
}

/*!
 * Try to lock the spinlock \a lock.
 *
 * Unlike sas_spin_lock, the function returns immediately when \a lock
 * cannot be acquired.
 *
 * Returns 0 if the lock was acquired, nonzero if it was already held.
 */
static inline int
sas_spin_trylock (volatile sas_spin_lock_t *lock)
{
  return __arch_sas_spin_trylock(lock);
}

/*!
 * Unlocks the spinlock \a lock, resetting it to its initial value.
 */
static inline void
sas_spin_unlock (volatile sas_spin_lock_t *lock)
{
  sas_read_barrier();
  *lock = 0;
}
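
/*
 * Editorial sketch (not from the original header): the intended
 * init/lock/unlock life cycle of a spin lock guarding a short critical
 * section. 'list_lock', 'node_t', and 'list_insert' are hypothetical
 * names.
 *
 *   static sas_spin_lock_t list_lock;   // once: sas_spin_lock_init (&list_lock);
 *
 *   void insert (node_t *n)
 *   {
 *     sas_spin_lock (&list_lock);
 *     list_insert (n);                  // keep the held section short
 *     sas_spin_unlock (&list_lock);
 *   }
 */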

/*!
 * Initializes the spinlock pointer \a lock so it can be used with lock
 * and unlock functions.
 */
static inline void
sas_lock_ptr_init (volatile sas_lock_ptr_t *lock)
{
  sas_read_barrier();
  *lock = NULL;
}

/*!
 * Locks the spinlock pointer \a lock and returns its unlocked value.
 */
static inline void*
sas_lock_ptr (volatile sas_lock_ptr_t *lock)
{
  int rc;
  long unlocked, locked;
  do {
    unlocked = (long)(*lock) & -2L;
    locked = unlocked | 1;
    rc = sas_compare_and_swap ((long int *)lock, unlocked, locked);
  } while (!rc);
  sas_write_barrier();
  
  return (void*)unlocked;
}

/*!
 * Spins over the lock pointer \a lock until it is unlocked, then swaps
 * its value with the locked value of \a newptr.
 */
static inline void
sas_set_unlocked_ptr (volatile sas_lock_ptr_t *lock, sas_lock_ptr_t newptr)
{
  int rc;
  do {
    long unlocked= (long)(*lock) & -2L;
    long newlocked= (long)newptr | 1;
    rc = sas_compare_and_swap ((long int *)lock, unlocked, newlocked);
  } while (!rc);
  sas_write_barrier();
}

/*!
 * Spins over the lock pointer \a lock until it is locked, then swaps
 * its value with the locked value of \a newptr.
 */
static inline void
sas_set_locked_ptr (volatile sas_lock_ptr_t *lock, sas_lock_ptr_t newptr)
{
  int rc;
  do {
    long locked= (long)(*lock) | 1;
    long newlocked= (long)newptr | 1;
    rc = sas_compare_and_swap ((long int *)lock, locked, newlocked);
  } while (!rc);
}

/*!
 * The function tries to lock \a lock. Returns 0 if the lock was
 * acquired, 1 otherwise.
 */
static inline int
sas_trylock_ptr (volatile sas_lock_ptr_t *lock)
{
  int rc;
  {
    long unlocked= (long)(*lock) & -2L;
    long locked= unlocked | 1;
    rc = sas_compare_and_swap ((long int *)lock, unlocked, locked);
  }
  sas_write_barrier();
  
  return !rc;
}

/*!
 * Unlocks the lock pointer \a lock, preserving the stored pointer value.
 */
static inline void
sas_unlock_ptr (volatile sas_lock_ptr_t *lock)
{
  int rc;
  
  sas_read_barrier();
  do {
    long unlocked= (long)(*lock) & -2L;
    long locked= unlocked | 1;
    rc = sas_compare_and_swap ((long int *)lock, locked, unlocked);
  } while (!rc);
}
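
/*
 * Editorial sketch (not from the original header): the lock-pointer
 * primitives above store a pointer and use its low bit as the lock, so
 * the pointed-to object must be at least 2-byte aligned. 'head',
 * 'node_t', and 'push' are hypothetical names.
 *
 *   static sas_lock_ptr_t head;   // list head; low bit doubles as the lock
 *
 *   void push (node_t *n)
 *   {
 *     n->next = (node_t *) sas_lock_ptr (&head);  // acquire, returns old head
 *     sas_set_locked_ptr (&head, n);              // install n, still locked
 *     sas_unlock_ptr (&head);                     // release
 *   }
 */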

/*!
 * The function tries 4 times to lock \a lock; if all attempts fail it
 * calls 'sched_yield' before trying again.
 */
static inline void
sas_spin_lock_with_yield (volatile sas_spin_lock_t *lock)
{
  int rc;
  
  if (sas_spin_trylock(lock) == 0)
    return;
  if (sas_spin_trylock(lock) == 0)
    return;
  if (sas_spin_trylock(lock) == 0)
    return;
  if (sas_spin_trylock(lock) == 0)
    return;
    
  do
    {
      sched_yield();
      rc = sas_spin_trylock(lock);
    } 
  while (rc);
}
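
/*
 * Editorial sketch (not from the original header): the *_with_yield
 * variants suit locks that may be held across longer sections, trading
 * a few spins for a sched_yield instead of burning the time slice.
 * 'list_lock' and 'rebalance' are hypothetical names.
 *
 *   sas_spin_lock_with_yield (&list_lock);
 *   rebalance ();                            // potentially slow work
 *   sas_spin_unlock (&list_lock);
 */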

/*!
 * The function tries 4 times to lock the lock pointer \a lock; if all
 * attempts fail it calls 'sched_yield' before trying again.
 */
static inline void
sas_lock_ptr_with_yield (volatile sas_lock_ptr_t *lock)
{
  int rc;
  
  if (sas_trylock_ptr(lock) == 0)
    return;
  if (sas_trylock_ptr(lock) == 0)
    return;
  if (sas_trylock_ptr(lock) == 0)
    return;
  if (sas_trylock_ptr(lock) == 0)
    return;
    
  do
    {
      sched_yield();
      rc = sas_trylock_ptr(lock);
    } 
  while (rc);

  return;
}
#if 1
/*!
 * Atomically increments \a value by 1, returning the value before the update.
 */
static inline long
sas_atomic_inc_long (volatile long *value)
{
  long result;
  long delta = 1;

#if GCC_VERSION >= 40700
  result = __atomic_fetch_add(value, delta, __ATOMIC_ACQUIRE);
#else
  result = __sync_fetch_and_add(value, delta);
#endif
  
  return result;
}

/*!
 * Atomically decrements \a value by 1, returning the value before the update.
 */
static inline long
sas_atomic_dec_long (volatile long *value)
{
  long result;
  long delta = -1;

#if GCC_VERSION >= 40700
  result = __atomic_fetch_add(value, delta, __ATOMIC_ACQUIRE);
#else
  result = __sync_fetch_and_add(value, delta);
#endif
  
  return result;
}
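
/*
 * Editorial sketch (not from the original header): a reference count
 * built on the long increment/decrement above. 'obj_t', 'obj_put', and
 * 'obj_free' are hypothetical names.
 *
 *   typedef struct obj { volatile long refs; } obj_t;
 *
 *   void obj_put (obj_t *o)
 *   {
 *     // the value before the decrement is returned, so 1 means this
 *     // call released the last reference
 *     if (sas_atomic_dec_long (&o->refs) == 1)
 *       obj_free (o);
 *   }
 */
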
#endif
#endif /*_SASATOMIC_H */