This file is indexed.

/usr/include/glusterfs/locking.h is in glusterfs-common 3.13.2-1build1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file are shown below.

/*
  Copyright (c) 2008-2012 Red Hat, Inc. <http://www.redhat.com>
  This file is part of GlusterFS.

  This file is licensed to you under your choice of the GNU Lesser
  General Public License, version 3 or any later version (LGPLv3 or
  later), or the GNU General Public License, version 2 (GPLv2), in all
  cases as published by the Free Software Foundation.
*/

#ifndef _LOCKING_H
#define _LOCKING_H

#include <pthread.h>

#if defined (GF_DARWIN_HOST_OS)
#include <libkern/OSAtomic.h>
#define pthread_spinlock_t OSSpinLock
#define pthread_spin_lock(l) OSSpinLockLock(l)
#define pthread_spin_unlock(l) OSSpinLockUnlock(l)
#define pthread_spin_destroy(l) 0
#define pthread_spin_init(l, v) (*l = v)
#endif

#if defined (HAVE_SPINLOCK)

typedef union {
        pthread_spinlock_t      spinlock;
        pthread_mutex_t         mutex;
} gf_lock_t;

#if !defined(LOCKING_IMPL)
extern int use_spinlocks;

/*
 * Using a dispatch table would be unpleasant because we're dealing with two
 * different types.  If the dispatch contains direct pointers to pthread_xx
 * or mutex_xxx then we have to hope that every possible union alternative
 * starts at the same address as the union itself.  I'm old enough to remember
 * compilers where this was not the case (for alignment reasons) so I'm a bit
 * paranoid about that.  Also, I don't like casting arguments through "void *"
 * which we'd also have to do to avoid type errors.  The other alternative would
 * be to define actual functions which pick out the right union member, and put
 * those in the dispatch tables.  Now we have a pointer dereference through the
 * dispatch table plus a function call, which is likely to be worse than the
 * branching here from the ?: construct.  If it were a clear win it might be
 * worth the extra complexity, but for now this way seems preferable.
 */

#define LOCK_INIT(x)    (use_spinlocks \
                                ? pthread_spin_init  (&((x)->spinlock), 0) \
                                : pthread_mutex_init (&((x)->mutex), 0))

#define LOCK(x)         (use_spinlocks \
                                ? pthread_spin_lock  (&((x)->spinlock)) \
                                : pthread_mutex_lock (&((x)->mutex)))

#define TRY_LOCK(x)     (use_spinlocks \
                                ? pthread_spin_trylock  (&((x)->spinlock)) \
                                : pthread_mutex_trylock (&((x)->mutex)))

#define UNLOCK(x)       (use_spinlocks \
                                ? pthread_spin_unlock  (&((x)->spinlock)) \
                                : pthread_mutex_unlock (&((x)->mutex)))

#define LOCK_DESTROY(x) (use_spinlocks \
                                ? pthread_spin_destroy  (&((x)->spinlock)) \
                                : pthread_mutex_destroy (&((x)->mutex)))

#endif

#else

typedef pthread_mutex_t gf_lock_t;

#define LOCK_INIT(x)    pthread_mutex_init (x, 0)
#define LOCK(x)         pthread_mutex_lock (x)
#define TRY_LOCK(x)     pthread_mutex_trylock (x)
#define UNLOCK(x)       pthread_mutex_unlock (x)
#define LOCK_DESTROY(x) pthread_mutex_destroy (x)

#endif /* HAVE_SPINLOCK */


#endif /* _LOCKING_H */
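
For illustration, here is a minimal usage sketch (not part of the packaged header): a counter protected by the gf_lock_t abstraction. The struct and function names are hypothetical. When HAVE_SPINLOCK is defined, the macros reference the extern variable use_spinlocks, which is defined inside GlusterFS's own libglusterfs, so code like this must link against that library; without HAVE_SPINLOCK, gf_lock_t is a plain pthread_mutex_t and the macros map directly onto the pthread mutex calls.

/* Hypothetical example, not part of locking.h: a counter whose value
   is protected by gf_lock_t.  Whether the lock is a spinlock or a
   mutex is decided by the HAVE_SPINLOCK build flag and, at run time,
   by use_spinlocks. */
#include <glusterfs/locking.h>

struct counter {
        gf_lock_t  lock;
        int        value;
};

static void
counter_init (struct counter *c)
{
        LOCK_INIT (&c->lock);     /* spin or mutex init, per use_spinlocks */
        c->value = 0;
}

static int
counter_bump (struct counter *c)
{
        int v;

        LOCK (&c->lock);          /* blocks until acquired */
        v = ++c->value;
        UNLOCK (&c->lock);
        return v;
}

static int
counter_peek (struct counter *c, int *out)
{
        if (TRY_LOCK (&c->lock) != 0)
                return -1;        /* lock busy; caller can retry */
        *out = c->value;
        UNLOCK (&c->lock);
        return 0;
}

static void
counter_fini (struct counter *c)
{
        LOCK_DESTROY (&c->lock);
}

Note that callers never name the union members directly; they only pass a gf_lock_t pointer to the macros, which is what lets the header swap lock types underneath without touching call sites.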