This file is indexed.

/usr/include/thunderbird/PseudoStack.h is in thunderbird-dev 1:38.6.0+build1-0ubuntu1.

This file is owned by root:root, with mode 0o644.

The actual contents of the file can be viewed below.

/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef PROFILER_PSEUDO_STACK_H_
#define PROFILER_PSEUDO_STACK_H_

#include "mozilla/ArrayUtils.h"
#include <stdint.h>
#include "js/ProfilingStack.h"
#include <stdlib.h>
#include "mozilla/Atomics.h"
#include "nsISupportsImpl.h"

/* We duplicate this code here to avoid header dependencies
 * that make it more difficult to include this file in other places. */
#if defined(_M_X64) || defined(__x86_64__)
#define V8_HOST_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
#define V8_HOST_ARCH_IA32 1
#elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1
#else
#warning Please add support for your architecture in chromium_types.h
#endif

// STORE_SEQUENCER: Because signals can interrupt our profile modification,
//                  we need to make sure stores are not re-ordered by the
//                  compiler or hardware, so that the profile is consistent at
//                  every point a signal can fire.
#ifdef V8_HOST_ARCH_ARM
// TODO: Is there something cheaper that will prevent
//       memory stores from being reordered?

typedef void (*LinuxKernelMemoryBarrierFunc)(void);
LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
    (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;

# define STORE_SEQUENCER() pLinuxKernelMemoryBarrier()
#elif defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)
# if defined(_MSC_VER)
#  include <intrin.h>
#  define STORE_SEQUENCER() _ReadWriteBarrier();
# elif defined(__INTEL_COMPILER)
#  define STORE_SEQUENCER() __memory_barrier();
# elif __GNUC__
#  define STORE_SEQUENCER() asm volatile("" ::: "memory");
# else
#  error "Memory clobber not supported for your compiler."
# endif
#else
# error "Memory clobber not supported for your platform."
#endif
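
// For illustration only: on C++11 compilers, the x86 flavour of
// STORE_SEQUENCER() could be written with the standard compiler fence from
// <atomic>. A minimal sketch of the publish pattern (not part of this header;
// gValue/gReady/publish are hypothetical names):
//
//   #include <atomic>
//
//   static volatile uint32_t gValue;
//   static volatile bool gReady;
//
//   static void publish(uint32_t v) {
//     gValue = v;
//     // Same role as STORE_SEQUENCER(): keep the compiler from sinking the
//     // gValue store below the gReady store, so a signal handler that sees
//     // gReady == true also sees gValue.
//     std::atomic_signal_fence(std::memory_order_seq_cst);
//     gReady = true;
//   }
//
// Note this only constrains the compiler; on ARM the header additionally
// relies on the kernel-provided hardware barrier used above.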

// We can't include <algorithm> because it causes issues on OS X, so we use
// our own min function.
static inline uint32_t sMin(uint32_t l, uint32_t r) {
  return l < r ? l : r;
}

// A stack entry exists to allow the JS engine to inform SPS of the current
// backtrace, but also to instrument particular points in C++ in case stack
// walking is not available on the platform we are running on.
//
// Each entry has a descriptive string, a relevant stack address, and some extra
// information the JS engine might want to inform SPS of. This class inherits
// from the JS engine's version of the entry to ensure that the size and layout
// of the two representations are consistent.
class StackEntry : public js::ProfileEntry
{
};

class ProfilerMarkerPayload;
template<typename T>
class ProfilerLinkedList;
class JSStreamWriter;
class JSCustomArray;
class ThreadProfile;
class ProfilerMarker {
  friend class ProfilerLinkedList<ProfilerMarker>;
public:
  explicit ProfilerMarker(const char* aMarkerName,
         ProfilerMarkerPayload* aPayload = nullptr,
         float aTime = 0);

  ~ProfilerMarker();

  const char* GetMarkerName() const {
    return mMarkerName;
  }

  void StreamJSObject(JSStreamWriter& b) const;

  void SetGeneration(int aGenID);

  bool HasExpired(int aGenID) const {
    return mGenID + 2 <= aGenID;
  }

  float GetTime();

private:
  char* mMarkerName;
  ProfilerMarkerPayload* mPayload;
  ProfilerMarker* mNext;
  float mTime;
  int mGenID;
};
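
// Worked example of the expiry rule above (illustrative): after
// SetGeneration(5), HasExpired(5) and HasExpired(6) are false (5 + 2 <= 6
// does not hold), while HasExpired(7) is true. A marker therefore survives
// the generation it was stamped in plus one more before it can be dropped.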

// Forward declaration
typedef struct _UnwinderThreadBuffer UnwinderThreadBuffer;

/**
 * This struct is used to add a mNext field to UnwinderThreadBuffer objects for
 * use with ProfilerLinkedList. It is done this way so that UnwinderThreadBuffer
 * may continue to be opaque with respect to code outside of UnwinderThread2.cpp
 */
struct LinkedUWTBuffer
{
  LinkedUWTBuffer()
    : mNext(nullptr)
  {}
  virtual ~LinkedUWTBuffer() {}
  virtual UnwinderThreadBuffer* GetBuffer() = 0;
  LinkedUWTBuffer* mNext;
};

template<typename T>
class ProfilerLinkedList {
public:
  ProfilerLinkedList()
    : mHead(nullptr)
    , mTail(nullptr)
  {}

  void insert(T* elem)
  {
    if (!mTail) {
      mHead = elem;
      mTail = elem;
    } else {
      mTail->mNext = elem;
      mTail = elem;
    }
    elem->mNext = nullptr;
  }

  T* popHead()
  {
    if (!mHead) {
      MOZ_ASSERT(false);
      return nullptr;
    }

    T* head = mHead;

    mHead = head->mNext;
    if (!mHead) {
      mTail = nullptr;
    }

    return head;
  }

  const T* peek() {
    return mHead;
  }

private:
  T* mHead;
  T* mTail;
};

typedef ProfilerLinkedList<ProfilerMarker> ProfilerMarkerLinkedList;
typedef ProfilerLinkedList<LinkedUWTBuffer> UWTBufferLinkedList;

template<typename T>
class ProfilerSignalSafeLinkedList {
public:
  ProfilerSignalSafeLinkedList()
    : mSignalLock(false)
  {}

  ~ProfilerSignalSafeLinkedList()
  {
    if (mSignalLock) {
      // Some thread is modifying the list. We should only be destroyed on
      // that thread.
      abort();
    }

    while (mList.peek()) {
      delete mList.popHead();
    }
  }

  // Insert an item into the list.
  // Must only be called from the owning thread.
  // Must not be called while the list from accessList() is being accessed.
  // In the profiler, we ensure that by interrupting the profiled thread
  // (which is the one that owns this list and calls insert() on it) until
  // we're done reading the list from the signal handler.
  void insert(T* aElement) {
    MOZ_ASSERT(aElement);

    mSignalLock = true;
    STORE_SEQUENCER();

    mList.insert(aElement);

    STORE_SEQUENCER();
    mSignalLock = false;
  }

  // Called within signal, from any thread, possibly while insert() is in the
  // middle of modifying the list (on the owning thread). Will return null if
  // that is the case.
  // Function must be reentrant.
  ProfilerLinkedList<T>* accessList()
  {
    if (mSignalLock) {
      return nullptr;
    }
    return &mList;
  }

private:
  ProfilerLinkedList<T> mList;

  // If this is set, then it's not safe to read the list because its contents
  // are being changed.
  volatile bool mSignalLock;
};
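
// A minimal usage sketch of the flag protocol above (illustrative names, not
// part of this header):
//
//   ProfilerSignalSafeLinkedList<ProfilerMarker> list;
//
//   // Owning thread, between signals:
//   list.insert(new ProfilerMarker("paint"));
//
//   // Signal handler, while the owning thread is interrupted:
//   if (ProfilerLinkedList<ProfilerMarker>* markers = list.accessList()) {
//     while (markers->peek()) {
//       ProfilerMarker* m = markers->popHead();
//       // ... stream m into the profile buffer ...
//       delete m;
//     }
//   } else {
//     // insert() was interrupted mid-update; skip this sample.
//   }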

// Stub eventMarker function for js-engine event generation.
void ProfilerJSEventMarker(const char *event);

// The PseudoStack members are read by signal
// handlers, so mutation of them needs to be signal-safe.
struct PseudoStack
{
public:
  // Create a new PseudoStack and acquire a reference to it.
  static PseudoStack *create()
  {
    return new PseudoStack();
  }

  // This is called on every profiler restart. Put things that should happen at that time here.
  void reinitializeOnResume() {
    // This is needed to cause an initial sample to be taken from sleeping threads. Otherwise sleeping
    // threads would not have any samples to copy forward while sleeping.
    mSleepId++;
  }

  void addLinkedUWTBuffer(LinkedUWTBuffer* aBuff)
  {
    mPendingUWTBuffers.insert(aBuff);
  }

  UWTBufferLinkedList* getLinkedUWTBuffers()
  {
    return mPendingUWTBuffers.accessList();
  }

  void addMarker(const char *aMarkerStr, ProfilerMarkerPayload *aPayload, float aTime)
  {
    ProfilerMarker* marker = new ProfilerMarker(aMarkerStr, aPayload, aTime);
    mPendingMarkers.insert(marker);
  }

  // Called from within a signal handler. Function must be reentrant.
  ProfilerMarkerLinkedList* getPendingMarkers()
  {
    // The profiled thread is interrupted, so we can access the list safely.
    // Unless the profiled thread was in the middle of changing the list when
    // we interrupted it - in that case, accessList() will return null.
    return mPendingMarkers.accessList();
  }

  void push(const char *aName, js::ProfileEntry::Category aCategory, uint32_t line)
  {
    push(aName, aCategory, nullptr, false, line);
  }

  void push(const char *aName, js::ProfileEntry::Category aCategory,
    void *aStackAddress, bool aCopy, uint32_t line)
  {
    if (size_t(mStackPointer) >= mozilla::ArrayLength(mStack)) {
      // The stack is full: keep counting the attempted depth (stackSize()
      // clamps it to the array length), but do not write an entry.
      mStackPointer++;
      return;
    }

    // In order to ensure this object is kept alive while it is
    // active, we acquire a reference at the outermost push.  This is
    // released by the corresponding pop.
    if (mStackPointer == 0) {
      ref();
    }

    volatile StackEntry &entry = mStack[mStackPointer];

    // Make sure we increment the pointer after the name has
    // been written such that mStack is always consistent.
    entry.setLabel(aName);
    entry.setCppFrame(aStackAddress, line);
    MOZ_ASSERT(entry.flags() == js::ProfileEntry::IS_CPP_ENTRY);

    uint32_t uint_category = static_cast<uint32_t>(aCategory);
    MOZ_ASSERT(
      uint_category >= static_cast<uint32_t>(js::ProfileEntry::Category::FIRST) &&
      uint_category <= static_cast<uint32_t>(js::ProfileEntry::Category::LAST));

    entry.setFlag(uint_category);

    // Track if mLabel needs a copy.
    if (aCopy)
      entry.setFlag(js::ProfileEntry::FRAME_LABEL_COPY);
    else
      entry.unsetFlag(js::ProfileEntry::FRAME_LABEL_COPY);

    // Prevent the optimizer from re-ordering these instructions
    STORE_SEQUENCER();
    mStackPointer++;
  }

  // Pop the stack.  If the stack is empty and all other references to
  // this PseudoStack have been dropped, then the PseudoStack is
  // deleted and "false" is returned.  Otherwise "true" is returned.
  bool popAndMaybeDelete()
  {
    mStackPointer--;
    if (mStackPointer == 0) {
      // Release our self-owned reference count.  See 'push'.
      deref();
      return false;
    } else {
      return true;
    }
  }
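  // Sketch of the intended pairing (roughly what the profiler's label macros
  // expand to; the names here are illustrative):
  //
  //   stack->push("MyFunction", js::ProfileEntry::Category::OTHER, __LINE__);
  //   // ... profiled work ...
  //   if (!stack->popAndMaybeDelete()) {
  //     // The pop released the last reference: the PseudoStack is deleted
  //     // and must not be touched again.
  //   }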
  bool isEmpty()
  {
    return mStackPointer == 0;
  }
  uint32_t stackSize() const
  {
    return sMin(mStackPointer, mozilla::sig_safe_t(mozilla::ArrayLength(mStack)));
  }

  void sampleRuntime(JSRuntime *runtime) {
    mRuntime = runtime;
    if (!runtime) {
      // JS shut down
      return;
    }

    static_assert(sizeof(mStack[0]) == sizeof(js::ProfileEntry),
                  "mStack must be binary compatible with js::ProfileEntry.");
    js::SetRuntimeProfilingStack(runtime,
                                 (js::ProfileEntry*) mStack,
                                 (uint32_t*) &mStackPointer,
                                 uint32_t(mozilla::ArrayLength(mStack)));
    if (mStartJSSampling)
      enableJSSampling();
  }
  void enableJSSampling() {
    if (mRuntime) {
      js::EnableRuntimeProfilingStack(mRuntime, true);
      js::RegisterRuntimeProfilingEventMarker(mRuntime, &ProfilerJSEventMarker);
      mStartJSSampling = false;
    } else {
      mStartJSSampling = true;
    }
  }
  void jsOperationCallback() {
    if (mStartJSSampling)
      enableJSSampling();
  }
  void disableJSSampling() {
    mStartJSSampling = false;
    if (mRuntime)
      js::EnableRuntimeProfilingStack(mRuntime, false);
  }
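
  // Note on ordering: if enableJSSampling() runs before the JS runtime is
  // registered, it only latches mStartJSSampling; the request is completed
  // later by sampleRuntime() once mRuntime is set, or by the next
  // jsOperationCallback() from the JS engine.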

  // Keep a list of active checkpoints
  StackEntry volatile mStack[1024];
 private:

  // A PseudoStack can only be created via the "create" method.
  PseudoStack()
    : mStackPointer(0)
    , mSleepId(0)
    , mSleepIdObserved(0)
    , mSleeping(false)
    , mRefCnt(1)
    , mRuntime(nullptr)
    , mStartJSSampling(false)
    , mPrivacyMode(false)
  { }

  // A PseudoStack can only be deleted via deref.
  ~PseudoStack() {
    if (mStackPointer != 0) {
      // We're releasing the pseudostack while it's still in use.
      // The label macros keep a non-refcounted reference to the
      // stack to avoid a TLS lookup. If these are not all cleared we will
      // get a use-after-free, so it is better to crash now.
      abort();
    }
  }

  // No copying.
  PseudoStack(const PseudoStack&) = delete;
  void operator=(const PseudoStack&) = delete;

  // Keep a list of pending markers that must be moved
  // to the circular buffer
  ProfilerSignalSafeLinkedList<ProfilerMarker> mPendingMarkers;
  // List of LinkedUWTBuffers that must be processed on the next tick
  ProfilerSignalSafeLinkedList<LinkedUWTBuffer> mPendingUWTBuffers;
  // This may exceed the length of mStack, so instead use the stackSize()
  // method to determine the number of valid samples in mStack.
  mozilla::sig_safe_t mStackPointer;
  // Incremented at every sleep/wake up of the thread
  int mSleepId;
  // Previous id observed. If this is not the same as mSleepId, this thread is
  // not sleeping in the same place any more.
  mozilla::Atomic<int> mSleepIdObserved;
  // Keeps track of whether the thread is sleeping (1 when sleeping, 0 when
  // awake).
  mozilla::Atomic<int> mSleeping;
  // This class is reference counted because it must be kept alive by
  // the ThreadInfo, by the reference from tlsPseudoStack, and by the
  // current thread when callbacks are in progress.
  mozilla::Atomic<int> mRefCnt;

 public:
  // The runtime which is being sampled
  JSRuntime *mRuntime;
  // Start JS Profiling when possible
  bool mStartJSSampling;
  bool mPrivacyMode;

  enum SleepState {NOT_SLEEPING, SLEEPING_FIRST, SLEEPING_AGAIN};

  // The first time this is called in a sleep cycle we return SLEEPING_FIRST;
  // any subsequent call within the same sleep cycle returns SLEEPING_AGAIN.
  SleepState observeSleeping() {
    if (mSleeping != 0) {
      if (mSleepIdObserved == mSleepId) {
        return SLEEPING_AGAIN;
      } else {
        mSleepIdObserved = mSleepId;
        return SLEEPING_FIRST;
      }
    } else {
      return NOT_SLEEPING;
    }
  }
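
  // Usage sketch from the sampler's side (illustrative, not part of this
  // header):
  //
  //   switch (stack->observeSleeping()) {
  //     case PseudoStack::SLEEPING_FIRST:
  //       // First observation this sleep cycle: take one real sample.
  //       break;
  //     case PseudoStack::SLEEPING_AGAIN:
  //       // Still the same sleep cycle: duplicate the previous sample.
  //       break;
  //     case PseudoStack::NOT_SLEEPING:
  //       // Normal sampling path.
  //       break;
  //   }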


  // Call this whenever the current thread sleeps or wakes up.
  // Calling setSleeping with the same value twice in a row is an error.
  void setSleeping(int sleeping) {
    MOZ_ASSERT(mSleeping != sleeping);
    mSleepId++;
    mSleeping = sleeping;
  }

  void ref() {
    ++mRefCnt;
  }

  void deref() {
    int newValue = --mRefCnt;
    if (newValue == 0) {
      delete this;
    }
  }
};

#endif // PROFILER_PSEUDO_STACK_H_