/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <stdlib.h>
#include <signal.h>
#include <stdarg.h>
#include <string.h>

#include <algorithm>

#include "mozilla/ThreadLocal.h"
#include "nscore.h"
#include "mozilla/Assertions.h"
#include "mozilla/TimeStamp.h"
#include "mozilla/Util.h"
#include "nsAlgorithm.h"
2011-12-07 19:48:15 +00:00
2012-11-30 17:49:20 +00:00
2012-06-21 00:58:55 +00:00
/* QT has a #define for the word "slots" and jsfriendapi.h has a struct with
 * this variable name, causing compilation problems. Alleviate this for now by
 * removing this #define */
# ifdef MOZ_WIDGET_QT
# undef slots
# endif
# include "jsfriendapi.h"
2011-12-07 19:48:15 +00:00
using mozilla : : TimeStamp ;
using mozilla : : TimeDuration ;
2011-08-27 00:05:37 +00:00
2013-03-12 07:14:52 +00:00
struct ProfileStack ;
2012-05-09 20:54:33 +00:00
class TableTicker ;
2012-11-30 17:49:20 +00:00
class JSCustomObject ;
2012-05-09 20:54:33 +00:00
2013-03-12 07:14:52 +00:00
extern mozilla : : ThreadLocal < ProfileStack * > tlsStack ;
extern mozilla : : ThreadLocal < TableTicker * > tlsTicker ;
2011-12-08 15:46:02 +00:00
extern bool stack_key_initialized ;
2011-08-27 00:05:37 +00:00
2012-01-17 20:33:04 +00:00
# ifndef SAMPLE_FUNCTION_NAME
# ifdef __GNUC__
# define SAMPLE_FUNCTION_NAME __FUNCTION__
# elif defined(_MSC_VER)
# define SAMPLE_FUNCTION_NAME __FUNCTION__
# else
# define SAMPLE_FUNCTION_NAME __func__ // defined in C99, supported in various C++ compilers. Just raw function name.
# endif
# endif
2013-03-12 07:14:52 +00:00
# define SAMPLER_INIT() mozilla_sampler_init()
# define SAMPLER_SHUTDOWN() mozilla_sampler_shutdown()
# define SAMPLER_START(entries, interval, features, featureCount) mozilla_sampler_start(entries, interval, features, featureCount)
# define SAMPLER_STOP() mozilla_sampler_stop()
# define SAMPLER_IS_ACTIVE() mozilla_sampler_is_active()
# define SAMPLER_RESPONSIVENESS(time) mozilla_sampler_responsiveness(time)
# define SAMPLER_GET_RESPONSIVENESS() mozilla_sampler_get_responsiveness()
# define SAMPLER_FRAME_NUMBER(frameNumber) mozilla_sampler_frame_number(frameNumber)
# define SAMPLER_SAVE() mozilla_sampler_save()
# define SAMPLER_GET_PROFILE() mozilla_sampler_get_profile()
# define SAMPLER_GET_PROFILE_DATA(ctx) mozilla_sampler_get_profile_data(ctx)
# define SAMPLER_GET_FEATURES() mozilla_sampler_get_features()
2012-01-17 20:33:04 +00:00
// we want the class and function name but can't easily get that using preprocessor macros
// __func__ doesn't have the class name and __PRETTY_FUNCTION__ has the parameters
2012-04-17 20:21:23 +00:00
# define SAMPLER_APPEND_LINE_NUMBER_PASTE(id, line) id ## line
# define SAMPLER_APPEND_LINE_NUMBER_EXPAND(id, line) SAMPLER_APPEND_LINE_NUMBER_PASTE(id, line)
# define SAMPLER_APPEND_LINE_NUMBER(id) SAMPLER_APPEND_LINE_NUMBER_EXPAND(id, __LINE__)
2012-08-06 18:35:56 +00:00
# define SAMPLE_LABEL(name_space, info) mozilla::SamplerStackFrameRAII SAMPLER_APPEND_LINE_NUMBER(sampler_raii)(name_space "::" info, __LINE__)
# define SAMPLE_LABEL_PRINTF(name_space, info, ...) mozilla::SamplerStackFramePrintfRAII SAMPLER_APPEND_LINE_NUMBER(sampler_raii)(name_space "::" info, __LINE__, __VA_ARGS__)
2011-12-05 01:53:17 +00:00
# define SAMPLE_MARKER(info) mozilla_sampler_add_marker(info)
2013-01-18 23:47:31 +00:00
# define SAMPLE_MAIN_THREAD_LABEL(name_space, info) MOZ_ASSERT(NS_IsMainThread(), "This can only be called on the main thread"); mozilla::SamplerStackFrameRAII SAMPLER_APPEND_LINE_NUMBER(sampler_raii)(name_space "::" info, __LINE__)
# define SAMPLE_MAIN_THREAD_LABEL_PRINTF(name_space, info, ...) MOZ_ASSERT(NS_IsMainThread(), "This can only be called on the main thread"); mozilla::SamplerStackFramePrintfRAII SAMPLER_APPEND_LINE_NUMBER(sampler_raii)(name_space "::" info, __LINE__, __VA_ARGS__)
# define SAMPLE_MAIN_THREAD_MARKER(info) MOZ_ASSERT(NS_IsMainThread(), "This can only be called on the main thread"); mozilla_sampler_add_marker(info)
2011-08-27 00:05:37 +00:00
2013-03-12 07:14:52 +00:00
# define SAMPLER_PRINT_LOCATION() mozilla_sampler_print_location()
2012-11-19 23:13:28 +00:00
2011-12-02 22:05:33 +00:00
/* we duplicate this code here to avoid header dependencies
* which make it more difficult to include in other places */
# if defined(_M_X64) || defined(__x86_64__)
# define V8_HOST_ARCH_X64 1
# elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
# define V8_HOST_ARCH_IA32 1
# elif defined(__ARMEL__)
# define V8_HOST_ARCH_ARM 1
# else
# warning Please add support for your architecture in chromium_types.h
# endif
2012-09-24 22:38:07 +00:00
/* FIXME/bug 789667: memory constraints wouldn't be much of a problem for
 * this small a sample buffer size, except that serializing the
 * profile data is extremely, unnecessarily memory intensive. */
# ifdef MOZ_WIDGET_GONK
# define PLATFORM_LIKELY_MEMORY_CONSTRAINED
# endif
2012-10-18 15:12:53 +00:00
# if !defined(PLATFORM_LIKELY_MEMORY_CONSTRAINED) && !defined(ARCH_ARMV6)
2012-09-24 22:38:07 +00:00
# define PROFILE_DEFAULT_ENTRY 1000000
# else
# define PROFILE_DEFAULT_ENTRY 100000
# endif
2012-09-24 22:38:07 +00:00
# if defined(PLATFORM_LIKELY_MEMORY_CONSTRAINED)
/* A 1ms sampling interval has been shown to be a large perf hit
 * (10fps) on memory-constrained (low-end) platforms, and additionally
 * to yield different results from the profiler. Where this is the
 * important case, b2g, there are also many gecko processes which
 * magnify these effects. */
# define PROFILE_DEFAULT_INTERVAL 10
# elif defined(ANDROID)
2012-03-02 19:11:47 +00:00
// We use a lower frequency on Android, in order to make things work
// more smoothly on phones. This value can be adjusted later with
// some libunwind optimizations.
// In one sample measurement on Galaxy Nexus, out of about 700 backtraces,
// 60 of them took more than 25ms, and the average and standard deviation
// were 6.17ms and 9.71ms respectively.
2012-04-20 15:31:09 +00:00
// For now since we don't support stackwalking let's use 1ms since it's fast
// enough.
# define PROFILE_DEFAULT_INTERVAL 1
2012-03-02 19:11:47 +00:00
# else
2012-04-20 15:31:09 +00:00
# define PROFILE_DEFAULT_INTERVAL 1
2012-03-02 19:11:47 +00:00
# endif
# define PROFILE_DEFAULT_FEATURES NULL
# define PROFILE_DEFAULT_FEATURE_COUNT 0
2011-12-02 22:05:33 +00:00
2011-08-27 00:05:37 +00:00
// STORE_SEQUENCER: Because signals can interrupt our profile modification
// we need to make stores are not re-ordered by the compiler
// or hardware to make sure the profile is consistent at
// every point the signal can fire.
2011-12-02 22:05:33 +00:00
# ifdef V8_HOST_ARCH_ARM
2011-08-27 00:05:37 +00:00
// TODO Is there something cheaper that will prevent
// memory stores from being reordered
2011-12-02 22:05:33 +00:00
typedef void ( * LinuxKernelMemoryBarrierFunc ) ( void ) ;
LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__ ( ( weak ) ) =
( LinuxKernelMemoryBarrierFunc ) 0xffff0fa0 ;
# define STORE_SEQUENCER() pLinuxKernelMemoryBarrier()
# elif defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)
2011-12-04 19:09:00 +00:00
# if defined(_MSC_VER)
2013-01-06 19:43:23 +00:00
# if _MSC_VER > 1400
# include <intrin.h>
# else // _MSC_VER > 1400
2012-01-19 00:53:35 +00:00
// MSVC2005 has a name collision bug caused when both <intrin.h> and <winnt.h> are included together.
# ifdef _WINNT_
2011-12-04 19:09:00 +00:00
# define _interlockedbittestandreset _interlockedbittestandreset_NAME_CHANGED_TO_AVOID_MSVS2005_ERROR
# define _interlockedbittestandset _interlockedbittestandset_NAME_CHANGED_TO_AVOID_MSVS2005_ERROR
# include <intrin.h>
2012-01-17 20:33:04 +00:00
# else
# include <intrin.h>
# define _interlockedbittestandreset _interlockedbittestandreset_NAME_CHANGED_TO_AVOID_MSVS2005_ERROR
# define _interlockedbittestandset _interlockedbittestandset_NAME_CHANGED_TO_AVOID_MSVS2005_ERROR
# endif
2011-12-04 19:09:00 +00:00
// Even though MSVC2005 has the intrinsic _ReadWriteBarrier, it fails to link to it when it's
// not explicitly declared.
# pragma intrinsic(_ReadWriteBarrier)
2013-01-06 19:43:23 +00:00
# endif // _MSC_VER > 1400
2011-12-04 19:09:00 +00:00
# define STORE_SEQUENCER() _ReadWriteBarrier();
# elif defined(__INTEL_COMPILER)
# define STORE_SEQUENCER() __memory_barrier();
# elif __GNUC__
# define STORE_SEQUENCER() asm volatile("" ::: "memory");
# else
# error "Memory clobber not supported for your compiler."
# endif
2011-08-27 00:05:37 +00:00
# else
# error "Memory clobber not supported for your platform."
# endif
2013-03-12 07:14:52 +00:00
// Returns a handle to pass on exit. This can check that we are popping the
// correct callstack.
2013-03-12 07:14:52 +00:00
inline void * mozilla_sampler_call_enter ( const char * aInfo , void * aFrameAddress = NULL , bool aCopy = false , uint32_t line = 0 ) ;
2011-08-27 00:05:37 +00:00
inline void mozilla_sampler_call_exit ( void * handle ) ;
inline void mozilla_sampler_add_marker ( const char * aInfo ) ;
2013-03-12 07:14:52 +00:00
void mozilla_sampler_start ( int aEntries , int aInterval , const char * * aFeatures , uint32_t aFeatureCount ) ;
void mozilla_sampler_stop ( ) ;
bool mozilla_sampler_is_active ( ) ;
void mozilla_sampler_responsiveness ( TimeStamp time ) ;
void mozilla_sampler_frame_number ( int frameNumber ) ;
const double * mozilla_sampler_get_responsiveness ( ) ;
void mozilla_sampler_save ( ) ;
char * mozilla_sampler_get_profile ( ) ;
JSObject * mozilla_sampler_get_profile_data ( JSContext * aCx ) ;
const char * * mozilla_sampler_get_features ( ) ;
void mozilla_sampler_init ( ) ;
void mozilla_sampler_shutdown ( ) ;
void mozilla_sampler_print_location ( ) ;
2012-12-17 23:25:50 +00:00
// Lock the profiler. When locked the profiler is (1) stopped,
// (2) profile data is cleared, (3) profiler-locked is fired.
// This is used to lock down the profiler during private browsing
2013-03-12 07:14:52 +00:00
void mozilla_sampler_lock ( ) ;
2012-12-17 23:25:50 +00:00
// Unlock the profiler, leaving it stopped and fires profiler-unlocked.
2013-03-12 07:14:52 +00:00
void mozilla_sampler_unlock ( ) ;
2012-11-19 23:13:28 +00:00
2011-08-27 00:05:37 +00:00
namespace mozilla {

// RAII helper: pushes a pseudo-stack entry when constructed and pops it when
// destroyed, so every exit path of the annotated scope is balanced.
class NS_STACK_CLASS SamplerStackFrameRAII {
public:
  // we only copy the strings at save time, so to take multiple parameters
  // we'd need to copy them then.
  SamplerStackFrameRAII(const char* aInfo, uint32_t line)
    : mHandle(mozilla_sampler_call_enter(aInfo, this, false, line))
  { }

  ~SamplerStackFrameRAII() {
    mozilla_sampler_call_exit(mHandle);
  }

private:
  // Opaque handle returned by the enter call; passed back on exit.
  void* mHandle;
};
2012-06-27 03:25:14 +00:00
static const int SAMPLER_MAX_STRING = 128;

// RAII helper like SamplerStackFrameRAII, but builds the label with a
// printf-style format when (and only when) the profiler is running.
class NS_STACK_CLASS SamplerStackFramePrintfRAII {
public:
  // we only copy the strings at save time, so to take multiple parameters we'd need to copy them then.
  SamplerStackFramePrintfRAII(const char* aDefault, uint32_t line, const char* aFormat, ...) {
    if (!mozilla_sampler_is_active()) {
      // Profiler is off: skip the formatting work entirely and push only
      // the static default label.
      mHandle = mozilla_sampler_call_enter(aDefault, NULL, false, line);
      return;
    }

    va_list args;
    va_start(args, aFormat);
    char buff[SAMPLER_MAX_STRING];
    // We have to use separate printf's because we're using
    // the vargs.
#if _MSC_VER
    _vsnprintf(buff, SAMPLER_MAX_STRING, aFormat, args);
    _snprintf(mDest, SAMPLER_MAX_STRING, "%s %s", aDefault, buff);
#else
    vsnprintf(buff, SAMPLER_MAX_STRING, aFormat, args);
    snprintf(mDest, SAMPLER_MAX_STRING, "%s %s", aDefault, buff);
#endif
    // aCopy=true: mDest lives on this stack frame, so the profiler must
    // copy the string when it saves the sample.
    mHandle = mozilla_sampler_call_enter(mDest, this, true, line);
    va_end(args);
  }

  ~SamplerStackFramePrintfRAII() {
    mozilla_sampler_call_exit(mHandle);
  }

private:
  char mDest[SAMPLER_MAX_STRING];
  void* mHandle;
};
2011-08-27 00:05:37 +00:00
} //mozilla
2012-08-06 18:35:56 +00:00
// A stack entry exists to allow the JS engine to inform SPS of the current
// backtrace, but also to instrument particular points in C++ in case stack
// walking is not available on the platform we are running on.
//
// Each entry has a descriptive string, a relevant stack address, and some extra
// information the JS engine might want to inform SPS of. This class inherits
// from the JS engine's version of the entry to ensure that the size and layout
// of the two representations are consistent.
class StackEntry : public js : : ProfileEntry
2012-06-27 03:25:14 +00:00
{
public :
2012-08-06 18:35:56 +00:00
bool isCopyLabel ( ) volatile {
return ! ( ( uintptr_t ) stackAddress ( ) & 0x1 ) ;
2012-06-27 03:25:14 +00:00
}
2013-02-18 06:56:32 +00:00
void setStackAddressCopy ( void * sparg , bool copy ) volatile {
2012-08-06 18:35:56 +00:00
// Tagged pointer. Less significant bit used to track if mLabel needs a
// copy. Note that we don't need the last bit of the stack address for
// proper ordering. This is optimized for encoding within the JS engine's
// instrumentation, so we do the extra work here of encoding a bit.
// Last bit 1 = Don't copy, Last bit 0 = Copy.
if ( copy ) {
setStackAddress ( reinterpret_cast < void * > (
2013-02-18 06:56:32 +00:00
reinterpret_cast < uintptr_t > ( sparg ) & ~ 0x1 ) ) ;
2012-08-06 18:35:56 +00:00
} else {
setStackAddress ( reinterpret_cast < void * > (
2013-02-18 06:56:32 +00:00
reinterpret_cast < uintptr_t > ( sparg ) | 0x1 ) ) ;
2012-08-06 18:35:56 +00:00
}
}
2012-06-27 03:25:14 +00:00
} ;
2013-03-12 07:14:52 +00:00
// the SamplerStack members are read by signal
2011-08-27 00:05:37 +00:00
// handlers, so the mutation of them needs to be signal-safe.
2013-03-12 07:14:52 +00:00
struct ProfileStack
2011-08-27 00:05:37 +00:00
{
public :
2013-03-12 07:14:52 +00:00
ProfileStack ( )
2011-08-27 00:05:37 +00:00
: mStackPointer ( 0 )
, mMarkerPointer ( 0 )
, mQueueClearMarker ( false )
2012-10-01 16:51:03 +00:00
, mRuntime ( NULL )
2012-07-11 02:05:38 +00:00
, mStartJSSampling ( false )
2011-08-27 00:05:37 +00:00
{ }
void addMarker ( const char * aMarker )
{
2012-12-11 19:10:56 +00:00
char * markerCopy = strdup ( aMarker ) ;
mSignalLock = true ;
STORE_SEQUENCER ( ) ;
2011-08-27 00:05:37 +00:00
if ( mQueueClearMarker ) {
clearMarkers ( ) ;
}
if ( ! aMarker ) {
return ; //discard
}
2012-03-21 16:06:01 +00:00
if ( size_t ( mMarkerPointer ) = = mozilla : : ArrayLength ( mMarkers ) ) {
2011-08-27 00:05:37 +00:00
return ; //array full, silently drop
}
2012-12-11 19:10:56 +00:00
mMarkers [ mMarkerPointer ] = markerCopy ;
2011-08-27 00:05:37 +00:00
mMarkerPointer + + ;
2012-12-11 19:10:56 +00:00
mSignalLock = false ;
STORE_SEQUENCER ( ) ;
2011-08-27 00:05:37 +00:00
}
// called within signal. Function must be reentrant
const char * getMarker ( int aMarkerId )
{
2012-12-11 19:10:56 +00:00
// if mSignalLock then the stack is inconsistent because it's being
// modified by the profiled thread. Post pone these markers
// for the next sample. The odds of a livelock are nearly impossible
// and would show up in a profile as many sample in 'addMarker' thus
// we ignore this scenario.
// if mQueueClearMarker then we've the sampler thread has already
// thread the markers then they are pending deletion.
if ( mSignalLock | | mQueueClearMarker | | aMarkerId < 0 | |
2012-06-27 03:25:14 +00:00
static_cast < mozilla : : sig_safe_t > ( aMarkerId ) > = mMarkerPointer ) {
2011-08-27 00:05:37 +00:00
return NULL ;
}
return mMarkers [ aMarkerId ] ;
}
// called within signal. Function must be reentrant
void clearMarkers ( )
{
2012-12-11 19:10:56 +00:00
for ( mozilla : : sig_safe_t i = 0 ; i < mMarkerPointer ; i + + ) {
free ( mMarkers [ i ] ) ;
}
2011-08-27 00:05:37 +00:00
mMarkerPointer = 0 ;
mQueueClearMarker = false ;
}
2012-08-06 18:35:56 +00:00
void push ( const char * aName , uint32_t line )
2012-06-27 03:25:14 +00:00
{
2012-08-06 18:35:56 +00:00
push ( aName , NULL , false , line ) ;
2012-06-27 03:25:14 +00:00
}
2012-08-06 18:35:56 +00:00
void push ( const char * aName , void * aStackAddress , bool aCopy , uint32_t line )
2011-08-27 00:05:37 +00:00
{
2012-03-21 16:06:01 +00:00
if ( size_t ( mStackPointer ) > = mozilla : : ArrayLength ( mStack ) ) {
2012-06-21 00:58:55 +00:00
mStackPointer + + ;
2011-08-27 00:05:37 +00:00
return ;
}
// Make sure we increment the pointer after the name has
// been written such that mStack is always consistent.
2012-08-06 18:35:56 +00:00
mStack [ mStackPointer ] . setLabel ( aName ) ;
mStack [ mStackPointer ] . setStackAddressCopy ( aStackAddress , aCopy ) ;
mStack [ mStackPointer ] . setLine ( line ) ;
2012-06-27 03:25:14 +00:00
2011-08-27 00:05:37 +00:00
// Prevent the optimizer from re-ordering these instructions
2011-12-04 19:09:00 +00:00
STORE_SEQUENCER ( ) ;
2011-08-27 00:05:37 +00:00
mStackPointer + + ;
}
void pop ( )
{
2012-06-21 00:58:55 +00:00
mStackPointer - - ;
2011-08-27 00:05:37 +00:00
}
bool isEmpty ( )
{
return mStackPointer = = 0 ;
}
2012-08-06 18:35:56 +00:00
uint32_t stackSize ( ) const
{
2013-01-15 12:22:03 +00:00
return std : : min < uint32_t > ( mStackPointer , mozilla : : ArrayLength ( mStack ) ) ;
2012-08-06 18:35:56 +00:00
}
2011-08-27 00:05:37 +00:00
2012-06-21 00:58:55 +00:00
void sampleRuntime ( JSRuntime * runtime ) {
mRuntime = runtime ;
2013-01-18 17:04:35 +00:00
if ( ! runtime ) {
// JS shut down
return ;
}
2012-07-09 21:24:23 +00:00
JS_STATIC_ASSERT ( sizeof ( mStack [ 0 ] ) = = sizeof ( js : : ProfileEntry ) ) ;
js : : SetRuntimeProfilingStack ( runtime ,
( js : : ProfileEntry * ) mStack ,
( uint32_t * ) & mStackPointer ,
mozilla : : ArrayLength ( mStack ) ) ;
2012-07-11 02:05:38 +00:00
if ( mStartJSSampling )
2012-07-09 21:24:23 +00:00
enableJSSampling ( ) ;
2012-06-21 00:58:55 +00:00
}
2012-07-09 21:24:23 +00:00
void enableJSSampling ( ) {
2012-07-11 02:05:38 +00:00
if ( mRuntime ) {
2012-07-09 21:24:23 +00:00
js : : EnableRuntimeProfilingStack ( mRuntime , true ) ;
2012-07-11 02:05:38 +00:00
mStartJSSampling = false ;
} else {
mStartJSSampling = true ;
}
2012-06-21 00:58:55 +00:00
}
2012-07-09 21:24:23 +00:00
void disableJSSampling ( ) {
2012-07-11 02:05:38 +00:00
mStartJSSampling = false ;
if ( mRuntime )
2012-07-09 21:24:23 +00:00
js : : EnableRuntimeProfilingStack ( mRuntime , false ) ;
2012-06-21 00:58:55 +00:00
}
2011-08-27 00:05:37 +00:00
// Keep a list of active checkpoints
2012-06-27 03:25:14 +00:00
StackEntry volatile mStack [ 1024 ] ;
2011-08-27 00:05:37 +00:00
// Keep a list of active markers to be applied to the next sample taken
2012-12-11 19:10:56 +00:00
char * mMarkers [ 1024 ] ;
2012-08-06 18:35:56 +00:00
private :
// This may exceed the length of mStack, so instead use the stackSize() method
// to determine the number of valid samples in mStack
2012-12-11 19:10:56 +00:00
mozilla : : sig_safe_t mStackPointer ;
// If this is set then it's not safe to read mStackPointer from the signal handler
volatile bool mSignalLock ;
2012-08-06 18:35:56 +00:00
public :
2011-12-04 19:09:00 +00:00
volatile mozilla : : sig_safe_t mMarkerPointer ;
2011-08-27 00:05:37 +00:00
// We don't want to modify _markers from within the signal so we allow
// it to queue a clear operation.
2011-12-04 19:09:00 +00:00
volatile mozilla : : sig_safe_t mQueueClearMarker ;
2012-06-21 00:58:55 +00:00
// The runtime which is being sampled
JSRuntime * mRuntime ;
2012-07-11 02:05:38 +00:00
// Start JS Profiling when possible
bool mStartJSSampling ;
2011-08-27 00:05:37 +00:00
} ;
2013-03-12 07:14:52 +00:00
// Returns the calling thread's ProfileStack, or NULL when the profiler's
// TLS key has not been initialized (or the thread is not being profiled).
inline ProfileStack* mozilla_profile_stack(void)
{
  return stack_key_initialized ? tlsStack.get() : NULL;
}
2012-08-06 18:35:56 +00:00
inline void * mozilla_sampler_call_enter ( const char * aInfo , void * aFrameAddress ,
bool aCopy , uint32_t line )
2011-08-27 00:05:37 +00:00
{
2011-12-08 15:46:02 +00:00
// check if we've been initialized to avoid calling pthread_getspecific
2012-05-16 21:20:06 +00:00
// with a null tlsStack which will return undefined results.
2011-12-08 15:46:02 +00:00
if ( ! stack_key_initialized )
return NULL ;
2013-03-12 07:14:52 +00:00
ProfileStack * stack = tlsStack . get ( ) ;
2011-12-08 15:46:02 +00:00
// we can't infer whether 'stack' has been initialized
// based on the value of stack_key_intiailized because
// 'stack' is only intialized when a thread is being
// profiled.
2011-08-27 00:05:37 +00:00
if ( ! stack ) {
return stack ;
}
2012-08-06 18:35:56 +00:00
stack - > push ( aInfo , aFrameAddress , aCopy , line ) ;
2011-08-27 00:05:37 +00:00
// The handle is meant to support future changes
// but for now it is simply use to save a call to
// pthread_getspecific on exit. It also supports the
// case where the sampler is initialized between
// enter and exit.
return stack ;
}
inline void mozilla_sampler_call_exit ( void * aHandle )
{
if ( ! aHandle )
return ;
2013-03-12 07:14:52 +00:00
ProfileStack * stack = ( ProfileStack * ) aHandle ;
2011-08-27 00:05:37 +00:00
stack - > pop ( ) ;
}
inline void mozilla_sampler_add_marker ( const char * aMarker )
{
2012-10-23 21:18:37 +00:00
if ( ! stack_key_initialized )
return ;
2012-12-11 19:10:56 +00:00
// Don't insert a marker if we're not profiling to avoid
// the heap copy (malloc).
2013-03-12 07:14:52 +00:00
if ( ! mozilla_sampler_is_active ( ) ) {
2012-12-11 19:10:56 +00:00
return ;
}
2013-03-12 07:14:52 +00:00
ProfileStack * stack = tlsStack . get ( ) ;
2011-08-27 00:05:37 +00:00
if ( ! stack ) {
return ;
}
stack - > addMarker ( aMarker ) ;
}