Bug 518747 - NJ merge: get rid of NJ_LOG2_PAGE_SIZE et al. r=graydon.

Nicholas Nethercote 2009-09-25 16:01:55 +10:00
parent 2a552feffa
commit 5025d78459
9 changed files with 42 additions and 49 deletions

@@ -840,12 +840,13 @@ static Oracle oracle;
  * tracker's responsibility is to map opaque, 4-byte aligned addresses to LIns
  * pointers. To do this efficiently, we observe that the addresses of jsvals
  * living in the interpreter tend to be aggregated close to each other -
- * usually on the same page.
+ * usually on the same page (where a tracker page doesn't have to be the same
+ * size as an OS page, but it's typically similar).
  *
  * For every address, we split it into two values: upper bits which represent
  * the "base", and lower bits which represent an offset against the base. We
  * create a list of:
- *   struct Page {
+ *   struct TrackerPage {
  *       void* base;
  *       LIns* map;
  *   };
@@ -853,18 +854,20 @@ static Oracle oracle;
  *     page = page such that Base(address) == page->base,
  *     page->map[Index(address)]
  *
- * The size of the map is allocated as (PageSize >> 2) * sizeof(LIns*). Since
- * the lower two bits are 0, they are always discounted. The result is the map
- * can store N pointers, where N is (PageSize >> 2).
+ * The size of the map is allocated as N * sizeof(LIns*), where N is
+ * (TRACKER_PAGE_SIZE >> 2). Since the lower two bits are 0, they are always
+ * discounted.
  *
- * PAGEMASK is the "reverse" expression, with a |- 1| to get a mask which
- * separates an address into the Base and Index bits. It is necessary to do
- * all this work rather than use NJ_PAGE_SIZE - 1, because on 64-bit platforms
- * the pointer width is twice as large, and only half as many indexes can fit
- * into Page::map. So the "Base" grows by one bit, and the "Index" shrinks by
- * one bit.
+ * TRACKER_PAGE_MASK is the "reverse" expression, with a |- 1| to get a mask
+ * which separates an address into the Base and Index bits. It is necessary to
+ * do all this work rather than use TRACKER_PAGE_SIZE - 1, because on 64-bit
+ * platforms the pointer width is twice as large, and only half as many
+ * indexes can fit into TrackerPage::map. So the "Base" grows by one bit, and
+ * the "Index" shrinks by one bit.
  */
-#define PAGEMASK (((NJ_PAGE_SIZE / sizeof(void*)) << 2) - 1)
+#define TRACKER_PAGE_MASK (((TRACKER_PAGE_SIZE / sizeof(void*)) << 2) - 1)
+#define TRACKER_PAGE_SIZE 4096

 Tracker::Tracker()
 {
@@ -877,16 +880,16 @@ Tracker::~Tracker()
 }

 jsuword
-Tracker::getPageBase(const void* v) const
+Tracker::getTrackerPageBase(const void* v) const
 {
-    return jsuword(v) & ~jsuword(PAGEMASK);
+    return jsuword(v) & ~jsuword(TRACKER_PAGE_MASK);
 }

-struct Tracker::Page*
-Tracker::findPage(const void* v) const
+struct Tracker::TrackerPage*
+Tracker::findTrackerPage(const void* v) const
 {
-    jsuword base = getPageBase(v);
-    struct Tracker::Page* p = pagelist;
+    jsuword base = getTrackerPageBase(v);
+    struct Tracker::TrackerPage* p = pagelist;
     while (p) {
         if (p->base == base) {
             return p;
@@ -896,11 +899,11 @@ Tracker::findPage(const void* v) const
     return 0;
 }

-struct Tracker::Page*
-Tracker::addPage(const void* v) {
-    jsuword base = getPageBase(v);
-    struct Tracker::Page* p = (struct Tracker::Page*)
-        calloc(1, sizeof(*p) - sizeof(p->map) + (NJ_PAGE_SIZE >> 2) * sizeof(LIns*));
+struct Tracker::TrackerPage*
+Tracker::addTrackerPage(const void* v) {
+    jsuword base = getTrackerPageBase(v);
+    struct Tracker::TrackerPage* p = (struct Tracker::TrackerPage*)
+        calloc(1, sizeof(*p) - sizeof(p->map) + (TRACKER_PAGE_SIZE >> 2) * sizeof(LIns*));
     p->base = base;
     p->next = pagelist;
     pagelist = p;
@@ -911,7 +914,7 @@ void
 Tracker::clear()
 {
     while (pagelist) {
-        Page* p = pagelist;
+        TrackerPage* p = pagelist;
         pagelist = pagelist->next;
         free(p);
     }
@@ -926,19 +929,19 @@ Tracker::has(const void *v) const
 LIns*
 Tracker::get(const void* v) const
 {
-    struct Tracker::Page* p = findPage(v);
+    struct Tracker::TrackerPage* p = findTrackerPage(v);
     if (!p)
         return NULL;
-    return p->map[(jsuword(v) & PAGEMASK) >> 2];
+    return p->map[(jsuword(v) & TRACKER_PAGE_MASK) >> 2];
 }

 void
 Tracker::set(const void* v, LIns* i)
 {
-    struct Tracker::Page* p = findPage(v);
+    struct Tracker::TrackerPage* p = findTrackerPage(v);
     if (!p)
-        p = addPage(v);
-    p->map[(jsuword(v) & PAGEMASK) >> 2] = i;
+        p = addTrackerPage(v);
+    p->map[(jsuword(v) & TRACKER_PAGE_MASK) >> 2] = i;
 }

 static inline jsuint
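For readers who want to see the arithmetic from the Tracker comment earlier in this diff in isolation, here is a minimal, self-contained sketch. It reuses the patch's TRACKER_PAGE_SIZE / TRACKER_PAGE_MASK expressions, but the program itself is illustrative only: it substitutes uintptr_t and void* for jsuword and LIns*, and trackerPageBase / trackerPageIndex are hypothetical helper names, not code from this commit.

// Sketch only: mirrors the patch's constants, but uses uintptr_t/void* in
// place of jsuword/LIns* so it compiles outside SpiderMonkey.
#include <cstddef>
#include <cstdint>
#include <cstdio>

#define TRACKER_PAGE_SIZE 4096
// Same expression as the patch's TRACKER_PAGE_MASK: dividing by the pointer
// width before shifting is what halves the index range (and grows the base
// by one bit) on 64-bit targets.
#define TRACKER_PAGE_MASK (((TRACKER_PAGE_SIZE / sizeof(void*)) << 2) - 1)

static uintptr_t trackerPageBase(const void* v) {
    return uintptr_t(v) & ~uintptr_t(TRACKER_PAGE_MASK);     // the "Base" bits
}

static size_t trackerPageIndex(const void* v) {
    // 4-byte aligned addresses have zero low bits, so they are shifted away.
    return (uintptr_t(v) & TRACKER_PAGE_MASK) >> 2;
}

int main() {
    int slot = 0;                   // stand-in for a jsval in the interpreter
    const void* addr = &slot;
    printf("base  = %#zx\n", size_t(trackerPageBase(addr)));
    printf("index = %zu (of %zu slots per tracker page)\n",
           trackerPageIndex(addr), size_t(TRACKER_PAGE_SIZE / sizeof(void*)));
    return 0;
}

On a 32-bit build this reports 1024 slots per tracker page; on a 64-bit build it reports 512, which is the "Index shrinks by one bit" case the comment calls out.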

@@ -157,19 +157,20 @@ public:
 /*
  * Tracker is used to keep track of values being manipulated by the interpreter
- * during trace recording.
+ * during trace recording. Note that tracker pages aren't necessarily the
+ * same size as OS pages; they are just moderately sized chunks of memory.
  */
 class Tracker {
-    struct Page {
-        struct Page* next;
-        jsuword base;
-        nanojit::LIns* map[1];
+    struct TrackerPage {
+        struct TrackerPage* next;
+        jsuword base;
+        nanojit::LIns* map[1];
     };
-    struct Page* pagelist;
+    struct TrackerPage* pagelist;

-    jsuword getPageBase(const void* v) const;
-    struct Page* findPage(const void* v) const;
-    struct Page* addPage(const void* v);
+    jsuword getTrackerPageBase(const void* v) const;
+    struct TrackerPage* findTrackerPage(const void* v) const;
+    struct TrackerPage* addTrackerPage(const void* v);

 public:
     Tracker();
     ~Tracker();
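One detail worth calling out in the class above: map[1] is the classic over-allocation idiom. Tracker::addTrackerPage(), shown earlier in this patch, calloc()s the page header and its map as a single block, sized sizeof(*p) - sizeof(p->map) + N * sizeof(LIns*). The sketch below shows just that sizing pattern under assumed names (DemoPage, allocDemoPage); it is an illustration, not part of the commit.

// Sketch only: DemoPage stands in for Tracker::TrackerPage; the calloc sizing
// has the same shape as in Tracker::addTrackerPage().
#include <cstdint>
#include <cstdio>
#include <cstdlib>

struct DemoPage {
    DemoPage* next;
    uintptr_t base;
    void*     map[1];   // really N slots, thanks to the over-allocation below
};

static DemoPage* allocDemoPage(uintptr_t base, size_t slots) {
    // Header minus the single declared map slot, plus the real map.
    DemoPage* p = (DemoPage*)
        calloc(1, sizeof(*p) - sizeof(p->map) + slots * sizeof(void*));
    if (!p)
        return NULL;
    p->base = base;     // calloc has already zeroed next and every map slot
    return p;
}

int main() {
    const size_t slots = 4096 >> 2;        // mirrors TRACKER_PAGE_SIZE >> 2
    DemoPage* p = allocDemoPage(0x1000, slots);
    if (!p)
        return 1;
    int dummy = 0;
    // Indexing past map[1] is the same over-allocation trick the tracker
    // itself relies on; the last slot really is backed by the allocation.
    p->map[slots - 1] = &dummy;
    printf("map[%zu] = %p\n", slots - 1, p->map[slots - 1]);
    free(p);
    return 0;
}

Keeping the header and map in one allocation is also why Tracker::clear() can release each page with a single free(p).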

@@ -69,8 +69,6 @@
 #endif
 namespace nanojit {
-const size_t NJ_PAGE_SIZE = 1 << NJ_LOG2_PAGE_SIZE;
 class Fragment;
 struct SideExit;
 struct SwitchInfo;

@@ -56,9 +56,6 @@
 namespace nanojit
 {
-const int NJ_LOG2_PAGE_SIZE = 12; // 4K
 // only d0-d6 are actually used; we'll use d7 as s14-s15 for i2f/u2f/etc.
 #define NJ_VFP_MAX_REGISTERS 8
 #define NJ_MAX_REGISTERS (11 + NJ_VFP_MAX_REGISTERS)

@@ -54,8 +54,6 @@
 namespace nanojit
 {
-const int NJ_LOG2_PAGE_SIZE = 12; // 4K
 #define NJ_MAX_STACK_ENTRY 256
 #define NJ_ALIGN_STACK 16

@@ -65,7 +65,6 @@ namespace nanojit
 static const int kLinkageAreaSize = 68;
 static const int kcalleeAreaSize = 80; // The max size.
-static const int NJ_PAGE_SIZE_SPARC = 8192; // Use sparc page size here.
 #define BIT_ROUND_UP(v,q) ( (((uintptr_t)v)+(q)-1) & ~((q)-1) )
 #define TODO(x) do{ verbose_only(outputf(#x);) NanoAssertMsgf(false, "%s", #x); } while(0)

@@ -67,7 +67,6 @@
 namespace nanojit
 {
-const int NJ_LOG2_PAGE_SIZE = 13; // 8K
 const int NJ_MAX_REGISTERS = 30; // L0 - L7, I0 - I5, F2 - F14
 const int LARGEST_UNDERRUN_PROT = 32; // largest value passed to underrunProtect

@@ -58,7 +58,6 @@
 namespace nanojit
 {
-const int NJ_LOG2_PAGE_SIZE = 12; // 4K
 #define NJ_MAX_STACK_ENTRY 256
 #define NJ_ALIGN_STACK 16

@@ -91,7 +91,6 @@
 namespace nanojit
 {
-const int NJ_LOG2_PAGE_SIZE = 12; // 4K
 const int NJ_MAX_REGISTERS = 24; // gpregs, x87 regs, xmm regs
 #define NJ_MAX_STACK_ENTRY 256