make JavaScriptCore use Katie macros for assertions where possible

Signed-off-by: Ivailo Monev <xakepa10@laimg.moc>
This commit is contained in:
Ivailo Monev 2016-08-07 22:11:45 +00:00
parent 2ec2bd2af3
commit d2c6af0269
153 changed files with 1383 additions and 1467 deletions

View file

@ -51,20 +51,20 @@ typedef struct OpaqueJSValue* JSObjectRef;
inline JSC::ExecState* toJS(JSContextRef c)
{
ASSERT(c);
Q_ASSERT(c);
return reinterpret_cast<JSC::ExecState*>(const_cast<OpaqueJSContext*>(c));
}
inline JSC::ExecState* toJS(JSGlobalContextRef c)
{
ASSERT(c);
Q_ASSERT(c);
return reinterpret_cast<JSC::ExecState*>(c);
}
inline JSC::JSValue toJS(JSC::ExecState* exec, JSValueRef v)
{
ASSERT_UNUSED(exec, exec);
ASSERT(v);
Q_ASSERT(v);
#if USE(JSVALUE32_64)
JSC::JSCell* jsCell = reinterpret_cast<JSC::JSCell*>(const_cast<OpaqueJSValue*>(v));
if (!jsCell)
@ -80,7 +80,7 @@ inline JSC::JSValue toJS(JSC::ExecState* exec, JSValueRef v)
inline JSC::JSValue toJSForGC(JSC::ExecState* exec, JSValueRef v)
{
ASSERT_UNUSED(exec, exec);
ASSERT(v);
Q_ASSERT(v);
#if USE(JSVALUE32_64)
JSC::JSCell* jsCell = reinterpret_cast<JSC::JSCell*>(const_cast<OpaqueJSValue*>(v));
if (!jsCell)
@ -137,7 +137,7 @@ inline JSContextRef toRef(JSC::ExecState* e)
inline JSGlobalContextRef toGlobalRef(JSC::ExecState* e)
{
ASSERT(e == e->lexicalGlobalObject()->globalExec());
Q_ASSERT(e == e->lexicalGlobalObject()->globalExec());
return reinterpret_cast<JSGlobalContextRef>(e);
}

View file

@ -42,7 +42,7 @@ namespace JSC {
template <class Base>
inline JSCallbackObject<Base>* JSCallbackObject<Base>::asCallbackObject(JSValue value)
{
ASSERT(asObject(value)->inherits(&info));
Q_ASSERT(asObject(value)->inherits(&info));
return static_cast<JSCallbackObject*>(asObject(value));
}
@ -61,14 +61,14 @@ JSCallbackObject<Base>::JSCallbackObject(JSClassRef jsClass)
: Base()
, m_callbackObjectData(new JSCallbackObjectData(0, jsClass))
{
ASSERT(Base::isGlobalObject());
Q_ASSERT(Base::isGlobalObject());
init(static_cast<JSGlobalObject*>(this)->globalExec());
}
template <class Base>
void JSCallbackObject<Base>::init(ExecState* exec)
{
ASSERT(exec);
Q_ASSERT(exec);
Vector<JSObjectInitializeCallback, 16> initRoutines;
JSClassRef jsClass = classRef();

View file

@ -85,12 +85,12 @@ OpaqueJSClass::OpaqueJSClass(const JSClassDefinition* definition, OpaqueJSClass*
OpaqueJSClass::~OpaqueJSClass()
{
ASSERT(!m_className.rep()->isIdentifier());
Q_ASSERT(!m_className.rep()->isIdentifier());
if (m_staticValues) {
OpaqueJSClassStaticValuesTable::const_iterator end = m_staticValues->end();
for (OpaqueJSClassStaticValuesTable::const_iterator it = m_staticValues->begin(); it != end; ++it) {
ASSERT(!it->first->isIdentifier());
Q_ASSERT(!it->first->isIdentifier());
delete it->second;
}
delete m_staticValues;
@ -99,7 +99,7 @@ OpaqueJSClass::~OpaqueJSClass()
if (m_staticFunctions) {
OpaqueJSClassStaticFunctionsTable::const_iterator end = m_staticFunctions->end();
for (OpaqueJSClassStaticFunctionsTable::const_iterator it = m_staticFunctions->begin(); it != end; ++it) {
ASSERT(!it->first->isIdentifier());
Q_ASSERT(!it->first->isIdentifier());
delete it->second;
}
delete m_staticFunctions;
@ -117,7 +117,7 @@ PassRefPtr<OpaqueJSClass> OpaqueJSClass::createNoAutomaticPrototype(const JSClas
static void clearReferenceToPrototype(JSObjectRef prototype)
{
OpaqueJSClassContextData* jsClassData = static_cast<OpaqueJSClassContextData*>(JSObjectGetPrivate(prototype));
ASSERT(jsClassData);
Q_ASSERT(jsClassData);
jsClassData->cachedPrototype = 0;
}
@ -142,7 +142,7 @@ OpaqueJSClassContextData::OpaqueJSClassContextData(OpaqueJSClass* jsClass)
staticValues = new OpaqueJSClassStaticValuesTable;
OpaqueJSClassStaticValuesTable::const_iterator end = jsClass->m_staticValues->end();
for (OpaqueJSClassStaticValuesTable::const_iterator it = jsClass->m_staticValues->begin(); it != end; ++it) {
ASSERT(!it->first->isIdentifier());
Q_ASSERT(!it->first->isIdentifier());
// Use a local variable here to sidestep an RVCT compiler bug.
StaticValueEntry* entry = new StaticValueEntry(it->second->getProperty, it->second->setProperty, it->second->attributes);
staticValues->add(UString::Rep::create(it->first->data(), it->first->size()), entry);
@ -157,7 +157,7 @@ OpaqueJSClassContextData::OpaqueJSClassContextData(OpaqueJSClass* jsClass)
staticFunctions = new OpaqueJSClassStaticFunctionsTable;
OpaqueJSClassStaticFunctionsTable::const_iterator end = jsClass->m_staticFunctions->end();
for (OpaqueJSClassStaticFunctionsTable::const_iterator it = jsClass->m_staticFunctions->begin(); it != end; ++it) {
ASSERT(!it->first->isIdentifier());
Q_ASSERT(!it->first->isIdentifier());
// Use a local variable here to sidestep an RVCT compiler bug.
StaticFunctionEntry* entry = new StaticFunctionEntry(it->second->callAsFunction, it->second->attributes);
staticFunctions->add(UString::Rep::create(it->first->data(), it->first->size()), entry);

View file

@ -61,7 +61,7 @@ using namespace JSC;
return kJSTypeNumber;
if (jsValue.isString())
return kJSTypeString;
ASSERT(jsValue.isObject());
Q_ASSERT(jsValue.isObject());
return kJSTypeObject;
}

View file

@ -40,10 +40,10 @@ void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
ARMWord diff = reinterpret_cast<ARMWord*>(constPoolAddr) - ldr;
ARMWord index = (*ldr & 0xfff) >> 1;
ASSERT(diff >= 1);
Q_ASSERT(diff >= 1);
if (diff >= 2 || index > 0) {
diff = (diff + index - 2) * sizeof(ARMWord);
ASSERT(diff <= 0xfff);
Q_ASSERT(diff <= 0xfff);
*ldr = (*ldr & ~0xfff) | diff;
} else
*ldr = (*ldr & ~(0xfff | ARMAssembler::DT_UP)) | sizeof(ARMWord);
@ -126,7 +126,7 @@ int ARMAssembler::genInt(int reg, ARMWord imm, bool positive)
}
}
ASSERT((imm & 0xff) == 0);
Q_ASSERT((imm & 0xff) == 0);
if ((imm & 0xff000000) == 0) {
imm1 = OP2_IMM | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
@ -292,7 +292,7 @@ void ARMAssembler::baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterI
{
ARMWord op2;
ASSERT(scale >= 0 && scale <= 3);
Q_ASSERT(scale >= 0 && scale <= 3);
op2 = lsl(index, scale);
if (offset >= 0 && offset <= 0xfff) {

View file

@ -218,7 +218,7 @@ namespace JSC {
: m_offset(offset)
, m_used(false)
{
ASSERT(m_offset == offset);
Q_ASSERT(m_offset == offset);
}
int m_offset : 31;
@ -346,13 +346,13 @@ namespace JSC {
#if WTF_ARM_ARCH_AT_LEAST(7)
void movw_r(int rd, ARMWord op2, Condition cc = AL)
{
ASSERT((op2 | 0xf0fff) == 0xf0fff);
Q_ASSERT((op2 | 0xf0fff) == 0xf0fff);
m_buffer.putInt(static_cast<ARMWord>(cc) | MOVW | RD(rd) | op2);
}
void movt_r(int rd, ARMWord op2, Condition cc = AL)
{
ASSERT((op2 | 0xf0fff) == 0xf0fff);
Q_ASSERT((op2 | 0xf0fff) == 0xf0fff);
m_buffer.putInt(static_cast<ARMWord>(cc) | MOVT | RD(rd) | op2);
}
#endif
@ -474,25 +474,25 @@ namespace JSC {
void fdtr_u(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL)
{
ASSERT(op2 <= 0xff);
Q_ASSERT(op2 <= 0xff);
emitInst(static_cast<ARMWord>(cc) | FDTR | DT_UP | (isLoad ? DT_LOAD : 0), rd, rb, op2);
}
void fdtr_d(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL)
{
ASSERT(op2 <= 0xff);
Q_ASSERT(op2 <= 0xff);
emitInst(static_cast<ARMWord>(cc) | FDTR | (isLoad ? DT_LOAD : 0), rd, rb, op2);
}
void push_r(int reg, Condition cc = AL)
{
ASSERT(ARMWord(reg) <= 0xf);
Q_ASSERT(ARMWord(reg) <= 0xf);
m_buffer.putInt(cc | DTR | DT_WB | RN(ARMRegisters::sp) | RD(reg) | 0x4);
}
void pop_r(int reg, Condition cc = AL)
{
ASSERT(ARMWord(reg) <= 0xf);
Q_ASSERT(ARMWord(reg) <= 0xf);
m_buffer.putInt(cc | (DTR ^ DT_PRE) | DT_LOAD | DT_UP | RN(ARMRegisters::sp) | RD(reg) | 0x4);
}
@ -550,43 +550,43 @@ namespace JSC {
static ARMWord lsl(int reg, ARMWord value)
{
ASSERT(reg <= ARMRegisters::pc);
ASSERT(value <= 0x1f);
Q_ASSERT(reg <= ARMRegisters::pc);
Q_ASSERT(value <= 0x1f);
return reg | (value << 7) | 0x00;
}
static ARMWord lsr(int reg, ARMWord value)
{
ASSERT(reg <= ARMRegisters::pc);
ASSERT(value <= 0x1f);
Q_ASSERT(reg <= ARMRegisters::pc);
Q_ASSERT(value <= 0x1f);
return reg | (value << 7) | 0x20;
}
static ARMWord asr(int reg, ARMWord value)
{
ASSERT(reg <= ARMRegisters::pc);
ASSERT(value <= 0x1f);
Q_ASSERT(reg <= ARMRegisters::pc);
Q_ASSERT(value <= 0x1f);
return reg | (value << 7) | 0x40;
}
static ARMWord lsl_r(int reg, int shiftReg)
{
ASSERT(reg <= ARMRegisters::pc);
ASSERT(shiftReg <= ARMRegisters::pc);
Q_ASSERT(reg <= ARMRegisters::pc);
Q_ASSERT(shiftReg <= ARMRegisters::pc);
return reg | (shiftReg << 8) | 0x10;
}
static ARMWord lsr_r(int reg, int shiftReg)
{
ASSERT(reg <= ARMRegisters::pc);
ASSERT(shiftReg <= ARMRegisters::pc);
Q_ASSERT(reg <= ARMRegisters::pc);
Q_ASSERT(shiftReg <= ARMRegisters::pc);
return reg | (shiftReg << 8) | 0x30;
}
static ARMWord asr_r(int reg, int shiftReg)
{
ASSERT(reg <= ARMRegisters::pc);
ASSERT(shiftReg <= ARMRegisters::pc);
Q_ASSERT(reg <= ARMRegisters::pc);
Q_ASSERT(shiftReg <= ARMRegisters::pc);
return reg | (shiftReg << 8) | 0x50;
}
@ -636,7 +636,7 @@ namespace JSC {
static ARMWord* getLdrImmAddress(ARMWord* insn)
{
// Must be an ldr ..., [pc +/- imm]
ASSERT((*insn & 0x0f7f0000) == 0x051f0000);
Q_ASSERT((*insn & 0x0f7f0000) == 0x051f0000);
ARMWord addr = reinterpret_cast<ARMWord>(insn) + DefaultPrefetching * sizeof(ARMWord);
if (*insn & DT_UP)
@ -647,7 +647,7 @@ namespace JSC {
static ARMWord* getLdrImmAddressOnPool(ARMWord* insn, uint32_t* constPool)
{
// Must be an ldr ..., [pc +/- imm]
ASSERT((*insn & 0x0f7f0000) == 0x051f0000);
Q_ASSERT((*insn & 0x0f7f0000) == 0x051f0000);
if (*insn & 0x1)
return reinterpret_cast<ARMWord*>(constPool + ((*insn & SDT_OFFSET_MASK) >> 1));
@ -664,7 +664,7 @@ namespace JSC {
static ARMWord patchConstantPoolLoad(ARMWord load, ARMWord value)
{
value = (value << 1) + 1;
ASSERT(!(value & ~0xfff));
Q_ASSERT(!(value & ~0xfff));
return (load & ~0xfff) | value;
}
@ -692,7 +692,7 @@ namespace JSC {
// On arm, this is a patch from LDR to ADD. It is restricted conversion,
// from special case to special case, altough enough for its purpose
ARMWord* insn = reinterpret_cast<ARMWord*>(from);
ASSERT((*insn & 0x0ff00f00) == 0x05900000);
Q_ASSERT((*insn & 0x0ff00f00) == 0x05900000);
*insn = (*insn & 0xf00ff0ff) | 0x02800000;
ExecutableAllocator::cacheFlush(insn, sizeof(ARMWord));
@ -760,7 +760,7 @@ namespace JSC {
static ARMWord getOp2Byte(ARMWord imm)
{
ASSERT(imm <= 0xff);
Q_ASSERT(imm <= 0xff);
return OP2_IMMh | (imm & 0x0f) | ((imm & 0xf0) << 4) ;
}
@ -789,32 +789,32 @@ namespace JSC {
static ARMWord placeConstantPoolBarrier(int offset)
{
offset = (offset - sizeof(ARMWord)) >> 2;
ASSERT((offset <= BOFFSET_MAX && offset >= BOFFSET_MIN));
Q_ASSERT((offset <= BOFFSET_MAX && offset >= BOFFSET_MIN));
return AL | B | (offset & BRANCH_MASK);
}
private:
ARMWord RM(int reg)
{
ASSERT(reg <= ARMRegisters::pc);
Q_ASSERT(reg <= ARMRegisters::pc);
return reg;
}
ARMWord RS(int reg)
{
ASSERT(reg <= ARMRegisters::pc);
Q_ASSERT(reg <= ARMRegisters::pc);
return reg << 8;
}
ARMWord RD(int reg)
{
ASSERT(reg <= ARMRegisters::pc);
Q_ASSERT(reg <= ARMRegisters::pc);
return reg << 12;
}
ARMWord RN(int reg)
{
ASSERT(reg <= ARMRegisters::pc);
Q_ASSERT(reg <= ARMRegisters::pc);
return reg << 16;
}

View file

@ -259,7 +259,7 @@ public:
int32_t leadingZeros = countLeadingZeros(value);
// if there were 24 or more leading zeros, then we'd have hit the (value < 256) case.
ASSERT(leadingZeros < 24);
Q_ASSERT(leadingZeros < 24);
// Given a number with bit fields Z:B:C, where count(Z)+count(B)+count(C) == 32,
// Z are the bits known zero, B is the 8-bit immediate, C are the bits to check for
@ -337,16 +337,16 @@ public:
bool isUInt10() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xfc00); }
bool isUInt12() { return (m_type == TypeUInt16) && !(m_value.asInt & 0xf000); }
bool isUInt16() { return m_type == TypeUInt16; }
uint8_t getUInt3() { ASSERT(isUInt3()); return m_value.asInt; }
uint8_t getUInt4() { ASSERT(isUInt4()); return m_value.asInt; }
uint8_t getUInt5() { ASSERT(isUInt5()); return m_value.asInt; }
uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; }
uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; }
uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; }
uint8_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; }
uint8_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; }
uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; }
uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; }
uint8_t getUInt3() { Q_ASSERT(isUInt3()); return m_value.asInt; }
uint8_t getUInt4() { Q_ASSERT(isUInt4()); return m_value.asInt; }
uint8_t getUInt5() { Q_ASSERT(isUInt5()); return m_value.asInt; }
uint8_t getUInt6() { Q_ASSERT(isUInt6()); return m_value.asInt; }
uint8_t getUInt7() { Q_ASSERT(isUInt7()); return m_value.asInt; }
uint8_t getUInt8() { Q_ASSERT(isUInt8()); return m_value.asInt; }
uint8_t getUInt9() { Q_ASSERT(isUInt9()); return m_value.asInt; }
uint8_t getUInt10() { Q_ASSERT(isUInt10()); return m_value.asInt; }
uint16_t getUInt12() { Q_ASSERT(isUInt12()); return m_value.asInt; }
uint16_t getUInt16() { Q_ASSERT(isUInt16()); return m_value.asInt; }
bool isEncodedImm() { return m_type == TypeEncoded; }
@ -414,7 +414,7 @@ class ARMv7Assembler {
public:
~ARMv7Assembler()
{
ASSERT(m_jumpsToLink.isEmpty());
Q_ASSERT(m_jumpsToLink.isEmpty());
}
typedef ARMRegisters::RegisterID RegisterID;
@ -478,7 +478,7 @@ public:
: m_offset(offset)
, m_used(false)
{
ASSERT(m_offset == offset);
Q_ASSERT(m_offset == offset);
}
int m_offset : 31;
@ -523,19 +523,19 @@ private:
uint32_t singleRegisterNum(FPRegisterID reg)
{
ASSERT(isSingleRegister(reg));
Q_ASSERT(isSingleRegister(reg));
return reg;
}
uint32_t doubleRegisterNum(FPRegisterID reg)
{
ASSERT(isDoubleRegister(reg));
Q_ASSERT(isDoubleRegister(reg));
return reg >> 1;
}
uint32_t quadRegisterNum(FPRegisterID reg)
{
ASSERT(isQuadRegister(reg));
Q_ASSERT(isQuadRegister(reg));
return reg >> 2;
}
@ -685,7 +685,7 @@ private:
| (ifThenElseConditionBit(condition, inst3if) << 2)
| (ifThenElseConditionBit(condition, inst4if) << 1)
| 1;
ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
Q_ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
return (condition << 4) | mask;
}
uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if)
@ -693,21 +693,21 @@ private:
int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
| (ifThenElseConditionBit(condition, inst3if) << 2)
| 2;
ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
Q_ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
return (condition << 4) | mask;
}
uint8_t ifThenElse(Condition condition, bool inst2if)
{
int mask = (ifThenElseConditionBit(condition, inst2if) << 3)
| 4;
ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
Q_ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
return (condition << 4) | mask;
}
uint8_t ifThenElse(Condition condition)
{
int mask = 8;
ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
Q_ASSERT((condition != ConditionAL) || (mask & (mask - 1)));
return (condition << 4) | mask;
}
@ -716,10 +716,10 @@ public:
void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
{
// Rd can only be SP if Rn is also SP.
ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
ASSERT(rd != ARMRegisters::pc);
ASSERT(rn != ARMRegisters::pc);
ASSERT(imm.isValid());
Q_ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
Q_ASSERT(rd != ARMRegisters::pc);
Q_ASSERT(rn != ARMRegisters::pc);
Q_ASSERT(imm.isValid());
if (rn == ARMRegisters::sp) {
if (!(rd & 8) && imm.isUInt10()) {
@ -742,17 +742,17 @@ public:
if (imm.isEncodedImm())
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T3, rn, rd, imm);
else {
ASSERT(imm.isUInt12());
Q_ASSERT(imm.isUInt12());
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADD_imm_T4, rn, rd, imm);
}
}
void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
{
ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
ASSERT(rd != ARMRegisters::pc);
ASSERT(rn != ARMRegisters::pc);
ASSERT(!BadReg(rm));
Q_ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
Q_ASSERT(rd != ARMRegisters::pc);
Q_ASSERT(rn != ARMRegisters::pc);
Q_ASSERT(!BadReg(rm));
m_formatter.twoWordOp12Reg4FourFours(OP_ADD_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
}
@ -773,10 +773,10 @@ public:
void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
{
// Rd can only be SP if Rn is also SP.
ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
ASSERT(rd != ARMRegisters::pc);
ASSERT(rn != ARMRegisters::pc);
ASSERT(imm.isEncodedImm());
Q_ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
Q_ASSERT(rd != ARMRegisters::pc);
Q_ASSERT(rn != ARMRegisters::pc);
Q_ASSERT(imm.isEncodedImm());
if (!((rd | rn) & 8)) {
if (imm.isUInt3()) {
@ -794,10 +794,10 @@ public:
// Not allowed in an IT (if then) block?
void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
{
ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
ASSERT(rd != ARMRegisters::pc);
ASSERT(rn != ARMRegisters::pc);
ASSERT(!BadReg(rm));
Q_ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
Q_ASSERT(rd != ARMRegisters::pc);
Q_ASSERT(rn != ARMRegisters::pc);
Q_ASSERT(!BadReg(rm));
m_formatter.twoWordOp12Reg4FourFours(OP_ADD_S_reg_T3, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
}
@ -812,17 +812,17 @@ public:
void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
{
ASSERT(!BadReg(rd));
ASSERT(!BadReg(rn));
ASSERT(imm.isEncodedImm());
Q_ASSERT(!BadReg(rd));
Q_ASSERT(!BadReg(rn));
Q_ASSERT(imm.isEncodedImm());
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm);
}
void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
{
ASSERT(!BadReg(rd));
ASSERT(!BadReg(rn));
ASSERT(!BadReg(rm));
Q_ASSERT(!BadReg(rd));
Q_ASSERT(!BadReg(rn));
Q_ASSERT(!BadReg(rm));
m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
}
@ -838,17 +838,17 @@ public:
void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
{
ASSERT(!BadReg(rd));
ASSERT(!BadReg(rm));
Q_ASSERT(!BadReg(rd));
Q_ASSERT(!BadReg(rm));
ShiftTypeAndAmount shift(SRType_ASR, shiftAmount);
m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
}
void asr(RegisterID rd, RegisterID rn, RegisterID rm)
{
ASSERT(!BadReg(rd));
ASSERT(!BadReg(rn));
ASSERT(!BadReg(rm));
Q_ASSERT(!BadReg(rd));
Q_ASSERT(!BadReg(rn));
Q_ASSERT(!BadReg(rm));
m_formatter.twoWordOp12Reg4FourFours(OP_ASR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
}
@ -862,7 +862,7 @@ public:
// Only allowed in IT (if then) block if last instruction.
JmpSrc blx(RegisterID rm)
{
ASSERT(rm != ARMRegisters::pc);
Q_ASSERT(rm != ARMRegisters::pc);
m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8);
return JmpSrc(m_formatter.size());
}
@ -881,16 +881,16 @@ public:
void cmn(RegisterID rn, ARMThumbImmediate imm)
{
ASSERT(rn != ARMRegisters::pc);
ASSERT(imm.isEncodedImm());
Q_ASSERT(rn != ARMRegisters::pc);
Q_ASSERT(imm.isEncodedImm());
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm);
}
void cmp(RegisterID rn, ARMThumbImmediate imm)
{
ASSERT(rn != ARMRegisters::pc);
ASSERT(imm.isEncodedImm());
Q_ASSERT(rn != ARMRegisters::pc);
Q_ASSERT(imm.isEncodedImm());
if (!(rn & 8) && imm.isUInt8())
m_formatter.oneWordOp5Reg3Imm8(OP_CMP_imm_T1, rn, imm.getUInt8());
@ -900,8 +900,8 @@ public:
void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
{
ASSERT(rn != ARMRegisters::pc);
ASSERT(!BadReg(rm));
Q_ASSERT(rn != ARMRegisters::pc);
Q_ASSERT(!BadReg(rm));
m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
}
@ -916,18 +916,18 @@ public:
// xor is not spelled with an 'e'. :-(
void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
{
ASSERT(!BadReg(rd));
ASSERT(!BadReg(rn));
ASSERT(imm.isEncodedImm());
Q_ASSERT(!BadReg(rd));
Q_ASSERT(!BadReg(rn));
Q_ASSERT(imm.isEncodedImm());
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_EOR_imm_T1, rn, rd, imm);
}
// xor is not spelled with an 'e'. :-(
void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
{
ASSERT(!BadReg(rd));
ASSERT(!BadReg(rn));
ASSERT(!BadReg(rm));
Q_ASSERT(!BadReg(rd));
Q_ASSERT(!BadReg(rn));
Q_ASSERT(!BadReg(rm));
m_formatter.twoWordOp12Reg4FourFours(OP_EOR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
}
@ -965,8 +965,8 @@ public:
// rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
{
ASSERT(rn != ARMRegisters::pc); // LDR (literal)
ASSERT(imm.isUInt12());
Q_ASSERT(rn != ARMRegisters::pc); // LDR (literal)
Q_ASSERT(imm.isUInt12());
if (!((rt | rn) & 8) && imm.isUInt7())
m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt);
@ -989,17 +989,17 @@ public:
// if (wback) REG[rn] = _tmp
void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
{
ASSERT(rt != ARMRegisters::pc);
ASSERT(rn != ARMRegisters::pc);
ASSERT(index || wback);
ASSERT(!wback | (rt != rn));
Q_ASSERT(rt != ARMRegisters::pc);
Q_ASSERT(rn != ARMRegisters::pc);
Q_ASSERT(index || wback);
Q_ASSERT(!wback | (rt != rn));
bool add = true;
if (offset < 0) {
add = false;
offset = -offset;
}
ASSERT((offset & ~0xff) == 0);
Q_ASSERT((offset & ~0xff) == 0);
offset |= (wback << 8);
offset |= (add << 9);
@ -1012,9 +1012,9 @@ public:
// rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0)
{
ASSERT(rn != ARMRegisters::pc); // LDR (literal)
ASSERT(!BadReg(rm));
ASSERT(shift <= 3);
Q_ASSERT(rn != ARMRegisters::pc); // LDR (literal)
Q_ASSERT(!BadReg(rm));
Q_ASSERT(shift <= 3);
if (!shift && !((rt | rn | rm) & 8))
m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDR_reg_T1, rm, rn, rt);
@ -1025,8 +1025,8 @@ public:
// rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
{
ASSERT(rn != ARMRegisters::pc); // LDR (literal)
ASSERT(imm.isUInt12());
Q_ASSERT(rn != ARMRegisters::pc); // LDR (literal)
Q_ASSERT(imm.isUInt12());
if (!((rt | rn) & 8) && imm.isUInt6())
m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt);
@ -1047,17 +1047,17 @@ public:
// if (wback) REG[rn] = _tmp
void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
{
ASSERT(rt != ARMRegisters::pc);
ASSERT(rn != ARMRegisters::pc);
ASSERT(index || wback);
ASSERT(!wback | (rt != rn));
Q_ASSERT(rt != ARMRegisters::pc);
Q_ASSERT(rn != ARMRegisters::pc);
Q_ASSERT(index || wback);
Q_ASSERT(!wback | (rt != rn));
bool add = true;
if (offset < 0) {
add = false;
offset = -offset;
}
ASSERT((offset & ~0xff) == 0);
Q_ASSERT((offset & ~0xff) == 0);
offset |= (wback << 8);
offset |= (add << 9);
@ -1069,10 +1069,10 @@ public:
void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0)
{
ASSERT(!BadReg(rt)); // Memory hint
ASSERT(rn != ARMRegisters::pc); // LDRH (literal)
ASSERT(!BadReg(rm));
ASSERT(shift <= 3);
Q_ASSERT(!BadReg(rt)); // Memory hint
Q_ASSERT(rn != ARMRegisters::pc); // LDRH (literal)
Q_ASSERT(!BadReg(rm));
Q_ASSERT(shift <= 3);
if (!shift && !((rt | rn | rm) & 8))
m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRH_reg_T1, rm, rn, rt);
@ -1082,49 +1082,49 @@ public:
void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount)
{
ASSERT(!BadReg(rd));
ASSERT(!BadReg(rm));
Q_ASSERT(!BadReg(rd));
Q_ASSERT(!BadReg(rm));
ShiftTypeAndAmount shift(SRType_LSL, shiftAmount);
m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
}
void lsl(RegisterID rd, RegisterID rn, RegisterID rm)
{
ASSERT(!BadReg(rd));
ASSERT(!BadReg(rn));
ASSERT(!BadReg(rm));
Q_ASSERT(!BadReg(rd));
Q_ASSERT(!BadReg(rn));
Q_ASSERT(!BadReg(rm));
m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm));
}
void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount)
{
ASSERT(!BadReg(rd));
ASSERT(!BadReg(rm));
Q_ASSERT(!BadReg(rd));
Q_ASSERT(!BadReg(rm));
ShiftTypeAndAmount shift(SRType_LSR, shiftAmount);
m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
}
void lsr(RegisterID rd, RegisterID rn, RegisterID rm)
{
ASSERT(!BadReg(rd));
ASSERT(!BadReg(rn));
ASSERT(!BadReg(rm));
Q_ASSERT(!BadReg(rd));
Q_ASSERT(!BadReg(rn));
Q_ASSERT(!BadReg(rm));
m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
}
void movT3(RegisterID rd, ARMThumbImmediate imm)
{
ASSERT(imm.isValid());
ASSERT(!imm.isEncodedImm());
ASSERT(!BadReg(rd));
Q_ASSERT(imm.isValid());
Q_ASSERT(!imm.isEncodedImm());
Q_ASSERT(!BadReg(rd));
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm);
}
void mov(RegisterID rd, ARMThumbImmediate imm)
{
ASSERT(imm.isValid());
ASSERT(!BadReg(rd));
Q_ASSERT(imm.isValid());
Q_ASSERT(!BadReg(rd));
if ((rd < 8) && imm.isUInt8())
m_formatter.oneWordOp5Reg3Imm8(OP_MOV_imm_T1, rd, imm.getUInt8());
@ -1141,23 +1141,23 @@ public:
void movt(RegisterID rd, ARMThumbImmediate imm)
{
ASSERT(imm.isUInt16());
ASSERT(!BadReg(rd));
Q_ASSERT(imm.isUInt16());
Q_ASSERT(!BadReg(rd));
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm);
}
void mvn(RegisterID rd, ARMThumbImmediate imm)
{
ASSERT(imm.isEncodedImm());
ASSERT(!BadReg(rd));
Q_ASSERT(imm.isEncodedImm());
Q_ASSERT(!BadReg(rd));
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm);
}
void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift)
{
ASSERT(!BadReg(rd));
ASSERT(!BadReg(rm));
Q_ASSERT(!BadReg(rd));
Q_ASSERT(!BadReg(rm));
m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm));
}
@ -1171,17 +1171,17 @@ public:
void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
{
ASSERT(!BadReg(rd));
ASSERT(!BadReg(rn));
ASSERT(imm.isEncodedImm());
Q_ASSERT(!BadReg(rd));
Q_ASSERT(!BadReg(rn));
Q_ASSERT(imm.isEncodedImm());
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm);
}
void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
{
ASSERT(!BadReg(rd));
ASSERT(!BadReg(rn));
ASSERT(!BadReg(rm));
Q_ASSERT(!BadReg(rd));
Q_ASSERT(!BadReg(rn));
Q_ASSERT(!BadReg(rm));
m_formatter.twoWordOp12Reg4FourFours(OP_ORR_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
}
@ -1197,36 +1197,36 @@ public:
void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount)
{
ASSERT(!BadReg(rd));
ASSERT(!BadReg(rm));
Q_ASSERT(!BadReg(rd));
Q_ASSERT(!BadReg(rm));
ShiftTypeAndAmount shift(SRType_ROR, shiftAmount);
m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm));
}
void ror(RegisterID rd, RegisterID rn, RegisterID rm)
{
ASSERT(!BadReg(rd));
ASSERT(!BadReg(rn));
ASSERT(!BadReg(rm));
Q_ASSERT(!BadReg(rd));
Q_ASSERT(!BadReg(rn));
Q_ASSERT(!BadReg(rm));
m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm));
}
void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm)
{
ASSERT(!BadReg(rdLo));
ASSERT(!BadReg(rdHi));
ASSERT(!BadReg(rn));
ASSERT(!BadReg(rm));
ASSERT(rdLo != rdHi);
Q_ASSERT(!BadReg(rdLo));
Q_ASSERT(!BadReg(rdHi));
Q_ASSERT(!BadReg(rn));
Q_ASSERT(!BadReg(rm));
Q_ASSERT(rdLo != rdHi);
m_formatter.twoWordOp12Reg4FourFours(OP_SMULL_T1, rn, FourFours(rdLo, rdHi, 0, rm));
}
// rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm)
{
ASSERT(rt != ARMRegisters::pc);
ASSERT(rn != ARMRegisters::pc);
ASSERT(imm.isUInt12());
Q_ASSERT(rt != ARMRegisters::pc);
Q_ASSERT(rn != ARMRegisters::pc);
Q_ASSERT(imm.isUInt12());
if (!((rt | rn) & 8) && imm.isUInt7())
m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt);
@ -1249,17 +1249,17 @@ public:
// if (wback) REG[rn] = _tmp
void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback)
{
ASSERT(rt != ARMRegisters::pc);
ASSERT(rn != ARMRegisters::pc);
ASSERT(index || wback);
ASSERT(!wback | (rt != rn));
Q_ASSERT(rt != ARMRegisters::pc);
Q_ASSERT(rn != ARMRegisters::pc);
Q_ASSERT(index || wback);
Q_ASSERT(!wback | (rt != rn));
bool add = true;
if (offset < 0) {
add = false;
offset = -offset;
}
ASSERT((offset & ~0xff) == 0);
Q_ASSERT((offset & ~0xff) == 0);
offset |= (wback << 8);
offset |= (add << 9);
@ -1272,9 +1272,9 @@ public:
// rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block.
void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0)
{
ASSERT(rn != ARMRegisters::pc);
ASSERT(!BadReg(rm));
ASSERT(shift <= 3);
Q_ASSERT(rn != ARMRegisters::pc);
Q_ASSERT(!BadReg(rm));
Q_ASSERT(shift <= 3);
if (!shift && !((rt | rn | rm) & 8))
m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STR_reg_T1, rm, rn, rt);
@ -1285,10 +1285,10 @@ public:
void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
{
// Rd can only be SP if Rn is also SP.
ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
ASSERT(rd != ARMRegisters::pc);
ASSERT(rn != ARMRegisters::pc);
ASSERT(imm.isValid());
Q_ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
Q_ASSERT(rd != ARMRegisters::pc);
Q_ASSERT(rn != ARMRegisters::pc);
Q_ASSERT(imm.isValid());
if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2);
@ -1306,17 +1306,17 @@ public:
if (imm.isEncodedImm())
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T3, rn, rd, imm);
else {
ASSERT(imm.isUInt12());
Q_ASSERT(imm.isUInt12());
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_imm_T4, rn, rd, imm);
}
}
void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
{
ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
ASSERT(rd != ARMRegisters::pc);
ASSERT(rn != ARMRegisters::pc);
ASSERT(!BadReg(rm));
Q_ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
Q_ASSERT(rd != ARMRegisters::pc);
Q_ASSERT(rn != ARMRegisters::pc);
Q_ASSERT(!BadReg(rm));
m_formatter.twoWordOp12Reg4FourFours(OP_SUB_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
}
@ -1333,10 +1333,10 @@ public:
void sub_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm)
{
// Rd can only be SP if Rn is also SP.
ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
ASSERT(rd != ARMRegisters::pc);
ASSERT(rn != ARMRegisters::pc);
ASSERT(imm.isValid());
Q_ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
Q_ASSERT(rd != ARMRegisters::pc);
Q_ASSERT(rn != ARMRegisters::pc);
Q_ASSERT(imm.isValid());
if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) {
m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2);
@ -1357,10 +1357,10 @@ public:
// Not allowed in an IT (if then) block?
void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
{
ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
ASSERT(rd != ARMRegisters::pc);
ASSERT(rn != ARMRegisters::pc);
ASSERT(!BadReg(rm));
Q_ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp));
Q_ASSERT(rd != ARMRegisters::pc);
Q_ASSERT(rn != ARMRegisters::pc);
Q_ASSERT(!BadReg(rm));
m_formatter.twoWordOp12Reg4FourFours(OP_SUB_S_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm));
}
@ -1375,16 +1375,16 @@ public:
void tst(RegisterID rn, ARMThumbImmediate imm)
{
ASSERT(!BadReg(rn));
ASSERT(imm.isEncodedImm());
Q_ASSERT(!BadReg(rn));
Q_ASSERT(imm.isEncodedImm());
m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm);
}
void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift)
{
ASSERT(!BadReg(rn));
ASSERT(!BadReg(rm));
Q_ASSERT(!BadReg(rn));
Q_ASSERT(!BadReg(rm));
m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm));
}
@ -1468,14 +1468,14 @@ public:
static void* getRelocatedAddress(void* code, JmpSrc jump)
{
ASSERT(jump.m_offset != -1);
Q_ASSERT(jump.m_offset != -1);
return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
}
static void* getRelocatedAddress(void* code, JmpDst destination)
{
ASSERT(destination.m_offset != -1);
Q_ASSERT(destination.m_offset != -1);
return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
}
@ -1514,13 +1514,13 @@ public:
}
m_jumpsToLink.clear();
ASSERT(copy);
Q_ASSERT(copy);
return copy;
}
static unsigned getCallReturnOffset(JmpSrc call)
{
ASSERT(call.m_offset >= 0);
Q_ASSERT(call.m_offset >= 0);
return call.m_offset;
}
@ -1534,14 +1534,14 @@ public:
void linkJump(JmpSrc from, JmpDst to)
{
ASSERT(to.m_offset != -1);
ASSERT(from.m_offset != -1);
Q_ASSERT(to.m_offset != -1);
Q_ASSERT(from.m_offset != -1);
m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset));
}
static void linkJump(void* code, JmpSrc from, void* to)
{
ASSERT(from.m_offset != -1);
Q_ASSERT(from.m_offset != -1);
uint16_t* location = reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset);
linkJumpAbsolute(location, to);
@ -1551,9 +1551,9 @@ public:
// return a bool saying whether the link was successful?
static void linkCall(void* code, JmpSrc from, void* to)
{
ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
ASSERT(from.m_offset != -1);
ASSERT(reinterpret_cast<intptr_t>(to) & 1);
Q_ASSERT(!(reinterpret_cast<intptr_t>(code) & 1));
Q_ASSERT(from.m_offset != -1);
Q_ASSERT(reinterpret_cast<intptr_t>(to) & 1);
setPointer(reinterpret_cast<uint16_t*>(reinterpret_cast<intptr_t>(code) + from.m_offset) - 1, to);
}
@ -1565,8 +1565,8 @@ public:
static void relinkJump(void* from, void* to)
{
ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
Q_ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
Q_ASSERT(!(reinterpret_cast<intptr_t>(to) & 1));
linkJumpAbsolute(reinterpret_cast<uint16_t*>(from), to);
@ -1575,8 +1575,8 @@ public:
static void relinkCall(void* from, void* to)
{
ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
ASSERT(reinterpret_cast<intptr_t>(to) & 1);
Q_ASSERT(!(reinterpret_cast<intptr_t>(from) & 1));
Q_ASSERT(reinterpret_cast<intptr_t>(to) & 1);
setPointer(reinterpret_cast<uint16_t*>(from) - 1, to);
@ -1585,7 +1585,7 @@ public:
static void repatchInt32(void* where, int32_t value)
{
ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
Q_ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
setInt32(where, value);
@ -1594,7 +1594,7 @@ public:
static void repatchPointer(void* where, void* value)
{
ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
Q_ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
setPointer(where, value);
@ -1603,10 +1603,10 @@ public:
static void repatchLoadPtrToLEA(void* where)
{
ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
Q_ASSERT(!(reinterpret_cast<intptr_t>(where) & 1));
uint16_t* loadOp = reinterpret_cast<uint16_t*>(where) + 4;
ASSERT((*loadOp & 0xfff0) == OP_LDR_reg_T2);
Q_ASSERT((*loadOp & 0xfff0) == OP_LDR_reg_T2);
*loadOp = OP_ADD_reg_T3 | (*loadOp & 0xf);
ExecutableAllocator::cacheFlush(loadOp, sizeof(uint16_t));
@ -1630,7 +1630,7 @@ private:
// offset is effectively leftshifted by 2 already (the bottom two bits are zero, and not
// reperesented in the instruction. Left shift by 14, to mov it into position 0x00AA0000.
ASSERT((offset & ~(0xff << 2)) == 0);
Q_ASSERT((offset & ~(0xff << 2)) == 0);
offset <<= 14;
m_formatter.vfpOp(0x0b00ed00 | offset | (up << 7) | (isLoad << 4) | doubleRegisterMask(rd, 6, 28) | rn);
@ -1639,7 +1639,7 @@ private:
static void setInt32(void* code, uint32_t value)
{
uint16_t* location = reinterpret_cast<uint16_t*>(code);
ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
Q_ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2));
ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value));
ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast<uint16_t>(value >> 16));
@ -1697,10 +1697,10 @@ private:
// FIMXE: this should be up in the MacroAssembler layer. :-(
const uint16_t JUMP_TEMPORARY_REGISTER = ARMRegisters::ip;
ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
Q_ASSERT(!(reinterpret_cast<intptr_t>(instruction) & 1));
Q_ASSERT(!(reinterpret_cast<intptr_t>(target) & 1));
ASSERT( (isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
Q_ASSERT( (isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1))
|| (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2)) );
intptr_t relative = reinterpret_cast<intptr_t>(target) - (reinterpret_cast<intptr_t>(instruction));
@ -1710,7 +1710,7 @@ private:
relative ^= 0xC00000;
// All branch offsets should be an even distance.
ASSERT(!(relative & 1));
Q_ASSERT(!(relative & 1));
// There may be a better way to fix this, but right now put the NOPs first, since in the
// case of an conditional branch this will be coming after an ITTT predicating *three*
// instructions! Looking backwards to modify the ITTT to an IT is not easy, due to

View file

@ -67,7 +67,7 @@ namespace JSC {
void putByteUnchecked(int value)
{
ASSERT(!(m_size > m_capacity - 4));
Q_ASSERT(!(m_size > m_capacity - 4));
m_buffer[m_size] = value;
m_size++;
}
@ -81,7 +81,7 @@ namespace JSC {
void putShortUnchecked(int value)
{
ASSERT(!(m_size > m_capacity - 4));
Q_ASSERT(!(m_size > m_capacity - 4));
*reinterpret_cast<short*>(&m_buffer[m_size]) = value;
m_size += 2;
}
@ -95,14 +95,14 @@ namespace JSC {
void putIntUnchecked(int value)
{
ASSERT(!(m_size > m_capacity - 4));
Q_ASSERT(!(m_size > m_capacity - 4));
*reinterpret_cast<int*>(&m_buffer[m_size]) = value;
m_size += 4;
}
void putInt64Unchecked(int64_t value)
{
ASSERT(!(m_size > m_capacity - 8));
Q_ASSERT(!(m_size > m_capacity - 8));
*reinterpret_cast<int64_t*>(&m_buffer[m_size]) = value;
m_size += 8;
}

View file

@ -74,14 +74,14 @@ public:
~LinkBuffer()
{
ASSERT(m_completed);
Q_ASSERT(m_completed);
}
// These methods are used to link or set values at code generation time.
void link(Call call, FunctionPtr function)
{
ASSERT(call.isFlagSet(Call::Linkable));
Q_ASSERT(call.isFlagSet(Call::Linkable));
MacroAssembler::linkCall(m_code, call, function);
}
@ -110,15 +110,15 @@ public:
CodeLocationCall locationOf(Call call)
{
ASSERT(call.isFlagSet(Call::Linkable));
ASSERT(!call.isFlagSet(Call::Near));
Q_ASSERT(call.isFlagSet(Call::Linkable));
Q_ASSERT(!call.isFlagSet(Call::Near));
return CodeLocationCall(MacroAssembler::getLinkerAddress(m_code, call.m_jmp));
}
CodeLocationNearCall locationOfNearCall(Call call)
{
ASSERT(call.isFlagSet(Call::Linkable));
ASSERT(call.isFlagSet(Call::Near));
Q_ASSERT(call.isFlagSet(Call::Linkable));
Q_ASSERT(call.isFlagSet(Call::Near));
return CodeLocationNearCall(MacroAssembler::getLinkerAddress(m_code, call.m_jmp));
}
@ -158,7 +158,7 @@ private:
void performFinalization()
{
#ifndef NDEBUG
ASSERT(!m_completed);
Q_ASSERT(!m_completed);
m_completed = true;
#endif

View file

@ -47,7 +47,7 @@ void MacroAssemblerARM::load32WithUnalignedHalfWords(BaseIndex address, Register
{
ARMWord op2;
ASSERT(address.scale >= 0 && address.scale <= 3);
Q_ASSERT(address.scale >= 0 && address.scale <= 3);
op2 = m_assembler.lsl(address.index, static_cast<int>(address.scale));
if (address.offset >= 0 && address.offset + 0x2 <= 0xff) {

View file

@ -121,7 +121,7 @@ public:
void lshift32(RegisterID shift_amount, RegisterID dest)
{
ARMWord w = ARMAssembler::getOp2(0x1f);
ASSERT(w != ARMAssembler::INVALID_IMM);
Q_ASSERT(w != ARMAssembler::INVALID_IMM);
m_assembler.and_r(ARMRegisters::S0, shift_amount, w);
m_assembler.movs_r(dest, m_assembler.lsl_r(dest, ARMRegisters::S0));
@ -170,7 +170,7 @@ public:
void rshift32(RegisterID shift_amount, RegisterID dest)
{
ARMWord w = ARMAssembler::getOp2(0x1f);
ASSERT(w != ARMAssembler::INVALID_IMM);
Q_ASSERT(w != ARMAssembler::INVALID_IMM);
m_assembler.and_r(ARMRegisters::S0, shift_amount, w);
m_assembler.movs_r(dest, m_assembler.asr_r(dest, ARMRegisters::S0));
@ -424,14 +424,14 @@ public:
Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
{
ASSERT((cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Zero) || (cond == NonZero));
m_assembler.tst_r(reg, mask);
return Jump(m_assembler.jmp(ARMCondition(cond)));
}
Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
{
ASSERT((cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Zero) || (cond == NonZero));
ARMWord w = m_assembler.getImm(mask.m_value, ARMRegisters::S0, true);
if (w & ARMAssembler::OP2_INV_IMM)
m_assembler.bics_r(ARMRegisters::S0, reg, w & ~ARMAssembler::OP2_INV_IMM);
@ -469,14 +469,14 @@ public:
Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
add32(src, dest);
return Jump(m_assembler.jmp(ARMCondition(cond)));
}
Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
add32(imm, dest);
return Jump(m_assembler.jmp(ARMCondition(cond)));
}
@ -493,7 +493,7 @@ public:
Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
if (cond == Overflow) {
mull32(src, dest, dest);
cond = NonZero;
@ -505,7 +505,7 @@ public:
Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
if (cond == Overflow) {
move(imm, ARMRegisters::S0);
mull32(ARMRegisters::S0, src, dest);
@ -518,28 +518,28 @@ public:
Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
sub32(src, dest);
return Jump(m_assembler.jmp(ARMCondition(cond)));
}
Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
sub32(imm, dest);
return Jump(m_assembler.jmp(ARMCondition(cond)));
}
Jump branchNeg32(Condition cond, RegisterID srcDest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
neg32(srcDest);
return Jump(m_assembler.jmp(ARMCondition(cond)));
}
Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
{
ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
or32(src, dest);
return Jump(m_assembler.jmp(ARMCondition(cond)));
}

View file

@ -201,7 +201,7 @@ public:
{
// Clamp the shift to the range 0..31
ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
ASSERT(armImm.isValid());
Q_ASSERT(armImm.isValid());
m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);
m_assembler.lsl(dest, dest, dataTempRegister);
@ -248,7 +248,7 @@ public:
{
// Clamp the shift to the range 0..31
ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
ASSERT(armImm.isValid());
Q_ASSERT(armImm.isValid());
m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);
m_assembler.asr(dest, dest, dataTempRegister);
@ -346,10 +346,10 @@ private:
m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
else if (address.u.offset >= 0) {
ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
ASSERT(armImm.isValid());
Q_ASSERT(armImm.isValid());
m_assembler.ldr(dest, address.base, armImm);
} else {
ASSERT(address.u.offset >= -255);
Q_ASSERT(address.u.offset >= -255);
m_assembler.ldr(dest, address.base, address.u.offset, true, false);
}
}
@ -360,10 +360,10 @@ private:
m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
else if (address.u.offset >= 0) {
ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
ASSERT(armImm.isValid());
Q_ASSERT(armImm.isValid());
m_assembler.ldrh(dest, address.base, armImm);
} else {
ASSERT(address.u.offset >= -255);
Q_ASSERT(address.u.offset >= -255);
m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
}
}
@ -374,10 +374,10 @@ private:
m_assembler.str(src, address.base, address.u.index, address.u.scale);
else if (address.u.offset >= 0) {
ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
ASSERT(armImm.isValid());
Q_ASSERT(armImm.isValid());
m_assembler.str(src, address.base, armImm);
} else {
ASSERT(address.u.offset >= -255);
Q_ASSERT(address.u.offset >= -255);
m_assembler.str(src, address.base, address.u.offset, true, false);
}
}
@ -795,21 +795,21 @@ public:
Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
{
ASSERT((cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Zero) || (cond == NonZero));
m_assembler.tst(reg, mask);
return Jump(makeBranch(cond));
}
Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
{
ASSERT((cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Zero) || (cond == NonZero));
test32(reg, mask);
return Jump(makeBranch(cond));
}
Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
{
ASSERT((cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Zero) || (cond == NonZero));
// use addressTempRegister incase the branchTest32 we call uses dataTempRegister. :-/
load32(address, addressTempRegister);
return branchTest32(cond, addressTempRegister, mask);
@ -817,7 +817,7 @@ public:
Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
{
ASSERT((cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Zero) || (cond == NonZero));
// use addressTempRegister incase the branchTest32 we call uses dataTempRegister. :-/
load32(address, addressTempRegister);
return branchTest32(cond, addressTempRegister, mask);
@ -853,14 +853,14 @@ public:
Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
m_assembler.add_S(dest, dest, src);
return Jump(makeBranch(cond));
}
Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
if (armImm.isValid())
m_assembler.add_S(dest, dest, armImm);
@ -873,7 +873,7 @@ public:
Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
{
ASSERT(cond == Overflow);
Q_ASSERT(cond == Overflow);
m_assembler.smull(dest, dataTempRegister, dest, src);
m_assembler.asr(addressTempRegister, dest, 31);
return branch32(NotEqual, addressTempRegister, dataTempRegister);
@ -881,7 +881,7 @@ public:
Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
{
ASSERT(cond == Overflow);
Q_ASSERT(cond == Overflow);
move(imm, dataTempRegister);
m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
m_assembler.asr(addressTempRegister, dest, 31);
@ -890,14 +890,14 @@ public:
Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
m_assembler.sub_S(dest, dest, src);
return Jump(makeBranch(cond));
}
Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
if (armImm.isValid())
m_assembler.sub_S(dest, dest, armImm);

View file

@ -44,13 +44,13 @@
// decorated and undectorated null, and the second test ensures that the pointer is
// decorated.
#define ASSERT_VALID_CODE_POINTER(ptr) \
ASSERT(reinterpret_cast<intptr_t>(ptr) & ~1); \
ASSERT(reinterpret_cast<intptr_t>(ptr) & 1)
Q_ASSERT(reinterpret_cast<intptr_t>(ptr) & ~1); \
Q_ASSERT(reinterpret_cast<intptr_t>(ptr) & 1)
#define ASSERT_VALID_CODE_OFFSET(offset) \
ASSERT(!(offset & 1)) // Must be multiple of 2.
Q_ASSERT(!(offset & 1)) // Must be multiple of 2.
#else
#define ASSERT_VALID_CODE_POINTER(ptr) \
ASSERT(ptr)
Q_ASSERT(ptr)
#define ASSERT_VALID_CODE_OFFSET(offset) // Anything goes!
#endif

View file

@ -91,7 +91,7 @@ public:
void loadDouble(void* address, FPRegisterID dest)
{
ASSERT(isSSE2Present());
Q_ASSERT(isSSE2Present());
m_assembler.movsd_mr(address, dest);
}

View file

@ -365,79 +365,79 @@ public:
void loadDouble(ImplicitAddress address, FPRegisterID dest)
{
ASSERT(isSSE2Present());
Q_ASSERT(isSSE2Present());
m_assembler.movsd_mr(address.offset, address.base, dest);
}
void storeDouble(FPRegisterID src, ImplicitAddress address)
{
ASSERT(isSSE2Present());
Q_ASSERT(isSSE2Present());
m_assembler.movsd_rm(src, address.offset, address.base);
}
void addDouble(FPRegisterID src, FPRegisterID dest)
{
ASSERT(isSSE2Present());
Q_ASSERT(isSSE2Present());
m_assembler.addsd_rr(src, dest);
}
void addDouble(Address src, FPRegisterID dest)
{
ASSERT(isSSE2Present());
Q_ASSERT(isSSE2Present());
m_assembler.addsd_mr(src.offset, src.base, dest);
}
void divDouble(FPRegisterID src, FPRegisterID dest)
{
ASSERT(isSSE2Present());
Q_ASSERT(isSSE2Present());
m_assembler.divsd_rr(src, dest);
}
void divDouble(Address src, FPRegisterID dest)
{
ASSERT(isSSE2Present());
Q_ASSERT(isSSE2Present());
m_assembler.divsd_mr(src.offset, src.base, dest);
}
void subDouble(FPRegisterID src, FPRegisterID dest)
{
ASSERT(isSSE2Present());
Q_ASSERT(isSSE2Present());
m_assembler.subsd_rr(src, dest);
}
void subDouble(Address src, FPRegisterID dest)
{
ASSERT(isSSE2Present());
Q_ASSERT(isSSE2Present());
m_assembler.subsd_mr(src.offset, src.base, dest);
}
void mulDouble(FPRegisterID src, FPRegisterID dest)
{
ASSERT(isSSE2Present());
Q_ASSERT(isSSE2Present());
m_assembler.mulsd_rr(src, dest);
}
void mulDouble(Address src, FPRegisterID dest)
{
ASSERT(isSSE2Present());
Q_ASSERT(isSSE2Present());
m_assembler.mulsd_mr(src.offset, src.base, dest);
}
void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
{
ASSERT(isSSE2Present());
Q_ASSERT(isSSE2Present());
m_assembler.cvtsi2sd_rr(src, dest);
}
void convertInt32ToDouble(Address src, FPRegisterID dest)
{
ASSERT(isSSE2Present());
Q_ASSERT(isSSE2Present());
m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
}
Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
{
ASSERT(isSSE2Present());
Q_ASSERT(isSSE2Present());
if (cond & DoubleConditionBitInvert)
m_assembler.ucomisd_rr(left, right);
@ -458,7 +458,7 @@ public:
return result;
}
ASSERT(!(cond & DoubleConditionBitSpecial));
Q_ASSERT(!(cond & DoubleConditionBitSpecial));
return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
}
@ -468,7 +468,7 @@ public:
// (specifically, in this case, INT_MIN).
Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
{
ASSERT(isSSE2Present());
Q_ASSERT(isSSE2Present());
m_assembler.cvttsd2si_rr(src, dest);
return branch32(Equal, dest, Imm32(0x80000000));
}
@ -479,7 +479,7 @@ public:
// (specifically, in this case, 0).
void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
{
ASSERT(isSSE2Present());
Q_ASSERT(isSSE2Present());
m_assembler.cvttsd2si_rr(src, dest);
// If the result is zero, it might have been -0.0, and the double comparison won't catch this!
@ -494,7 +494,7 @@ public:
void zeroDouble(FPRegisterID srcDest)
{
ASSERT(isSSE2Present());
Q_ASSERT(isSSE2Present());
m_assembler.xorpd_rr(srcDest, srcDest);
}
@ -672,7 +672,7 @@ public:
Jump branch16(Condition cond, BaseIndex left, Imm32 right)
{
ASSERT(!(right.m_value & 0xFFFF0000));
Q_ASSERT(!(right.m_value & 0xFFFF0000));
m_assembler.cmpw_im(right.m_value, left.offset, left.base, left.index, left.scale);
return Jump(m_assembler.jCC(x86Condition(cond)));
@ -680,14 +680,14 @@ public:
Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
{
ASSERT((cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Zero) || (cond == NonZero));
m_assembler.testl_rr(reg, mask);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchTest32(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
{
ASSERT((cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Zero) || (cond == NonZero));
// if we are only interested in the low seven bits, this can be tested with a testb
if (mask.m_value == -1)
m_assembler.testl_rr(reg, reg);
@ -700,7 +700,7 @@ public:
Jump branchTest32(Condition cond, Address address, Imm32 mask = Imm32(-1))
{
ASSERT((cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Zero) || (cond == NonZero));
if (mask.m_value == -1)
m_assembler.cmpl_im(0, address.offset, address.base);
else
@ -710,7 +710,7 @@ public:
Jump branchTest32(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
{
ASSERT((cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Zero) || (cond == NonZero));
if (mask.m_value == -1)
m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
else
@ -747,105 +747,105 @@ public:
Jump branchAdd32(Condition cond, RegisterID src, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
add32(src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchAdd32(Condition cond, Imm32 imm, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
add32(imm, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchAdd32(Condition cond, Imm32 src, Address dest)
{
ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
add32(src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchAdd32(Condition cond, RegisterID src, Address dest)
{
ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
add32(src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchAdd32(Condition cond, Address src, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
add32(src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchMul32(Condition cond, RegisterID src, RegisterID dest)
{
ASSERT(cond == Overflow);
Q_ASSERT(cond == Overflow);
mul32(src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchMul32(Condition cond, Address src, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
mul32(src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
{
ASSERT(cond == Overflow);
Q_ASSERT(cond == Overflow);
mul32(imm, src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchSub32(Condition cond, RegisterID src, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
sub32(src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchSub32(Condition cond, Imm32 imm, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero));
sub32(imm, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchSub32(Condition cond, Imm32 imm, Address dest)
{
ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
sub32(imm, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchSub32(Condition cond, RegisterID src, Address dest)
{
ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
sub32(src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchSub32(Condition cond, Address src, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
sub32(src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchNeg32(Condition cond, RegisterID srcDest)
{
ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
neg32(srcDest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchOr32(Condition cond, RegisterID src, RegisterID dest)
{
ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Signed) || (cond == Zero) || (cond == NonZero));
or32(src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
@ -996,7 +996,7 @@ private:
s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
}
// Only check once.
ASSERT(s_sse2CheckState != NotCheckedSSE2);
Q_ASSERT(s_sse2CheckState != NotCheckedSSE2);
return s_sse2CheckState == HasSSE2;
}

View file

@ -374,14 +374,14 @@ public:
Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
addPtr(src, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}
Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest)
{
ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
Q_ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
subPtr(imm, dest);
return Jump(m_assembler.jCC(x86Condition(cond)));
}

View file

@ -253,7 +253,7 @@ public:
: m_offset(offset)
, m_used(false)
{
ASSERT(m_offset == offset);
Q_ASSERT(m_offset == offset);
}
int m_offset : 31;
@ -1485,8 +1485,8 @@ public:
void linkJump(JmpSrc from, JmpDst to)
{
ASSERT(from.m_offset != -1);
ASSERT(to.m_offset != -1);
Q_ASSERT(from.m_offset != -1);
Q_ASSERT(to.m_offset != -1);
char* code = reinterpret_cast<char*>(m_formatter.data());
setRel32(code + from.m_offset, code + to.m_offset);
@ -1494,21 +1494,21 @@ public:
static void linkJump(void* code, JmpSrc from, void* to)
{
ASSERT(from.m_offset != -1);
Q_ASSERT(from.m_offset != -1);
setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
}
static void linkCall(void* code, JmpSrc from, void* to)
{
ASSERT(from.m_offset != -1);
Q_ASSERT(from.m_offset != -1);
setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
}
static void linkPointer(void* code, JmpDst where, void* value)
{
ASSERT(where.m_offset != -1);
Q_ASSERT(where.m_offset != -1);
setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
}
@ -1545,20 +1545,20 @@ public:
static unsigned getCallReturnOffset(JmpSrc call)
{
ASSERT(call.m_offset >= 0);
Q_ASSERT(call.m_offset >= 0);
return call.m_offset;
}
static void* getRelocatedAddress(void* code, JmpSrc jump)
{
ASSERT(jump.m_offset != -1);
Q_ASSERT(jump.m_offset != -1);
return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
}
static void* getRelocatedAddress(void* code, JmpDst destination)
{
ASSERT(destination.m_offset != -1);
Q_ASSERT(destination.m_offset != -1);
return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
}
@ -1581,7 +1581,7 @@ public:
void* executableCopy(ExecutablePool* allocator)
{
void* copy = m_formatter.executableCopy(allocator);
ASSERT(copy);
Q_ASSERT(copy);
return copy;
}
@ -1600,7 +1600,7 @@ private:
static void setRel32(void* from, void* to)
{
intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
ASSERT(offset == static_cast<int32_t>(offset));
Q_ASSERT(offset == static_cast<int32_t>(offset));
setInt32(from, offset);
}
@ -1953,7 +1953,7 @@ private:
void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
{
ASSERT(mode != ModRmRegister);
Q_ASSERT(mode != ModRmRegister);
putModRm(mode, reg, hasSib);
m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
@ -2016,7 +2016,7 @@ private:
void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
{
ASSERT(index != noIndex);
Q_ASSERT(index != noIndex);
#if CPU(X86_64)
if (!offset && (base != noBase) && (base != noBase2))

View file

@ -317,7 +317,7 @@ void CodeBlock::printStructures(const Instruction* vPC) const
}
// These m_instructions doesn't ref Structures.
ASSERT(vPC[0].u.opcode == op_get_by_id_generic || vPC[0].u.opcode == op_put_by_id_generic || vPC[0].u.opcode == op_call || vPC[0].u.opcode == op_call_eval || vPC[0].u.opcode == op_construct);
Q_ASSERT(vPC[0].u.opcode == op_get_by_id_generic || vPC[0].u.opcode == op_put_by_id_generic || vPC[0].u.opcode == op_call || vPC[0].u.opcode == op_call_eval || vPC[0].u.opcode == op_construct);
}
void CodeBlock::dump(ExecState* exec) const
@ -445,7 +445,7 @@ void CodeBlock::dump(ExecState* exec) const
for (Vector<int32_t>::const_iterator iter = m_rareData->m_characterSwitchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
if (!*iter)
continue;
ASSERT(!((i + m_rareData->m_characterSwitchJumpTables[i].min) & ~0xFFFF));
Q_ASSERT(!((i + m_rareData->m_characterSwitchJumpTables[i].min) & ~0xFFFF));
UChar ch = static_cast<UChar>(entry + m_rareData->m_characterSwitchJumpTables[i].min);
printf("\t\t\"%s\" => %04d\n", UString(&ch, 1).ascii(), *iter);
}
@ -1277,7 +1277,7 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, PassR
, m_symbolTable(symTab)
, m_exceptionInfo(new ExceptionInfo)
{
ASSERT(m_source);
Q_ASSERT(m_source);
#if DUMP_CODE_BLOCK_STATISTICS
liveCodeBlockSet.add(this);
@ -1311,7 +1311,7 @@ CodeBlock::~CodeBlock()
if (Structure* structure = m_methodCallLinkInfos[i].cachedStructure) {
structure->deref();
// Both members must be filled at the same time
ASSERT(!!m_methodCallLinkInfos[i].cachedPrototypeStructure);
Q_ASSERT(!!m_methodCallLinkInfos[i].cachedPrototypeStructure);
m_methodCallLinkInfos[i].cachedPrototypeStructure->deref();
}
}
@ -1380,7 +1380,7 @@ void CodeBlock::derefStructures(Instruction* vPC) const
}
// These instructions don't ref their Structures.
ASSERT(vPC[0].u.opcode == op_get_by_id || vPC[0].u.opcode == op_put_by_id || vPC[0].u.opcode == op_get_by_id_generic || vPC[0].u.opcode == op_put_by_id_generic || vPC[0].u.opcode == op_get_array_length || vPC[0].u.opcode == op_get_string_length);
Q_ASSERT(vPC[0].u.opcode == op_get_by_id || vPC[0].u.opcode == op_put_by_id || vPC[0].u.opcode == op_get_by_id_generic || vPC[0].u.opcode == op_put_by_id_generic || vPC[0].u.opcode == op_get_array_length || vPC[0].u.opcode == op_get_string_length);
}
void CodeBlock::refStructures(Instruction* vPC) const
@ -1411,7 +1411,7 @@ void CodeBlock::refStructures(Instruction* vPC) const
}
// These instructions don't ref their Structures.
ASSERT(vPC[0].u.opcode == op_get_by_id || vPC[0].u.opcode == op_put_by_id || vPC[0].u.opcode == op_get_by_id_generic || vPC[0].u.opcode == op_put_by_id_generic);
Q_ASSERT(vPC[0].u.opcode == op_get_by_id || vPC[0].u.opcode == op_put_by_id || vPC[0].u.opcode == op_get_by_id_generic || vPC[0].u.opcode == op_put_by_id_generic);
}
void CodeBlock::markAggregate(MarkStack& markStack)
@ -1437,7 +1437,7 @@ void CodeBlock::reparseForExceptionInfoIfNecessary(CallFrame* callFrame)
scopeDelta -= static_cast<EvalCodeBlock*>(this)->baseScopeDepth();
else if (m_codeType == FunctionCode)
scopeDelta++; // Compilation of function code assumes activation is not on the scope chain yet.
ASSERT(scopeDelta >= 0);
Q_ASSERT(scopeDelta >= 0);
while (scopeDelta--)
scopeChain = scopeChain->next;
}
@ -1447,7 +1447,7 @@ void CodeBlock::reparseForExceptionInfoIfNecessary(CallFrame* callFrame)
HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset)
{
ASSERT(bytecodeOffset < m_instructionCount);
Q_ASSERT(bytecodeOffset < m_instructionCount);
if (!m_rareData)
return 0;
@ -1465,10 +1465,10 @@ HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset)
int CodeBlock::lineNumberForBytecodeOffset(CallFrame* callFrame, unsigned bytecodeOffset)
{
ASSERT(bytecodeOffset < m_instructionCount);
Q_ASSERT(bytecodeOffset < m_instructionCount);
reparseForExceptionInfoIfNecessary(callFrame);
ASSERT(m_exceptionInfo);
Q_ASSERT(m_exceptionInfo);
if (!m_exceptionInfo->m_lineInfo.size())
return m_ownerExecutable->source().firstLine(); // Empty function
@ -1490,10 +1490,10 @@ int CodeBlock::lineNumberForBytecodeOffset(CallFrame* callFrame, unsigned byteco
int CodeBlock::expressionRangeForBytecodeOffset(CallFrame* callFrame, unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset)
{
ASSERT(bytecodeOffset < m_instructionCount);
Q_ASSERT(bytecodeOffset < m_instructionCount);
reparseForExceptionInfoIfNecessary(callFrame);
ASSERT(m_exceptionInfo);
Q_ASSERT(m_exceptionInfo);
if (!m_exceptionInfo->m_expressionInfo.size()) {
// We didn't think anything could throw. Apparently we were wrong.
@ -1513,7 +1513,7 @@ int CodeBlock::expressionRangeForBytecodeOffset(CallFrame* callFrame, unsigned b
high = mid;
}
ASSERT(low);
Q_ASSERT(low);
if (!low) {
startOffset = 0;
endOffset = 0;
@ -1529,10 +1529,10 @@ int CodeBlock::expressionRangeForBytecodeOffset(CallFrame* callFrame, unsigned b
bool CodeBlock::getByIdExceptionInfoForBytecodeOffset(CallFrame* callFrame, unsigned bytecodeOffset, OpcodeID& opcodeID)
{
ASSERT(bytecodeOffset < m_instructionCount);
Q_ASSERT(bytecodeOffset < m_instructionCount);
reparseForExceptionInfoIfNecessary(callFrame);
ASSERT(m_exceptionInfo);
Q_ASSERT(m_exceptionInfo);
if (!m_exceptionInfo->m_getByIdExceptionInfo.size())
return false;
@ -1557,7 +1557,7 @@ bool CodeBlock::getByIdExceptionInfoForBytecodeOffset(CallFrame* callFrame, unsi
#if ENABLE(JIT)
bool CodeBlock::functionRegisterForBytecodeOffset(unsigned bytecodeOffset, int& functionRegisterIndex)
{
ASSERT(bytecodeOffset < m_instructionCount);
Q_ASSERT(bytecodeOffset < m_instructionCount);
if (!m_rareData || !m_rareData->m_functionRegisterInfos.size())
return false;

View file

@ -238,12 +238,12 @@ namespace JSC {
}
// 'size' should never reach zero.
ASSERT(size);
Q_ASSERT(size);
}
// If we reach this point we've chopped down to one element, no need to check it matches
ASSERT(size == 1);
ASSERT(key == valueAtPosition(&array[0]));
Q_ASSERT(size == 1);
Q_ASSERT(key == valueAtPosition(&array[0]));
return &array[0];
}
#endif
@ -412,21 +412,21 @@ namespace JSC {
size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
void addExceptionHandler(const HandlerInfo& hanler) { createRareDataIfNecessary(); return m_rareData->m_exceptionHandlers.append(hanler); }
HandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }
HandlerInfo& exceptionHandler(int index) { Q_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }
bool hasExceptionInfo() const { return m_exceptionInfo; }
void clearExceptionInfo() { m_exceptionInfo.clear(); }
ExceptionInfo* extractExceptionInfo() { ASSERT(m_exceptionInfo); return m_exceptionInfo.release(); }
ExceptionInfo* extractExceptionInfo() { Q_ASSERT(m_exceptionInfo); return m_exceptionInfo.release(); }
void addExpressionInfo(const ExpressionRangeInfo& expressionInfo) { ASSERT(m_exceptionInfo); m_exceptionInfo->m_expressionInfo.append(expressionInfo); }
void addGetByIdExceptionInfo(const GetByIdExceptionInfo& info) { ASSERT(m_exceptionInfo); m_exceptionInfo->m_getByIdExceptionInfo.append(info); }
void addExpressionInfo(const ExpressionRangeInfo& expressionInfo) { Q_ASSERT(m_exceptionInfo); m_exceptionInfo->m_expressionInfo.append(expressionInfo); }
void addGetByIdExceptionInfo(const GetByIdExceptionInfo& info) { Q_ASSERT(m_exceptionInfo); m_exceptionInfo->m_getByIdExceptionInfo.append(info); }
size_t numberOfLineInfos() const { ASSERT(m_exceptionInfo); return m_exceptionInfo->m_lineInfo.size(); }
void addLineInfo(const LineInfo& lineInfo) { ASSERT(m_exceptionInfo); m_exceptionInfo->m_lineInfo.append(lineInfo); }
LineInfo& lastLineInfo() { ASSERT(m_exceptionInfo); return m_exceptionInfo->m_lineInfo.last(); }
size_t numberOfLineInfos() const { Q_ASSERT(m_exceptionInfo); return m_exceptionInfo->m_lineInfo.size(); }
void addLineInfo(const LineInfo& lineInfo) { Q_ASSERT(m_exceptionInfo); m_exceptionInfo->m_lineInfo.append(lineInfo); }
LineInfo& lastLineInfo() { Q_ASSERT(m_exceptionInfo); return m_exceptionInfo->m_lineInfo.last(); }
#if ENABLE(JIT)
Vector<CallReturnOffsetToBytecodeIndex>& callReturnIndexVector() { ASSERT(m_exceptionInfo); return m_exceptionInfo->m_callReturnIndexVector; }
Vector<CallReturnOffsetToBytecodeIndex>& callReturnIndexVector() { Q_ASSERT(m_exceptionInfo); return m_exceptionInfo->m_callReturnIndexVector; }
#endif
// Constant Pool
@ -448,26 +448,26 @@ namespace JSC {
FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
unsigned addRegExp(RegExp* r) { createRareDataIfNecessary(); unsigned size = m_rareData->m_regexps.size(); m_rareData->m_regexps.append(r); return size; }
RegExp* regexp(int index) const { ASSERT(m_rareData); return m_rareData->m_regexps[index].get(); }
RegExp* regexp(int index) const { Q_ASSERT(m_rareData); return m_rareData->m_regexps[index].get(); }
// Jump Tables
size_t numberOfImmediateSwitchJumpTables() const { return m_rareData ? m_rareData->m_immediateSwitchJumpTables.size() : 0; }
SimpleJumpTable& addImmediateSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_immediateSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_immediateSwitchJumpTables.last(); }
SimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; }
SimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { Q_ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; }
size_t numberOfCharacterSwitchJumpTables() const { return m_rareData ? m_rareData->m_characterSwitchJumpTables.size() : 0; }
SimpleJumpTable& addCharacterSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_characterSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_characterSwitchJumpTables.last(); }
SimpleJumpTable& characterSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; }
SimpleJumpTable& characterSwitchJumpTable(int tableIndex) { Q_ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; }
size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
StringJumpTable& stringSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
StringJumpTable& stringSwitchJumpTable(int tableIndex) { Q_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
SymbolTable* symbolTable() { return m_symbolTable; }
SharedSymbolTable* sharedSymbolTable() { ASSERT(m_codeType == FunctionCode); return static_cast<SharedSymbolTable*>(m_symbolTable); }
SharedSymbolTable* sharedSymbolTable() { Q_ASSERT(m_codeType == FunctionCode); return static_cast<SharedSymbolTable*>(m_symbolTable); }
EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
@ -609,7 +609,7 @@ namespace JSC {
unsigned numVariables() { return m_variables.size(); }
void adoptVariables(Vector<Identifier>& variables)
{
ASSERT(m_variables.isEmpty());
Q_ASSERT(m_variables.isEmpty());
m_variables.swap(variables);
}

View file

@ -109,7 +109,7 @@ namespace JSC {
for (int i = 0; i < count; ++i) {
PolymorphicStubInfo& info = list[i];
ASSERT(info.base);
Q_ASSERT(info.base);
info.base->deref();
if (info.u.proto) {

View file

@ -124,7 +124,7 @@ void* SamplingThread::threadStartFunc(void*)
void SamplingThread::start(unsigned hertz)
{
ASSERT(!s_running);
Q_ASSERT(!s_running);
s_running = true;
s_hertz = hertz;
@ -133,7 +133,7 @@ void SamplingThread::start(unsigned hertz)
void SamplingThread::stop()
{
ASSERT(s_running);
Q_ASSERT(s_running);
s_running = false;
waitForThreadCompletion(s_samplingThread, 0);
}
@ -180,7 +180,7 @@ void SamplingTool::doRun()
if (CodeBlock* codeBlock = sample.codeBlock()) {
QMutexLocker locker(m_scriptSampleMapMutex);
ScriptSampleRecord* record = m_scopeSampleMap->get(codeBlock->ownerExecutable());
ASSERT(record);
Q_ASSERT(record);
record->sample(codeBlock, sample.vPC());
}
#endif

View file

@ -49,15 +49,15 @@ namespace JSC {
#if ENABLE(SAMPLING_FLAGS)
static void setFlag(unsigned flag)
{
ASSERT(flag >= 1);
ASSERT(flag <= 32);
Q_ASSERT(flag >= 1);
Q_ASSERT(flag <= 32);
s_flags |= 1u << (flag - 1);
}
static void clearFlag(unsigned flag)
{
ASSERT(flag >= 1);
ASSERT(flag <= 32);
Q_ASSERT(flag >= 1);
Q_ASSERT(flag <= 32);
s_flags &= ~(1u << (flag - 1));
}
@ -220,7 +220,7 @@ namespace JSC {
void sample(CodeBlock* codeBlock, Instruction* vPC)
{
ASSERT(!(reinterpret_cast<intptr_t>(vPC) & 0x3));
Q_ASSERT(!(reinterpret_cast<intptr_t>(vPC) & 0x3));
m_codeBlock = codeBlock;
m_sample = reinterpret_cast<intptr_t>(vPC);
}
@ -230,7 +230,7 @@ namespace JSC {
void* encodeSample(Instruction* vPC, bool inCTIFunction = false, bool inHostFunction = false)
{
ASSERT(!(reinterpret_cast<intptr_t>(vPC) & 0x3));
Q_ASSERT(!(reinterpret_cast<intptr_t>(vPC) & 0x3));
return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(vPC) | (static_cast<intptr_t>(inCTIFunction) << 1) | static_cast<intptr_t>(inHostFunction));
}
@ -404,8 +404,8 @@ namespace JSC {
fprintf(stderr, "DeletableSamplingCounter \"%s\" deleted early (with count %lld)\n", m_name, m_counter);
// Our m_referer pointer should know where the pointer to this node is,
// and m_next should know that this node is the previous node in the list.
ASSERT(*m_referer == this);
ASSERT(m_next->m_referer == &m_next);
Q_ASSERT(*m_referer == this);
Q_ASSERT(m_next->m_referer == &m_next);
// Remove this node from the list, and inform m_next that we have done so.
m_next->m_referer = m_referer;
*m_referer = m_next;

View file

@ -463,10 +463,10 @@ bool BytecodeGenerator::willResolveToArguments(const Identifier& ident)
RegisterID* BytecodeGenerator::uncheckedRegisterForArguments()
{
ASSERT(willResolveToArguments(propertyNames().arguments));
Q_ASSERT(willResolveToArguments(propertyNames().arguments));
SymbolTableEntry entry = symbolTable().get(propertyNames().arguments.ustring().rep());
ASSERT(!entry.isNull());
Q_ASSERT(!entry.isNull());
return &registerFor(entry.getIndex());
}
@ -551,7 +551,7 @@ PassRefPtr<Label> BytecodeGenerator::emitLabel(Label* l0)
if (m_codeBlock->numberOfJumpTargets()) {
unsigned lastLabelIndex = m_codeBlock->lastJumpTarget();
ASSERT(lastLabelIndex <= newLabelIndex);
Q_ASSERT(lastLabelIndex <= newLabelIndex);
if (newLabelIndex == lastLabelIndex) {
// Peephole optimizations have already been disabled by emitting the last label
return l0;
@ -573,7 +573,7 @@ void BytecodeGenerator::emitOpcode(OpcodeID opcodeID)
void BytecodeGenerator::retrieveLastBinaryOp(int& dstIndex, int& src1Index, int& src2Index)
{
ASSERT(instructions().size() >= 4);
Q_ASSERT(instructions().size() >= 4);
size_t size = instructions().size();
dstIndex = instructions().at(size - 3).u.operand;
src1Index = instructions().at(size - 2).u.operand;
@ -582,7 +582,7 @@ void BytecodeGenerator::retrieveLastBinaryOp(int& dstIndex, int& src1Index, int&
void BytecodeGenerator::retrieveLastUnaryOp(int& dstIndex, int& srcIndex)
{
ASSERT(instructions().size() >= 3);
Q_ASSERT(instructions().size() >= 3);
size_t size = instructions().size();
dstIndex = instructions().at(size - 2).u.operand;
srcIndex = instructions().at(size - 1).u.operand;
@ -590,13 +590,13 @@ void BytecodeGenerator::retrieveLastUnaryOp(int& dstIndex, int& srcIndex)
void ALWAYS_INLINE BytecodeGenerator::rewindBinaryOp()
{
ASSERT(instructions().size() >= 4);
Q_ASSERT(instructions().size() >= 4);
instructions().shrink(instructions().size() - 4);
}
void ALWAYS_INLINE BytecodeGenerator::rewindUnaryOp()
{
ASSERT(instructions().size() >= 3);
Q_ASSERT(instructions().size() >= 3);
instructions().shrink(instructions().size() - 3);
}
@ -994,7 +994,7 @@ bool BytecodeGenerator::findScopedProperty(const Identifier& property, int& inde
if (shouldOptimizeLocals() && m_codeType == GlobalCode) {
ScopeChainIterator iter = m_scopeChain->begin();
globalObject = *iter;
ASSERT((++iter) == m_scopeChain->end());
Q_ASSERT((++iter) == m_scopeChain->end());
}
return false;
}
@ -1335,7 +1335,7 @@ RegisterID* BytecodeGenerator::emitNewArray(RegisterID* dst, ElementNode* elemen
break;
argv.append(newTemporary());
// op_new_array requires the initial values to be a sequential range of registers
ASSERT(argv.size() == 1 || argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
Q_ASSERT(argv.size() == 1 || argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
emitNode(argv.last().get(), n->value());
}
emitOpcode(op_new_array);
@ -1394,9 +1394,9 @@ RegisterID* BytecodeGenerator::emitCallEval(RegisterID* dst, RegisterID* func, R
RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, RegisterID* func, RegisterID* thisRegister, ArgumentsNode* argumentsNode, unsigned divot, unsigned startOffset, unsigned endOffset)
{
ASSERT(opcodeID == op_call || opcodeID == op_call_eval);
ASSERT(func->refCount());
ASSERT(thisRegister->refCount());
Q_ASSERT(opcodeID == op_call || opcodeID == op_call_eval);
Q_ASSERT(func->refCount());
Q_ASSERT(thisRegister->refCount());
RegisterID* originalFunc = func;
@ -1406,7 +1406,7 @@ RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, Regi
for (ArgumentListNode* n = argumentsNode->m_listNode; n; n = n->m_next) {
argv.append(newTemporary());
// op_call requires the arguments to be a sequential range of registers
ASSERT(argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
Q_ASSERT(argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
emitNode(argv.last().get(), n);
}
@ -1433,7 +1433,7 @@ RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, Regi
RegisterID* BytecodeGenerator::emitLoadVarargs(RegisterID* argCountDst, RegisterID* arguments)
{
ASSERT(argCountDst->index() < arguments->index());
Q_ASSERT(argCountDst->index() < arguments->index());
emitOpcode(op_load_varargs);
instructions().append(argCountDst->index());
instructions().append(arguments->index());
@ -1442,9 +1442,9 @@ RegisterID* BytecodeGenerator::emitLoadVarargs(RegisterID* argCountDst, Register
RegisterID* BytecodeGenerator::emitCallVarargs(RegisterID* dst, RegisterID* func, RegisterID* thisRegister, RegisterID* argCountRegister, unsigned divot, unsigned startOffset, unsigned endOffset)
{
ASSERT(func->refCount());
ASSERT(thisRegister->refCount());
ASSERT(dst != func);
Q_ASSERT(func->refCount());
Q_ASSERT(thisRegister->refCount());
Q_ASSERT(dst != func);
emitExpressionInfo(divot, startOffset, endOffset);
@ -1478,7 +1478,7 @@ RegisterID* BytecodeGenerator::emitUnaryNoDstOp(OpcodeID opcodeID, RegisterID* s
RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func, ArgumentsNode* argumentsNode, unsigned divot, unsigned startOffset, unsigned endOffset)
{
ASSERT(func->refCount());
Q_ASSERT(func->refCount());
RegisterID* originalFunc = func;
@ -1490,7 +1490,7 @@ RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func,
for (ArgumentListNode* n = argumentsNode ? argumentsNode->m_listNode : 0; n; n = n->m_next) {
argv.append(newTemporary());
// op_construct requires the arguments to be a sequential range of registers
ASSERT(argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
Q_ASSERT(argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
emitNode(argv.last().get(), n);
}
@ -1544,7 +1544,7 @@ void BytecodeGenerator::emitToPrimitive(RegisterID* dst, RegisterID* src)
RegisterID* BytecodeGenerator::emitPushScope(RegisterID* scope)
{
ASSERT(scope->isTemporary());
Q_ASSERT(scope->isTemporary());
ControlFlowContext context;
context.isFinallyBlock = false;
m_scopeContextStack.append(context);
@ -1556,8 +1556,8 @@ RegisterID* BytecodeGenerator::emitPushScope(RegisterID* scope)
void BytecodeGenerator::emitPopScope()
{
ASSERT(m_scopeContextStack.size());
ASSERT(!m_scopeContextStack.last().isFinallyBlock);
Q_ASSERT(m_scopeContextStack.size());
Q_ASSERT(!m_scopeContextStack.last().isFinallyBlock);
emitOpcode(op_pop_scope);
@ -1587,9 +1587,9 @@ void BytecodeGenerator::pushFinallyContext(Label* target, RegisterID* retAddrDst
void BytecodeGenerator::popFinallyContext()
{
ASSERT(m_scopeContextStack.size());
ASSERT(m_scopeContextStack.last().isFinallyBlock);
ASSERT(m_finallyDepth > 0);
Q_ASSERT(m_scopeContextStack.size());
Q_ASSERT(m_scopeContextStack.last().isFinallyBlock);
Q_ASSERT(m_finallyDepth > 0);
m_scopeContextStack.removeLast();
m_finallyDepth--;
}
@ -1619,7 +1619,7 @@ LabelScope* BytecodeGenerator::breakTarget(const Identifier& name)
for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
LabelScope* scope = &m_labelScopes[i];
if (scope->type() != LabelScope::NamedLabel) {
ASSERT(scope->breakTarget());
Q_ASSERT(scope->breakTarget());
return scope;
}
}
@ -1629,7 +1629,7 @@ LabelScope* BytecodeGenerator::breakTarget(const Identifier& name)
for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
LabelScope* scope = &m_labelScopes[i];
if (scope->name() && *scope->name() == name) {
ASSERT(scope->breakTarget());
Q_ASSERT(scope->breakTarget());
return scope;
}
}
@ -1649,7 +1649,7 @@ LabelScope* BytecodeGenerator::continueTarget(const Identifier& name)
for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
LabelScope* scope = &m_labelScopes[i];
if (scope->type() == LabelScope::Loop) {
ASSERT(scope->continueTarget());
Q_ASSERT(scope->continueTarget());
return scope;
}
}
@ -1662,7 +1662,7 @@ LabelScope* BytecodeGenerator::continueTarget(const Identifier& name)
for (int i = m_labelScopes.size() - 1; i >= 0; --i) {
LabelScope* scope = &m_labelScopes[i];
if (scope->type() == LabelScope::Loop) {
ASSERT(scope->continueTarget());
Q_ASSERT(scope->continueTarget());
result = scope;
}
if (scope->name() && *scope->name() == name)
@ -1716,11 +1716,11 @@ PassRefPtr<Label> BytecodeGenerator::emitComplexJumpScopes(Label* target, Contro
PassRefPtr<Label> BytecodeGenerator::emitJumpScopes(Label* target, int targetScopeDepth)
{
ASSERT(scopeDepth() - targetScopeDepth >= 0);
ASSERT(target->isForward());
Q_ASSERT(scopeDepth() - targetScopeDepth >= 0);
Q_ASSERT(target->isForward());
size_t scopeDelta = scopeDepth() - targetScopeDepth;
ASSERT(scopeDelta <= m_scopeContextStack.size());
Q_ASSERT(scopeDelta <= m_scopeContextStack.size());
if (!scopeDelta)
return emitJump(target);
@ -1847,12 +1847,12 @@ void BytecodeGenerator::beginSwitch(RegisterID* scrutineeRegister, SwitchInfo::S
static int32_t keyForImmediateSwitch(ExpressionNode* node, int32_t min, int32_t max)
{
UNUSED_PARAM(max);
ASSERT(node->isNumber());
Q_ASSERT(node->isNumber());
double value = static_cast<NumberNode*>(node)->value();
int32_t key = static_cast<int32_t>(value);
ASSERT(key == value);
ASSERT(key >= min);
ASSERT(key <= max);
Q_ASSERT(key == value);
Q_ASSERT(key >= min);
Q_ASSERT(key <= max);
return key - min;
}
@ -1864,7 +1864,7 @@ static void prepareJumpTableForImmediateSwitch(SimpleJumpTable& jumpTable, int32
for (uint32_t i = 0; i < clauseCount; ++i) {
// We're emitting this after the clause labels should have been fixed, so
// the labels should not be "forward" references
ASSERT(!labels[i]->isForward());
Q_ASSERT(!labels[i]->isForward());
jumpTable.add(keyForImmediateSwitch(nodes[i], min, max), labels[i]->bind(switchAddress, switchAddress + 3));
}
}
@ -1872,13 +1872,13 @@ static void prepareJumpTableForImmediateSwitch(SimpleJumpTable& jumpTable, int32
static int32_t keyForCharacterSwitch(ExpressionNode* node, int32_t min, int32_t max)
{
UNUSED_PARAM(max);
ASSERT(node->isString());
Q_ASSERT(node->isString());
UString::Rep* clause = static_cast<StringNode*>(node)->value().ustring().rep();
ASSERT(clause->size() == 1);
Q_ASSERT(clause->size() == 1);
int32_t key = clause->data()[0];
ASSERT(key >= min);
ASSERT(key <= max);
Q_ASSERT(key >= min);
Q_ASSERT(key <= max);
return key - min;
}
@ -1890,7 +1890,7 @@ static void prepareJumpTableForCharacterSwitch(SimpleJumpTable& jumpTable, int32
for (uint32_t i = 0; i < clauseCount; ++i) {
// We're emitting this after the clause labels should have been fixed, so
// the labels should not be "forward" references
ASSERT(!labels[i]->isForward());
Q_ASSERT(!labels[i]->isForward());
jumpTable.add(keyForCharacterSwitch(nodes[i], min, max), labels[i]->bind(switchAddress, switchAddress + 3));
}
}
@ -1900,9 +1900,9 @@ static void prepareJumpTableForStringSwitch(StringJumpTable& jumpTable, int32_t
for (uint32_t i = 0; i < clauseCount; ++i) {
// We're emitting this after the clause labels should have been fixed, so
// the labels should not be "forward" references
ASSERT(!labels[i]->isForward());
Q_ASSERT(!labels[i]->isForward());
ASSERT(nodes[i]->isString());
Q_ASSERT(nodes[i]->isString());
UString::Rep* clause = static_cast<StringNode*>(nodes[i])->value().ustring().rep();
OffsetLocation location;
location.branchOffset = labels[i]->bind(switchAddress, switchAddress + 3);
@ -1927,7 +1927,7 @@ void BytecodeGenerator::endSwitch(uint32_t clauseCount, RefPtr<Label>* labels, E
SimpleJumpTable& jumpTable = m_codeBlock->addCharacterSwitchJumpTable();
prepareJumpTableForCharacterSwitch(jumpTable, switchInfo.bytecodeOffset, clauseCount, labels, nodes, min, max);
} else {
ASSERT(switchInfo.switchType == SwitchInfo::SwitchString);
Q_ASSERT(switchInfo.switchType == SwitchInfo::SwitchString);
instructions()[switchInfo.bytecodeOffset + 1] = m_codeBlock->numberOfStringSwitchJumpTables();
instructions()[switchInfo.bytecodeOffset + 2] = defaultLabel->bind(switchInfo.bytecodeOffset, switchInfo.bytecodeOffset + 3);

View file

@ -144,7 +144,7 @@ namespace JSC {
{
if (originalDst && originalDst != ignoredResult())
return originalDst;
ASSERT(tempDst != ignoredResult());
Q_ASSERT(tempDst != ignoredResult());
if (tempDst && tempDst->isTemporary())
return tempDst;
return newTemporary();
@ -174,7 +174,7 @@ namespace JSC {
RegisterID* emitNode(RegisterID* dst, Node* n)
{
// Node::emitCode assumes that dst, if provided, is either a local or a referenced temporary.
ASSERT(!dst || dst == ignoredResult() || !dst->isTemporary() || dst->refCount());
Q_ASSERT(!dst || dst == ignoredResult() || !dst->isTemporary() || dst->refCount());
if (!m_codeBlock->numberOfLineInfos() || m_codeBlock->lastLineInfo().lineNumber != n->lineNo()) {
LineInfo info = { static_cast<uint32_t>(instructions().size()), n->lineNo() };
m_codeBlock->addLineInfo(info);
@ -238,7 +238,7 @@ namespace JSC {
{
// Only op_construct and op_instanceof need exception info for
// a preceding op_get_by_id.
ASSERT(opcodeID == op_construct || opcodeID == op_instanceof);
Q_ASSERT(opcodeID == op_construct || opcodeID == op_instanceof);
GetByIdExceptionInfo info;
info.bytecodeOffset = instructions().size();
info.isOpConstruct = (opcodeID == op_construct);
@ -437,7 +437,7 @@ namespace JSC {
return m_argumentsRegister;
if (m_parameters.size()) {
ASSERT(!m_globals.size());
Q_ASSERT(!m_globals.size());
return m_parameters[index + m_parameters.size() + RegisterFile::CallFrameHeaderSize];
}

View file

@ -68,7 +68,7 @@ namespace JSC {
void deref()
{
--m_refCount;
ASSERT(m_refCount >= 0);
Q_ASSERT(m_refCount >= 0);
}
int refCount() const { return m_refCount; }

View file

@ -54,7 +54,7 @@ namespace JSC {
void deref()
{
--m_refCount;
ASSERT(m_refCount >= 0);
Q_ASSERT(m_refCount >= 0);
}
int refCount() const { return m_refCount; }

View file

@ -76,7 +76,7 @@ namespace JSC {
static void substitute(UString& string, const UString& substring)
{
int position = string.find("%s");
ASSERT(position != -1);
Q_ASSERT(position != -1);
string = makeString(string.substr(0, position), substring, string.substr(position + 2));
}
@ -225,7 +225,7 @@ bool ArrayNode::isSimpleArray() const
ArgumentListNode* ArrayNode::toArgumentList(JSGlobalData* globalData) const
{
ASSERT(!m_elision && !m_optional);
Q_ASSERT(!m_elision && !m_optional);
ElementNode* ptr = m_element;
if (!ptr)
return 0;
@ -233,7 +233,7 @@ ArgumentListNode* ArrayNode::toArgumentList(JSGlobalData* globalData) const
ArgumentListNode* tail = head;
ptr = ptr->next();
for (; ptr; ptr = ptr->next()) {
ASSERT(!ptr->elision());
Q_ASSERT(!ptr->elision());
tail = new (globalData) ArgumentListNode(globalData, tail, ptr->value());
}
return head;
@ -306,7 +306,7 @@ RegisterID* DotAccessorNode::emitBytecode(BytecodeGenerator& generator, Register
RegisterID* ArgumentListNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst)
{
ASSERT(m_expr);
Q_ASSERT(m_expr);
return generator.emitNode(dst, m_expr);
}
@ -450,8 +450,8 @@ RegisterID* ApplyFunctionCallDotNode::emitBytecode(BytecodeGenerator& generator,
generator.emitNode(thisRegister.get(), m_args->m_listNode->m_expr);
m_args->m_listNode = m_args->m_listNode->m_next;
if (m_args->m_listNode) {
ASSERT(m_args->m_listNode->m_expr->isSimpleArray());
ASSERT(!m_args->m_listNode->m_next);
Q_ASSERT(m_args->m_listNode->m_expr->isSimpleArray());
Q_ASSERT(!m_args->m_listNode->m_next);
m_args->m_listNode = static_cast<ArrayNode*>(m_args->m_listNode->m_expr)->toArgumentList(generator.globalData());
}
} else
@ -459,7 +459,7 @@ RegisterID* ApplyFunctionCallDotNode::emitBytecode(BytecodeGenerator& generator,
generator.emitCall(finalDestination.get(), realFunction.get(), thisRegister.get(), m_args, divot(), startOffset(), endOffset());
m_args->m_listNode = oldList;
} else {
ASSERT(m_args->m_listNode && m_args->m_listNode->m_next);
Q_ASSERT(m_args->m_listNode && m_args->m_listNode->m_next);
RefPtr<RegisterID> realFunction = generator.emitMove(generator.newTemporary(), base.get());
RefPtr<RegisterID> argsCountRegister = generator.newTemporary();
RefPtr<RegisterID> thisRegister = generator.newTemporary();
@ -783,7 +783,7 @@ RegisterID* UnaryOpNode::emitBytecode(BytecodeGenerator& generator, RegisterID*
void LogicalNotNode::emitBytecodeInConditionContext(BytecodeGenerator& generator, Label* trueTarget, Label* falseTarget, bool fallThroughMeansTrue)
{
ASSERT(expr()->hasConditionContextCodegen());
Q_ASSERT(expr()->hasConditionContextCodegen());
// reverse the true and false targets
generator.emitNodeInConditionContext(expr(), falseTarget, trueTarget, !fallThroughMeansTrue);
@ -822,8 +822,8 @@ void LogicalNotNode::emitBytecodeInConditionContext(BytecodeGenerator& generator
//
RegisterID* BinaryOpNode::emitStrcat(BytecodeGenerator& generator, RegisterID* dst, RegisterID* lhs, ReadModifyResolveNode* emitExpressionInfoForMe)
{
ASSERT(isAdd());
ASSERT(resultDescriptor().definitelyIsString());
Q_ASSERT(isAdd());
Q_ASSERT(resultDescriptor().definitelyIsString());
// Create a list of expressions for all the adds in the tree of nodes we can convert into
// a string concatenation. The rightmost node (c) is added first. The rightmost node is
@ -895,7 +895,7 @@ RegisterID* BinaryOpNode::emitStrcat(BytecodeGenerator& generator, RegisterID* d
if (!node->isString())
generator.emitToPrimitive(temporaryRegisters.last().get(), temporaryRegisters.last().get());
}
ASSERT(temporaryRegisters.size() >= 3);
Q_ASSERT(temporaryRegisters.size() >= 3);
// Certain read-modify nodes require expression info to be emitted *after* m_right has been generated.
// If this is required the node is passed as 'emitExpressionInfoForMe'; do so now.
@ -1244,7 +1244,7 @@ RegisterID* ReadModifyBracketNode::emitBytecode(BytecodeGenerator& generator, Re
RegisterID* CommaNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst)
{
ASSERT(m_expressions.size() > 1);
Q_ASSERT(m_expressions.size() > 1);
for (size_t i = 0; i < m_expressions.size() - 1; i++)
generator.emitNode(generator.ignoredResult(), m_expressions[i]);
return generator.emitNode(dst, m_expressions.last());
@ -1341,7 +1341,7 @@ RegisterID* DebuggerStatementNode::emitBytecode(BytecodeGenerator& generator, Re
RegisterID* ExprStatementNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst)
{
ASSERT(m_expr);
Q_ASSERT(m_expr);
generator.emitDebugHook(WillExecuteStatement, firstLine(), lastLine());
return generator.emitNode(dst, m_expr);
}
@ -1350,7 +1350,7 @@ RegisterID* ExprStatementNode::emitBytecode(BytecodeGenerator& generator, Regist
RegisterID* VarStatementNode::emitBytecode(BytecodeGenerator& generator, RegisterID*)
{
ASSERT(m_expr);
Q_ASSERT(m_expr);
generator.emitDebugHook(WillExecuteStatement, firstLine(), lastLine());
return generator.emitNode(m_expr);
}
@ -1551,7 +1551,7 @@ RegisterID* ForInNode::emitBytecode(BytecodeGenerator& generator, RegisterID* ds
generator.emitExpressionInfo(assignNode->divot(), assignNode->startOffset(), assignNode->endOffset());
generator.emitPutById(base, ident, propertyName);
} else {
ASSERT(m_lexpr->isBracketAccessorNode());
Q_ASSERT(m_lexpr->isBracketAccessorNode());
BracketAccessorNode* assignNode = static_cast<BracketAccessorNode*>(m_lexpr);
propertyName = generator.newTemporary();
RefPtr<RegisterID> protect = propertyName;
@ -1724,7 +1724,7 @@ SwitchInfo::SwitchType CaseBlockNode::tryOptimizedSwitch(Vector<ExpressionNode*,
return SwitchInfo::SwitchNone;
}
ASSERT(typeForTable == SwitchString);
Q_ASSERT(typeForTable == SwitchString);
if (singleCharacterSwitch) {
int32_t range = max_num - min_num;
@ -1791,9 +1791,9 @@ RegisterID* CaseBlockNode::emitBytecodeForBlock(BytecodeGenerator& generator, Re
if (!m_defaultClause)
generator.emitLabel(defaultLabel.get());
ASSERT(i == labelVector.size());
Q_ASSERT(i == labelVector.size());
if (switchType != SwitchInfo::SwitchNone) {
ASSERT(labelVector.size() == literalVector.size());
Q_ASSERT(labelVector.size() == literalVector.size());
generator.endSwitch(labelVector.size(), labelVector.data(), literalVector.data(), defaultLabel.get(), min_num, max_num);
}
return result;

View file

@ -58,7 +58,7 @@ namespace JSC {
void setIndex(int index)
{
ASSERT(!m_refCount);
Q_ASSERT(!m_refCount);
#ifndef NDEBUG
m_didSetIndex = true;
#endif
@ -72,7 +72,7 @@ namespace JSC {
int index() const
{
ASSERT(m_didSetIndex);
Q_ASSERT(m_didSetIndex);
return m_index;
}
@ -89,7 +89,7 @@ namespace JSC {
void deref()
{
--m_refCount;
ASSERT(m_refCount >= 0);
Q_ASSERT(m_refCount >= 0);
}
int refCount() const

View file

@ -41,14 +41,14 @@ Debugger::~Debugger()
void Debugger::attach(JSGlobalObject* globalObject)
{
ASSERT(!globalObject->debugger());
Q_ASSERT(!globalObject->debugger());
globalObject->setDebugger(this);
m_globalObjects.add(globalObject);
}
void Debugger::detach(JSGlobalObject* globalObject)
{
ASSERT(m_globalObjects.contains(globalObject));
Q_ASSERT(m_globalObjects.contains(globalObject));
m_globalObjects.remove(globalObject);
globalObject->setDebugger(0);
}
@ -57,7 +57,7 @@ void Debugger::recompileAllJSFunctions(JSGlobalData* globalData)
{
// If JavaScript is running, it's not safe to recompile, since we'll end
// up throwing away code that is live on the stack.
ASSERT(!globalData->dynamicGlobalObject);
Q_ASSERT(!globalData->dynamicGlobalObject);
if (globalData->dynamicGlobalObject)
return;

View file

@ -172,14 +172,14 @@ template <typename T> inline NodeDeclarationInfo<T> createNodeDeclarationInfo(T
ParserArenaData<DeclarationStacks::FunctionStack>* funcDecls,
CodeFeatures info, int numConstants)
{
ASSERT((info & ~AllFeatures) == 0);
Q_ASSERT((info & ~AllFeatures) == 0);
NodeDeclarationInfo<T> result = { node, varDecls, funcDecls, info, numConstants };
return result;
}
template <typename T> inline NodeInfo<T> createNodeInfo(T node, CodeFeatures info, int numConstants)
{
ASSERT((info & ~AllFeatures) == 0);
Q_ASSERT((info & ~AllFeatures) == 0);
NodeInfo<T> result = { node, info, numConstants };
return result;
}
@ -4812,7 +4812,7 @@ static ExpressionNode* makeAssignNode(JSGlobalData* globalData, ExpressionNode*
return node;
}
}
ASSERT(loc->isDotAccessorNode());
Q_ASSERT(loc->isDotAccessorNode());
DotAccessorNode* dot = static_cast<DotAccessorNode*>(loc);
if (op == OpEqual)
return new (globalData) AssignDotNode(globalData, dot->base(), dot->identifier(), expr, exprHasAssignments, dot->divot(), dot->divot() - start, end - dot->divot());
@ -4837,7 +4837,7 @@ static ExpressionNode* makePrefixNode(JSGlobalData* globalData, ExpressionNode*
node->setSubexpressionInfo(bracket->divot(), bracket->startOffset());
return node;
}
ASSERT(expr->isDotAccessorNode());
Q_ASSERT(expr->isDotAccessorNode());
DotAccessorNode* dot = static_cast<DotAccessorNode*>(expr);
PrefixDotNode* node = new (globalData) PrefixDotNode(globalData, dot->base(), dot->identifier(), op, divot, divot - start, end - divot);
node->setSubexpressionInfo(dot->divot(), dot->startOffset());
@ -4860,7 +4860,7 @@ static ExpressionNode* makePostfixNode(JSGlobalData* globalData, ExpressionNode*
return node;
}
ASSERT(expr->isDotAccessorNode());
Q_ASSERT(expr->isDotAccessorNode());
DotAccessorNode* dot = static_cast<DotAccessorNode*>(expr);
PostfixDotNode* node = new (globalData) PostfixDotNode(globalData, dot->base(), dot->identifier(), op, divot, divot - start, end - divot);
node->setSubexpressionInfo(dot->divot(), dot->endOffset());
@ -4886,7 +4886,7 @@ static ExpressionNodeInfo makeFunctionCallNode(JSGlobalData* globalData, Express
node->setSubexpressionInfo(bracket->divot(), bracket->endOffset());
return createNodeInfo<ExpressionNode*>(node, features, numConstants);
}
ASSERT(func.m_node->isDotAccessorNode());
Q_ASSERT(func.m_node->isDotAccessorNode());
DotAccessorNode* dot = static_cast<DotAccessorNode*>(func.m_node);
FunctionCallDotNode* node;
if (dot->identifier() == globalData->propertyNames->call)
@ -4920,7 +4920,7 @@ static ExpressionNode* makeDeleteNode(JSGlobalData* globalData, ExpressionNode*
BracketAccessorNode* bracket = static_cast<BracketAccessorNode*>(expr);
return new (globalData) DeleteBracketNode(globalData, bracket->base(), bracket->subscript(), divot, divot - start, end - divot);
}
ASSERT(expr->isDotAccessorNode());
Q_ASSERT(expr->isDotAccessorNode());
DotAccessorNode* dot = static_cast<DotAccessorNode*>(expr);
return new (globalData) DeleteDotNode(globalData, dot->base(), dot->identifier(), divot, divot - start, end - divot);
}

View file

@ -40,14 +40,14 @@ namespace JSC {
, m_exception(exception)
, m_globalObjectScope(callFrame, function->scope().globalObject())
{
ASSERT(!function->isHostFunction());
Q_ASSERT(!function->isHostFunction());
m_closure = m_interpreter->prepareForRepeatCall(function->jsExecutable(), callFrame, function, argCount, function->scope().node(), exception);
m_valid = !*exception;
}
JSValue call()
{
ASSERT(m_valid);
Q_ASSERT(m_valid);
return m_interpreter->execute(m_closure, m_exception);
}
void setThis(JSValue v) { m_closure.setArgument(0, v); }

View file

@ -41,7 +41,7 @@ namespace JSC {
CodeBlock* codeBlock() const { return this[RegisterFile::CodeBlock].Register::codeBlock(); }
ScopeChainNode* scopeChain() const
{
ASSERT(this[RegisterFile::ScopeChain].Register::scopeChain());
Q_ASSERT(this[RegisterFile::ScopeChain].Register::scopeChain());
return this[RegisterFile::ScopeChain].Register::scopeChain();
}
int argumentCount() const { return this[RegisterFile::ArgumentCount].i(); }
@ -70,7 +70,7 @@ namespace JSC {
// or a pointer everywhere.
JSGlobalData& globalData() const
{
ASSERT(scopeChain()->globalData);
Q_ASSERT(scopeChain()->globalData);
return *scopeChain()->globalData;
}
@ -117,7 +117,7 @@ namespace JSC {
ALWAYS_INLINE void init(CodeBlock* codeBlock, Instruction* vPC, ScopeChainNode* scopeChain,
CallFrame* callerFrame, int returnValueRegister, int argc, JSObject* callee)
{
ASSERT(callerFrame); // Use noCaller() rather than 0 for the outer host call frame caller.
Q_ASSERT(callerFrame); // Use noCaller() rather than 0 for the outer host call frame caller.
setCodeBlock(codeBlock);
setScopeChain(scopeChain);

View file

@ -97,7 +97,7 @@ NEVER_INLINE bool Interpreter::resolve(CallFrame* callFrame, Instruction* vPC, J
ScopeChainNode* scopeChain = callFrame->scopeChain();
ScopeChainIterator iter = scopeChain->begin();
ScopeChainIterator end = scopeChain->end();
ASSERT(iter != end);
Q_ASSERT(iter != end);
CodeBlock* codeBlock = callFrame->codeBlock();
Identifier& ident = codeBlock->identifier(property);
@ -128,10 +128,10 @@ NEVER_INLINE bool Interpreter::resolveSkip(CallFrame* callFrame, Instruction* vP
ScopeChainNode* scopeChain = callFrame->scopeChain();
ScopeChainIterator iter = scopeChain->begin();
ScopeChainIterator end = scopeChain->end();
ASSERT(iter != end);
Q_ASSERT(iter != end);
while (skip--) {
++iter;
ASSERT(iter != end);
Q_ASSERT(iter != end);
}
Identifier& ident = codeBlock->identifier(property);
do {
@ -154,7 +154,7 @@ NEVER_INLINE bool Interpreter::resolveGlobal(CallFrame* callFrame, Instruction*
{
int dst = vPC[1].u.operand;
JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(vPC[2].u.jsCell);
ASSERT(globalObject->isGlobalObject());
Q_ASSERT(globalObject->isGlobalObject());
int property = vPC[3].u.operand;
Structure* structure = vPC[4].u.structure;
int offset = vPC[5].u.operand;
@ -209,7 +209,7 @@ NEVER_INLINE bool Interpreter::resolveBaseAndProperty(CallFrame* callFrame, Inst
// FIXME: add scopeDepthIsZero optimization
ASSERT(iter != end);
Q_ASSERT(iter != end);
CodeBlock* codeBlock = callFrame->codeBlock();
Identifier& ident = codeBlock->identifier(property);
@ -543,7 +543,7 @@ NEVER_INLINE HandlerInfo* Interpreter::throwException(CallFrame*& callFrame, JSV
ScopeChainNode* scopeChain = callFrame->scopeChain();
ScopeChain sc(scopeChain);
int scopeDelta = depth(codeBlock, sc) - handler->scopeDepth;
ASSERT(scopeDelta >= 0);
Q_ASSERT(scopeDelta >= 0);
while (scopeDelta--)
scopeChain = scopeChain->pop();
callFrame->setScopeChain(scopeChain);
@ -553,7 +553,7 @@ NEVER_INLINE HandlerInfo* Interpreter::throwException(CallFrame*& callFrame, JSV
JSValue Interpreter::execute(ProgramExecutable* program, CallFrame* callFrame, ScopeChainNode* scopeChain, JSObject* thisObj, JSValue* exception)
{
ASSERT(!scopeChain->globalData->exception);
Q_ASSERT(!scopeChain->globalData->exception);
if (m_reentryDepth >= MaxSecondaryThreadReentryDepth) {
if (!isMainThread() || m_reentryDepth >= MaxMainThreadReentryDepth) {
@ -607,7 +607,7 @@ JSValue Interpreter::execute(ProgramExecutable* program, CallFrame* callFrame, S
JSValue Interpreter::execute(FunctionExecutable* functionExecutable, CallFrame* callFrame, JSFunction* function, JSObject* thisObj, const ArgList& args, ScopeChainNode* scopeChain, JSValue* exception)
{
ASSERT(!scopeChain->globalData->exception);
Q_ASSERT(!scopeChain->globalData->exception);
if (m_reentryDepth >= MaxSecondaryThreadReentryDepth) {
if (!isMainThread() || m_reentryDepth >= MaxMainThreadReentryDepth) {
@ -662,7 +662,7 @@ JSValue Interpreter::execute(FunctionExecutable* functionExecutable, CallFrame*
CallFrameClosure Interpreter::prepareForRepeatCall(FunctionExecutable* FunctionExecutable, CallFrame* callFrame, JSFunction* function, int argCount, ScopeChainNode* scopeChain, JSValue* exception)
{
ASSERT(!scopeChain->globalData->exception);
Q_ASSERT(!scopeChain->globalData->exception);
if (m_reentryDepth >= MaxSecondaryThreadReentryDepth) {
if (!isMainThread() || m_reentryDepth >= MaxMainThreadReentryDepth) {
@ -733,7 +733,7 @@ JSValue Interpreter::execute(EvalExecutable* eval, CallFrame* callFrame, JSObjec
JSValue Interpreter::execute(EvalExecutable* eval, CallFrame* callFrame, JSObject* thisObj, int globalRegisterOffset, ScopeChainNode* scopeChain, JSValue* exception)
{
ASSERT(!scopeChain->globalData->exception);
Q_ASSERT(!scopeChain->globalData->exception);
if (m_reentryDepth >= MaxSecondaryThreadReentryDepth) {
if (!isMainThread() || m_reentryDepth >= MaxMainThreadReentryDepth) {
@ -748,7 +748,7 @@ JSValue Interpreter::execute(EvalExecutable* eval, CallFrame* callFrame, JSObjec
JSVariableObject* variableObject;
for (ScopeChainNode* node = scopeChain; ; node = node->next) {
ASSERT(node);
Q_ASSERT(node);
if (node->object->isVariableObject()) {
variableObject = static_cast<JSVariableObject*>(node->object);
break;
@ -994,7 +994,7 @@ NEVER_INLINE void Interpreter::tryCacheGetByID(CallFrame* callFrame, CodeBlock*
}
if (slot.slotBase() == structure->prototypeForLookup(callFrame)) {
ASSERT(slot.slotBase().isObject());
Q_ASSERT(slot.slotBase().isObject());
JSObject* baseObject = asObject(slot.slotBase());
size_t offset = slot.cachedOffset();
@ -1006,7 +1006,7 @@ NEVER_INLINE void Interpreter::tryCacheGetByID(CallFrame* callFrame, CodeBlock*
offset = baseObject->structure()->get(propertyName);
}
ASSERT(!baseObject->structure()->isUncacheableDictionary());
Q_ASSERT(!baseObject->structure()->isUncacheableDictionary());
vPC[0] = op_get_by_id_proto;
vPC[5] = baseObject->structure();
@ -1491,7 +1491,7 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
if (dividend.isInt32() && divisor.isInt32() && divisor.asInt32() != 0) {
JSValue result = jsNumber(callFrame, dividend.asInt32() % divisor.asInt32());
ASSERT(result);
Q_ASSERT(result);
callFrame->r(dst) = result;
vPC += OPCODE_LENGTH(op_mod);
NEXT_INSTRUCTION();
@ -1898,7 +1898,7 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
*/
int dst = vPC[1].u.operand;
JSGlobalObject* scope = static_cast<JSGlobalObject*>(vPC[2].u.jsCell);
ASSERT(scope->isGlobalObject());
Q_ASSERT(scope->isGlobalObject());
int index = vPC[3].u.operand;
callFrame->r(dst) = scope->registerAt(index);
@ -1911,7 +1911,7 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
Puts value into global slot index.
*/
JSGlobalObject* scope = static_cast<JSGlobalObject*>(vPC[1].u.jsCell);
ASSERT(scope->isGlobalObject());
Q_ASSERT(scope->isGlobalObject());
int index = vPC[2].u.operand;
int value = vPC[3].u.operand;
@ -1931,16 +1931,16 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
ScopeChainNode* scopeChain = callFrame->scopeChain();
ScopeChainIterator iter = scopeChain->begin();
#if !ASSERT_DISABLED
#ifndef QT_NO_DEBUG
ScopeChainIterator end = scopeChain->end();
#endif
ASSERT(iter != end);
Q_ASSERT(iter != end);
while (skip--) {
++iter;
ASSERT(iter != end);
Q_ASSERT(iter != end);
}
ASSERT((*iter)->isVariableObject());
Q_ASSERT((*iter)->isVariableObject());
JSVariableObject* scope = static_cast<JSVariableObject*>(*iter);
callFrame->r(dst) = scope->registerAt(index);
vPC += OPCODE_LENGTH(op_get_scoped_var);
@ -1956,16 +1956,16 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
ScopeChainNode* scopeChain = callFrame->scopeChain();
ScopeChainIterator iter = scopeChain->begin();
#if !ASSERT_DISABLED
#ifndef QT_NO_DEBUG
ScopeChainIterator end = scopeChain->end();
#endif
ASSERT(iter != end);
Q_ASSERT(iter != end);
while (skip--) {
++iter;
ASSERT(iter != end);
Q_ASSERT(iter != end);
}
ASSERT((*iter)->isVariableObject());
Q_ASSERT((*iter)->isVariableObject());
JSVariableObject* scope = static_cast<JSVariableObject*>(*iter);
scope->registerAt(index) = JSValue(callFrame->r(value).jsValue());
vPC += OPCODE_LENGTH(op_put_scoped_var);
@ -2040,12 +2040,12 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
Structure* structure = vPC[4].u.structure;
if (LIKELY(baseCell->structure() == structure)) {
ASSERT(baseCell->isObject());
Q_ASSERT(baseCell->isObject());
JSObject* baseObject = asObject(baseCell);
int dst = vPC[1].u.operand;
int offset = vPC[5].u.operand;
ASSERT(baseObject->get(callFrame, callFrame->codeBlock()->identifier(vPC[3].u.operand)) == baseObject->getDirectOffset(offset));
Q_ASSERT(baseObject->get(callFrame, callFrame->codeBlock()->identifier(vPC[3].u.operand)) == baseObject->getDirectOffset(offset));
callFrame->r(dst) = JSValue(baseObject->getDirectOffset(offset));
vPC += OPCODE_LENGTH(op_get_by_id_self);
@ -2071,7 +2071,7 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
Structure* structure = vPC[4].u.structure;
if (LIKELY(baseCell->structure() == structure)) {
ASSERT(structure->prototypeForLookup(callFrame).isObject());
Q_ASSERT(structure->prototypeForLookup(callFrame).isObject());
JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
Structure* prototypeStructure = vPC[5].u.structure;
@ -2079,8 +2079,8 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
int dst = vPC[1].u.operand;
int offset = vPC[6].u.operand;
ASSERT(protoObject->get(callFrame, callFrame->codeBlock()->identifier(vPC[3].u.operand)) == protoObject->getDirectOffset(offset));
ASSERT(baseValue.get(callFrame, callFrame->codeBlock()->identifier(vPC[3].u.operand)) == protoObject->getDirectOffset(offset));
Q_ASSERT(protoObject->get(callFrame, callFrame->codeBlock()->identifier(vPC[3].u.operand)) == protoObject->getDirectOffset(offset));
Q_ASSERT(baseValue.get(callFrame, callFrame->codeBlock()->identifier(vPC[3].u.operand)) == protoObject->getDirectOffset(offset));
callFrame->r(dst) = JSValue(protoObject->getDirectOffset(offset));
vPC += OPCODE_LENGTH(op_get_by_id_proto);
@ -2135,8 +2135,8 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
int dst = vPC[1].u.operand;
int offset = vPC[7].u.operand;
ASSERT(baseObject->get(callFrame, callFrame->codeBlock()->identifier(vPC[3].u.operand)) == baseObject->getDirectOffset(offset));
ASSERT(baseValue.get(callFrame, callFrame->codeBlock()->identifier(vPC[3].u.operand)) == baseObject->getDirectOffset(offset));
Q_ASSERT(baseObject->get(callFrame, callFrame->codeBlock()->identifier(vPC[3].u.operand)) == baseObject->getDirectOffset(offset));
Q_ASSERT(baseValue.get(callFrame, callFrame->codeBlock()->identifier(vPC[3].u.operand)) == baseObject->getDirectOffset(offset));
callFrame->r(dst) = JSValue(baseObject->getDirectOffset(offset));
vPC += OPCODE_LENGTH(op_get_by_id_chain);
@ -2258,7 +2258,7 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
Structure* newStructure = vPC[5].u.structure;
if (LIKELY(baseCell->structure() == oldStructure)) {
ASSERT(baseCell->isObject());
Q_ASSERT(baseCell->isObject());
JSObject* baseObject = asObject(baseCell);
RefPtr<Structure>* it = vPC[6].u.structureChain->head();
@ -2277,7 +2277,7 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
int value = vPC[3].u.operand;
unsigned offset = vPC[7].u.operand;
ASSERT(baseObject->offsetForLocation(baseObject->getDirectLocation(callFrame->codeBlock()->identifier(vPC[2].u.operand))) == offset);
Q_ASSERT(baseObject->offsetForLocation(baseObject->getDirectLocation(callFrame->codeBlock()->identifier(vPC[2].u.operand))) == offset);
baseObject->putDirectOffset(offset, callFrame->r(value).jsValue());
vPC += OPCODE_LENGTH(op_put_by_id_transition);
@ -2307,12 +2307,12 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
Structure* structure = vPC[4].u.structure;
if (LIKELY(baseCell->structure() == structure)) {
ASSERT(baseCell->isObject());
Q_ASSERT(baseCell->isObject());
JSObject* baseObject = asObject(baseCell);
int value = vPC[3].u.operand;
unsigned offset = vPC[5].u.operand;
ASSERT(baseObject->offsetForLocation(baseObject->getDirectLocation(callFrame->codeBlock()->identifier(vPC[2].u.operand))) == offset);
Q_ASSERT(baseObject->offsetForLocation(baseObject->getDirectLocation(callFrame->codeBlock()->identifier(vPC[2].u.operand))) == offset);
baseObject->putDirectOffset(offset, callFrame->r(value).jsValue());
vPC += OPCODE_LENGTH(op_put_by_id_replace);
@ -3037,7 +3037,7 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
NEXT_INSTRUCTION();
}
ASSERT(callType == CallTypeNone);
Q_ASSERT(callType == CallTypeNone);
exceptionValue = createNotAFunctionError(callFrame, v, vPC - callFrame->codeBlock()->instructions().begin(), callFrame->codeBlock());
goto vm_throw;
@ -3056,7 +3056,7 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
exceptionValue = createStackOverflowError(callFrame);
goto vm_throw;
}
ASSERT(!asFunction(callFrame->callee())->isHostFunction());
Q_ASSERT(!asFunction(callFrame->callee())->isHostFunction());
int32_t expectedParams = static_cast<JSFunction*>(callFrame->callee())->jsExecutable()->parameterCount();
int32_t inplaceArgs = min(argCount, expectedParams);
int32_t i = 0;
@ -3192,7 +3192,7 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
NEXT_INSTRUCTION();
}
ASSERT(callType == CallTypeNone);
Q_ASSERT(callType == CallTypeNone);
exceptionValue = createNotAFunctionError(callFrame, v, vPC - callFrame->codeBlock()->instructions().begin(), callFrame->codeBlock());
goto vm_throw;
@ -3211,7 +3211,7 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
*/
int src = vPC[1].u.operand;
ASSERT(callFrame->codeBlock()->needsFullScopeChain());
Q_ASSERT(callFrame->codeBlock()->needsFullScopeChain());
asActivation(callFrame->r(src).jsValue())->copyRegisters(callFrame->optionalCalleeArguments());
@ -3231,7 +3231,7 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
This opcode should only be used immediately before op_ret.
*/
ASSERT(callFrame->codeBlock()->usesArguments() && !callFrame->codeBlock()->needsFullScopeChain());
Q_ASSERT(callFrame->codeBlock()->usesArguments() && !callFrame->codeBlock()->needsFullScopeChain());
if (callFrame->optionalCalleeArguments())
callFrame->optionalCalleeArguments()->copyRegisters();
@ -3450,7 +3450,7 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
NEXT_INSTRUCTION();
}
ASSERT(constructType == ConstructTypeNone);
Q_ASSERT(constructType == ConstructTypeNone);
exceptionValue = createNotAConstructorError(callFrame, v, vPC - callFrame->codeBlock()->instructions().begin(), callFrame->codeBlock());
goto vm_throw;
@ -3621,8 +3621,8 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
ex. This is only valid after an exception has been raised,
and usually forms the beginning of an exception handler.
*/
ASSERT(exceptionValue);
ASSERT(!globalData->exception);
Q_ASSERT(exceptionValue);
Q_ASSERT(!globalData->exception);
CodeBlock* codeBlock = callFrame->codeBlock();
Debugger* debugger = callFrame->dynamicGlobalObject()->debugger();
@ -3688,7 +3688,7 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
if (callFrame->codeBlock()->needsFullScopeChain()) {
ScopeChainNode* scopeChain = callFrame->scopeChain();
ASSERT(scopeChain->refCount > 1);
Q_ASSERT(scopeChain->refCount > 1);
scopeChain->deref();
}
int result = vPC[1].u.operand;
@ -3709,10 +3709,10 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
int property = vPC[2].u.operand;
int function = vPC[3].u.operand;
ASSERT(callFrame->r(base).jsValue().isObject());
Q_ASSERT(callFrame->r(base).jsValue().isObject());
JSObject* baseObj = asObject(callFrame->r(base).jsValue());
Identifier& ident = callFrame->codeBlock()->identifier(property);
ASSERT(callFrame->r(function).jsValue().isObject());
Q_ASSERT(callFrame->r(function).jsValue().isObject());
baseObj->defineGetter(callFrame, ident, asObject(callFrame->r(function).jsValue()));
vPC += OPCODE_LENGTH(op_put_getter);
@ -3733,10 +3733,10 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
int property = vPC[2].u.operand;
int function = vPC[3].u.operand;
ASSERT(callFrame->r(base).jsValue().isObject());
Q_ASSERT(callFrame->r(base).jsValue().isObject());
JSObject* baseObj = asObject(callFrame->r(base).jsValue());
Identifier& ident = callFrame->codeBlock()->identifier(property);
ASSERT(callFrame->r(function).jsValue().isObject());
Q_ASSERT(callFrame->r(function).jsValue().isObject());
baseObj->defineSetter(callFrame, ident, asObject(callFrame->r(function).jsValue()), 0);
vPC += OPCODE_LENGTH(op_put_setter);
@ -3817,7 +3817,7 @@ JSValue Interpreter::retrieveArguments(CallFrame* callFrame, JSFunction* functio
CodeBlock* codeBlock = functionCallFrame->codeBlock();
if (codeBlock->usesArguments()) {
ASSERT(codeBlock->codeType() == FunctionCode);
Q_ASSERT(codeBlock->codeType() == FunctionCode);
SymbolTable& symbolTable = *codeBlock->symbolTable();
int argumentsIndex = symbolTable.get(functionCallFrame->propertyNames().arguments.ustring().rep()).getIndex();
if (!functionCallFrame->r(argumentsIndex).jsValue()) {

View file

@ -105,7 +105,7 @@ namespace JSC {
ALWAYS_INLINE Register::Register(const JSValue& v)
{
#if ENABLE(JSC_ZOMBIES)
ASSERT(!v.isZombie());
Q_ASSERT(!v.isZombie());
#endif
u.value = JSValue::encode(v);
}
@ -113,7 +113,7 @@ namespace JSC {
ALWAYS_INLINE Register& Register::operator=(const JSValue& v)
{
#if ENABLE(JSC_ZOMBIES)
ASSERT(!v.isZombie());
Q_ASSERT(!v.isZombie());
#endif
u.value = JSValue::encode(v);
return *this;

View file

@ -170,8 +170,8 @@ namespace JSC {
, m_globalObject(0)
{
// Verify that our values will play nice with mmap and VirtualAlloc.
ASSERT(isPageAligned(maxGlobals));
ASSERT(isPageAligned(capacity));
Q_ASSERT(isPageAligned(maxGlobals));
Q_ASSERT(isPageAligned(capacity));
size_t bufferLength = (capacity + maxGlobals) * sizeof(Register);
#if OS(QNX)

View file

@ -58,7 +58,7 @@ inline size_t roundUpAllocationSize(size_t request, size_t granularity)
// Round up to next page boundary
size_t size = request + (granularity - 1);
size = size & ~(granularity - 1);
ASSERT(size >= request);
Q_ASSERT(size >= request);
return size;
}
@ -84,7 +84,7 @@ public:
void* alloc(size_t n)
{
ASSERT(m_freePtr <= m_end);
Q_ASSERT(m_freePtr <= m_end);
// Round 'n' up to a multiple of word size; if all allocations are of
// word sized quantities, then all subsequent allocations will be aligned.
@ -226,7 +226,7 @@ inline void* ExecutablePool::poolAllocate(size_t n)
Allocation result = systemAlloc(allocSize);
ASSERT(m_end >= m_freePtr);
Q_ASSERT(m_end >= m_freePtr);
if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
// Replace allocation pool
m_freePtr = result.pages + n;

View file

@ -175,7 +175,7 @@ void JIT::privateCompileMainPass()
for (m_bytecodeIndex = 0; m_bytecodeIndex < instructionCount; ) {
Instruction* currentInstruction = instructionsBegin + m_bytecodeIndex;
ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeIndex);
Q_ASSERT_X(m_interpreter->isOpcode(currentInstruction->u.opcode), "JIT::privateCompileMainPass", "privateCompileMainPass gone bad");
#if ENABLE(OPCODE_SAMPLING)
if (m_bytecodeIndex > 0) // Avoid the overhead of sampling op_enter twice.
@ -325,8 +325,8 @@ void JIT::privateCompileMainPass()
}
}
ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
Q_ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
Q_ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
#ifndef NDEBUG
// Reset this, in order to guard its use with ASSERTs.
@ -421,16 +421,16 @@ void JIT::privateCompileSlowCases()
ASSERT_NOT_REACHED();
}
ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to,"Not enough jumps linked in slow case codegen.");
ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");
Q_ASSERT_X(iter == m_slowCases.end() || firstTo != iter->to, "JIT::privateCompileSlowCases", "Not enough jumps linked in slow case codegen.");
Q_ASSERT_X(firstTo == (iter - 1)->to, "JIT::privateCompileSlowCases", "Too many jumps linked in slow case codegen.");
emitJumpSlowToHot(jump(), 0);
}
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
Q_ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
#endif
ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
Q_ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
#ifndef NDEBUG
// Reset this, in order to guard its use with ASSERTs.
@ -476,7 +476,7 @@ JITCode JIT::privateCompile()
jump(afterRegisterFileCheck);
}
ASSERT(m_jmpTable.isEmpty());
Q_ASSERT(m_jmpTable.isEmpty());
LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
@ -486,8 +486,8 @@ JITCode JIT::privateCompile()
unsigned bytecodeIndex = record.bytecodeIndex;
if (record.type != SwitchRecord::String) {
ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());
Q_ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
Q_ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());
record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeIndex + record.defaultOffset]);
@ -496,7 +496,7 @@ JITCode JIT::privateCompile()
record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeIndex + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
}
} else {
ASSERT(record.type == SwitchRecord::String);
Q_ASSERT(record.type == SwitchRecord::String);
record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeIndex + record.defaultOffset]);
@ -589,7 +589,7 @@ void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* ca
// Currently we only link calls with the exact number of arguments.
// If this is a native call calleeCodeBlock is null so the number of parameters is unimportant
if (!calleeCodeBlock || (callerArgCount == calleeCodeBlock->m_numParameters)) {
ASSERT(!callLinkInfo->isLinked());
Q_ASSERT(!callLinkInfo->isLinked());
if (calleeCodeBlock)
calleeCodeBlock->addCaller(callLinkInfo);

View file

@ -846,7 +846,7 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsi
// Double case 1: Op1 is not int32; Op2 is unknown.
notInt32Op1.link(this);
ASSERT(op1IsInRegisters);
Q_ASSERT(op1IsInRegisters);
// Verify Op1 is double.
if (!types.first().definitelyIsNumber())
@ -914,7 +914,7 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsi
// Double case 2: Op1 is int32; Op2 is not int32.
notInt32Op2.link(this);
ASSERT(op2IsInRegisters);
Q_ASSERT(op2IsInRegisters);
if (!op1IsInRegisters)
emitLoadPayload(op1, regT0);
@ -2167,7 +2167,7 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsign
else if (opcodeID == op_sub)
addSlowCase(branchSub32(Overflow, regT1, regT0));
else {
ASSERT(opcodeID == op_mul);
Q_ASSERT(opcodeID == op_mul);
addSlowCase(branchMul32(Overflow, regT1, regT0));
addSlowCase(branchTest32(Zero, regT0));
}
@ -2253,7 +2253,7 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
else if (opcodeID == op_mul)
mulDouble(fpRegT2, fpRegT1);
else {
ASSERT(opcodeID == op_div);
Q_ASSERT(opcodeID == op_div);
divDouble(fpRegT2, fpRegT1);
}
moveDoubleToPtr(fpRegT1, regT0);
@ -2459,7 +2459,7 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
emitGetVirtualRegisters(src1, regT0, src2, regT1);
if (types.second().isReusable() && supportsFloatingPoint()) {
ASSERT(types.second().mightBeNumber());
Q_ASSERT(types.second().mightBeNumber());
// Check op2 is a number
Jump op2imm = emitJumpIfImmediateInteger(regT1);
@ -2490,7 +2490,7 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
else if (opcodeID == op_sub)
subDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
else {
ASSERT(opcodeID == op_mul);
Q_ASSERT(opcodeID == op_mul);
mulDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
}
@ -2505,7 +2505,7 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
op2imm.link(this);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
} else if (types.first().isReusable() && supportsFloatingPoint()) {
ASSERT(types.first().mightBeNumber());
Q_ASSERT(types.first().mightBeNumber());
// Check op1 is a number
Jump op1imm = emitJumpIfImmediateInteger(regT0);
@ -2537,7 +2537,7 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
else if (opcodeID == op_sub)
subDouble(fpRegT1, fpRegT0);
else {
ASSERT(opcodeID == op_mul);
Q_ASSERT(opcodeID == op_mul);
mulDouble(fpRegT1, fpRegT0);
}
storeDouble(fpRegT0, Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)));
@ -2563,7 +2563,7 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
signExtend32ToPtr(regT0, regT0);
emitFastArithReTagImmediate(regT0, regT0);
} else {
ASSERT(opcodeID == op_mul);
Q_ASSERT(opcodeID == op_mul);
// convert eax & edx from JSImmediates to ints, and check if either are zero
emitFastArithImmToInt(regT1);
Jump op1Zero = emitFastArithDeTagImmediateJumpIfZero(regT0);
@ -2683,7 +2683,7 @@ void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>
stubCall.call(result);
} else {
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
ASSERT(types.first().mightBeNumber() && types.second().mightBeNumber());
Q_ASSERT(types.first().mightBeNumber() && types.second().mightBeNumber());
compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
}
}

View file

@ -327,7 +327,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
addSlowCase(jumpToSlow);
ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
Q_ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
@ -411,7 +411,7 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
sampleCodeBlock(m_codeBlock);
// If not, we need an extra case in the if below!
ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
Q_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
// Done! - return back to the hot path.
if (opcodeID == op_construct)
@ -702,7 +702,7 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
sampleCodeBlock(m_codeBlock);
// If not, we need an extra case in the if below!
ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
Q_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
// Done! - return back to the hot path.
if (opcodeID == op_construct)

View file

@ -68,7 +68,7 @@ namespace JSC {
unsigned offsetOf(void* pointerIntoCode)
{
intptr_t result = reinterpret_cast<intptr_t>(pointerIntoCode) - reinterpret_cast<intptr_t>(m_ref.m_code.executableAddress());
ASSERT(static_cast<intptr_t>(static_cast<unsigned>(result)) == result);
Q_ASSERT(static_cast<intptr_t>(static_cast<unsigned>(result)) == result);
return static_cast<unsigned>(result);
}
@ -85,7 +85,7 @@ namespace JSC {
size_t size()
{
ASSERT(m_ref.m_code.executableAddress());
Q_ASSERT(m_ref.m_code.executableAddress());
return m_ref.m_size;
}

View file

@ -72,7 +72,7 @@ ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
{
ASSERT(m_codeBlock->isConstantRegisterIndex(src));
Q_ASSERT(m_codeBlock->isConstantRegisterIndex(src));
return m_codeBlock->getConstant(src);
}
@ -104,7 +104,7 @@ ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHead
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
Q_ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
Call nakedCall = nearCall();
m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, function.executableAddress()));
@ -137,8 +137,8 @@ ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace
ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace)
{
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) == insnSpace);
ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin == constSpace);
Q_ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) == insnSpace);
Q_ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin == constSpace);
#endif
}
@ -217,14 +217,14 @@ ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&
ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
Q_ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
}
ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
Q_ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
const JumpList::JumpVector& jumpVector = jumpList.jumps();
size_t size = jumpVector.size();
@ -234,14 +234,14 @@ ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
Q_ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
m_jmpTable.append(JumpTable(jump, m_bytecodeIndex + relativeOffset));
}
ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
Q_ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
jump.linkTo(m_labels[m_bytecodeIndex + relativeOffset], this);
}
@ -249,15 +249,15 @@ ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
#if ENABLE(SAMPLING_FLAGS)
ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
{
ASSERT(flag >= 1);
ASSERT(flag <= 32);
Q_ASSERT(flag >= 1);
Q_ASSERT(flag <= 32);
or32(Imm32(1u << (flag - 1)), AbsoluteAddress(&SamplingFlags::s_flags));
}
ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
{
ASSERT(flag >= 1);
ASSERT(flag <= 32);
Q_ASSERT(flag >= 1);
Q_ASSERT(flag <= 32);
and32(Imm32(~(1u << (flag - 1))), AbsoluteAddress(&SamplingFlags::s_flags));
}
#endif
@ -370,10 +370,10 @@ inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
inline void JIT::emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
{
ASSERT(tag != payload);
Q_ASSERT(tag != payload);
if (base == callFrameRegister) {
ASSERT(payload != base);
Q_ASSERT(payload != base);
emitLoadPayload(index, payload);
emitLoadTag(index, tag);
return;
@ -616,7 +616,7 @@ ALWAYS_INLINE void JIT::killLastResultRegister()
// get arg puts an arg from the SF register array into a h/w register
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
Q_ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
// TODO: we want to reuse values that are already in registers if we can - add a register allocator!
if (m_codeBlock->isConstantRegisterIndex(src)) {

View file

@ -397,7 +397,7 @@ void JIT::emit_op_end(Instruction* currentInstruction)
{
if (m_codeBlock->needsFullScopeChain())
JITStubCall(this, cti_op_end).call();
ASSERT(returnValueRegister != callFrameRegister);
Q_ASSERT(returnValueRegister != callFrameRegister);
emitLoad(currentInstruction[1].u.operand, regT1, regT0);
restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
ret();
@ -532,7 +532,7 @@ void JIT::emit_op_get_global_var(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[2].u.jsCell);
ASSERT(globalObject->isGlobalObject());
Q_ASSERT(globalObject->isGlobalObject());
int index = currentInstruction[3].u.operand;
loadPtr(&globalObject->d()->registers, regT2);
@ -545,7 +545,7 @@ void JIT::emit_op_get_global_var(Instruction* currentInstruction)
void JIT::emit_op_put_global_var(Instruction* currentInstruction)
{
JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[1].u.jsCell);
ASSERT(globalObject->isGlobalObject());
Q_ASSERT(globalObject->isGlobalObject());
int index = currentInstruction[2].u.operand;
int value = currentInstruction[3].u.operand;
@ -1843,7 +1843,7 @@ void JIT::emit_op_end(Instruction* currentInstruction)
{
if (m_codeBlock->needsFullScopeChain())
JITStubCall(this, cti_op_end).call();
ASSERT(returnValueRegister != callFrameRegister);
Q_ASSERT(returnValueRegister != callFrameRegister);
emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
ret();
@ -2030,9 +2030,9 @@ void JIT::emit_op_ret(Instruction* currentInstruction)
if (m_codeBlock->needsFullScopeChain())
JITStubCall(this, cti_op_ret_scopeChain).call();
ASSERT(callFrameRegister != regT1);
ASSERT(regT1 != returnValueRegister);
ASSERT(returnValueRegister != callFrameRegister);
Q_ASSERT(callFrameRegister != regT1);
Q_ASSERT(regT1 != returnValueRegister);
Q_ASSERT(returnValueRegister != callFrameRegister);
// Return the result in %eax.
emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
@ -2332,7 +2332,7 @@ void JIT::emit_op_throw(Instruction* currentInstruction)
JITStubCall stubCall(this, cti_op_throw);
stubCall.addArgument(currentInstruction[1].u.operand, regT2);
stubCall.call();
ASSERT(regT0 == returnValueRegister);
Q_ASSERT(regT0 == returnValueRegister);
#ifndef NDEBUG
// cti_op_throw always changes it's return address,
// this point in the code should never be reached.

View file

@ -198,7 +198,7 @@ void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
void JIT::emit_op_method_check(Instruction* currentInstruction)
{
// Assert that the following instruction is a get_by_id.
ASSERT((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode == op_get_by_id);
Q_ASSERT((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode == op_get_by_id);
currentInstruction += OPCODE_LENGTH(op_method_check);
@ -227,7 +227,7 @@ void JIT::emit_op_method_check(Instruction* currentInstruction)
Jump match = jump();
ASSERT_UNUSED(protoObj, differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
Q_ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
ASSERT_UNUSED(putFunction, differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);
// Link the failure cases here.
@ -389,12 +389,12 @@ void JIT::compileGetByIdHotPath()
DataLabelPtr structureToCompare;
Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
addSlowCase(structureCheck);
ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
Q_ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
Q_ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT2);
Label externalLoadComplete(this);
ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
Q_ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
ASSERT_UNUSED(externalLoad, differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);
DataLabel32 displacementLabel1 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
@ -403,7 +403,7 @@ void JIT::compileGetByIdHotPath()
ASSERT_UNUSED(displacementLabel2, differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetGetByIdPropertyMapOffset2);
Label putResult(this);
ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
Q_ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
}
@ -439,7 +439,7 @@ void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<Sl
END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
Q_ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
// Track the location of the call; this will be used to recover patch information.
m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
@ -468,12 +468,12 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
// It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
DataLabelPtr structureToCompare;
addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
Q_ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
// Plant a load from a bogus ofset in the object's property map; we will patch this later, if it is to be used.
Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
Label externalLoadComplete(this);
ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
Q_ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
ASSERT_UNUSED(externalLoad, differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchGetByIdDefaultOffset)); // payload
@ -594,7 +594,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
ret();
ASSERT(!failureCases.empty());
Q_ASSERT(!failureCases.empty());
failureCases.link(this);
restoreArgumentReferenceForTrampoline();
Call failureCall = tailRecursiveCall();
@ -604,7 +604,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));
if (willNeedStorageRealloc) {
ASSERT(m_calls.size() == 1);
Q_ASSERT(m_calls.size() == 1);
patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
}
@ -634,12 +634,12 @@ void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, St
void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
{
ASSERT(!methodCallLinkInfo.cachedStructure);
Q_ASSERT(!methodCallLinkInfo.cachedStructure);
methodCallLinkInfo.cachedStructure = structure;
structure->ref();
Structure* prototypeStructure = proto->structure();
ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
Q_ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
prototypeStructure->ref();
@ -836,7 +836,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
{
// regT0 holds a JSCell*
ASSERT(count);
Q_ASSERT(count);
JumpList bucketsOfFail;
@ -859,7 +859,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
}
ASSERT(protoObject);
Q_ASSERT(protoObject);
compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
Jump success = jump();
@ -890,7 +890,7 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
{
// regT0 holds a JSCell*
ASSERT(count);
Q_ASSERT(count);
JumpList bucketsOfFail;
@ -913,7 +913,7 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
}
ASSERT(protoObject);
Q_ASSERT(protoObject);
compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
Jump success = jump();
@ -944,9 +944,9 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID structure, RegisterID offset)
{
ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));
ASSERT(sizeof(JSValue) == 8);
Q_ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
Q_ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));
Q_ASSERT(sizeof(JSValue) == 8);
Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage)+OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
@ -1040,8 +1040,8 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID structure, RegisterID offset, RegisterID scratch)
{
ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));
Q_ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
Q_ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));
Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
loadPtr(BaseIndex(base, offset, ScalePtr, OBJECT_OFFSETOF(JSObject, m_inlineStorage)), result);
@ -1231,7 +1231,7 @@ void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
void JIT::emit_op_method_check(Instruction* currentInstruction)
{
// Assert that the following instruction is a get_by_id.
ASSERT((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode == op_get_by_id);
Q_ASSERT((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode == op_get_by_id);
currentInstruction += OPCODE_LENGTH(op_method_check);
unsigned resultVReg = currentInstruction[1].u.operand;
@ -1532,7 +1532,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
ret();
ASSERT(!failureCases.empty());
Q_ASSERT(!failureCases.empty());
failureCases.link(this);
restoreArgumentReferenceForTrampoline();
Call failureCall = tailRecursiveCall();
@ -1542,7 +1542,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));
if (willNeedStorageRealloc) {
ASSERT(m_calls.size() == 1);
Q_ASSERT(m_calls.size() == 1);
patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
}
@ -1571,12 +1571,12 @@ void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, St
void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
{
ASSERT(!methodCallLinkInfo.cachedStructure);
Q_ASSERT(!methodCallLinkInfo.cachedStructure);
methodCallLinkInfo.cachedStructure = structure;
structure->ref();
Structure* prototypeStructure = proto->structure();
ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
Q_ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
prototypeStructure->ref();
@ -1763,7 +1763,7 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
{
ASSERT(count);
Q_ASSERT(count);
JumpList bucketsOfFail;
@ -1787,7 +1787,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
}
ASSERT(protoObject);
Q_ASSERT(protoObject);
compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
Jump success = jump();
@ -1816,7 +1816,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
ASSERT(count);
Q_ASSERT(count);
JumpList bucketsOfFail;
@ -1839,7 +1839,7 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
#endif
}
ASSERT(protoObject);
Q_ASSERT(protoObject);
compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
Jump success = jump();

View file

@ -189,7 +189,7 @@ namespace JSC {
#if USE(JSVALUE32_64)
JIT::Call call(unsigned dst) // dst is a virtual register.
{
ASSERT(m_returnType == Value || m_returnType == Cell);
Q_ASSERT(m_returnType == Value || m_returnType == Cell);
JIT::Call call = this->call();
if (m_returnType == Value)
m_jit->emitStore(dst, JIT::regT1, JIT::regT0);

View file

@ -767,16 +767,16 @@ JITThunks::JITThunks(JSGlobalData* globalData)
// Unfortunate the arm compiler does not like the use of offsetof on JITStackFrame (since it contains non POD types),
// and the OBJECT_OFFSETOF macro does not appear constantish enough for it to be happy with its use in COMPILE_ASSERT
// macros.
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedReturnAddress) == 0x20);
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR4) == 0x24);
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR5) == 0x28);
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR6) == 0x2c);
Q_ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedReturnAddress) == 0x20);
Q_ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR4) == 0x24);
Q_ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR5) == 0x28);
Q_ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR6) == 0x2c);
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, registerFile) == 0x30);
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, callFrame) == 0x34);
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, exception) == 0x38);
Q_ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, registerFile) == 0x30);
Q_ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, callFrame) == 0x34);
Q_ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, exception) == 0x38);
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == 0x1C);
Q_ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == 0x1C);
#endif
}
@ -887,7 +887,7 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
}
if (slot.slotBase() == structure->prototypeForLookup(callFrame)) {
ASSERT(slot.slotBase().isObject());
Q_ASSERT(slot.slotBase().isObject());
JSObject* slotBaseObject = asObject(slot.slotBase());
size_t offset = slot.cachedOffset();
@ -901,8 +901,8 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
stubInfo->initGetByIdProto(structure, slotBaseObject->structure());
ASSERT(!structure->isDictionary());
ASSERT(!slotBaseObject->structure()->isDictionary());
Q_ASSERT(!structure->isDictionary());
Q_ASSERT(!slotBaseObject->structure()->isDictionary());
JIT::compileGetByIdProto(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, slotBaseObject->structure(), offset, returnAddress);
return;
}
@ -975,7 +975,7 @@ struct StackHack {
// handling code out of line as possible.
static NEVER_INLINE void returnToThrowTrampoline(JSGlobalData* globalData, ReturnAddressPtr exceptionLocation, ReturnAddressPtr& returnAddressSlot)
{
ASSERT(globalData->exception);
Q_ASSERT(globalData->exception);
globalData->exceptionLocation = exceptionLocation;
returnAddressSlot = ReturnAddressPtr(FunctionPtr(ctiVMThrowTrampoline));
}
@ -1109,7 +1109,7 @@ DEFINE_STUB_FUNCTION(void, op_end)
STUB_INIT_STACK_FRAME(stackFrame);
ScopeChainNode* scopeChain = stackFrame.callFrame->scopeChain();
ASSERT(scopeChain->refCount > 1);
Q_ASSERT(scopeChain->refCount > 1);
scopeChain->deref();
}
@ -1274,7 +1274,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_put_by_id_transition_realloc)
int32_t oldSize = stackFrame.args[3].int32();
int32_t newSize = stackFrame.args[4].int32();
ASSERT(baseValue.isObject());
Q_ASSERT(baseValue.isObject());
JSObject* base = asObject(baseValue);
base->allocatePropertyStorage(oldSize, newSize);
@ -1304,7 +1304,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check)
// If we successfully got something, then the base from which it is being accessed must
// be an object. (Assertion to ensure asObject() call below is safe, which comes after
// an isCacheable() chceck.
ASSERT(!slot.isCacheable() || slot.slotBase().isObject());
Q_ASSERT(!slot.isCacheable() || slot.slotBase().isObject());
// Check that:
// * We're dealing with a JSCell,
@ -1329,7 +1329,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check)
slotBaseObject->flattenDictionaryObject();
// The result fetched should always be the callee!
ASSERT(result == JSValue(callee));
Q_ASSERT(result == JSValue(callee));
// Check to see if the function is on the object's prototype. Patch up the code to optimize.
if (slot.slotBase() == structure->prototypeForLookup(callFrame)) {
@ -1396,13 +1396,13 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
CodeBlock* codeBlock = callFrame->codeBlock();
StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
ASSERT(slot.slotBase().isObject());
Q_ASSERT(slot.slotBase().isObject());
PolymorphicAccessStructureList* polymorphicStructureList;
int listIndex = 1;
if (stubInfo->accessType == access_get_by_id_self) {
ASSERT(!stubInfo->stubRoutine);
Q_ASSERT(!stubInfo->stubRoutine);
polymorphicStructureList = new PolymorphicAccessStructureList(CodeLocationLabel(), stubInfo->u.getByIdSelf.baseObjectStructure);
stubInfo->initGetByIdSelfList(polymorphicStructureList, 2);
} else {
@ -1445,7 +1445,7 @@ static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(Str
ASSERT_NOT_REACHED();
}
ASSERT(listIndex < POLYMORPHIC_LIST_CACHE_SIZE);
Q_ASSERT(listIndex < POLYMORPHIC_LIST_CACHE_SIZE);
return prototypeStructureList;
}
@ -1471,7 +1471,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
CodeBlock* codeBlock = callFrame->codeBlock();
StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
ASSERT(slot.slotBase().isObject());
Q_ASSERT(slot.slotBase().isObject());
JSObject* slotBaseObject = asObject(slot.slotBase());
size_t offset = slot.cachedOffset();
@ -1479,7 +1479,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
if (slot.slotBase() == baseValue)
ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
else if (slot.slotBase() == baseValue.asCell()->structure()->prototypeForLookup(callFrame)) {
ASSERT(!baseValue.asCell()->structure()->isDictionary());
Q_ASSERT(!baseValue.asCell()->structure()->isDictionary());
// Since we're accessing a prototype in a loop, it's a good bet that it
// should not be treated as a dictionary.
if (slotBaseObject->structure()->isDictionary()) {
@ -1495,7 +1495,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
} else if (size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(), propertyName, offset)) {
ASSERT(!baseValue.asCell()->structure()->isDictionary());
Q_ASSERT(!baseValue.asCell()->structure()->isDictionary());
int listIndex;
PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(stubInfo, listIndex);
@ -1570,7 +1570,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_instanceof)
JSValue proto = stackFrame.args[2].jsValue();
// At least one of these checks must have failed to get to the slow case.
ASSERT(!value.isCell() || !baseVal.isCell() || !proto.isCell()
Q_ASSERT(!value.isCell() || !baseVal.isCell() || !proto.isCell()
|| !value.isObject() || !baseVal.isObject() || !proto.isObject()
|| (asObject(baseVal)->structure()->typeInfo().flags() & (ImplementsHasInstance | OverridesHasInstance)) != ImplementsHasInstance);
@ -1585,7 +1585,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_instanceof)
stackFrame.globalData->exception = createInvalidParamError(callFrame, "instanceof", baseVal, vPCIndex, codeBlock);
VM_THROW_EXCEPTION();
}
ASSERT(typeInfo.type() != UnspecifiedType);
Q_ASSERT(typeInfo.type() != UnspecifiedType);
if (!typeInfo.overridesHasInstance()) {
if (!value.isObject())
@ -1645,13 +1645,13 @@ DEFINE_STUB_FUNCTION(void*, op_call_JSFunction)
{
STUB_INIT_STACK_FRAME(stackFrame);
#if !ASSERT_DISABLED
#ifndef QT_NO_DEBUG
CallData callData;
ASSERT(stackFrame.args[0].jsValue().getCallData(callData) == CallTypeJS);
Q_ASSERT(stackFrame.args[0].jsValue().getCallData(callData) == CallTypeJS);
#endif
JSFunction* function = asFunction(stackFrame.args[0].jsValue());
ASSERT(!function->isHostFunction());
Q_ASSERT(!function->isHostFunction());
FunctionExecutable* executable = function->jsExecutable();
ScopeChainNode* callDataScopeChain = function->scope().node();
executable->jitCode(stackFrame.callFrame, callDataScopeChain);
@ -1665,11 +1665,11 @@ DEFINE_STUB_FUNCTION(VoidPtrPair, op_call_arityCheck)
CallFrame* callFrame = stackFrame.callFrame;
JSFunction* callee = asFunction(stackFrame.args[0].jsValue());
ASSERT(!callee->isHostFunction());
Q_ASSERT(!callee->isHostFunction());
CodeBlock* newCodeBlock = &callee->jsExecutable()->generatedBytecode();
int argCount = stackFrame.args[2].int32();
ASSERT(argCount != newCodeBlock->m_numParameters);
Q_ASSERT(argCount != newCodeBlock->m_numParameters);
CallFrame* oldCallFrame = callFrame->callerFrame();
@ -1746,7 +1746,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_NotJSFunction)
CallData callData;
CallType callType = funcVal.getCallData(callData);
ASSERT(callType != CallTypeJS);
Q_ASSERT(callType != CallTypeJS);
if (callType == CallTypeHost) {
int registerOffset = stackFrame.args[1].int32();
@ -1777,7 +1777,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_NotJSFunction)
return JSValue::encode(returnValue);
}
ASSERT(callType == CallTypeNone);
Q_ASSERT(callType == CallTypeNone);
CallFrame* callFrame = stackFrame.callFrame;
CodeBlock* codeBlock = callFrame->codeBlock();
@ -1808,7 +1808,7 @@ DEFINE_STUB_FUNCTION(void, op_tear_off_activation)
{
STUB_INIT_STACK_FRAME(stackFrame);
ASSERT(stackFrame.callFrame->codeBlock()->needsFullScopeChain());
Q_ASSERT(stackFrame.callFrame->codeBlock()->needsFullScopeChain());
asActivation(stackFrame.args[0].jsValue())->copyRegisters(stackFrame.callFrame->optionalCalleeArguments());
}
@ -1816,7 +1816,7 @@ DEFINE_STUB_FUNCTION(void, op_tear_off_arguments)
{
STUB_INIT_STACK_FRAME(stackFrame);
ASSERT(stackFrame.callFrame->codeBlock()->usesArguments() && !stackFrame.callFrame->codeBlock()->needsFullScopeChain());
Q_ASSERT(stackFrame.callFrame->codeBlock()->usesArguments() && !stackFrame.callFrame->codeBlock()->needsFullScopeChain());
if (stackFrame.callFrame->optionalCalleeArguments())
stackFrame.callFrame->optionalCalleeArguments()->copyRegisters();
}
@ -1825,7 +1825,7 @@ DEFINE_STUB_FUNCTION(void, op_ret_scopeChain)
{
STUB_INIT_STACK_FRAME(stackFrame);
ASSERT(stackFrame.callFrame->codeBlock()->needsFullScopeChain());
Q_ASSERT(stackFrame.callFrame->codeBlock()->needsFullScopeChain());
stackFrame.callFrame->scopeChain()->deref();
}
@ -1846,7 +1846,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve)
ScopeChainIterator iter = scopeChain->begin();
ScopeChainIterator end = scopeChain->end();
ASSERT(iter != end);
Q_ASSERT(iter != end);
Identifier& ident = stackFrame.args[0].identifier();
do {
@ -1878,9 +1878,9 @@ DEFINE_STUB_FUNCTION(JSObject*, op_construct_JSConstruct)
VM_THROW_EXCEPTION();
}
#if !ASSERT_DISABLED
#ifndef QT_NO_DEBUG
ConstructData constructData;
ASSERT(constructor->getConstructData(constructData) == ConstructTypeJS);
Q_ASSERT(constructor->getConstructData(constructData) == ConstructTypeJS);
#endif
Structure* structure;
@ -1917,7 +1917,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_construct_NotJSConstruct)
return JSValue::encode(returnValue);
}
ASSERT(constructType == ConstructTypeNone);
Q_ASSERT(constructType == ConstructTypeNone);
CodeBlock* codeBlock = callFrame->codeBlock();
unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
@ -2267,10 +2267,10 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_skip)
ScopeChainIterator iter = scopeChain->begin();
ScopeChainIterator end = scopeChain->end();
ASSERT(iter != end);
Q_ASSERT(iter != end);
while (skip--) {
++iter;
ASSERT(iter != end);
Q_ASSERT(iter != end);
}
Identifier& ident = stackFrame.args[0].identifier();
do {
@ -2297,7 +2297,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_global)
JSGlobalObject* globalObject = stackFrame.args[0].globalObject();
Identifier& ident = stackFrame.args[1].identifier();
unsigned globalResolveInfoIndex = stackFrame.args[2].int32();
ASSERT(globalObject->isGlobalObject());
Q_ASSERT(globalObject->isGlobalObject());
PropertySlot slot(globalObject);
if (globalObject->getPropertySlot(callFrame, ident, slot)) {
@ -2527,8 +2527,8 @@ DEFINE_STUB_FUNCTION(int, op_eq_strings)
JSString* string1 = stackFrame.args[0].jsString();
JSString* string2 = stackFrame.args[1].jsString();
ASSERT(string1->isString());
ASSERT(string2->isString());
Q_ASSERT(string1->isString());
Q_ASSERT(string2->isString());
return string1->value(stackFrame.callFrame) == string2->value(stackFrame.callFrame);
}
@ -2554,7 +2554,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitand)
JSValue src1 = stackFrame.args[0].jsValue();
JSValue src2 = stackFrame.args[1].jsValue();
ASSERT(!src1.isInt32() || !src2.isInt32());
Q_ASSERT(!src1.isInt32() || !src2.isInt32());
CallFrame* callFrame = stackFrame.callFrame;
JSValue result = jsNumber(stackFrame.globalData, src1.toInt32(callFrame) & src2.toInt32(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
@ -2581,7 +2581,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitnot)
JSValue src = stackFrame.args[0].jsValue();
ASSERT(!src.isInt32());
Q_ASSERT(!src.isInt32());
CallFrame* callFrame = stackFrame.callFrame;
JSValue result = jsNumber(stackFrame.globalData, ~src.toInt32(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
@ -2600,7 +2600,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_with_base)
// FIXME: add scopeDepthIsZero optimization
ASSERT(iter != end);
Q_ASSERT(iter != end);
Identifier& ident = stackFrame.args[0].identifier();
JSObject* base;
@ -2775,7 +2775,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_throw)
unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
JSValue exceptionValue = stackFrame.args[0].jsValue();
ASSERT(exceptionValue);
Q_ASSERT(exceptionValue);
HandlerInfo* handler = stackFrame.globalData->interpreter->throwException(callFrame, exceptionValue, vPCIndex, true);
@ -2787,7 +2787,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_throw)
stackFrame.callFrame = callFrame;
void* catchRoutine = handler->nativeCode.executableAddress();
ASSERT(catchRoutine);
Q_ASSERT(catchRoutine);
STUB_SET_RETURN_ADDRESS(catchRoutine);
return JSValue::encode(exceptionValue);
}
@ -3081,9 +3081,9 @@ DEFINE_STUB_FUNCTION(void, op_put_getter)
CallFrame* callFrame = stackFrame.callFrame;
ASSERT(stackFrame.args[0].jsValue().isObject());
Q_ASSERT(stackFrame.args[0].jsValue().isObject());
JSObject* baseObj = asObject(stackFrame.args[0].jsValue());
ASSERT(stackFrame.args[2].jsValue().isObject());
Q_ASSERT(stackFrame.args[2].jsValue().isObject());
baseObj->defineGetter(callFrame, stackFrame.args[1].identifier(), asObject(stackFrame.args[2].jsValue()));
}
@ -3093,9 +3093,9 @@ DEFINE_STUB_FUNCTION(void, op_put_setter)
CallFrame* callFrame = stackFrame.callFrame;
ASSERT(stackFrame.args[0].jsValue().isObject());
Q_ASSERT(stackFrame.args[0].jsValue().isObject());
JSObject* baseObj = asObject(stackFrame.args[0].jsValue());
ASSERT(stackFrame.args[2].jsValue().isObject());
Q_ASSERT(stackFrame.args[2].jsValue().isObject());
baseObj->defineSetter(callFrame, stackFrame.args[1].identifier(), asObject(stackFrame.args[2].jsValue()));
}
@ -3159,7 +3159,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, vm_throw)
unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, globalData->exceptionLocation);
JSValue exceptionValue = globalData->exception;
ASSERT(exceptionValue);
Q_ASSERT(exceptionValue);
globalData->exception = JSValue();
HandlerInfo* handler = globalData->interpreter->throwException(callFrame, exceptionValue, vPCIndex, false);
@ -3171,7 +3171,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, vm_throw)
stackFrame.callFrame = callFrame;
void* catchRoutine = handler->nativeCode.executableAddress();
ASSERT(catchRoutine);
Q_ASSERT(catchRoutine);
STUB_SET_RETURN_ADDRESS(catchRoutine);
return JSValue::encode(exceptionValue);
}

View file

@ -105,14 +105,14 @@ template <typename T> inline NodeDeclarationInfo<T> createNodeDeclarationInfo(T
ParserArenaData<DeclarationStacks::FunctionStack>* funcDecls,
CodeFeatures info, int numConstants)
{
ASSERT((info & ~AllFeatures) == 0);
Q_ASSERT((info & ~AllFeatures) == 0);
NodeDeclarationInfo<T> result = { node, varDecls, funcDecls, info, numConstants };
return result;
}
template <typename T> inline NodeInfo<T> createNodeInfo(T node, CodeFeatures info, int numConstants)
{
ASSERT((info & ~AllFeatures) == 0);
Q_ASSERT((info & ~AllFeatures) == 0);
NodeInfo<T> result = { node, info, numConstants };
return result;
}
@ -1870,7 +1870,7 @@ static ExpressionNode* makeAssignNode(JSGlobalData* globalData, ExpressionNode*
return node;
}
}
ASSERT(loc->isDotAccessorNode());
Q_ASSERT(loc->isDotAccessorNode());
DotAccessorNode* dot = static_cast<DotAccessorNode*>(loc);
if (op == OpEqual)
return new (globalData) AssignDotNode(globalData, dot->base(), dot->identifier(), expr, exprHasAssignments, dot->divot(), dot->divot() - start, end - dot->divot());
@ -1895,7 +1895,7 @@ static ExpressionNode* makePrefixNode(JSGlobalData* globalData, ExpressionNode*
node->setSubexpressionInfo(bracket->divot(), bracket->startOffset());
return node;
}
ASSERT(expr->isDotAccessorNode());
Q_ASSERT(expr->isDotAccessorNode());
DotAccessorNode* dot = static_cast<DotAccessorNode*>(expr);
PrefixDotNode* node = new (globalData) PrefixDotNode(globalData, dot->base(), dot->identifier(), op, divot, divot - start, end - divot);
node->setSubexpressionInfo(dot->divot(), dot->startOffset());
@ -1918,7 +1918,7 @@ static ExpressionNode* makePostfixNode(JSGlobalData* globalData, ExpressionNode*
return node;
}
ASSERT(expr->isDotAccessorNode());
Q_ASSERT(expr->isDotAccessorNode());
DotAccessorNode* dot = static_cast<DotAccessorNode*>(expr);
PostfixDotNode* node = new (globalData) PostfixDotNode(globalData, dot->base(), dot->identifier(), op, divot, divot - start, end - divot);
node->setSubexpressionInfo(dot->divot(), dot->endOffset());
@ -1944,7 +1944,7 @@ static ExpressionNodeInfo makeFunctionCallNode(JSGlobalData* globalData, Express
node->setSubexpressionInfo(bracket->divot(), bracket->endOffset());
return createNodeInfo<ExpressionNode*>(node, features, numConstants);
}
ASSERT(func.m_node->isDotAccessorNode());
Q_ASSERT(func.m_node->isDotAccessorNode());
DotAccessorNode* dot = static_cast<DotAccessorNode*>(func.m_node);
FunctionCallDotNode* node;
if (dot->identifier() == globalData->propertyNames->call)
@ -1978,7 +1978,7 @@ static ExpressionNode* makeDeleteNode(JSGlobalData* globalData, ExpressionNode*
BracketAccessorNode* bracket = static_cast<BracketAccessorNode*>(expr);
return new (globalData) DeleteBracketNode(globalData, bracket->base(), bracket->subscript(), divot, divot - start, end - divot);
}
ASSERT(expr->isDotAccessorNode());
Q_ASSERT(expr->isDotAccessorNode());
DotAccessorNode* dot = static_cast<DotAccessorNode*>(expr);
return new (globalData) DeleteDotNode(globalData, dot->base(), dot->identifier(), divot, divot - start, end - divot);
}

View file

@ -162,7 +162,7 @@ void Lexer::setCode(const SourceCode& source, ParserArena& arena)
// Read the first characters into the 4-character buffer.
shift4();
ASSERT(currentOffset() == source.startOffset());
Q_ASSERT(currentOffset() == source.startOffset());
}
void Lexer::copyCodeWithoutBOMs()
@ -186,7 +186,7 @@ void Lexer::copyCodeWithoutBOMs()
void Lexer::shiftLineTerminator()
{
ASSERT(isLineTerminator(m_current));
Q_ASSERT(isLineTerminator(m_current));
// Allow both CRLF and LFCR.
if (m_current + m_next1 == '\n' + '\r')
@ -250,8 +250,8 @@ static inline int singleEscape(int c)
inline void Lexer::record8(int c)
{
ASSERT(c >= 0);
ASSERT(c <= 0xFF);
Q_ASSERT(c >= 0);
Q_ASSERT(c <= 0xFF);
m_buffer8.append(static_cast<char>(c));
}
@ -262,16 +262,16 @@ inline void Lexer::record16(UChar c)
inline void Lexer::record16(int c)
{
ASSERT(c >= 0);
ASSERT(c <= USHRT_MAX);
Q_ASSERT(c >= 0);
Q_ASSERT(c <= USHRT_MAX);
record16(UChar(static_cast<unsigned short>(c)));
}
int Lexer::lex(void* p1, void* p2)
{
ASSERT(!m_error);
ASSERT(m_buffer8.isEmpty());
ASSERT(m_buffer16.isEmpty());
Q_ASSERT(!m_error);
Q_ASSERT(m_buffer8.isEmpty());
Q_ASSERT(m_buffer16.isEmpty());
YYSTYPE* lvalp = static_cast<YYSTYPE*>(p1);
YYLTYPE* llocp = static_cast<YYLTYPE*>(p2);
@ -899,15 +899,15 @@ returnError:
bool Lexer::scanRegExp(const Identifier*& pattern, const Identifier*& flags, UChar patternPrefix)
{
ASSERT(m_buffer16.isEmpty());
Q_ASSERT(m_buffer16.isEmpty());
bool lastWasEscape = false;
bool inBrackets = false;
if (patternPrefix) {
ASSERT(!isLineTerminator(patternPrefix));
ASSERT(patternPrefix != '/');
ASSERT(patternPrefix != '[');
Q_ASSERT(!isLineTerminator(patternPrefix));
Q_ASSERT(patternPrefix != '/');
Q_ASSERT(patternPrefix != '[');
record16(patternPrefix);
}
@ -1021,7 +1021,7 @@ SourceCode Lexer::sourceCode(int openBrace, int closeBrace, int firstLine)
const UChar* data = m_source->provider()->data();
ASSERT(openBrace < closeBrace);
Q_ASSERT(openBrace < closeBrace);
int numBOMsBeforeOpenBrace = 0;
int numBOMsBetweenBraces = 0;

View file

@ -725,7 +725,7 @@ namespace JSC {
, m_expr3(expr3)
, m_statement(statement)
{
ASSERT(statement);
Q_ASSERT(statement);
}
inline ContinueNode::ContinueNode(JSGlobalData* globalData)

View file

@ -118,9 +118,9 @@ PassRefPtr<ProgramNode> ProgramNode::create(JSGlobalData* globalData, SourceElem
{
RefPtr<ProgramNode> node = new ProgramNode(globalData, children, varStack, funcStack, source, features, numConstants);
ASSERT(node->data()->m_arena.last() == node);
Q_ASSERT(node->data()->m_arena.last() == node);
node->data()->m_arena.removeLast();
ASSERT(!node->data()->m_arena.contains(node.get()));
Q_ASSERT(!node->data()->m_arena.contains(node.get()));
return node.release();
}
@ -136,9 +136,9 @@ PassRefPtr<EvalNode> EvalNode::create(JSGlobalData* globalData, SourceElements*
{
RefPtr<EvalNode> node = new EvalNode(globalData, children, varStack, funcStack, source, features, numConstants);
ASSERT(node->data()->m_arena.last() == node);
Q_ASSERT(node->data()->m_arena.last() == node);
node->data()->m_arena.removeLast();
ASSERT(!node->data()->m_arena.contains(node.get()));
Q_ASSERT(!node->data()->m_arena.contains(node.get()));
return node.release();
}
@ -169,7 +169,7 @@ void FunctionBodyNode::finishParsing(const SourceCode& source, ParameterNode* fi
void FunctionBodyNode::finishParsing(PassRefPtr<FunctionParameters> parameters, const Identifier& ident)
{
ASSERT(!source().isNull());
Q_ASSERT(!source().isNull());
m_parameters = parameters;
m_ident = ident;
}
@ -183,9 +183,9 @@ PassRefPtr<FunctionBodyNode> FunctionBodyNode::create(JSGlobalData* globalData,
{
RefPtr<FunctionBodyNode> node = new FunctionBodyNode(globalData, children, varStack, funcStack, sourceCode, features, numConstants);
ASSERT(node->data()->m_arena.last() == node);
Q_ASSERT(node->data()->m_arena.last() == node);
node->data()->m_arena.removeLast();
ASSERT(!node->data()->m_arena.contains(node.get()));
Q_ASSERT(!node->data()->m_arena.contains(node.get()));
return node.release();
}

View file

@ -116,7 +116,7 @@ namespace JSC {
public:
virtual ~ParserArenaRefCounted()
{
ASSERT(deletionHasBegun());
Q_ASSERT(deletionHasBegun());
}
};
@ -292,7 +292,7 @@ namespace JSC {
void setSubexpressionInfo(uint32_t subexpressionDivot, uint16_t subexpressionOffset)
{
ASSERT(subexpressionDivot <= divot());
Q_ASSERT(subexpressionDivot <= divot());
if ((divot() - subexpressionDivot) & ~0xFFFF) // Overflow means we can't do this safely, so just point at the primary divot
return;
m_subexpressionDivotOffset = divot() - subexpressionDivot;
@ -321,7 +321,7 @@ namespace JSC {
void setSubexpressionInfo(uint32_t subexpressionDivot, uint16_t subexpressionOffset)
{
ASSERT(subexpressionDivot >= divot());
Q_ASSERT(subexpressionDivot >= divot());
if ((subexpressionDivot - divot()) & ~0xFFFF) // Overflow means we can't do this safely, so just point at the primary divot
return;
m_subexpressionDivotOffset = subexpressionDivot - divot();
@ -1383,8 +1383,8 @@ namespace JSC {
void adoptData(std::auto_ptr<ScopeNodeData> data)
{
ASSERT(!data->m_arena.contains(this));
ASSERT(!m_data);
Q_ASSERT(!data->m_arena.contains(this));
Q_ASSERT(!m_data);
m_data.adopt(data);
}
ScopeNodeData* data() const { return m_data.get(); }
@ -1403,12 +1403,12 @@ namespace JSC {
bool usesThis() const { return m_features & ThisFeature; }
bool needsActivation() const { return m_features & (EvalFeature | ClosureFeature | WithFeature | CatchFeature); }
VarStack& varStack() { ASSERT(m_data); return m_data->m_varStack; }
FunctionStack& functionStack() { ASSERT(m_data); return m_data->m_functionStack; }
VarStack& varStack() { Q_ASSERT(m_data); return m_data->m_varStack; }
FunctionStack& functionStack() { Q_ASSERT(m_data); return m_data->m_functionStack; }
int neededConstants()
{
ASSERT(m_data);
Q_ASSERT(m_data);
// We may need 2 more constants than the count given by the parser,
// because of the various uses of jsUndefined() and jsNull().
return m_data->m_numConstants + 2;

View file

@ -40,7 +40,7 @@ ParserArena::ParserArena()
inline void* ParserArena::freeablePool()
{
ASSERT(m_freeablePoolEnd);
Q_ASSERT(m_freeablePoolEnd);
return m_freeablePoolEnd - freeablePoolSize;
}
@ -105,7 +105,7 @@ void ParserArena::allocateFreeablePool()
char* pool = static_cast<char*>(fastMalloc(freeablePoolSize));
m_freeableMemory = pool;
m_freeablePoolEnd = pool + freeablePoolSize;
ASSERT(freeablePool() == pool);
Q_ASSERT(freeablePool() == pool);
}
bool ParserArena::isEmpty() const

View file

@ -76,10 +76,10 @@ namespace JSC {
void* allocateFreeable(size_t size)
{
ASSERT(size);
ASSERT(size <= freeablePoolSize);
Q_ASSERT(size);
Q_ASSERT(size <= freeablePoolSize);
size_t alignedSize = alignSize(size);
ASSERT(alignedSize <= freeablePoolSize);
Q_ASSERT(alignedSize <= freeablePoolSize);
if (UNLIKELY(static_cast<size_t>(m_freeablePoolEnd - m_freeableMemory) < alignedSize))
allocateFreeablePool();
void* block = m_freeableMemory;

View file

@ -2626,7 +2626,7 @@ JSRegExp* jsRegExpCompile(const UChar* pattern, int patternLength,
*code++ = OP_END;
ASSERT(code - codeStart <= length);
Q_ASSERT(code - codeStart <= length);
if (code - codeStart > length)
errorcode = ERR7;

View file

@ -308,7 +308,7 @@ struct MatchStack {
, currentFrame(frames)
, size(1) // match() creates accesses the first frame w/o calling pushNewFrame
{
ASSERT((sizeof(frames) / sizeof(frames[0])) == numFramesOnStack);
Q_ASSERT((sizeof(frames) / sizeof(frames[0])) == numFramesOnStack);
}
MatchFrame frames[numFramesOnStack];
@ -402,8 +402,8 @@ static inline void repeatInformationFromInstructionOffset(short instructionOffse
static const char minimumRepeatsFromInstructionOffset[] = { 0, 0, 1, 1, 0, 0 };
static const int maximumRepeatsFromInstructionOffset[] = { INT_MAX, INT_MAX, INT_MAX, INT_MAX, 1, 1 };
ASSERT(instructionOffset >= 0);
ASSERT(instructionOffset <= (OP_CRMINQUERY - OP_CRSTAR));
Q_ASSERT(instructionOffset >= 0);
Q_ASSERT(instructionOffset <= (OP_CRMINQUERY - OP_CRSTAR));
minimize = (instructionOffset & 1); // this assumes ordering: Instruction, MinimizeInstruction, Instruction2, MinimizeInstruction2
minimumRepeats = minimumRepeatsFromInstructionOffset[instructionOffset];
@ -1679,7 +1679,7 @@ RECURSE:
non-capturing bracket. Don't worry about setting the flag for the error case
here; that is handled in the code for KET. */
ASSERT(*stack.currentFrame->args.instructionPtr > OP_BRA);
Q_ASSERT(*stack.currentFrame->args.instructionPtr > OP_BRA);
stack.currentFrame->locals.number = *stack.currentFrame->args.instructionPtr - OP_BRA;
@ -1884,10 +1884,10 @@ int jsRegExpExecute(const JSRegExp* re,
const UChar* subject, int length, int start_offset, int* offsets,
int offsetCount)
{
ASSERT(re);
ASSERT(subject || !length);
ASSERT(offsetCount >= 0);
ASSERT(offsets || offsetCount == 0);
Q_ASSERT(re);
Q_ASSERT(subject || !length);
Q_ASSERT(offsetCount >= 0);
Q_ASSERT(offsets || offsetCount == 0);
HistogramTimeLogger logger(re);
@ -2008,7 +2008,7 @@ int jsRegExpExecute(const JSRegExp* re,
}
if (returnCode != 1) {
ASSERT(returnCode == JSRegExpErrorHitLimit || returnCode == JSRegExpErrorNoMemory);
Q_ASSERT(returnCode == JSRegExpErrorHitLimit || returnCode == JSRegExpErrorNoMemory);
DPRINTF((">>>> error: returning %d\n", returnCode));
return returnCode;
}

View file

@ -119,14 +119,14 @@ capturing parenthesis numbers in back references. */
static inline void put2ByteValue(unsigned char* opcodePtr, int value)
{
ASSERT(value >= 0 && value <= 0xFFFF);
Q_ASSERT(value >= 0 && value <= 0xFFFF);
opcodePtr[0] = value >> 8;
opcodePtr[1] = value;
}
static inline void put3ByteValue(unsigned char* opcodePtr, int value)
{
ASSERT(value >= 0 && value <= 0xFFFFFF);
Q_ASSERT(value >= 0 && value <= 0xFFFFFF);
opcodePtr[0] = value >> 16;
opcodePtr[1] = value >> 8;
opcodePtr[2] = value;
@ -181,14 +181,14 @@ COMPILE_ASSERT(MAX_PATTERN_SIZE < (1 << (8 * LINK_SIZE)), pcre_max_pattern_fits_
static inline void putLinkValue(unsigned char* opcodePtr, int value)
{
ASSERT(value);
Q_ASSERT(value);
putLinkValueAllowZero(opcodePtr, value);
}
static inline int getLinkValue(const unsigned char* opcodePtr)
{
int value = getLinkValueAllowZero(opcodePtr);
ASSERT(value);
Q_ASSERT(value);
return value;
}
@ -435,7 +435,7 @@ static inline bool isBracketStartOpcode(unsigned char opcode)
static inline void advanceToEndOfBracket(const unsigned char*& opcodePtr)
{
ASSERT(isBracketStartOpcode(*opcodePtr) || *opcodePtr == OP_ALT);
Q_ASSERT(isBracketStartOpcode(*opcodePtr) || *opcodePtr == OP_ALT);
do
opcodePtr += getLinkValue(opcodePtr + 1);
while (*opcodePtr == OP_ALT);

View file

@ -68,8 +68,8 @@ namespace JSC {
void initialize(Register* buffer, size_t size)
{
ASSERT(!m_markSet);
ASSERT(isEmpty());
Q_ASSERT(!m_markSet);
Q_ASSERT(isEmpty());
m_buffer = buffer;
m_size = size;
@ -103,10 +103,10 @@ namespace JSC {
void append(JSValue v)
{
ASSERT(!m_isReadOnly);
Q_ASSERT(!m_isReadOnly);
#if ENABLE(JSC_ZOMBIES)
ASSERT(!v.isZombie());
Q_ASSERT(!v.isZombie());
#endif
if (m_isUsingInlineBuffer && m_size < inlineCapacity) {
@ -123,14 +123,14 @@ namespace JSC {
void removeLast()
{
ASSERT(m_size);
Q_ASSERT(m_size);
m_size--;
m_vector.removeLast();
}
JSValue last()
{
ASSERT(m_size);
Q_ASSERT(m_size);
return m_buffer[m_size - 1].jsValue();
}
@ -193,7 +193,7 @@ namespace JSC {
{
#if ENABLE(JSC_ZOMBIES)
for (size_t i = 0; i < argCount; i++)
ASSERT(!m_args[i].isZombie());
Q_ASSERT(!m_args[i].isZombie());
#endif
}
@ -201,7 +201,7 @@ namespace JSC {
: m_args(reinterpret_cast<JSValue*>(args))
, m_argCount(argCount)
{
ASSERT(argCount >= 0);
Q_ASSERT(argCount >= 0);
}
ArgList(const MarkedArgumentBuffer& args)

View file

@ -112,7 +112,7 @@ namespace JSC {
inline Arguments* asArguments(JSValue value)
{
ASSERT(asObject(value)->inherits(&Arguments::info));
Q_ASSERT(asObject(value)->inherits(&Arguments::info));
return static_cast<Arguments*>(asObject(value));
}
@ -182,7 +182,7 @@ namespace JSC {
, d(new ArgumentsData)
{
if (callFrame->callee() && callFrame->callee()->inherits(&JSC::JSFunction::info))
ASSERT(!asFunction(callFrame->callee())->jsExecutable()->parameterCount());
Q_ASSERT(!asFunction(callFrame->callee())->jsExecutable()->parameterCount());
unsigned numArguments = callFrame->argumentCount() - 1;
@ -211,7 +211,7 @@ namespace JSC {
inline void Arguments::copyRegisters()
{
ASSERT(!isTornOff());
Q_ASSERT(!isTornOff());
if (!d->numParameters)
return;
@ -228,7 +228,7 @@ namespace JSC {
// This JSActivation function is defined here so it can get at Arguments::setRegisters.
inline void JSActivation::copyRegisters(Arguments* arguments)
{
ASSERT(!d()->registerArray);
Q_ASSERT(!d()->registerArray);
size_t numParametersMinusThis = d()->functionExecutable->generatedBytecode().m_numParameters - 1;
size_t numVars = d()->functionExecutable->generatedBytecode().m_numVars;

View file

@ -203,7 +203,7 @@ JSValue JSC_HOST_CALL arrayProtoFuncToString(ExecState* exec, JSObject*, JSValue
if (RefPtr<UString::Rep> rep = strBuffer[i])
buffer.append(rep->data(), rep->size());
}
ASSERT(buffer.size() == totalSize);
Q_ASSERT(buffer.size() == totalSize);
return jsString(exec, UString::adopt(buffer));
}

View file

@ -42,7 +42,7 @@ namespace JSC {
inline BooleanObject* asBooleanObject(JSValue value)
{
ASSERT(asObject(value)->inherits(&BooleanObject::info));
Q_ASSERT(asObject(value)->inherits(&BooleanObject::info));
return static_cast<BooleanObject*>(asObject(value));
}

View file

@ -65,7 +65,7 @@ JSValue JSC_HOST_CALL booleanProtoFuncToString(ExecState* exec, JSObject*, JSVal
if (asBooleanObject(thisValue)->internalValue() == jsBoolean(false))
return jsNontrivialString(exec, "false");
ASSERT(asBooleanObject(thisValue)->internalValue() == jsBoolean(true));
Q_ASSERT(asBooleanObject(thisValue)->internalValue() == jsBoolean(true));
return jsNontrivialString(exec, "true");
}

View file

@ -52,7 +52,7 @@ JSValue call(ExecState* exec, JSValue functionObject, CallType callType, const C
{
if (callType == CallTypeHost)
return callData.native.function(exec, asObject(functionObject), thisValue, args);
ASSERT(callType == CallTypeJS);
Q_ASSERT(callType == CallTypeJS);
// FIXME: Can this be done more efficiently using the callData?
return asFunction(functionObject)->call(exec, thisValue, args);
}

View file

@ -106,7 +106,7 @@ Heap::Heap(JSGlobalData* globalData)
: m_markListSet(0)
, m_globalData(globalData)
{
ASSERT(globalData);
Q_ASSERT(globalData);
memset(&m_heap, 0, sizeof(CollectorHeap));
allocateBlock();
}
@ -114,7 +114,7 @@ Heap::Heap(JSGlobalData* globalData)
Heap::~Heap()
{
// The destroy function must already have been called, so assert this.
ASSERT(!m_globalData);
Q_ASSERT(!m_globalData);
}
void Heap::destroy()
@ -122,8 +122,8 @@ void Heap::destroy()
if (!m_globalData)
return;
ASSERT(!m_globalData->dynamicGlobalObject);
ASSERT(!isBusy());
Q_ASSERT(!m_globalData->dynamicGlobalObject);
Q_ASSERT(!isBusy());
// The global object is not GC protected at this point, so sweeping may delete it
// (and thus the global data) before other objects that may use the global data.
@ -259,7 +259,7 @@ void Heap::freeBlocks()
for ( ; it != end; ++it)
(*it)->~JSCell();
ASSERT(!protectedObjectCount());
Q_ASSERT(!protectedObjectCount());
protectedValuesEnd = protectedValuesCopy.end();
for (ProtectCountSet::iterator it = protectedValuesCopy.begin(); it != protectedValuesEnd; ++it)
@ -304,11 +304,11 @@ void* Heap::allocate(size_t s)
ASSERT_UNUSED(s, s <= HeapConstants::cellSize);
ASSERT(m_heap.operationInProgress == NoOperation);
Q_ASSERT(m_heap.operationInProgress == NoOperation);
#if COLLECT_ON_EVERY_ALLOCATION
collectAllGarbage();
ASSERT(m_heap.operationInProgress == NoOperation);
Q_ASSERT(m_heap.operationInProgress == NoOperation);
#endif
allocate:
@ -316,10 +316,10 @@ allocate:
// Fast case: find the next garbage cell and recycle it.
do {
ASSERT(m_heap.nextBlock < m_heap.usedBlocks);
Q_ASSERT(m_heap.nextBlock < m_heap.usedBlocks);
Block* block = reinterpret_cast<Block*>(m_heap.blocks[m_heap.nextBlock]);
do {
ASSERT(m_heap.nextCell < HeapConstants::cellsPerBlock);
Q_ASSERT(m_heap.nextCell < HeapConstants::cellsPerBlock);
if (!block->marked.get(m_heap.nextCell)) { // Always false for the last cell in the block
Cell* cell = block->cells + m_heap.nextCell;
@ -360,14 +360,14 @@ void Heap::resizeBlocks()
void Heap::growBlocks(size_t neededBlocks)
{
ASSERT(m_heap.usedBlocks < neededBlocks);
Q_ASSERT(m_heap.usedBlocks < neededBlocks);
while (m_heap.usedBlocks < neededBlocks)
allocateBlock();
}
void Heap::shrinkBlocks(size_t neededBlocks)
{
ASSERT(m_heap.usedBlocks > neededBlocks);
Q_ASSERT(m_heap.usedBlocks > neededBlocks);
// Clear the always-on last bit, so isEmpty() isn't fooled by it.
for (size_t i = 0; i < m_heap.usedBlocks; ++i)
@ -569,7 +569,7 @@ static inline void* currentThreadStackBase()
#endif
int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize);
(void)rc; // FIXME: Deal with error code somehow? Seems fatal.
ASSERT(stackBase);
Q_ASSERT(stackBase);
pthread_attr_destroy(&sattr);
stackThread = thread;
}
@ -619,9 +619,9 @@ void Heap::markConservatively(MarkStack& markStack, void* start, void* end)
end = tmp;
}
ASSERT((static_cast<char*>(end) - static_cast<char*>(start)) < 0x1000000);
ASSERT(isPointerAligned(start));
ASSERT(isPointerAligned(end));
Q_ASSERT((static_cast<char*>(end) - static_cast<char*>(start)) < 0x1000000);
Q_ASSERT(isPointerAligned(start));
Q_ASSERT(isPointerAligned(end));
char** p = static_cast<char**>(start);
char** e = static_cast<char**>(end);
@ -683,8 +683,8 @@ void Heap::markCurrentThreadConservatively(MarkStack& markStack)
void Heap::protect(JSValue k)
{
ASSERT(k);
ASSERT(!m_globalData->isSharedInstance);
Q_ASSERT(k);
Q_ASSERT(!m_globalData->isSharedInstance);
if (!k.isCell())
return;
@ -694,8 +694,8 @@ void Heap::protect(JSValue k)
void Heap::unprotect(JSValue k)
{
ASSERT(k);
ASSERT(!m_globalData->isSharedInstance);
Q_ASSERT(k);
Q_ASSERT(!m_globalData->isSharedInstance);
if (!k.isCell())
return;
@ -727,8 +727,8 @@ void Heap::clearMarkBits(CollectorBlock* block)
size_t Heap::markedCells(size_t startBlock, size_t startCell) const
{
ASSERT(startBlock <= m_heap.usedBlocks);
ASSERT(startCell < HeapConstants::cellsPerBlock);
Q_ASSERT(startBlock <= m_heap.usedBlocks);
Q_ASSERT(startCell < HeapConstants::cellsPerBlock);
if (startBlock >= m_heap.usedBlocks)
return 0;
@ -743,7 +743,7 @@ size_t Heap::markedCells(size_t startBlock, size_t startCell) const
void Heap::sweep()
{
ASSERT(m_heap.operationInProgress == NoOperation);
Q_ASSERT(m_heap.operationInProgress == NoOperation);
if (m_heap.operationInProgress != NoOperation)
CRASH();
m_heap.operationInProgress = Collection;
@ -775,7 +775,7 @@ void Heap::sweep()
void Heap::markRoots()
{
ASSERT(m_heap.operationInProgress == NoOperation);
Q_ASSERT(m_heap.operationInProgress == NoOperation);
if (m_heap.operationInProgress != NoOperation)
CRASH();
@ -881,7 +881,7 @@ static const char* typeName(JSCell* cell)
return "value wrapper";
if (cell->isPropertyNameIterator())
return "for-in iterator";
ASSERT(cell->isObject());
Q_ASSERT(cell->isObject());
const ClassInfo* info = cell->classInfo();
return info ? info->className : "Object";
}

View file

@ -116,7 +116,7 @@ namespace JSC {
{
do {
advance(HeapConstants::cellsPerBlock);
ASSERT(m_block > m_heap.nextBlock || (m_block == m_heap.nextBlock && m_cell >= m_heap.nextCell));
Q_ASSERT(m_block > m_heap.nextBlock || (m_block == m_heap.nextBlock && m_cell >= m_heap.nextCell));
} while (m_block < m_heap.usedBlocks && m_heap.blocks[m_block]->marked.get(m_cell));
return *this;
}

View file

@ -51,7 +51,7 @@ JSObject* construct(ExecState* exec, JSValue object, ConstructType constructType
{
if (constructType == ConstructTypeHost)
return constructData.native.function(exec, asObject(object), args);
ASSERT(constructType == ConstructTypeJS);
Q_ASSERT(constructType == ConstructTypeJS);
// FIXME: Can this be done more efficiently using the constructData?
return asFunction(object)->construct(exec, args);
}

View file

@ -72,7 +72,7 @@ namespace JSC {
inline DateInstance* asDateInstance(JSValue value)
{
ASSERT(asObject(value)->inherits(&DateInstance::info));
Q_ASSERT(asObject(value)->inherits(&DateInstance::info));
return static_cast<DateInstance*>(asObject(value));
}

View file

@ -170,7 +170,7 @@ JSObject* createNotAnObjectError(ExecState* exec, JSNotAnObjectErrorStub* error,
// thrown by these instances op_get_by_id need to reflect this.
OpcodeID followingOpcodeID;
if (codeBlock->getByIdExceptionInfoForBytecodeOffset(exec, bytecodeOffset, followingOpcodeID)) {
ASSERT(followingOpcodeID == op_construct || followingOpcodeID == op_instanceof);
Q_ASSERT(followingOpcodeID == op_construct || followingOpcodeID == op_instanceof);
if (followingOpcodeID == op_construct)
return createNotAConstructorError(exec, error->isNull() ? jsNull() : jsUndefined(), bytecodeOffset, codeBlock);
return createInvalidParamError(exec, "instanceof", error->isNull() ? jsNull() : jsUndefined(), bytecodeOffset, codeBlock);

View file

@ -72,7 +72,7 @@ JSObject* EvalExecutable::compile(ExecState* exec, ScopeChainNode* scopeChainNod
ScopeChain scopeChain(scopeChainNode);
JSGlobalObject* globalObject = scopeChain.globalObject();
ASSERT(!m_evalCodeBlock);
Q_ASSERT(!m_evalCodeBlock);
m_evalCodeBlock = new EvalCodeBlock(this, globalObject, source().provider(), scopeChain.localDepth());
OwnPtr<BytecodeGenerator> generator(new BytecodeGenerator(evalNode.get(), globalObject->debugger(), scopeChain, m_evalCodeBlock->symbolTable(), m_evalCodeBlock));
generator->generate();
@ -103,7 +103,7 @@ JSObject* ProgramExecutable::compile(ExecState* exec, ScopeChainNode* scopeChain
ScopeChain scopeChain(scopeChainNode);
JSGlobalObject* globalObject = scopeChain.globalObject();
ASSERT(!m_programCodeBlock);
Q_ASSERT(!m_programCodeBlock);
m_programCodeBlock = new ProgramCodeBlock(this, GlobalCode, globalObject, source().provider());
OwnPtr<BytecodeGenerator> generator(new BytecodeGenerator(programNode.get(), globalObject->debugger(), scopeChain, &globalObject->symbolTable(), m_programCodeBlock));
generator->generate();
@ -124,12 +124,12 @@ void FunctionExecutable::compile(ExecState*, ScopeChainNode* scopeChainNode)
ScopeChain scopeChain(scopeChainNode);
JSGlobalObject* globalObject = scopeChain.globalObject();
ASSERT(!m_codeBlock);
Q_ASSERT(!m_codeBlock);
m_codeBlock = new FunctionCodeBlock(this, FunctionCode, source().provider(), source().startOffset());
OwnPtr<BytecodeGenerator> generator(new BytecodeGenerator(body.get(), globalObject->debugger(), scopeChain, m_codeBlock->symbolTable(), m_codeBlock));
generator->generate();
m_numParameters = m_codeBlock->m_numParameters;
ASSERT(m_numParameters);
Q_ASSERT(m_numParameters);
m_numVariables = m_codeBlock->m_numVars;
body->destroyData();
@ -195,11 +195,11 @@ ExceptionInfo* FunctionExecutable::reparseExceptionInfo(JSGlobalData* globalData
generator->setRegeneratingForExceptionInfo(static_cast<FunctionCodeBlock*>(codeBlock));
generator->generate();
ASSERT(newCodeBlock->instructionCount() == codeBlock->instructionCount());
Q_ASSERT(newCodeBlock->instructionCount() == codeBlock->instructionCount());
#if ENABLE(JIT)
JITCode newJITCode = JIT::compile(globalData, newCodeBlock.get());
ASSERT(newJITCode.size() == generatedJITCode().size());
Q_ASSERT(newJITCode.size() == generatedJITCode().size());
#endif
globalData->functionCodeBlockBeingReparsed = 0;
@ -220,11 +220,11 @@ ExceptionInfo* EvalExecutable::reparseExceptionInfo(JSGlobalData* globalData, Sc
generator->setRegeneratingForExceptionInfo(static_cast<EvalCodeBlock*>(codeBlock));
generator->generate();
ASSERT(newCodeBlock->instructionCount() == codeBlock->instructionCount());
Q_ASSERT(newCodeBlock->instructionCount() == codeBlock->instructionCount());
#if ENABLE(JIT)
JITCode newJITCode = JIT::compile(globalData, newCodeBlock.get());
ASSERT(newJITCode.size() == generatedJITCode().size());
Q_ASSERT(newJITCode.size() == generatedJITCode().size());
#endif
return newCodeBlock->extractExceptionInfo();
@ -247,19 +247,19 @@ PassRefPtr<FunctionExecutable> FunctionExecutable::fromGlobalCode(const Identifi
return 0;
StatementNode* exprStatement = program->singleStatement();
ASSERT(exprStatement);
ASSERT(exprStatement->isExprStatement());
Q_ASSERT(exprStatement);
Q_ASSERT(exprStatement->isExprStatement());
if (!exprStatement || !exprStatement->isExprStatement())
return 0;
ExpressionNode* funcExpr = static_cast<ExprStatementNode*>(exprStatement)->expr();
ASSERT(funcExpr);
ASSERT(funcExpr->isFuncExprNode());
Q_ASSERT(funcExpr);
Q_ASSERT(funcExpr->isFuncExprNode());
if (!funcExpr || !funcExpr->isFuncExprNode())
return 0;
FunctionBodyNode* body = static_cast<FuncExprNode*>(funcExpr)->body();
ASSERT(body);
Q_ASSERT(body);
return FunctionExecutable::create(&exec->globalData(), functionName, body->source(), body->usesArguments(), body->parameters(), body->lineNo(), body->lastLine());
}

View file

@ -65,7 +65,7 @@ namespace JSC {
public:
JITCode& generatedJITCode()
{
ASSERT(m_jitCode);
Q_ASSERT(m_jitCode);
return m_jitCode;
}
@ -267,7 +267,7 @@ namespace JSC {
CodeBlock& bytecode(ExecState* exec, ScopeChainNode* scopeChainNode)
{
ASSERT(scopeChainNode);
Q_ASSERT(scopeChainNode);
if (!m_codeBlock)
compile(exec, scopeChainNode);
return *m_codeBlock;
@ -280,7 +280,7 @@ namespace JSC {
CodeBlock& generatedBytecode()
{
ASSERT(m_codeBlock);
Q_ASSERT(m_codeBlock);
return *m_codeBlock;
}
@ -344,13 +344,13 @@ namespace JSC {
inline FunctionExecutable* JSFunction::jsExecutable() const
{
ASSERT(!isHostFunctionNonInline());
Q_ASSERT(!isHostFunctionNonInline());
return static_cast<FunctionExecutable*>(m_executable.get());
}
inline bool JSFunction::isHostFunction() const
{
ASSERT(m_executable);
Q_ASSERT(m_executable);
return m_executable->isHostFunction();
}

View file

@ -69,8 +69,8 @@ CallType FunctionPrototype::getCallData(CallData& callData)
// Compatibility hack for the Optimost JavaScript library. (See <rdar://problem/6595040>.)
static inline void insertSemicolonIfNeeded(UString& functionBody)
{
ASSERT(functionBody[0] == '{');
ASSERT(functionBody[functionBody.size() - 1] == '}');
Q_ASSERT(functionBody[0] == '{');
Q_ASSERT(functionBody[functionBody.size() - 1] == '}');
for (size_t i = functionBody.size() - 2; i > 0; --i) {
UChar ch = functionBody[i];

View file

@ -63,7 +63,7 @@ namespace JSC {
inline GetterSetter* asGetterSetter(JSValue value)
{
ASSERT(value.asCell()->isGetterSetter());
Q_ASSERT(value.asCell()->isGetterSetter());
return static_cast<GetterSetter*>(value.asCell());
}

View file

@ -36,7 +36,7 @@ GlobalEvalFunction::GlobalEvalFunction(ExecState* exec, NonNullPassRefPtr<Struct
: PrototypeFunction(exec, structure, len, name, function)
, m_cachedGlobalObject(cachedGlobalObject)
{
ASSERT_ARG(cachedGlobalObject, cachedGlobalObject);
Q_ASSERT_X(cachedGlobalObject, "GlobalEvalFunction::GlobalEvalFunction", "cachedGlobalObject");
}
void GlobalEvalFunction::markChildren(MarkStack& markStack)

View file

@ -210,7 +210,7 @@ PassRefPtr<UString::Rep> Identifier::add(ExecState* exec, const UChar* s, int le
PassRefPtr<UString::Rep> Identifier::addSlowCase(JSGlobalData* globalData, UString::Rep* r)
{
ASSERT(!r->isIdentifier());
Q_ASSERT(!r->isIdentifier());
if (r->size() == 1) {
UChar c = r->data()[0];
if (c <= 0xFF)
@ -267,7 +267,7 @@ ThreadSpecific<ThreadIdentifierTableData>* g_identifierTableSpecific = 0;
void createIdentifierTableSpecific()
{
ASSERT(!g_identifierTableSpecific);
Q_ASSERT(!g_identifierTableSpecific);
g_identifierTableSpecific = new ThreadSpecific<ThreadIdentifierTableData>();
}

View file

@ -59,7 +59,7 @@ namespace JSC {
inline InternalFunction* asInternalFunction(JSValue value)
{
ASSERT(asObject(value)->inherits(&InternalFunction::info));
Q_ASSERT(asObject(value)->inherits(&InternalFunction::info));
return static_cast<InternalFunction*>(asObject(value));
}

View file

@ -48,7 +48,7 @@ namespace JSC {
: JSCell(exec->globalData().apiWrapperStructure.get())
, m_value(value)
{
ASSERT(!value.isCell());
Q_ASSERT(!value.isCell());
}
JSValue m_value;

View file

@ -86,14 +86,14 @@ bool JSActivation::getOwnPropertySlot(ExecState* exec, const Identifier& propert
// We don't call through to JSObject because there's no way to give an
// activation object getter properties or a prototype.
ASSERT(!hasGetterSetterProperties());
ASSERT(prototype().isNull());
Q_ASSERT(!hasGetterSetterProperties());
Q_ASSERT(prototype().isNull());
return false;
}
void JSActivation::put(ExecState*, const Identifier& propertyName, JSValue value, PutPropertySlot& slot)
{
ASSERT(!Heap::heap(value) || Heap::heap(value) == Heap::heap(this));
Q_ASSERT(!Heap::heap(value) || Heap::heap(value) == Heap::heap(this));
if (symbolTablePut(propertyName, value))
return;
@ -101,14 +101,14 @@ void JSActivation::put(ExecState*, const Identifier& propertyName, JSValue value
// We don't call through to JSObject because __proto__ and getter/setter
// properties are non-standard extensions that other implementations do not
// expose in the activation object.
ASSERT(!hasGetterSetterProperties());
Q_ASSERT(!hasGetterSetterProperties());
putDirect(propertyName, value, 0, true, slot);
}
// FIXME: Make this function honor ReadOnly (const) and DontEnum
void JSActivation::putWithAttributes(ExecState* exec, const Identifier& propertyName, JSValue value, unsigned attributes)
{
ASSERT(!Heap::heap(value) || Heap::heap(value) == Heap::heap(this));
Q_ASSERT(!Heap::heap(value) || Heap::heap(value) == Heap::heap(this));
if (symbolTablePutWithAttributes(propertyName, value, attributes))
return;
@ -116,7 +116,7 @@ void JSActivation::putWithAttributes(ExecState* exec, const Identifier& property
// We don't call through to JSObject because __proto__ and getter/setter
// properties are non-standard extensions that other implementations do not
// expose in the activation object.
ASSERT(!hasGetterSetterProperties());
Q_ASSERT(!hasGetterSetterProperties());
PutPropertySlot slot;
JSObject::putWithAttributes(exec, propertyName, value, attributes, true, slot);
}
@ -156,7 +156,7 @@ JSValue JSActivation::argumentsGetter(ExecState* exec, const Identifier&, const
arguments->copyRegisters();
callFrame->setCalleeArguments(arguments);
}
ASSERT(arguments->inherits(&Arguments::info));
Q_ASSERT(arguments->inherits(&Arguments::info));
return arguments;
}

View file

@ -99,7 +99,7 @@ namespace JSC {
inline JSActivation* asActivation(JSValue value)
{
ASSERT(asObject(value)->inherits(&JSActivation::info));
Q_ASSERT(asObject(value)->inherits(&JSActivation::info));
return static_cast<JSActivation*>(asObject(value));
}

View file

@ -90,21 +90,21 @@ const ClassInfo JSArray::info = {"Array", 0, 0, 0};
static inline size_t storageSize(unsigned vectorLength)
{
ASSERT(vectorLength <= MAX_STORAGE_VECTOR_LENGTH);
Q_ASSERT(vectorLength <= MAX_STORAGE_VECTOR_LENGTH);
// MAX_STORAGE_VECTOR_LENGTH is defined such that provided (vectorLength <= MAX_STORAGE_VECTOR_LENGTH)
// - as asserted above - the following calculation cannot overflow.
size_t size = (sizeof(ArrayStorage) - sizeof(JSValue)) + (vectorLength * sizeof(JSValue));
// Assertion to detect integer overflow in previous calculation (should not be possible, provided that
// MAX_STORAGE_VECTOR_LENGTH is correctly defined).
ASSERT(((size - (sizeof(ArrayStorage) - sizeof(JSValue))) / sizeof(JSValue) == vectorLength) && (size >= (sizeof(ArrayStorage) - sizeof(JSValue))));
Q_ASSERT(((size - (sizeof(ArrayStorage) - sizeof(JSValue))) / sizeof(JSValue) == vectorLength) && (size >= (sizeof(ArrayStorage) - sizeof(JSValue))));
return size;
}
static inline unsigned increasedVectorLength(unsigned newLength)
{
ASSERT(newLength <= MAX_STORAGE_VECTOR_LENGTH);
Q_ASSERT(newLength <= MAX_STORAGE_VECTOR_LENGTH);
// Mathematically equivalent to:
// increasedLength = (newLength * 3 + 1) / 2;
@ -112,7 +112,7 @@ static inline unsigned increasedVectorLength(unsigned newLength)
// increasedLength = (unsigned)ceil(newLength * 1.5));
// This form is not prone to internal overflow.
unsigned increasedLength = newLength + (newLength >> 1) + (newLength & 1);
ASSERT(increasedLength >= newLength);
Q_ASSERT(increasedLength >= newLength);
return min(increasedLength, MAX_STORAGE_VECTOR_LENGTH);
}
@ -188,7 +188,7 @@ JSArray::JSArray(NonNullPassRefPtr<Structure> structure, const ArgList& list)
JSArray::~JSArray()
{
ASSERT(vptr() == JSGlobalData::jsArrayVPtr);
Q_ASSERT(vptr() == JSGlobalData::jsArrayVPtr);
checkConsistency(DestructorConsistencyCheck);
delete m_storage->m_sparseValueMap;
@ -504,8 +504,8 @@ bool JSArray::increaseVectorLength(unsigned newLength)
ArrayStorage* storage = m_storage;
unsigned vectorLength = m_vectorLength;
ASSERT(newLength > vectorLength);
ASSERT(newLength <= MAX_STORAGE_VECTOR_INDEX);
Q_ASSERT(newLength > vectorLength);
Q_ASSERT(newLength <= MAX_STORAGE_VECTOR_INDEX);
unsigned newVectorLength = increasedVectorLength(newLength);
storage = static_cast<ArrayStorage*>(tryFastRealloc(storage, storageSize(newVectorLength)));
@ -709,7 +709,7 @@ void JSArray::sort(ExecState* exec)
for (size_t i = 0; i < lengthNotIncludingUndefined; i++) {
JSValue value = m_storage->m_vector[i];
ASSERT(!value.isUndefined());
Q_ASSERT(!value.isUndefined());
values[i].first = value;
}
@ -796,8 +796,8 @@ struct AVLTreeAbstractorForArrayCompare {
int compare_key_key(key va, key vb)
{
ASSERT(!va.isUndefined());
ASSERT(!vb.isUndefined());
Q_ASSERT(!va.isUndefined());
Q_ASSERT(!vb.isUndefined());
if (m_exec->hadException())
return 1;
@ -831,7 +831,7 @@ void JSArray::sort(ExecState* exec, JSValue compareFunction, CallType callType,
// The maximum tree depth is compiled in - but the caller is clearly up to no good
// if a larger array is passed.
ASSERT(m_storage->m_length <= static_cast<unsigned>(std::numeric_limits<int>::max()));
Q_ASSERT(m_storage->m_length <= static_cast<unsigned>(std::numeric_limits<int>::max()));
if (m_storage->m_length > static_cast<unsigned>(std::numeric_limits<int>::max()))
return;
@ -906,7 +906,7 @@ void JSArray::sort(ExecState* exec, JSValue compareFunction, CallType callType,
m_storage->m_sparseValueMap = 0;
}
ASSERT(tree.abstractor().m_nodes.size() >= numDefined);
Q_ASSERT(tree.abstractor().m_nodes.size() >= numDefined);
// FIXME: If the compare function changed the length of the array, the following might be
// modifying the vector incorrectly.
@ -1037,33 +1037,33 @@ void JSArray::setLazyCreationData(void* d)
void JSArray::checkConsistency(ConsistencyCheckType type)
{
ASSERT(m_storage);
Q_ASSERT(m_storage);
if (type == SortConsistencyCheck)
ASSERT(!m_storage->m_sparseValueMap);
Q_ASSERT(!m_storage->m_sparseValueMap);
unsigned numValuesInVector = 0;
for (unsigned i = 0; i < m_vectorLength; ++i) {
if (JSValue value = m_storage->m_vector[i]) {
ASSERT(i < m_storage->m_length);
Q_ASSERT(i < m_storage->m_length);
if (type != DestructorConsistencyCheck)
value->type(); // Likely to crash if the object was deallocated.
++numValuesInVector;
} else {
if (type == SortConsistencyCheck)
ASSERT(i >= m_storage->m_numValuesInVector);
Q_ASSERT(i >= m_storage->m_numValuesInVector);
}
}
ASSERT(numValuesInVector == m_storage->m_numValuesInVector);
ASSERT(numValuesInVector <= m_storage->m_length);
Q_ASSERT(numValuesInVector == m_storage->m_numValuesInVector);
Q_ASSERT(numValuesInVector <= m_storage->m_length);
if (m_storage->m_sparseValueMap) {
SparseArrayValueMap::iterator end = m_storage->m_sparseValueMap->end();
for (SparseArrayValueMap::iterator it = m_storage->m_sparseValueMap->begin(); it != end; ++it) {
unsigned index = it->first;
ASSERT(index < m_storage->m_length);
ASSERT(index >= m_vectorLength);
ASSERT(index <= MAX_ARRAY_INDEX);
ASSERT(it->second);
Q_ASSERT(index < m_storage->m_length);
Q_ASSERT(index >= m_vectorLength);
Q_ASSERT(index <= MAX_ARRAY_INDEX);
Q_ASSERT(it->second);
if (type != DestructorConsistencyCheck)
it->second->type(); // Likely to crash if the object was deallocated.
}

View file

@ -66,14 +66,14 @@ namespace JSC {
bool canGetIndex(unsigned i) { return i < m_vectorLength && m_storage->m_vector[i]; }
JSValue getIndex(unsigned i)
{
ASSERT(canGetIndex(i));
Q_ASSERT(canGetIndex(i));
return m_storage->m_vector[i];
}
bool canSetIndex(unsigned i) { return i < m_vectorLength; }
void setIndex(unsigned i, JSValue v)
{
ASSERT(canSetIndex(i));
Q_ASSERT(canSetIndex(i));
JSValue& x = m_storage->m_vector[i];
if (!x) {
++m_storage->m_numValuesInVector;
@ -123,7 +123,7 @@ namespace JSC {
inline JSArray* asArray(JSCell* cell)
{
ASSERT(cell->inherits(&JSArray::info));
Q_ASSERT(cell->inherits(&JSArray::info));
return static_cast<JSArray*>(cell);
}
@ -156,15 +156,15 @@ namespace JSC {
inline void MarkStack::markChildren(JSCell* cell)
{
ASSERT(Heap::isCellMarked(cell));
Q_ASSERT(Heap::isCellMarked(cell));
if (!cell->structure()->typeInfo().overridesMarkChildren()) {
#ifdef NDEBUG
asObject(cell)->markChildrenDirect(*this);
#else
ASSERT(!m_isCheckingForDefaultMarkViolation);
Q_ASSERT(!m_isCheckingForDefaultMarkViolation);
m_isCheckingForDefaultMarkViolation = true;
cell->markChildren(*this);
ASSERT(m_isCheckingForDefaultMarkViolation);
Q_ASSERT(m_isCheckingForDefaultMarkViolation);
m_isCheckingForDefaultMarkViolation = false;
#endif
return;
@ -180,14 +180,14 @@ namespace JSC {
{
while (!m_markSets.isEmpty() || !m_values.isEmpty()) {
while (!m_markSets.isEmpty() && m_values.size() < 50) {
ASSERT(!m_markSets.isEmpty());
Q_ASSERT(!m_markSets.isEmpty());
MarkSet& current = m_markSets.last();
ASSERT(current.m_values);
Q_ASSERT(current.m_values);
JSValue* end = current.m_end;
ASSERT(current.m_values);
ASSERT(current.m_values != end);
Q_ASSERT(current.m_values);
Q_ASSERT(current.m_values != end);
findNextUnmarkedNullValue:
ASSERT(current.m_values != end);
Q_ASSERT(current.m_values != end);
JSValue value = *current.m_values;
current.m_values++;

View file

@ -43,10 +43,10 @@ JSByteArray::JSByteArray(ExecState* exec, NonNullPassRefPtr<Structure> structure
putDirect(exec->globalData().propertyNames->length, jsNumber(exec, m_storage->length()), ReadOnly | DontDelete);
}
#if !ASSERT_DISABLED
#ifndef QT_NO_DEBUG
JSByteArray::~JSByteArray()
{
ASSERT(vptr() == JSGlobalData::jsByteArrayVPtr);
Q_ASSERT(vptr() == JSGlobalData::jsByteArrayVPtr);
}
#endif

View file

@ -38,13 +38,13 @@ namespace JSC {
bool canAccessIndex(unsigned i) { return i < m_storage->length(); }
JSValue getIndex(ExecState* exec, unsigned i)
{
ASSERT(canAccessIndex(i));
Q_ASSERT(canAccessIndex(i));
return jsNumber(exec, m_storage->data()[i]);
}
void setIndex(unsigned i, int value)
{
ASSERT(canAccessIndex(i));
Q_ASSERT(canAccessIndex(i));
if (value & ~0xFF) {
if (value < 0)
value = 0;
@ -56,7 +56,7 @@ namespace JSC {
void setIndex(unsigned i, double value)
{
ASSERT(canAccessIndex(i));
Q_ASSERT(canAccessIndex(i));
if (!(value > 0)) // Clamp NaN to 0
value = 0;
else if (value > 255)
@ -91,7 +91,7 @@ namespace JSC {
WTF::ByteArray* storage() const { return m_storage.get(); }
#if !ASSERT_DISABLED
#ifndef QT_NO_DEBUG
virtual ~JSByteArray();
#endif

View file

@ -227,7 +227,7 @@ namespace JSC {
#if !USE(JSVALUE32_64)
ALWAYS_INLINE JSCell* JSValue::asCell() const
{
ASSERT(isCell());
Q_ASSERT(isCell());
return m_ptr;
}
#endif // !USE(JSVALUE32_64)
@ -261,7 +261,7 @@ namespace JSC {
value = *this;
return true;
}
ASSERT(isUndefined());
Q_ASSERT(isUndefined());
number = nonInlineNaN();
value = *this;
return true;
@ -324,8 +324,8 @@ namespace JSC {
ALWAYS_INLINE void MarkStack::append(JSCell* cell)
{
ASSERT(!m_isCheckingForDefaultMarkViolation);
ASSERT(cell);
Q_ASSERT(!m_isCheckingForDefaultMarkViolation);
Q_ASSERT(cell);
if (Heap::isCellMarked(cell))
return;
Heap::markCell(cell);
@ -335,7 +335,7 @@ namespace JSC {
ALWAYS_INLINE void MarkStack::append(JSValue value)
{
ASSERT(value);
Q_ASSERT(value);
if (value.isCell())
append(value.asCell());
}

View file

@ -81,14 +81,14 @@ JSFunction::JSFunction(ExecState* exec, NonNullPassRefPtr<FunctionExecutable> ex
JSFunction::~JSFunction()
{
ASSERT(vptr() == JSGlobalData::jsFunctionVPtr);
Q_ASSERT(vptr() == JSGlobalData::jsFunctionVPtr);
// JIT code for other functions may have had calls linked directly to the code for this function; these links
// are based on a check for the this pointer value for this JSFunction - which will no longer be valid once
// this memory is freed and may be reused (potentially for another, different JSFunction).
if (!isHostFunction()) {
#if ENABLE(JIT_OPTIMIZE_CALL)
ASSERT(m_executable);
Q_ASSERT(m_executable);
if (jsExecutable()->isGenerated())
jsExecutable()->generatedBytecode().unlinkCallers();
#endif
@ -118,28 +118,28 @@ CallType JSFunction::getCallData(CallData& callData)
JSValue JSFunction::call(ExecState* exec, JSValue thisValue, const ArgList& args)
{
ASSERT(!isHostFunction());
Q_ASSERT(!isHostFunction());
return exec->interpreter()->execute(jsExecutable(), exec, this, thisValue.toThisObject(exec), args, scopeChain().node(), exec->exceptionSlot());
}
JSValue JSFunction::argumentsGetter(ExecState* exec, const Identifier&, const PropertySlot& slot)
{
JSFunction* thisObj = asFunction(slot.slotBase());
ASSERT(!thisObj->isHostFunction());
Q_ASSERT(!thisObj->isHostFunction());
return exec->interpreter()->retrieveArguments(exec, thisObj);
}
JSValue JSFunction::callerGetter(ExecState* exec, const Identifier&, const PropertySlot& slot)
{
JSFunction* thisObj = asFunction(slot.slotBase());
ASSERT(!thisObj->isHostFunction());
Q_ASSERT(!thisObj->isHostFunction());
return exec->interpreter()->retrieveCaller(exec, thisObj);
}
JSValue JSFunction::lengthGetter(ExecState* exec, const Identifier&, const PropertySlot& slot)
{
JSFunction* thisObj = asFunction(slot.slotBase());
ASSERT(!thisObj->isHostFunction());
Q_ASSERT(!thisObj->isHostFunction());
return jsNumber(exec, thisObj->jsExecutable()->parameterCount());
}
@ -251,7 +251,7 @@ ConstructType JSFunction::getConstructData(ConstructData& constructData)
JSObject* JSFunction::construct(ExecState* exec, const ArgList& args)
{
ASSERT(!isHostFunction());
Q_ASSERT(!isHostFunction());
Structure* structure;
JSValue prototype = get(exec, exec->propertyNames().prototype);
if (prototype.isObject())

View file

@ -97,22 +97,22 @@ namespace JSC {
RefPtr<ExecutableBase> m_executable;
ScopeChain& scopeChain()
{
ASSERT(!isHostFunctionNonInline());
Q_ASSERT(!isHostFunctionNonInline());
return *WTF::bitwise_cast<ScopeChain*>(m_data);
}
void clearScopeChain()
{
ASSERT(!isHostFunctionNonInline());
Q_ASSERT(!isHostFunctionNonInline());
new (m_data) ScopeChain(NoScopeChain());
}
void setScopeChain(ScopeChainNode* sc)
{
ASSERT(!isHostFunctionNonInline());
Q_ASSERT(!isHostFunctionNonInline());
new (m_data) ScopeChain(sc);
}
void setScopeChain(const ScopeChain& sc)
{
ASSERT(!isHostFunctionNonInline());
Q_ASSERT(!isHostFunctionNonInline());
*WTF::bitwise_cast<ScopeChain*>(m_data) = sc;
}
void setNativeFunction(NativeFunction func)
@ -126,7 +126,7 @@ namespace JSC {
inline JSFunction* asFunction(JSValue value)
{
ASSERT(asObject(value)->inherits(&JSFunction::info));
Q_ASSERT(asObject(value)->inherits(&JSFunction::info));
return static_cast<JSFunction*>(asObject(value));
}

View file

@ -134,7 +134,7 @@ void JSGlobalObject::init(JSObject* thisValue)
void JSGlobalObject::put(ExecState* exec, const Identifier& propertyName, JSValue value, PutPropertySlot& slot)
{
ASSERT(!Heap::heap(value) || Heap::heap(value) == Heap::heap(this));
Q_ASSERT(!Heap::heap(value) || Heap::heap(value) == Heap::heap(this));
if (symbolTablePut(propertyName, value))
return;
@ -143,7 +143,7 @@ void JSGlobalObject::put(ExecState* exec, const Identifier& propertyName, JSValu
void JSGlobalObject::putWithAttributes(ExecState* exec, const Identifier& propertyName, JSValue value, unsigned attributes)
{
ASSERT(!Heap::heap(value) || Heap::heap(value) == Heap::heap(this));
Q_ASSERT(!Heap::heap(value) || Heap::heap(value) == Heap::heap(this));
if (symbolTablePutWithAttributes(propertyName, value, attributes))
return;
@ -413,8 +413,8 @@ bool JSGlobalObject::isDynamicScope() const
void JSGlobalObject::copyGlobalsFrom(RegisterFile& registerFile)
{
ASSERT(!d()->registerArray);
ASSERT(!d()->registerArraySize);
Q_ASSERT(!d()->registerArray);
Q_ASSERT(!d()->registerArraySize);
int numGlobals = registerFile.numGlobals();
if (!numGlobals) {

View file

@ -298,7 +298,7 @@ namespace JSC {
inline JSGlobalObject* asGlobalObject(JSValue value)
{
ASSERT(asObject(value)->isGlobalObject());
Q_ASSERT(asObject(value)->isGlobalObject());
return static_cast<JSGlobalObject*>(asObject(value));
}
@ -319,7 +319,7 @@ namespace JSC {
for (int i = 0, index = -static_cast<int>(oldSize) - 1; i < count; ++i, --index) {
GlobalPropertyInfo& global = globals[i];
ASSERT(global.attributes & DontDelete);
Q_ASSERT(global.attributes & DontDelete);
SymbolTableEntry newEntry(index, global.attributes);
symbolTable().add(global.identifier.ustring().rep(), newEntry);
registerAt(index) = global.value;
@ -358,10 +358,10 @@ namespace JSC {
if (typeInfo().type() == StringType)
return exec->lexicalGlobalObject()->stringPrototype();
ASSERT(typeInfo().type() == NumberType);
Q_ASSERT(typeInfo().type() == NumberType);
return exec->lexicalGlobalObject()->numberPrototype();
#else
ASSERT(typeInfo().type() == StringType);
Q_ASSERT(typeInfo().type() == StringType);
return exec->lexicalGlobalObject()->stringPrototype();
#endif
}
@ -399,7 +399,7 @@ namespace JSC {
// For any ExecState that's not a globalExec, the
// dynamic global object must be set since code is running
ASSERT(globalData().dynamicGlobalObject);
Q_ASSERT(globalData().dynamicGlobalObject);
return globalData().dynamicGlobalObject;
}

View file

@ -379,14 +379,14 @@ namespace JSC {
ALWAYS_INLINE bool JSImmediate::toBoolean(JSValue v)
{
ASSERT(isImmediate(v));
Q_ASSERT(isImmediate(v));
return isNumber(v) ? isIntegerNumber(v) ? v != zeroImmediate()
: doubleToBoolean(doubleValue(v)) : v == trueImmediate();
}
#else
ALWAYS_INLINE bool JSImmediate::toBoolean(JSValue v)
{
ASSERT(isImmediate(v));
Q_ASSERT(isImmediate(v));
return isIntegerNumber(v) ? v != zeroImmediate() : v == trueImmediate();
}
#endif
@ -394,7 +394,7 @@ namespace JSC {
ALWAYS_INLINE uint32_t JSImmediate::getTruncatedUInt32(JSValue v)
{
// FIXME: should probably be asserting isPositiveIntegerNumber here.
ASSERT(isIntegerNumber(v));
Q_ASSERT(isIntegerNumber(v));
return intValue(v);
}
@ -494,30 +494,30 @@ namespace JSC {
ALWAYS_INLINE int32_t JSImmediate::getTruncatedInt32(JSValue v)
{
ASSERT(isIntegerNumber(v));
Q_ASSERT(isIntegerNumber(v));
return intValue(v);
}
ALWAYS_INLINE double JSImmediate::toDouble(JSValue v)
{
ASSERT(isImmediate(v));
Q_ASSERT(isImmediate(v));
if (isIntegerNumber(v))
return intValue(v);
#if USE(JSVALUE64)
if (isNumber(v)) {
ASSERT(isDouble(v));
Q_ASSERT(isDouble(v));
return doubleValue(v);
}
#else
ASSERT(!isNumber(v));
Q_ASSERT(!isNumber(v));
#endif
if (rawValue(v) == FullTagTypeUndefined)
return nonInlineNaN();
ASSERT(JSImmediate::isBoolean(v) || (v == JSImmediate::nullImmediate()));
Q_ASSERT(JSImmediate::isBoolean(v) || (v == JSImmediate::nullImmediate()));
return rawValue(v) >> ExtendedPayloadShift;
}
@ -605,7 +605,7 @@ namespace JSC {
inline int32_t JSValue::asInt32() const
{
ASSERT(isInt32());
Q_ASSERT(isInt32());
return JSImmediate::getTruncatedInt32(asValue());
}
@ -616,7 +616,7 @@ namespace JSC {
inline uint32_t JSValue::asUInt32() const
{
ASSERT(isUInt32());
Q_ASSERT(isUInt32());
return JSImmediate::getTruncatedUInt32(asValue());
}
@ -629,31 +629,31 @@ namespace JSC {
static ALWAYS_INLINE JSValue equal(JSValue v1, JSValue v2)
{
ASSERT(canDoFastBitwiseOperations(v1, v2));
Q_ASSERT(canDoFastBitwiseOperations(v1, v2));
return jsBoolean(v1 == v2);
}
static ALWAYS_INLINE JSValue notEqual(JSValue v1, JSValue v2)
{
ASSERT(canDoFastBitwiseOperations(v1, v2));
Q_ASSERT(canDoFastBitwiseOperations(v1, v2));
return jsBoolean(v1 != v2);
}
static ALWAYS_INLINE JSValue andImmediateNumbers(JSValue v1, JSValue v2)
{
ASSERT(canDoFastBitwiseOperations(v1, v2));
Q_ASSERT(canDoFastBitwiseOperations(v1, v2));
return JSImmediate::makeValue(JSImmediate::rawValue(v1) & JSImmediate::rawValue(v2));
}
static ALWAYS_INLINE JSValue xorImmediateNumbers(JSValue v1, JSValue v2)
{
ASSERT(canDoFastBitwiseOperations(v1, v2));
Q_ASSERT(canDoFastBitwiseOperations(v1, v2));
return JSImmediate::makeValue((JSImmediate::rawValue(v1) ^ JSImmediate::rawValue(v2)) | JSImmediate::TagTypeNumber);
}
static ALWAYS_INLINE JSValue orImmediateNumbers(JSValue v1, JSValue v2)
{
ASSERT(canDoFastBitwiseOperations(v1, v2));
Q_ASSERT(canDoFastBitwiseOperations(v1, v2));
return JSImmediate::makeValue(JSImmediate::rawValue(v1) | JSImmediate::rawValue(v2));
}
@ -669,7 +669,7 @@ namespace JSC {
static ALWAYS_INLINE JSValue rightShiftImmediateNumbers(JSValue val, JSValue shift)
{
ASSERT(canDoFastRshift(val, shift) || canDoFastUrshift(val, shift));
Q_ASSERT(canDoFastRshift(val, shift) || canDoFastUrshift(val, shift));
#if USE(JSVALUE64)
return JSImmediate::makeValue(static_cast<intptr_t>(static_cast<uint32_t>(static_cast<int32_t>(JSImmediate::rawValue(val)) >> ((JSImmediate::rawValue(shift) >> JSImmediate::IntegerPayloadShift) & 0x1f))) | JSImmediate::TagTypeNumber);
#else
@ -693,25 +693,25 @@ namespace JSC {
static ALWAYS_INLINE JSValue addImmediateNumbers(JSValue v1, JSValue v2)
{
ASSERT(canDoFastAdditiveOperations(v1, v2));
Q_ASSERT(canDoFastAdditiveOperations(v1, v2));
return JSImmediate::makeValue(JSImmediate::rawValue(v1) + JSImmediate::rawValue(v2) - JSImmediate::TagTypeNumber);
}
static ALWAYS_INLINE JSValue subImmediateNumbers(JSValue v1, JSValue v2)
{
ASSERT(canDoFastAdditiveOperations(v1, v2));
Q_ASSERT(canDoFastAdditiveOperations(v1, v2));
return JSImmediate::makeValue(JSImmediate::rawValue(v1) - JSImmediate::rawValue(v2) + JSImmediate::TagTypeNumber);
}
static ALWAYS_INLINE JSValue incImmediateNumber(JSValue v)
{
ASSERT(canDoFastAdditiveOperations(v));
Q_ASSERT(canDoFastAdditiveOperations(v));
return JSImmediate::makeValue(JSImmediate::rawValue(v) + (1 << JSImmediate::IntegerPayloadShift));
}
static ALWAYS_INLINE JSValue decImmediateNumber(JSValue v)
{
ASSERT(canDoFastAdditiveOperations(v));
Q_ASSERT(canDoFastAdditiveOperations(v));
return JSImmediate::makeValue(JSImmediate::rawValue(v) - (1 << JSImmediate::IntegerPayloadShift));
}
};

View file

@ -105,7 +105,7 @@ namespace JSC {
inline JSNumberCell* asNumberCell(JSValue v)
{
ASSERT(isNumberCell(v));
Q_ASSERT(isNumberCell(v));
return static_cast<JSNumberCell*>(v.asCell());
}
@ -191,7 +191,7 @@ namespace JSC {
inline double JSValue::uncheckedGetNumber() const
{
ASSERT(isNumber());
Q_ASSERT(isNumber());
return JSImmediate::isImmediate(asValue()) ? JSImmediate::toDouble(asValue()) : asDouble();
}
@ -206,70 +206,70 @@ namespace JSC {
inline JSValue::JSValue(ExecState*, double d)
{
JSValue v = JSImmediate::from(d);
ASSERT(v);
Q_ASSERT(v);
*this = v;
}
inline JSValue::JSValue(ExecState*, int i)
{
JSValue v = JSImmediate::from(i);
ASSERT(v);
Q_ASSERT(v);
*this = v;
}
inline JSValue::JSValue(ExecState*, unsigned i)
{
JSValue v = JSImmediate::from(i);
ASSERT(v);
Q_ASSERT(v);
*this = v;
}
inline JSValue::JSValue(ExecState*, long i)
{
JSValue v = JSImmediate::from(i);
ASSERT(v);
Q_ASSERT(v);
*this = v;
}
inline JSValue::JSValue(ExecState*, unsigned long i)
{
JSValue v = JSImmediate::from(i);
ASSERT(v);
Q_ASSERT(v);
*this = v;
}
inline JSValue::JSValue(ExecState*, long long i)
{
JSValue v = JSImmediate::from(static_cast<double>(i));
ASSERT(v);
Q_ASSERT(v);
*this = v;
}
inline JSValue::JSValue(ExecState*, unsigned long long i)
{
JSValue v = JSImmediate::from(static_cast<double>(i));
ASSERT(v);
Q_ASSERT(v);
*this = v;
}
inline JSValue::JSValue(JSGlobalData*, double d)
{
JSValue v = JSImmediate::from(d);
ASSERT(v);
Q_ASSERT(v);
*this = v;
}
inline JSValue::JSValue(JSGlobalData*, int i)
{
JSValue v = JSImmediate::from(i);
ASSERT(v);
Q_ASSERT(v);
*this = v;
}
inline JSValue::JSValue(JSGlobalData*, unsigned i)
{
JSValue v = JSImmediate::from(i);
ASSERT(v);
Q_ASSERT(v);
*this = v;
}
@ -290,7 +290,7 @@ namespace JSC {
inline double JSValue::uncheckedGetNumber() const
{
ASSERT(isNumber());
Q_ASSERT(isNumber());
return JSImmediate::toDouble(asValue());
}
@ -300,25 +300,25 @@ namespace JSC {
inline JSValue::JSValue(ExecState*, char i)
{
ASSERT(JSImmediate::from(i));
Q_ASSERT(JSImmediate::from(i));
*this = JSImmediate::from(i);
}
inline JSValue::JSValue(ExecState*, unsigned char i)
{
ASSERT(JSImmediate::from(i));
Q_ASSERT(JSImmediate::from(i));
*this = JSImmediate::from(i);
}
inline JSValue::JSValue(ExecState*, short i)
{
ASSERT(JSImmediate::from(i));
Q_ASSERT(JSImmediate::from(i));
*this = JSImmediate::from(i);
}
inline JSValue::JSValue(ExecState*, unsigned short i)
{
ASSERT(JSImmediate::from(i));
Q_ASSERT(JSImmediate::from(i));
*this = JSImmediate::from(i);
}
@ -346,7 +346,7 @@ namespace JSC {
else if (LIKELY(isDouble()))
result = asDouble();
else {
ASSERT(!isNumber());
Q_ASSERT(!isNumber());
return false;
}
return true;

View file

@ -240,7 +240,7 @@ Stringifier::Stringifier(ExecState* exec, JSValue replacer, JSValue space)
Stringifier::~Stringifier()
{
ASSERT(m_exec->globalData().firstStringifierToMark == this);
Q_ASSERT(m_exec->globalData().firstStringifierToMark == this);
m_exec->globalData().firstStringifierToMark = m_nextStringifierToMark;
}
@ -335,7 +335,7 @@ void Stringifier::appendQuotedString(StringBuilder& builder, const UString& valu
inline JSValue Stringifier::toJSON(JSValue value, const PropertyNameForFunctionCall& propertyName)
{
ASSERT(!m_exec->hadException());
Q_ASSERT(!m_exec->hadException());
if (!value.isObject() || !asObject(value)->hasProperty(m_exec, m_exec->globalData().propertyNames->toJSON))
return value;
@ -469,13 +469,13 @@ inline void Stringifier::indent()
int newSize = m_indent.size() + m_gap.size();
if (newSize > m_repeatedGap.size())
m_repeatedGap = makeString(m_repeatedGap, m_gap);
ASSERT(newSize <= m_repeatedGap.size());
Q_ASSERT(newSize <= m_repeatedGap.size());
m_indent = m_repeatedGap.substr(0, newSize);
}
inline void Stringifier::unindent()
{
ASSERT(m_indent.size() >= m_gap.size());
Q_ASSERT(m_indent.size() >= m_gap.size());
m_indent = m_repeatedGap.substr(0, m_indent.size() - m_gap.size());
}
@ -496,7 +496,7 @@ inline Stringifier::Holder::Holder(JSObject* object)
bool Stringifier::Holder::appendNextProperty(Stringifier& stringifier, StringBuilder& builder)
{
ASSERT(m_index <= m_size);
Q_ASSERT(m_index <= m_size);
ExecState* exec = stringifier.m_exec;
@ -681,8 +681,8 @@ NEVER_INLINE JSValue Walker::walk(JSValue unfiltered)
switch (state) {
arrayStartState:
case ArrayStartState: {
ASSERT(inValue.isObject());
ASSERT(isJSArray(&m_exec->globalData(), asObject(inValue)) || asObject(inValue)->inherits(&JSArray::info));
Q_ASSERT(inValue.isObject());
Q_ASSERT(isJSArray(&m_exec->globalData(), asObject(inValue)) || asObject(inValue)->inherits(&JSArray::info));
if (objectStack.size() + arrayStack.size() > maximumFilterRecursion) {
m_exec->setException(createStackOverflowError(m_exec));
return jsUndefined();
@ -746,8 +746,8 @@ NEVER_INLINE JSValue Walker::walk(JSValue unfiltered)
}
objectStartState:
case ObjectStartState: {
ASSERT(inValue.isObject());
ASSERT(!isJSArray(&m_exec->globalData(), asObject(inValue)) && !asObject(inValue)->inherits(&JSArray::info));
Q_ASSERT(inValue.isObject());
Q_ASSERT(!isJSArray(&m_exec->globalData(), asObject(inValue)) && !asObject(inValue)->inherits(&JSArray::info));
if (objectStack.size() + arrayStack.size() > maximumFilterRecursion) {
m_exec->setException(createStackOverflowError(m_exec));
return jsUndefined();

View file

@ -50,7 +50,7 @@ static inline void getClassPropertyNames(ExecState* exec, const ClassInfo* class
if (!table)
continue;
table->initializeIfNeeded(exec);
ASSERT(table->table);
Q_ASSERT(table->table);
int hashSizeMask = table->compactSize - 1;
const HashEntry* entry = table->table;
@ -96,8 +96,8 @@ static void throwSetterError(ExecState* exec)
// ECMA 8.6.2.2
void JSObject::put(ExecState* exec, const Identifier& propertyName, JSValue value, PutPropertySlot& slot)
{
ASSERT(value);
ASSERT(!Heap::heap(value) || Heap::heap(value) == Heap::heap(this));
Q_ASSERT(value);
Q_ASSERT(!Heap::heap(value) || Heap::heap(value) == Heap::heap(this));
if (propertyName == exec->propertyNames().underscoreProto) {
// Setting __proto__ to a non-object, non-null value is silently ignored to match Mozilla.
@ -245,7 +245,7 @@ static ALWAYS_INLINE JSValue callDefaultValueFunction(ExecState* exec, const JSO
return exec->exception();
JSValue result = call(exec, function, callType, callData, const_cast<JSObject*>(object), exec->emptyList());
ASSERT(!result.isGetterSetter());
Q_ASSERT(!result.isGetterSetter());
if (exec->hadException())
return exec->exception();
if (result.isObject())
@ -280,7 +280,7 @@ JSValue JSObject::defaultValue(ExecState* exec, PreferredPrimitiveType hint) con
return value;
}
ASSERT(!exec->hadException());
Q_ASSERT(!exec->hadException());
return throwError(exec, TypeError, "No default value");
}
@ -300,7 +300,7 @@ void JSObject::defineGetter(ExecState* exec, const Identifier& propertyName, JSO
{
JSValue object = getDirect(propertyName);
if (object && object.isGetterSetter()) {
ASSERT(m_structure->hasGetterSetterProperties());
Q_ASSERT(m_structure->hasGetterSetterProperties());
asGetterSetter(object)->setGetter(getterFunction);
return;
}
@ -327,7 +327,7 @@ void JSObject::defineSetter(ExecState* exec, const Identifier& propertyName, JSO
{
JSValue object = getDirect(propertyName);
if (object && object.isGetterSetter()) {
ASSERT(m_structure->hasGetterSetterProperties());
Q_ASSERT(m_structure->hasGetterSetterProperties());
asGetterSetter(object)->setSetter(setterFunction);
return;
}
@ -652,7 +652,7 @@ bool JSObject::defineOwnProperty(ExecState* exec, const Identifier& propertyName
}
// Changing the accessor functions of an existing accessor property
ASSERT(descriptor.isAccessorDescriptor());
Q_ASSERT(descriptor.isAccessorDescriptor());
if (!current.configurable()) {
if (descriptor.setterPresent() && !(current.setter() && JSValue::strictEqual(exec, current.setter(), descriptor.setter()))) {
if (throwException)

View file

@ -273,7 +273,7 @@ namespace JSC {
inline JSObject* asObject(JSCell* cell)
{
ASSERT(cell->isObject());
Q_ASSERT(cell->isObject());
return static_cast<JSObject*>(cell);
}
@ -285,17 +285,17 @@ inline JSObject* asObject(JSValue value)
inline JSObject::JSObject(NonNullPassRefPtr<Structure> structure)
: JSCell(structure.releaseRef()) // ~JSObject balances this ref()
{
ASSERT(m_structure->propertyStorageCapacity() == inlineStorageCapacity);
ASSERT(m_structure->isEmpty());
ASSERT(prototype().isNull() || Heap::heap(this) == Heap::heap(prototype()));
Q_ASSERT(m_structure->propertyStorageCapacity() == inlineStorageCapacity);
Q_ASSERT(m_structure->isEmpty());
Q_ASSERT(prototype().isNull() || Heap::heap(this) == Heap::heap(prototype()));
#if USE(JSVALUE64) || USE(JSVALUE32_64)
ASSERT(OBJECT_OFFSETOF(JSObject, m_inlineStorage) % sizeof(double) == 0);
Q_ASSERT(OBJECT_OFFSETOF(JSObject, m_inlineStorage) % sizeof(double) == 0);
#endif
}
inline JSObject::~JSObject()
{
ASSERT(m_structure);
Q_ASSERT(m_structure);
if (!isUsingInlineStorage())
delete [] m_externalStorage;
m_structure->deref();
@ -308,7 +308,7 @@ inline JSValue JSObject::prototype() const
inline void JSObject::setPrototype(JSValue prototype)
{
ASSERT(prototype);
Q_ASSERT(prototype);
RefPtr<Structure> newStructure = Structure::changePrototypeTransition(m_structure, prototype);
setStructure(newStructure.release());
}
@ -428,8 +428,8 @@ inline JSValue JSObject::get(ExecState* exec, unsigned propertyName) const
inline void JSObject::putDirectInternal(const Identifier& propertyName, JSValue value, unsigned attributes, bool checkReadOnly, PutPropertySlot& slot, JSCell* specificFunction)
{
ASSERT(value);
ASSERT(!Heap::heap(value) || Heap::heap(value) == Heap::heap(this));
Q_ASSERT(value);
Q_ASSERT(!Heap::heap(value) || Heap::heap(value) == Heap::heap(this));
if (m_structure->isDictionary()) {
unsigned currentAttributes;
@ -451,7 +451,7 @@ inline void JSObject::putDirectInternal(const Identifier& propertyName, JSValue
if (currentCapacity != m_structure->propertyStorageCapacity())
allocatePropertyStorage(currentCapacity, m_structure->propertyStorageCapacity());
ASSERT(offset < m_structure->propertyStorageCapacity());
Q_ASSERT(offset < m_structure->propertyStorageCapacity());
putDirectOffset(offset, value);
// See comment on setNewProperty call below.
if (!specificFunction)
@ -465,7 +465,7 @@ inline void JSObject::putDirectInternal(const Identifier& propertyName, JSValue
if (currentCapacity != structure->propertyStorageCapacity())
allocatePropertyStorage(currentCapacity, structure->propertyStorageCapacity());
ASSERT(offset < structure->propertyStorageCapacity());
Q_ASSERT(offset < structure->propertyStorageCapacity());
setStructure(structure.release());
putDirectOffset(offset, value);
// See comment on setNewProperty call below.
@ -507,7 +507,7 @@ inline void JSObject::putDirectInternal(const Identifier& propertyName, JSValue
if (currentCapacity != structure->propertyStorageCapacity())
allocatePropertyStorage(currentCapacity, structure->propertyStorageCapacity());
ASSERT(offset < structure->propertyStorageCapacity());
Q_ASSERT(offset < structure->propertyStorageCapacity());
setStructure(structure.release());
putDirectOffset(offset, value);
// Function transitions are not currently cachable, so leave the slot in an uncachable state.
@ -517,8 +517,8 @@ inline void JSObject::putDirectInternal(const Identifier& propertyName, JSValue
inline void JSObject::putDirectInternal(JSGlobalData& globalData, const Identifier& propertyName, JSValue value, unsigned attributes, bool checkReadOnly, PutPropertySlot& slot)
{
ASSERT(value);
ASSERT(!Heap::heap(value) || Heap::heap(value) == Heap::heap(this));
Q_ASSERT(value);
Q_ASSERT(!Heap::heap(value) || Heap::heap(value) == Heap::heap(this));
putDirectInternal(propertyName, value, attributes, checkReadOnly, slot, getJSFunction(globalData, value));
}
@ -542,8 +542,8 @@ inline void JSObject::addAnonymousSlots(unsigned count)
inline void JSObject::putDirect(const Identifier& propertyName, JSValue value, unsigned attributes, bool checkReadOnly, PutPropertySlot& slot)
{
ASSERT(value);
ASSERT(!Heap::heap(value) || Heap::heap(value) == Heap::heap(this));
Q_ASSERT(value);
Q_ASSERT(!Heap::heap(value) || Heap::heap(value) == Heap::heap(this));
putDirectInternal(propertyName, value, attributes, checkReadOnly, slot, 0);
}
@ -667,7 +667,7 @@ inline void JSValue::put(ExecState* exec, unsigned propertyName, JSValue value)
ALWAYS_INLINE void JSObject::allocatePropertyStorageInline(size_t oldSize, size_t newSize)
{
ASSERT(newSize > oldSize);
Q_ASSERT(newSize > oldSize);
// It's important that this function not rely on m_structure, since
// we might be in the middle of a transition.

View file

@ -37,7 +37,7 @@ ASSERT_CLASS_FITS_IN_CELL(JSPropertyNameIterator);
JSPropertyNameIterator* JSPropertyNameIterator::create(ExecState* exec, JSObject* o)
{
ASSERT(!o->structure()->enumerationCache() ||
Q_ASSERT(!o->structure()->enumerationCache() ||
o->structure()->enumerationCache()->cachedStructure() != o->structure() ||
o->structure()->enumerationCache()->cachedPrototypeChain() != o->structure()->prototypeChain(exec));

View file

@ -96,7 +96,7 @@ inline JSPropertyNameIterator::JSPropertyNameIterator(ExecState* exec, PropertyN
inline void Structure::setEnumerationCache(JSPropertyNameIterator* enumerationCache)
{
ASSERT(!isDictionary());
Q_ASSERT(!isDictionary());
m_enumerationCache = enumerationCache;
}

View file

@ -65,7 +65,7 @@ bool JSStaticScopeObject::isDynamicScope() const
JSStaticScopeObject::~JSStaticScopeObject()
{
ASSERT(d());
Q_ASSERT(d());
delete d();
}

View file

@ -78,7 +78,7 @@ JSString::Rope::~Rope()
// rope-of-ropes.)
void JSString::resolveRope(ExecState* exec) const
{
ASSERT(isRope());
Q_ASSERT(isRope());
// Allocate the buffer to hold the final string, position initially points to the end.
UChar* buffer;
@ -90,8 +90,8 @@ void JSString::resolveRope(ExecState* exec) const
m_fibers[i] = static_cast<void*>(0);
}
m_ropeLength = 0;
ASSERT(!isRope());
ASSERT(m_value == UString());
Q_ASSERT(!isRope());
Q_ASSERT(m_value == UString());
throwOutOfMemoryError(exec);
return;
}
@ -121,14 +121,14 @@ void JSString::resolveRope(ExecState* exec) const
// Was this the last item in the work queue?
if (workQueue.isEmpty()) {
// Create a string from the UChar buffer, clear the rope RefPtr.
ASSERT(buffer == position);
Q_ASSERT(buffer == position);
for (unsigned i = 0; i < m_ropeLength; ++i) {
m_fibers[i].deref();
m_fibers[i] = static_cast<void*>(0);
}
m_ropeLength = 0;
ASSERT(!isRope());
Q_ASSERT(!isRope());
return;
}

View file

@ -203,11 +203,11 @@ namespace JSC {
, m_stringLength(s1->length() + s2->length())
, m_ropeLength(ropeLength)
{
ASSERT(ropeLength <= s_maxInternalRopeLength);
Q_ASSERT(ropeLength <= s_maxInternalRopeLength);
unsigned index = 0;
appendStringInConstruct(index, s1);
appendStringInConstruct(index, s2);
ASSERT(ropeLength == index);
Q_ASSERT(ropeLength == index);
}
// This constructor constructs a new string by concatenating s1 & s2.
// This should only be called with ropeLength <= 3.
@ -216,11 +216,11 @@ namespace JSC {
, m_stringLength(s1->length() + u2.size())
, m_ropeLength(ropeLength)
{
ASSERT(ropeLength <= s_maxInternalRopeLength);
Q_ASSERT(ropeLength <= s_maxInternalRopeLength);
unsigned index = 0;
appendStringInConstruct(index, s1);
appendStringInConstruct(index, u2);
ASSERT(ropeLength == index);
Q_ASSERT(ropeLength == index);
}
// This constructor constructs a new string by concatenating s1 & s2.
// This should only be called with ropeLength <= 3.
@ -229,11 +229,11 @@ namespace JSC {
, m_stringLength(u1.size() + s2->length())
, m_ropeLength(ropeLength)
{
ASSERT(ropeLength <= s_maxInternalRopeLength);
Q_ASSERT(ropeLength <= s_maxInternalRopeLength);
unsigned index = 0;
appendStringInConstruct(index, u1);
appendStringInConstruct(index, s2);
ASSERT(ropeLength == index);
Q_ASSERT(ropeLength == index);
}
// This constructor constructs a new string by concatenating v1, v2 & v3.
// This should only be called with ropeLength <= 3 ... which since every
@ -248,7 +248,7 @@ namespace JSC {
appendValueInConstructAndIncrementLength(exec, index, v1);
appendValueInConstructAndIncrementLength(exec, index, v2);
appendValueInConstructAndIncrementLength(exec, index, v3);
ASSERT(index == s_maxInternalRopeLength);
Q_ASSERT(index == s_maxInternalRopeLength);
}
JSString(JSGlobalData* globalData, const UString& value, JSStringFinalizerCallback finalizer, void* context)
@ -265,7 +265,7 @@ namespace JSC {
~JSString()
{
ASSERT(vptr() == JSGlobalData::jsStringVPtr);
Q_ASSERT(vptr() == JSGlobalData::jsStringVPtr);
for (unsigned i = 0; i < m_ropeLength; ++i)
m_fibers[i].deref();
@ -325,9 +325,9 @@ namespace JSC {
void appendValueInConstructAndIncrementLength(ExecState* exec, unsigned& index, JSValue v)
{
if (v.isString()) {
ASSERT(v.asCell()->isString());
Q_ASSERT(v.asCell()->isString());
JSString* s = static_cast<JSString*>(v.asCell());
ASSERT(s->ropeLength() == 1);
Q_ASSERT(s->ropeLength() == 1);
appendStringInConstruct(index, s);
m_stringLength += s->length();
} else {
@ -362,7 +362,7 @@ namespace JSC {
mutable Rope::Fiber m_fibers[s_maxInternalRopeLength];
bool isRope() const { return m_ropeLength; }
UString& string() { ASSERT(!isRope()); return m_value; }
UString& string() { Q_ASSERT(!isRope()); return m_value; }
unsigned ropeLength() { return m_ropeLength ? m_ropeLength : 1; }
friend JSValue jsString(ExecState* exec, JSString* s1, JSString* s2);
@ -377,7 +377,7 @@ namespace JSC {
inline JSString* asString(JSValue value)
{
ASSERT(value.asCell()->isString());
Q_ASSERT(value.asCell()->isString());
return static_cast<JSString*>(value.asCell());
}
@ -395,7 +395,7 @@ namespace JSC {
inline JSString* jsSingleCharacterSubstring(JSGlobalData* globalData, const UString& s, unsigned offset)
{
ASSERT(offset < static_cast<unsigned>(s.size()));
Q_ASSERT(offset < static_cast<unsigned>(s.size()));
UChar c = s.data()[offset];
if (c <= 0xFF)
return globalData->smallStrings.singleCharacterString(globalData, c);
@ -404,21 +404,21 @@ namespace JSC {
inline JSString* jsNontrivialString(JSGlobalData* globalData, const char* s)
{
ASSERT(s);
ASSERT(s[0]);
ASSERT(s[1]);
Q_ASSERT(s);
Q_ASSERT(s[0]);
Q_ASSERT(s[1]);
return new (globalData) JSString(globalData, s);
}
inline JSString* jsNontrivialString(JSGlobalData* globalData, const UString& s)
{
ASSERT(s.size() > 1);
Q_ASSERT(s.size() > 1);
return new (globalData) JSString(globalData, s);
}
inline JSString* JSString::getIndex(ExecState* exec, unsigned i)
{
ASSERT(canGetIndex(i));
Q_ASSERT(canGetIndex(i));
return jsSingleCharacterSubstring(&exec->globalData(), value(exec), i);
}
@ -437,16 +437,16 @@ namespace JSC {
inline JSString* jsStringWithFinalizer(ExecState* exec, const UString& s, JSStringFinalizerCallback callback, void* context)
{
ASSERT(s.size() && (s.size() > 1 || s.data()[0] > 0xFF));
Q_ASSERT(s.size() && (s.size() > 1 || s.data()[0] > 0xFF));
JSGlobalData* globalData = &exec->globalData();
return new (globalData) JSString(globalData, s, callback, context);
}
inline JSString* jsSubstring(JSGlobalData* globalData, const UString& s, unsigned offset, unsigned length)
{
ASSERT(offset <= static_cast<unsigned>(s.size()));
ASSERT(length <= static_cast<unsigned>(s.size()));
ASSERT(offset + length <= static_cast<unsigned>(s.size()));
Q_ASSERT(offset <= static_cast<unsigned>(s.size()));
Q_ASSERT(length <= static_cast<unsigned>(s.size()));
Q_ASSERT(offset + length <= static_cast<unsigned>(s.size()));
if (!length)
return globalData->smallStrings.emptyString(globalData);
if (length == 1) {
@ -531,7 +531,7 @@ namespace JSC {
return "null";
if (isUndefined())
return "undefined";
ASSERT(isCell());
Q_ASSERT(isCell());
return asCell()->toString(exec);
}
@ -551,7 +551,7 @@ namespace JSC {
return "null";
if (isUndefined())
return "undefined";
ASSERT(isCell());
Q_ASSERT(isCell());
return asCell()->toPrimitive(exec, NoPreference).toString(exec);
}

View file

@ -55,13 +55,13 @@ double JSValue::toIntegerPreserveNaN(ExecState* exec) const
JSObject* JSValue::toObjectSlowCase(ExecState* exec) const
{
ASSERT(!isCell());
Q_ASSERT(!isCell());
if (isInt32() || isDouble())
return constructNumber(exec, asValue());
if (isTrue() || isFalse())
return constructBooleanFromImmediateBoolean(exec, asValue());
ASSERT(isUndefinedOrNull());
Q_ASSERT(isUndefinedOrNull());
JSNotAnObjectErrorStub* exception = createNotAnObjectErrorStub(exec, isNull());
exec->setException(exception);
return new (exec) JSNotAnObject(exec, exception);
@ -69,19 +69,19 @@ JSObject* JSValue::toObjectSlowCase(ExecState* exec) const
JSObject* JSValue::toThisObjectSlowCase(ExecState* exec) const
{
ASSERT(!isCell());
Q_ASSERT(!isCell());
if (isInt32() || isDouble())
return constructNumber(exec, asValue());
if (isTrue() || isFalse())
return constructBooleanFromImmediateBoolean(exec, asValue());
ASSERT(isUndefinedOrNull());
Q_ASSERT(isUndefinedOrNull());
return exec->globalThisValue();
}
JSObject* JSValue::synthesizeObject(ExecState* exec) const
{
ASSERT(!isCell());
Q_ASSERT(!isCell());
if (isNumber())
return constructNumber(exec, asValue());
if (isBoolean())
@ -94,7 +94,7 @@ JSObject* JSValue::synthesizeObject(ExecState* exec) const
JSObject* JSValue::synthesizePrototype(ExecState* exec) const
{
ASSERT(!isCell());
Q_ASSERT(!isCell());
if (isNumber())
return exec->lexicalGlobalObject()->numberPrototype();
if (isBoolean())
@ -126,7 +126,7 @@ char* JSValue::description()
else if (isNull())
snprintf(description, size, "Null");
else {
ASSERT(isUndefined());
Q_ASSERT(isUndefined());
snprintf(description, size, "Undefined");
}

View file

@ -438,7 +438,7 @@ namespace JSC {
JSValue v;
v.u.asEncodedJSValue = encodedJSValue;
#if ENABLE(JSC_ZOMBIES)
ASSERT(!v.isZombie());
Q_ASSERT(!v.isZombie());
#endif
return v;
}
@ -487,7 +487,7 @@ namespace JSC {
u.asBits.tag = EmptyValueTag;
u.asBits.payload = reinterpret_cast<int32_t>(ptr);
#if ENABLE(JSC_ZOMBIES)
ASSERT(!isZombie());
Q_ASSERT(!isZombie());
#endif
}
@ -499,13 +499,13 @@ namespace JSC {
u.asBits.tag = EmptyValueTag;
u.asBits.payload = reinterpret_cast<int32_t>(const_cast<JSCell*>(ptr));
#if ENABLE(JSC_ZOMBIES)
ASSERT(!isZombie());
Q_ASSERT(!isZombie());
#endif
}
inline JSValue::operator bool() const
{
ASSERT(tag() != DeletedValueTag);
Q_ASSERT(tag() != DeletedValueTag);
return tag() != EmptyValueTag;
}
@ -576,25 +576,25 @@ namespace JSC {
inline int32_t JSValue::asInt32() const
{
ASSERT(isInt32());
Q_ASSERT(isInt32());
return u.asBits.payload;
}
inline uint32_t JSValue::asUInt32() const
{
ASSERT(isUInt32());
Q_ASSERT(isUInt32());
return u.asBits.payload;
}
inline double JSValue::asDouble() const
{
ASSERT(isDouble());
Q_ASSERT(isDouble());
return u.asDouble;
}
ALWAYS_INLINE JSCell* JSValue::asCell() const
{
ASSERT(isCell());
Q_ASSERT(isCell());
return reinterpret_cast<JSCell*>(u.asBits.payload);
}
@ -743,13 +743,13 @@ namespace JSC {
inline bool JSValue::getBoolean() const
{
ASSERT(isBoolean());
Q_ASSERT(isBoolean());
return tag() == TrueTag;
}
inline double JSValue::uncheckedGetNumber() const
{
ASSERT(isNumber());
Q_ASSERT(isNumber());
return isInt32() ? asInt32() : asDouble();
}
@ -810,7 +810,7 @@ namespace JSC {
: m_ptr(ptr)
{
#if ENABLE(JSC_ZOMBIES)
ASSERT(!isZombie());
Q_ASSERT(!isZombie());
#endif
}
@ -818,7 +818,7 @@ namespace JSC {
: m_ptr(const_cast<JSCell*>(ptr))
{
#if ENABLE(JSC_ZOMBIES)
ASSERT(!isZombie());
Q_ASSERT(!isZombie());
#endif
}

View file

@ -71,7 +71,7 @@ namespace JSC {
: symbolTable(symbolTable)
, registers(registers)
{
ASSERT(symbolTable);
Q_ASSERT(symbolTable);
}
SymbolTable* symbolTable; // Maps name -> offset from "r" in register file.
@ -124,7 +124,7 @@ namespace JSC {
inline bool JSVariableObject::symbolTablePut(const Identifier& propertyName, JSValue value)
{
ASSERT(!Heap::heap(value) || Heap::heap(value) == Heap::heap(this));
Q_ASSERT(!Heap::heap(value) || Heap::heap(value) == Heap::heap(this));
SymbolTableEntry entry = symbolTable().inlineGet(propertyName.ustring().rep());
if (entry.isNull())
@ -137,13 +137,13 @@ namespace JSC {
inline bool JSVariableObject::symbolTablePutWithAttributes(const Identifier& propertyName, JSValue value, unsigned attributes)
{
ASSERT(!Heap::heap(value) || Heap::heap(value) == Heap::heap(this));
Q_ASSERT(!Heap::heap(value) || Heap::heap(value) == Heap::heap(this));
SymbolTable::iterator iter = symbolTable().find(propertyName.ustring().rep());
if (iter == symbolTable().end())
return false;
SymbolTableEntry& entry = iter->second;
ASSERT(!entry.isNull());
Q_ASSERT(!entry.isNull());
entry.setAttributes(attributes);
registerAt(entry.getIndex()) = value;
return true;
@ -159,7 +159,7 @@ namespace JSC {
inline void JSVariableObject::setRegisters(Register* registers, Register* registerArray)
{
ASSERT(registerArray != d->registerArray.get());
Q_ASSERT(registerArray != d->registerArray.get());
d->registerArray.set(registerArray);
d->registers = registers;
}

View file

@ -56,8 +56,8 @@ namespace JSC {
inline void JSWrapperObject::setInternalValue(JSValue value)
{
ASSERT(value);
ASSERT(!value.isObject());
Q_ASSERT(value);
Q_ASSERT(!value.isObject());
m_internalValue = value;
putAnonymousValue(0, value);
}

View file

@ -40,7 +40,7 @@ LiteralParser::TokenType LiteralParser::Lexer::lex(LiteralParserToken& token)
while (m_ptr < m_end && isASCIISpace(*m_ptr))
++m_ptr;
ASSERT(m_ptr <= m_end);
Q_ASSERT(m_ptr <= m_end);
if (m_ptr >= m_end) {
token.type = TokEnd;
token.start = token.end = m_ptr;
@ -270,13 +270,13 @@ LiteralParser::TokenType LiteralParser::Lexer::lexNumber(LiteralParserToken& tok
Vector<char, 64> buffer(token.end - token.start + 1);
int i;
for (i = 0; i < token.end - token.start; i++) {
ASSERT(static_cast<char>(token.start[i]) == token.start[i]);
Q_ASSERT(static_cast<char>(token.start[i]) == token.start[i]);
buffer[i] = static_cast<char>(token.start[i]);
}
buffer[i] = 0;
char* end;
token.numberToken = WTF::strtod(buffer.data(), &end);
ASSERT(buffer.data() + (token.end - token.start) == end);
Q_ASSERT(buffer.data() + (token.end - token.start) == end);
return TokNumber;
}
@ -434,7 +434,7 @@ JSValue LiteralParser::parse(ParserState initialState)
}
}
case StartParseStatementEndStatement: {
ASSERT(stateStack.isEmpty());
Q_ASSERT(stateStack.isEmpty());
if (m_lexer.currentToken().type != TokRParen)
return JSValue();
if (m_lexer.next() == TokEnd)

Some files were not shown because too many files have changed in this diff Show more