mirror of https://github.com/rubyjs/therubyracer
upgrade to V8 2.2.14
parent ed1d49ad7e, commit c1223abcd8
1103 changed files with 121222 additions and 11019 deletions
@@ -1,263 +0,0 @@
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license above has been modified
// significantly by Google Inc.
// Copyright 2006-2008 the V8 project authors. All rights reserved.

#ifndef V8_ARM_ASSEMBLER_THUMB2_INL_H_
#define V8_ARM_ASSEMBLER_THUMB2_INL_H_

#include "arm/assembler-thumb2.h"
#include "cpu.h"


namespace v8 {
namespace internal {

Condition NegateCondition(Condition cc) {
  ASSERT(cc != al);
  return static_cast<Condition>(cc ^ ne);
}


void RelocInfo::apply(intptr_t delta) {
  if (RelocInfo::IsInternalReference(rmode_)) {
    // absolute code pointer inside code object moves with the code object.
    int32_t* p = reinterpret_cast<int32_t*>(pc_);
    *p += delta;  // relocate entry
  }
  // We do not use pc relative addressing on ARM, so there is
  // nothing else to do.
}


Address RelocInfo::target_address() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
  return Assembler::target_address_at(pc_);
}


Address RelocInfo::target_address_address() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
  return reinterpret_cast<Address>(Assembler::target_address_address_at(pc_));
}


void RelocInfo::set_target_address(Address target) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
  Assembler::set_target_address_at(pc_, target);
}


Object* RelocInfo::target_object() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return Memory::Object_at(Assembler::target_address_address_at(pc_));
}


Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return Memory::Object_Handle_at(Assembler::target_address_address_at(pc_));
}


Object** RelocInfo::target_object_address() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return reinterpret_cast<Object**>(Assembler::target_address_address_at(pc_));
}


void RelocInfo::set_target_object(Object* target) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
}


Address* RelocInfo::target_reference_address() {
  ASSERT(rmode_ == EXTERNAL_REFERENCE);
  return reinterpret_cast<Address*>(Assembler::target_address_address_at(pc_));
}


Address RelocInfo::call_address() {
  ASSERT(IsPatchedReturnSequence());
  // The 2 instructions offset assumes patched return sequence.
  ASSERT(IsJSReturn(rmode()));
  return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
}


void RelocInfo::set_call_address(Address target) {
  ASSERT(IsPatchedReturnSequence());
  // The 2 instructions offset assumes patched return sequence.
  ASSERT(IsJSReturn(rmode()));
  Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
}


Object* RelocInfo::call_object() {
  return *call_object_address();
}


Object** RelocInfo::call_object_address() {
  ASSERT(IsPatchedReturnSequence());
  // The 2 instructions offset assumes patched return sequence.
  ASSERT(IsJSReturn(rmode()));
  return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
}


void RelocInfo::set_call_object(Object* target) {
  *call_object_address() = target;
}


bool RelocInfo::IsPatchedReturnSequence() {
  // On ARM a "call instruction" is actually two instructions.
  //   mov lr, pc
  //   ldr pc, [pc, #XXX]
  return (Assembler::instr_at(pc_) == kMovLrPc)
      && ((Assembler::instr_at(pc_ + Assembler::kInstrSize) & kLdrPCPattern)
          == kLdrPCPattern);
}


Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
  rm_ = no_reg;
  imm32_ = immediate;
  rmode_ = rmode;
}


Operand::Operand(const char* s) {
  rm_ = no_reg;
  imm32_ = reinterpret_cast<int32_t>(s);
  rmode_ = RelocInfo::EMBEDDED_STRING;
}


Operand::Operand(const ExternalReference& f) {
  rm_ = no_reg;
  imm32_ = reinterpret_cast<int32_t>(f.address());
  rmode_ = RelocInfo::EXTERNAL_REFERENCE;
}


Operand::Operand(Smi* value) {
  rm_ = no_reg;
  imm32_ = reinterpret_cast<intptr_t>(value);
  rmode_ = RelocInfo::NONE;
}


Operand::Operand(Register rm) {
  rm_ = rm;
  rs_ = no_reg;
  shift_op_ = LSL;
  shift_imm_ = 0;
}


bool Operand::is_reg() const {
  return rm_.is_valid() &&
         rs_.is(no_reg) &&
         shift_op_ == LSL &&
         shift_imm_ == 0;
}


void Assembler::CheckBuffer() {
  if (buffer_space() <= kGap) {
    GrowBuffer();
  }
  if (pc_offset() >= next_buffer_check_) {
    CheckConstPool(false, true);
  }
}


void Assembler::emit(Instr x) {
  CheckBuffer();
  *reinterpret_cast<Instr*>(pc_) = x;
  pc_ += kInstrSize;
}


Address Assembler::target_address_address_at(Address pc) {
  Address target_pc = pc;
  Instr instr = Memory::int32_at(target_pc);
  // If we have a bx instruction, the instruction before the bx is
  // what we need to patch.
  static const int32_t kBxInstMask = 0x0ffffff0;
  static const int32_t kBxInstPattern = 0x012fff10;
  if ((instr & kBxInstMask) == kBxInstPattern) {
    target_pc -= kInstrSize;
    instr = Memory::int32_at(target_pc);
  }
  // Verify that the instruction to patch is a
  // ldr<cond> <Rd>, [pc +/- offset_12].
  ASSERT((instr & 0x0f7f0000) == 0x051f0000);
  int offset = instr & 0xfff;  // offset_12 is unsigned
  if ((instr & (1 << 23)) == 0) offset = -offset;  // U bit defines offset sign
  // Verify that the constant pool comes after the instruction referencing it.
  ASSERT(offset >= -4);
  return target_pc + offset + 8;
}


Address Assembler::target_address_at(Address pc) {
  return Memory::Address_at(target_address_address_at(pc));
}


void Assembler::set_target_at(Address constant_pool_entry,
                              Address target) {
  Memory::Address_at(constant_pool_entry) = target;
}


void Assembler::set_target_address_at(Address pc, Address target) {
  Memory::Address_at(target_address_address_at(pc)) = target;
  // Intuitively, we would think it is necessary to flush the instruction cache
  // after patching a target address in the code as follows:
  //   CPU::FlushICache(pc, sizeof(target));
  // However, on ARM, no instruction was actually patched by the assignment
  // above; the target address is not part of an instruction, it is patched in
  // the constant pool and is read via a data access; the instruction accessing
  // this address in the constant pool remains unchanged.
}

} }  // namespace v8::internal

#endif  // V8_ARM_ASSEMBLER_THUMB2_INL_H_
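The decode in Assembler::target_address_address_at above is the heart of ARM code patching: the ldr encoding keeps an unsigned 12-bit offset with a separate U bit for the sign, and the effective address is computed relative to the instruction address plus 8 (the ARM pipeline prefetch). A minimal standalone sketch of that decode, with an illustrative instruction word instead of V8's Memory accessors:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Re-creation of the ldr-decoding step from target_address_address_at,
    // for illustration only; 'instr' is a raw ARM instruction word.
    uint32_t ConstantPoolAddress(uint32_t instr_address, uint32_t instr) {
      // Must be ldr<cond> <Rd>, [pc +/- offset_12].
      assert((instr & 0x0f7f0000u) == 0x051f0000u);
      int32_t offset = instr & 0xfff;                   // offset_12 is unsigned
      if ((instr & (1u << 23)) == 0) offset = -offset;  // U bit gives the sign
      // Reading pc on ARM yields the instruction address + 8.
      return instr_address + offset + 8;
    }

    int main() {
      // ldr r0, [pc, #4] encodes as 0xe59f0004 (cond=AL, U=1, Rd=r0).
      uint32_t entry = ConstantPoolAddress(0x1000, 0xe59f0004);
      printf("constant pool entry at 0x%x\n", (unsigned)entry);  // 0x100c
      return 0;
    }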
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,66 +0,0 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

var s = "test";

assertEquals("t", s.charAt());
assertEquals("t", s.charAt("string"));
assertEquals("t", s.charAt(null));
assertEquals("t", s.charAt(void 0));
assertEquals("t", s.charAt(false));
assertEquals("e", s.charAt(true));
assertEquals("", s.charAt(-1));
assertEquals("", s.charAt(4));
assertEquals("t", s.charAt(0));
assertEquals("t", s.charAt(3));
assertEquals("t", s.charAt(NaN));

assertEquals(116, s.charCodeAt());
assertEquals(116, s.charCodeAt("string"));
assertEquals(116, s.charCodeAt(null));
assertEquals(116, s.charCodeAt(void 0));
assertEquals(116, s.charCodeAt(false));
assertEquals(101, s.charCodeAt(true));
assertEquals(116, s.charCodeAt(0));
assertEquals(116, s.charCodeAt(3));
assertEquals(116, s.charCodeAt(NaN));
assertTrue(isNaN(s.charCodeAt(-1)));
assertTrue(isNaN(s.charCodeAt(4)));

// Make sure enough of the one-char string cache is filled.
var alpha = ['@'];
for (var i = 1; i < 128; i++) {
  var c = String.fromCharCode(i);
  alpha[i] = c.charAt(0);
}
var alphaStr = alpha.join("");

// Now test chars.
for (var i = 1; i < 128; i++) {
  assertEquals(alpha[i], alphaStr.charAt(i));
  assertEquals(String.fromCharCode(i), alphaStr.charAt(i));
}
@@ -18,6 +18,7 @@ Jan de Mooij <jandemooij@gmail.com>
Jay Freeman <saurik@saurik.com>
Joel Stanley <joel.stan@gmail.com>
John Jozwiak <jjozwiak@codeaurora.org>
Kun Zhang <zhangk@codeaurora.org>
Matt Hanselman <mjhanselman@gmail.com>
Martyn Capewell <martyn.capewell@arm.com>
Paolo Giarrusso <p.giarrusso@gmail.com>

@@ -1,3 +1,71 @@
2010-06-02: Version 2.2.14

        Fixed a crash in code generated for String.charCodeAt.

        Fixed a compilation issue with some GCC versions (issue 727).

        Performance optimizations on x64 and ARM platforms.


2010-05-31: Version 2.2.13

        Implement Object.getOwnPropertyDescriptor for element indices and
        strings (issue 599).

        Fix bug for windows 64 bit C calls from generated code.

        Add new scons flag unalignedaccesses for arm builds.

        Performance improvements on all platforms.


2010-05-26: Version 2.2.12

        Allowed accessors to be defined on objects rather than just object
        templates.

        Changed the ScriptData API.


2010-05-21: Version 2.2.11

        Fix crash bug in liveedit on 64 bit.

        Use 'full compiler' when debugging is active. This should increase
        the density of possible break points, making single step more fine
        grained. This will only take effect for functions compiled after
        debugging has been started, so recompilation of all functions is
        required to get the full effect. IA32 and x64 only for now.

        Misc. fixes to the Solaris build.

        Add new flags --print-cumulative-gc-stat and --trace-gc-nvp.

        Add filtering of CPU profiles by security context.

        Fix crash bug on ARM when running without VFP2 or VFP3.

        Incremental performance improvements in all backends.


2010-05-17: Version 2.2.10

        Performance improvements in the x64 and ARM backends.


2010-05-10: Version 2.2.9

        Allow Object.create to be called with a function (issue 697).

        Fixed bug with Date.parse returning a non-NaN value when called on a
        non date string (issue 696).

        Allow unaligned memory accesses on ARM targets that support it (by
        Subrato K De of CodeAurora <subratokde@codeaurora.org>).

        C++ API for retrieving JavaScript stack trace information.


2010-05-05: Version 2.2.8

        Performance improvements in the x64 and ARM backends.
@@ -84,6 +84,7 @@ ANDROID_FLAGS = ['-march=armv7-a',
                 '-finline-limit=64',
                 '-DCAN_USE_VFP_INSTRUCTIONS=1',
                 '-DCAN_USE_ARMV7_INSTRUCTIONS=1',
                 '-DCAN_USE_UNALIGNED_ACCESSES=1',
                 '-MD']

ANDROID_INCLUDES = [ANDROID_TOP + '/bionic/libc/arch-arm/include',

@@ -178,6 +179,9 @@ LIBRARY_FLAGS = {
      'CCFLAGS': ['-ansi'],
    },
    'os:solaris': {
      # On Solaris, to get isinf, INFINITY, fpclassify and other macros one
      # needs to define __C99FEATURES__.
      'CPPDEFINES': ['__C99FEATURES__'],
      'CPPPATH' : ['/usr/local/include'],
      'LIBPATH' : ['/usr/local/lib'],
      'CCFLAGS': ['-ansi'],

@@ -200,18 +204,18 @@ LIBRARY_FLAGS = {
      'LINKFLAGS': ['-m32']
    },
    'arch:arm': {
      'CPPDEFINES': ['V8_TARGET_ARCH_ARM']
      'CPPDEFINES': ['V8_TARGET_ARCH_ARM'],
      'unalignedaccesses:on' : {
        'CPPDEFINES' : ['CAN_USE_UNALIGNED_ACCESSES=1']
      },
      'unalignedaccesses:off' : {
        'CPPDEFINES' : ['CAN_USE_UNALIGNED_ACCESSES=0']
      }
    },
    'simulator:arm': {
      'CCFLAGS': ['-m32'],
      'LINKFLAGS': ['-m32']
    },
    'armvariant:thumb2': {
      'CPPDEFINES': ['V8_ARM_VARIANT_THUMB']
    },
    'armvariant:arm': {
      'CPPDEFINES': ['V8_ARM_VARIANT_ARM']
    },
    'arch:mips': {
      'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
      'simulator:none': {

@@ -736,6 +740,11 @@ SIMPLE_OPTIONS = {
    'default': 'none',
    'help': 'build with simulator'
  },
  'unalignedaccesses': {
    'values': ['default', 'on', 'off'],
    'default': 'default',
    'help': 'set whether the ARM target supports unaligned accesses'
  },
  'disassembler': {
    'values': ['on', 'off'],
    'default': 'off',

@@ -761,11 +770,6 @@ SIMPLE_OPTIONS = {
    'default': 'hidden',
    'help': 'shared library symbol visibility'
  },
  'armvariant': {
    'values': ['arm', 'thumb2', 'none'],
    'default': 'none',
    'help': 'generate thumb2 instructions instead of arm instructions (default)'
  },
  'pgo': {
    'values': ['off', 'instrument', 'optimize'],
    'default': 'off',

@@ -859,6 +863,10 @@ def VerifyOptions(env):
    Abort("Shared Object soname not applicable for static library.")
  if env['os'] != 'win32' and env['pgo'] != 'off':
    Abort("Profile guided optimization only supported on Windows.")
  if not (env['arch'] == 'arm' or env['simulator'] == 'arm') and ('unalignedaccesses' in ARGUMENTS):
    print env['arch']
    print env['simulator']
    Abort("Option unalignedaccesses only supported for the ARM architecture.")
  for (name, option) in SIMPLE_OPTIONS.iteritems():
    if (not option.get('default')) and (name not in ARGUMENTS):
      message = ("A value for option %s must be specified (%s)." %

@@ -959,10 +967,6 @@ def PostprocessOptions(options, os):
    if 'msvcltcg' in ARGUMENTS:
      print "Warning: forcing msvcltcg on as it is required for pgo (%s)" % options['pgo']
    options['msvcltcg'] = 'on'
  if (options['armvariant'] == 'none' and options['arch'] == 'arm'):
    options['armvariant'] = 'arm'
  if (options['armvariant'] != 'none' and options['arch'] != 'arm'):
    options['armvariant'] = 'none'
  if options['arch'] == 'mips':
    if ('regexp' in ARGUMENTS) and options['regexp'] == 'native':
      # Print a warning if native regexp is specified for mips
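For reference, the new option is driven from the scons command line together with the ARM target; an illustrative invocation would be "scons arch=arm unalignedaccesses=on", while the 'default' value appears to leave CAN_USE_UNALIGNED_ACCESSES undefined, matching the behavior before this change.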
@@ -61,3 +61,11 @@ Removed duplicate line in random seed code, and changed the name of
the Object.prototype.inherits function in the DeltaBlue benchmark to
inheritsFrom to avoid name clashes when running in Chromium with
extensions enabled.


Changes from Version 5 to Version 6
===================================

Removed dead code from the RayTrace benchmark and changed the Splay
benchmark to avoid converting the same numeric key to a string over
and over again.

@@ -78,7 +78,7 @@ BenchmarkSuite.suites = [];
// Scores are not comparable across versions. Bump the version if
// you're making changes that will affect the scores, e.g. if you add
// a new benchmark or change an existing one.
BenchmarkSuite.version = '5';
BenchmarkSuite.version = '6 (candidate)';


// To make the benchmark results predictable, we replace Math.random

@@ -205,12 +205,6 @@ Flog.RayTracer.Light.prototype = {
    this.intensity = (intensity ? intensity : 10.0);
  },

  getIntensity: function(distance){
    if(distance >= intensity) return 0;

    return Math.pow((intensity - distance) / strength, 0.2);
  },

  toString : function () {
    return 'Light [' + this.position.x + ',' + this.position.y + ',' + this.position.z + ']';
  }

@@ -420,31 +414,6 @@ if(typeof(Flog) == 'undefined') var Flog = {};
if(typeof(Flog.RayTracer) == 'undefined') Flog.RayTracer = {};
if(typeof(Flog.RayTracer.Shape) == 'undefined') Flog.RayTracer.Shape = {};

Flog.RayTracer.Shape.BaseShape = Class.create();

Flog.RayTracer.Shape.BaseShape.prototype = {
  position: null,
  material: null,

  initialize : function() {
    this.position = new Vector(0,0,0);
    this.material = new Flog.RayTracer.Material.SolidMaterial(
      new Flog.RayTracer.Color(1,0,1),
      0,
      0,
      0
    );
  },

  toString : function () {
    return 'Material [gloss=' + this.gloss + ', transparency=' + this.transparency + ', hasTexture=' + this.hasTexture +']';
  }
}
/* Fake a Flog.* namespace */
if(typeof(Flog) == 'undefined') var Flog = {};
if(typeof(Flog.RayTracer) == 'undefined') Flog.RayTracer = {};
if(typeof(Flog.RayTracer.Shape) == 'undefined') Flog.RayTracer.Shape = {};

Flog.RayTracer.Shape.Sphere = Class.create();

Flog.RayTracer.Shape.Sphere.prototype = {

@@ -20,6 +20,13 @@ the benchmark suite.

</p>

<div class="subtitle"><h3>Version 6 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v6/run.html">link</a>)</h3></div>

<p>Removed dead code from the RayTrace benchmark and changed the Splay
benchmark to avoid converting the same numeric key to a string over
and over again.
</p>

<div class="subtitle"><h3>Version 5 (<a href="http://v8.googlecode.com/svn/data/benchmarks/v5/run.html">link</a>)</h3></div>

<p>Removed duplicate line in random seed code, and changed the name of

@@ -111,12 +111,12 @@ higher scores means better performance: <em>Bigger is better!</em>
<li><b>Richards</b><br>OS kernel simulation benchmark, originally written in BCPL by Martin Richards (<i>539 lines</i>).</li>
<li><b>DeltaBlue</b><br>One-way constraint solver, originally written in Smalltalk by John Maloney and Mario Wolczko (<i>880 lines</i>).</li>
<li><b>Crypto</b><br>Encryption and decryption benchmark based on code by Tom Wu (<i>1698 lines</i>).</li>
<li><b>RayTrace</b><br>Ray tracer benchmark based on code by <a href="http://flog.co.nz/">Adam Burmister</a> (<i>935 lines</i>).</li>
<li><b>EarleyBoyer</b><br>Classic Scheme benchmarks, translated to JavaScript by Florian Loitsch's Scheme2Js compiler (<i>4685 lines</i>).</li>
<li><b>RayTrace</b><br>Ray tracer benchmark based on code by <a href="http://flog.co.nz/">Adam Burmister</a> (<i>904 lines</i>).</li>
<li><b>EarleyBoyer</b><br>Classic Scheme benchmarks, translated to JavaScript by Florian Loitsch's Scheme2Js compiler (<i>4684 lines</i>).</li>
<li><b>RegExp</b><br>Regular expression benchmark generated by extracting regular expression operations from 50 of the most popular web pages
(<i>1614 lines</i>).
</li>
<li><b>Splay</b><br>Data manipulation benchmark that deals with splay trees and exercises the automatic memory management subsystem (<i>378 lines</i>).</li>
<li><b>Splay</b><br>Data manipulation benchmark that deals with splay trees and exercises the automatic memory management subsystem (<i>379 lines</i>).</li>
</ul>

<p>

@@ -46,16 +46,16 @@ var kSplayTreePayloadDepth = 5;
var splayTree = null;


function GeneratePayloadTree(depth, key) {
function GeneratePayloadTree(depth, tag) {
  if (depth == 0) {
    return {
      array : [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 ],
      string : 'String for key ' + key + ' in leaf node'
      string : 'String for key ' + tag + ' in leaf node'
    };
  } else {
    return {
      left: GeneratePayloadTree(depth - 1, key),
      right: GeneratePayloadTree(depth - 1, key)
      left: GeneratePayloadTree(depth - 1, tag),
      right: GeneratePayloadTree(depth - 1, tag)
    };
  }
}

@@ -74,7 +74,8 @@ function InsertNewNode() {
  do {
    key = GenerateKey();
  } while (splayTree.find(key) != null);
  splayTree.insert(key, GeneratePayloadTree(kSplayTreePayloadDepth, key));
  var payload = GeneratePayloadTree(kSplayTreePayloadDepth, String(key));
  splayTree.insert(key, payload);
  return key;
}
Image changed (Before: 24 KiB, After: 24 KiB)
@@ -143,6 +143,39 @@ class EXPORT Debug {
};


/**
 * An event details object passed to the debug event listener.
 */
class EventDetails {
 public:
  /**
   * Event type.
   */
  virtual DebugEvent GetEvent() const = 0;

  /**
   * Access to execution state and event data of the debug event. Don't store
   * these across callbacks as their content becomes invalid.
   */
  virtual Handle<Object> GetExecutionState() const = 0;
  virtual Handle<Object> GetEventData() const = 0;

  /**
   * Get the context active when the debug event happened. Note this is not
   * the current active context as the JavaScript part of the debugger is
   * running in its own context which is entered at this point.
   */
  virtual Handle<Context> GetEventContext() const = 0;

  /**
   * Client data passed with the corresponding callback when it was registered.
   */
  virtual Handle<Value> GetCallbackData() const = 0;

  virtual ~EventDetails() {}
};


/**
 * Debug event callback function.
 *

@@ -157,6 +190,15 @@ class EXPORT Debug {
                      Handle<Object> event_data,
                      Handle<Value> data);

/**
 * Debug event callback function.
 *
 * \param event_details object providing information about the debug event
 *
 * An EventCallback2 does not take possession of the event data,
 * and must not rely on the data persisting after the handler returns.
 */
typedef void (*EventCallback2)(const EventDetails& event_details);

/**
 * Debug message callback function.
 *

@@ -165,7 +207,7 @@ class EXPORT Debug {
 * \param length length of the message
 * \param client_data the data value passed when registering the message handler

 * A MessageHandler does not take posession of the message string,
 * A MessageHandler does not take possession of the message string,
 * and must not rely on the data persisting after the handler returns.
 *
 * This message handler is deprecated. Use MessageHandler2 instead.

@@ -178,7 +220,7 @@ class EXPORT Debug {
 *
 * \param message the debug message handler message object

 * A MessageHandler does not take posession of the message data,
 * A MessageHandler does not take possession of the message data,
 * and must not rely on the data persisting after the handler returns.
 */
typedef void (*MessageHandler2)(const Message& message);

@@ -196,6 +238,8 @@ class EXPORT Debug {
  // Set a C debug event listener.
  static bool SetDebugEventListener(EventCallback that,
                                    Handle<Value> data = Handle<Value>());
  static bool SetDebugEventListener2(EventCallback2 that,
                                     Handle<Value> data = Handle<Value>());

  // Set a JavaScript debug event listener.
  static bool SetDebugEventListener(v8::Handle<v8::Object> that,
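A sketch of how the new EventDetails-based listener might be used from embedder code, assuming v8-debug.h is on the include path (the callback name and body are illustrative):

    #include <v8-debug.h>

    // Hypothetical listener using the new EventCallback2 signature. Per the
    // comments above, the handles must not be stored past the callback.
    static void OnDebugEvent(const v8::Debug::EventDetails& details) {
      v8::DebugEvent event = details.GetEvent();
      v8::Handle<v8::Object> exec_state = details.GetExecutionState();
      v8::Handle<v8::Object> event_data = details.GetEventData();
      // ... inspect event, exec_state and event_data here ...
      (void)event; (void)exec_state; (void)event_data;
    }

    void InstallListener() {
      // Optional client data passed here comes back later through
      // EventDetails::GetCallbackData().
      v8::Debug::SetDebugEventListener2(OnDebugEvent);
    }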
@@ -109,7 +109,7 @@ class V8EXPORT CpuProfileNode {
  /** Retrieves a child node by index. */
  const CpuProfileNode* GetChild(int index) const;

  static const int kNoLineNumberInfo = 0;
  static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
};


@@ -139,6 +139,15 @@ class V8EXPORT CpuProfile {
 */
class V8EXPORT CpuProfiler {
 public:
  /**
   * A note on security tokens usage. As scripts from different
   * origins can run inside a single V8 instance, it is possible to
   * have functions from different security contexts intermixed in a
   * single CPU profile. To avoid exposing function names belonging to
   * other contexts, filtering by security token is performed while
   * obtaining profiling results.
   */

  /**
   * Returns the number of profiles collected (doesn't include
   * profiles that are being collected at the moment of call.)

@@ -146,16 +155,22 @@ class V8EXPORT CpuProfiler {
  static int GetProfilesCount();

  /** Returns a profile by index. */
  static const CpuProfile* GetProfile(int index);
  static const CpuProfile* GetProfile(
      int index,
      Handle<Value> security_token = Handle<Value>());

  /** Returns a profile by uid. */
  static const CpuProfile* FindProfile(unsigned uid);
  static const CpuProfile* FindProfile(
      unsigned uid,
      Handle<Value> security_token = Handle<Value>());

  /**
   * Starts collecting CPU profile. Title may be an empty string. It
   * is allowed to have several profiles being collected at
   * once. Attempts to start collecting several profiles with the same
   * title are silently ignored.
   * title are silently ignored. While collecting a profile, functions
   * from all security contexts are included in it. The token-based
   * filtering is only performed when querying for a profile.
   */
  static void StartProfiling(Handle<String> title);


@@ -163,7 +178,9 @@ class V8EXPORT CpuProfiler {
   * Stops collecting CPU profile with a given title and returns it.
   * If the title given is empty, finishes the last profile started.
   */
  static const CpuProfile* StopProfiling(Handle<String> title);
  static const CpuProfile* StopProfiling(
      Handle<String> title,
      Handle<Value> security_token = Handle<Value>());
};
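Putting the token-filtered profiling API together, a caller might collect and then query a profile like this (a sketch; the title, token, and surrounding setup are illustrative, and an entered context is assumed):

    #include <v8.h>
    #include <v8-profiler.h>

    const v8::CpuProfile* ProfileWithToken(v8::Handle<v8::Value> token) {
      v8::HandleScope scope;
      v8::Handle<v8::String> title = v8::String::New("page-load");
      // Functions from every security context are recorded while profiling...
      v8::CpuProfiler::StartProfiling(title);
      // ... run the code of interest here ...
      // ...and token-based filtering happens only when the profile is queried.
      return v8::CpuProfiler::StopProfiling(title, token);
    }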
@@ -126,6 +126,9 @@ template <class T> class Persistent;
class FunctionTemplate;
class ObjectTemplate;
class Data;
class AccessorInfo;
class StackTrace;
class StackFrame;

namespace internal {

@@ -510,11 +513,37 @@ class V8EXPORT Data {
class V8EXPORT ScriptData {  // NOLINT
 public:
  virtual ~ScriptData() { }
  /**
   * Pre-compiles the specified script (context-independent).
   *
   * \param input Pointer to UTF-8 script source code.
   * \param length Length of UTF-8 script source code.
   */
  static ScriptData* PreCompile(const char* input, int length);
  static ScriptData* New(unsigned* data, int length);

  /**
   * Load previous pre-compilation data.
   *
   * \param data Pointer to data returned by a call to Data() of a previous
   *   ScriptData. Ownership is not transferred.
   * \param length Length of data.
   */
  static ScriptData* New(const char* data, int length);

  /**
   * Returns the length of Data().
   */
  virtual int Length() = 0;
  virtual unsigned* Data() = 0;

  /**
   * Returns a serialized representation of this ScriptData that can later be
   * passed to New(). NOTE: Serialized data is platform-dependent.
   */
  virtual const char* Data() = 0;

  /**
   * Returns true if the source code could not be parsed.
   */
  virtual bool HasError() = 0;
};
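A sketch of the intended round trip under the changed ScriptData API: precompile once, copy out the serialized Data() bytes, and rebuild a ScriptData from them later (the buffer handling here is illustrative):

    #include <cstring>
    #include <v8.h>

    v8::ScriptData* RoundTrip(const char* source, int source_length) {
      // Pre-compile (context-independent) and serialize.
      v8::ScriptData* pre = v8::ScriptData::PreCompile(source, source_length);
      int length = pre->Length();
      char* buffer = new char[length];
      memcpy(buffer, pre->Data(), length);  // Data() stays owned by 'pre'
      delete pre;
      // Rebuild later; New() does not take ownership, so the serialized
      // buffer can be freed by the caller afterwards.
      v8::ScriptData* restored = v8::ScriptData::New(buffer, length);
      delete[] buffer;
      return restored;
    }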
@@ -691,6 +720,106 @@ class V8EXPORT Message {

  // TODO(1245381): Print to a string instead of on a FILE.
  static void PrintCurrentStackTrace(FILE* out);

  static const int kNoLineNumberInfo = 0;
  static const int kNoColumnInfo = 0;
};


/**
 * Representation of a JavaScript stack trace. The information collected is a
 * snapshot of the execution stack and the information remains valid after
 * execution continues.
 */
class V8EXPORT StackTrace {
 public:
  /**
   * Flags that determine what information is captured for each
   * StackFrame when grabbing the current stack trace.
   */
  enum StackTraceOptions {
    kLineNumber = 1,
    kColumnOffset = 1 << 1 | kLineNumber,
    kScriptName = 1 << 2,
    kFunctionName = 1 << 3,
    kIsEval = 1 << 4,
    kIsConstructor = 1 << 5,
    kOverview = kLineNumber | kColumnOffset | kScriptName | kFunctionName,
    kDetailed = kOverview | kIsEval | kIsConstructor
  };

  /**
   * Returns a StackFrame at a particular index.
   */
  Local<StackFrame> GetFrame(uint32_t index) const;

  /**
   * Returns the number of StackFrames.
   */
  int GetFrameCount() const;

  /**
   * Returns StackTrace as a v8::Array that contains StackFrame objects.
   */
  Local<Array> AsArray();

  /**
   * Grab a snapshot of the current JavaScript execution stack.
   *
   * \param frame_limit The maximum number of stack frames we want to capture.
   * \param options Enumerates the set of things we will capture for each
   *   StackFrame.
   */
  static Local<StackTrace> CurrentStackTrace(
      int frame_limit,
      StackTraceOptions options = kOverview);
};


/**
 * A single JavaScript stack frame.
 */
class V8EXPORT StackFrame {
 public:
  /**
   * Returns the number, 1-based, of the line for the associated function call.
   * This method will return Message::kNoLineNumberInfo if it is unable to
   * retrieve the line number, or if kLineNumber was not passed as an option
   * when capturing the StackTrace.
   */
  int GetLineNumber() const;

  /**
   * Returns the 1-based column offset on the line for the associated function
   * call.
   * This method will return Message::kNoColumnInfo if it is unable to retrieve
   * the column number, or if kColumnOffset was not passed as an option when
   * capturing the StackTrace.
   */
  int GetColumn() const;

  /**
   * Returns the name of the resource that contains the script for the
   * function for this StackFrame.
   */
  Local<String> GetScriptName() const;

  /**
   * Returns the name of the function associated with this stack frame.
   */
  Local<String> GetFunctionName() const;

  /**
   * Returns whether or not the associated function is compiled via a call to
   * eval().
   */
  bool IsEval() const;

  /**
   * Returns whether or not the associated function is called as a
   * constructor via "new".
   */
  bool IsConstructor() const;
};
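As a usage sketch of the new classes, capturing and walking the current stack might look like this (the frame limit and printing are illustrative):

    #include <stdio.h>
    #include <v8.h>

    void DumpStack() {
      v8::HandleScope scope;
      v8::Local<v8::StackTrace> trace =
          v8::StackTrace::CurrentStackTrace(10, v8::StackTrace::kDetailed);
      for (int i = 0; i < trace->GetFrameCount(); i++) {
        v8::Local<v8::StackFrame> frame = trace->GetFrame(i);
        v8::String::Utf8Value name(frame->GetFunctionName());
        printf("#%d %s:%d:%d\n", i, *name ? *name : "<anonymous>",
               frame->GetLineNumber(), frame->GetColumn());
      }
    }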
@@ -1203,6 +1332,41 @@ enum ExternalArrayType {
  kExternalFloatArray
};

/**
 * Accessor[Getter|Setter] are used as callback functions when
 * setting|getting a particular property. See Object and ObjectTemplate's
 * method SetAccessor.
 */
typedef Handle<Value> (*AccessorGetter)(Local<String> property,
                                        const AccessorInfo& info);


typedef void (*AccessorSetter)(Local<String> property,
                               Local<Value> value,
                               const AccessorInfo& info);


/**
 * Access control specifications.
 *
 * Some accessors should be accessible across contexts. These
 * accessors have an explicit access control parameter which specifies
 * the kind of cross-context access that should be allowed.
 *
 * Additionally, for security, accessors can prohibit overwriting by
 * accessors defined in JavaScript. For objects that have such
 * accessors either locally or in their prototype chain it is not
 * possible to overwrite the accessor by using __defineGetter__ or
 * __defineSetter__ from JavaScript code.
 */
enum AccessControl {
  DEFAULT = 0,
  ALL_CAN_READ = 1,
  ALL_CAN_WRITE = 1 << 1,
  PROHIBITS_OVERWRITING = 1 << 2
};


/**
 * A JavaScript object (ECMA-262, 4.3.3)
 */

@@ -1245,6 +1409,13 @@ class V8EXPORT Object : public Value {

  bool Delete(uint32_t index);

  bool SetAccessor(Handle<String> name,
                   AccessorGetter getter,
                   AccessorSetter setter = 0,
                   Handle<Value> data = Handle<Value>(),
                   AccessControl settings = DEFAULT,
                   PropertyAttribute attribute = None);

  /**
   * Returns an array containing the names of the enumerable properties
   * of this object, including properties from prototype objects. The
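With accessors now definable on plain objects rather than only on templates, a native getter can be attached directly; a sketch with an illustrative property name and getter:

    #include <v8.h>

    // Hypothetical getter: reports a constant for the "answer" property.
    static v8::Handle<v8::Value> GetAnswer(v8::Local<v8::String> property,
                                           const v8::AccessorInfo& info) {
      return v8::Integer::New(42);
    }

    bool AttachAccessor(v8::Handle<v8::Object> obj) {
      // No setter is installed, so the remaining parameters keep their
      // defaults (no data, DEFAULT access control, attribute None).
      return obj->SetAccessor(v8::String::New("answer"), GetAnswer);
    }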
@@ -1539,19 +1710,6 @@ typedef Handle<Value> (*InvocationCallback)(const Arguments& args);

typedef int (*LookupCallback)(Local<Object> self, Local<String> name);

/**
 * Accessor[Getter|Setter] are used as callback functions when
 * setting|getting a particular property. See objectTemplate::SetAccessor.
 */
typedef Handle<Value> (*AccessorGetter)(Local<String> property,
                                        const AccessorInfo& info);


typedef void (*AccessorSetter)(Local<String> property,
                               Local<Value> value,
                               const AccessorInfo& info);


/**
 * NamedProperty[Getter|Setter] are used as interceptors on object.
 * See ObjectTemplate::SetNamedPropertyHandler.

@@ -1631,27 +1789,6 @@ typedef Handle<Boolean> (*IndexedPropertyDeleter)(uint32_t index,
typedef Handle<Array> (*IndexedPropertyEnumerator)(const AccessorInfo& info);


/**
 * Access control specifications.
 *
 * Some accessors should be accessible across contexts. These
 * accessors have an explicit access control parameter which specifies
 * the kind of cross-context access that should be allowed.
 *
 * Additionally, for security, accessors can prohibit overwriting by
 * accessors defined in JavaScript. For objects that have such
 * accessors either locally or in their prototype chain it is not
 * possible to overwrite the accessor by using __defineGetter__ or
 * __defineSetter__ from JavaScript code.
 */
enum AccessControl {
  DEFAULT = 0,
  ALL_CAN_READ = 1,
  ALL_CAN_WRITE = 1 << 1,
  PROHIBITS_OVERWRITING = 1 << 2
};


/**
 * Access type specification.
 */

@@ -2122,7 +2259,7 @@ class V8EXPORT ResourceConstraints {
};


bool SetResourceConstraints(ResourceConstraints* constraints);
bool V8EXPORT SetResourceConstraints(ResourceConstraints* constraints);


// --- E x c e p t i o n s ---

@@ -2764,7 +2901,12 @@ class V8EXPORT Context {
   */
  void ReattachGlobal(Handle<Object> global_object);

  /** Creates a new context. */
  /** Creates a new context.
   *
   * Returns a persistent handle to the newly allocated context. This
   * persistent handle has to be disposed when the context is no
   * longer used so the context can be garbage collected.
   */
  static Persistent<Context> New(
      ExtensionConfiguration* extensions = NULL,
      Handle<ObjectTemplate> global_template = Handle<ObjectTemplate>(),

@@ -3035,7 +3177,7 @@ class Internals {
  static const int kProxyProxyOffset = sizeof(void*);
  static const int kJSObjectHeaderSize = 3 * sizeof(void*);
  static const int kFullStringRepresentationMask = 0x07;
  static const int kExternalTwoByteRepresentationTag = 0x03;
  static const int kExternalTwoByteRepresentationTag = 0x02;

  // These constants are compiler dependent so their values must be
  // defined within the implementation.
@@ -294,7 +294,7 @@ Handle<Object> JsHttpRequestProcessor::WrapMap(map<string, string>* obj) {

  // Fetch the template for creating JavaScript map wrappers.
  // It only has to be created once, which we do on demand.
  if (request_template_.IsEmpty()) {
  if (map_template_.IsEmpty()) {
    Handle<ObjectTemplate> raw_template = MakeMapTemplate();
    map_template_ = Persistent<ObjectTemplate>::New(raw_template);
  }

@@ -299,5 +299,10 @@ void ReportException(v8::TryCatch* try_catch) {
      printf("^");
    }
    printf("\n");
    v8::String::Utf8Value stack_trace(try_catch->StackTrace());
    if (stack_trace.length() > 0) {
      const char* stack_trace_string = ToCString(stack_trace);
      printf("%s\n", stack_trace_string);
    }
  }
}
@@ -58,6 +58,7 @@ SOURCES = {
    debug.cc
    disassembler.cc
    diy-fp.cc
    dtoa.cc
    execution.cc
    factory.cc
    flags.cc

@@ -68,6 +69,7 @@ SOURCES = {
    func-name-inferrer.cc
    global-handles.cc
    fast-dtoa.cc
    fixed-dtoa.cc
    handles.cc
    hashmap.cc
    heap-profiler.cc

@@ -134,13 +136,8 @@ SOURCES = {
    arm/register-allocator-arm.cc
    arm/stub-cache-arm.cc
    arm/virtual-frame-arm.cc
    """),
  'armvariant:arm': Split("""
    arm/assembler-arm.cc
    """),
  'armvariant:thumb2': Split("""
    arm/assembler-thumb2.cc
    """),
  'arch:mips': Split("""
    fast-codegen.cc
    mips/assembler-mips.cc
@@ -48,7 +48,7 @@

#define LOG_API(expr) LOG(ApiEntryCall(expr))

#ifdef ENABLE_HEAP_PROTECTION
#ifdef ENABLE_VMSTATE_TRACKING
#define ENTER_V8 i::VMState __state__(i::OTHER)
#define LEAVE_V8 i::VMState __state__(i::EXTERNAL)
#else

@@ -58,11 +58,10 @@

namespace v8 {


#define ON_BAILOUT(location, code) \
  if (IsDeadCheck(location)) { \
    code; \
    UNREACHABLE(); \
#define ON_BAILOUT(location, code) \
  if (IsDeadCheck(location) || v8::V8::IsExecutionTerminating()) { \
    code; \
    UNREACHABLE(); \
  }
@@ -776,6 +775,28 @@ void FunctionTemplate::SetCallHandler(InvocationCallback callback,
}


static i::Handle<i::AccessorInfo> MakeAccessorInfo(
    v8::Handle<String> name,
    AccessorGetter getter,
    AccessorSetter setter,
    v8::Handle<Value> data,
    v8::AccessControl settings,
    v8::PropertyAttribute attributes) {
  i::Handle<i::AccessorInfo> obj = i::Factory::NewAccessorInfo();
  ASSERT(getter != NULL);
  obj->set_getter(*FromCData(getter));
  obj->set_setter(*FromCData(setter));
  if (data.IsEmpty()) data = v8::Undefined();
  obj->set_data(*Utils::OpenHandle(*data));
  obj->set_name(*Utils::OpenHandle(*name));
  if (settings & ALL_CAN_READ) obj->set_all_can_read(true);
  if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
  if (settings & PROHIBITS_OVERWRITING) obj->set_prohibits_overwriting(true);
  obj->set_property_attributes(static_cast<PropertyAttributes>(attributes));
  return obj;
}


void FunctionTemplate::AddInstancePropertyAccessor(
    v8::Handle<String> name,
    AccessorGetter getter,

@@ -788,18 +809,10 @@ void FunctionTemplate::AddInstancePropertyAccessor(
  }
  ENTER_V8;
  HandleScope scope;
  i::Handle<i::AccessorInfo> obj = i::Factory::NewAccessorInfo();
  ASSERT(getter != NULL);
  obj->set_getter(*FromCData(getter));
  obj->set_setter(*FromCData(setter));
  if (data.IsEmpty()) data = v8::Undefined();
  obj->set_data(*Utils::OpenHandle(*data));
  obj->set_name(*Utils::OpenHandle(*name));
  if (settings & ALL_CAN_READ) obj->set_all_can_read(true);
  if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true);
  if (settings & PROHIBITS_OVERWRITING) obj->set_prohibits_overwriting(true);
  obj->set_property_attributes(static_cast<PropertyAttributes>(attributes));

  i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(name,
                                                    getter, setter, data,
                                                    settings, attributes);
  i::Handle<i::Object> list(Utils::OpenHandle(this)->property_accessors());
  if (list->IsUndefined()) {
    list = NeanderArray().value();

@@ -1106,8 +1119,19 @@ ScriptData* ScriptData::PreCompile(const char* input, int length) {
}


ScriptData* ScriptData::New(unsigned* data, int length) {
  return new i::ScriptDataImpl(i::Vector<unsigned>(data, length));
ScriptData* ScriptData::New(const char* data, int length) {
  // Return an empty ScriptData if the length is obviously invalid.
  if (length % sizeof(unsigned) != 0) {
    return new i::ScriptDataImpl(i::Vector<unsigned>());
  }

  // Copy the data to ensure it is properly aligned.
  int deserialized_data_length = length / sizeof(unsigned);
  unsigned* deserialized_data = i::NewArray<unsigned>(deserialized_data_length);
  memcpy(deserialized_data, data, length);

  return new i::ScriptDataImpl(
      i::Vector<unsigned>(deserialized_data, deserialized_data_length));
}

@@ -1438,7 +1462,7 @@ static i::Handle<i::Object> CallV8HeapFunction(const char* name,


int Message::GetLineNumber() const {
  ON_BAILOUT("v8::Message::GetLineNumber()", return -1);
  ON_BAILOUT("v8::Message::GetLineNumber()", return kNoLineNumberInfo);
  ENTER_V8;
  HandleScope scope;
  EXCEPTION_PREAMBLE();

@@ -1470,7 +1494,7 @@ int Message::GetEndPosition() const {


int Message::GetStartColumn() const {
  if (IsDeadCheck("v8::Message::GetStartColumn()")) return 0;
  if (IsDeadCheck("v8::Message::GetStartColumn()")) return kNoColumnInfo;
  ENTER_V8;
  HandleScope scope;
  i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);

@@ -1485,7 +1509,7 @@ int Message::GetStartColumn() const {


int Message::GetEndColumn() const {
  if (IsDeadCheck("v8::Message::GetEndColumn()")) return 0;
  if (IsDeadCheck("v8::Message::GetEndColumn()")) return kNoColumnInfo;
  ENTER_V8;
  HandleScope scope;
  i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
@@ -1525,6 +1549,118 @@ void Message::PrintCurrentStackTrace(FILE* out) {
}


// --- S t a c k T r a c e ---

Local<StackFrame> StackTrace::GetFrame(uint32_t index) const {
  if (IsDeadCheck("v8::StackTrace::GetFrame()")) return Local<StackFrame>();
  ENTER_V8;
  HandleScope scope;
  i::Handle<i::JSArray> self = Utils::OpenHandle(this);
  i::Handle<i::JSObject> obj(i::JSObject::cast(self->GetElement(index)));
  return scope.Close(Utils::StackFrameToLocal(obj));
}


int StackTrace::GetFrameCount() const {
  if (IsDeadCheck("v8::StackTrace::GetFrameCount()")) return -1;
  ENTER_V8;
  return i::Smi::cast(Utils::OpenHandle(this)->length())->value();
}


Local<Array> StackTrace::AsArray() {
  if (IsDeadCheck("v8::StackTrace::AsArray()")) Local<Array>();
  ENTER_V8;
  return Utils::ToLocal(Utils::OpenHandle(this));
}


Local<StackTrace> StackTrace::CurrentStackTrace(int frame_limit,
                                                StackTraceOptions options) {
  if (IsDeadCheck("v8::StackTrace::CurrentStackTrace()")) Local<StackTrace>();
  ENTER_V8;
  return i::Top::CaptureCurrentStackTrace(frame_limit, options);
}


// --- S t a c k F r a m e ---

int StackFrame::GetLineNumber() const {
  if (IsDeadCheck("v8::StackFrame::GetLineNumber()")) {
    return Message::kNoLineNumberInfo;
  }
  ENTER_V8;
  i::HandleScope scope;
  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
  i::Handle<i::Object> line = GetProperty(self, "lineNumber");
  if (!line->IsSmi()) {
    return Message::kNoLineNumberInfo;
  }
  return i::Smi::cast(*line)->value();
}


int StackFrame::GetColumn() const {
  if (IsDeadCheck("v8::StackFrame::GetColumn()")) {
    return Message::kNoColumnInfo;
  }
  ENTER_V8;
  i::HandleScope scope;
  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
  i::Handle<i::Object> column = GetProperty(self, "column");
  if (!column->IsSmi()) {
    return Message::kNoColumnInfo;
  }
  return i::Smi::cast(*column)->value();
}


Local<String> StackFrame::GetScriptName() const {
  if (IsDeadCheck("v8::StackFrame::GetScriptName()")) return Local<String>();
  ENTER_V8;
  HandleScope scope;
  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
  i::Handle<i::Object> name = GetProperty(self, "scriptName");
  if (!name->IsString()) {
    return Local<String>();
  }
  return scope.Close(Local<String>::Cast(Utils::ToLocal(name)));
}


Local<String> StackFrame::GetFunctionName() const {
  if (IsDeadCheck("v8::StackFrame::GetFunctionName()")) return Local<String>();
  ENTER_V8;
  HandleScope scope;
  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
  i::Handle<i::Object> name = GetProperty(self, "functionName");
  if (!name->IsString()) {
    return Local<String>();
  }
  return scope.Close(Local<String>::Cast(Utils::ToLocal(name)));
}


bool StackFrame::IsEval() const {
  if (IsDeadCheck("v8::StackFrame::IsEval()")) return false;
  ENTER_V8;
  i::HandleScope scope;
  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
  i::Handle<i::Object> is_eval = GetProperty(self, "isEval");
  return is_eval->IsTrue();
}


bool StackFrame::IsConstructor() const {
  if (IsDeadCheck("v8::StackFrame::IsConstructor()")) return false;
  ENTER_V8;
  i::HandleScope scope;
  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
  i::Handle<i::Object> is_constructor = GetProperty(self, "isConstructor");
  return is_constructor->IsTrue();
}


// --- D a t a ---

bool Value::IsUndefined() const {
@@ -2185,10 +2321,10 @@ Local<String> v8::Object::ObjectProtoToString() {
  int postfix_len = i::StrLength(postfix);

  int buf_len = prefix_len + str_len + postfix_len;
  char* buf = i::NewArray<char>(buf_len);
  i::ScopedVector<char> buf(buf_len);

  // Write prefix.
  char* ptr = buf;
  char* ptr = buf.start();
  memcpy(ptr, prefix, prefix_len * v8::internal::kCharSize);
  ptr += prefix_len;

@@ -2200,8 +2336,7 @@ Local<String> v8::Object::ObjectProtoToString() {
  memcpy(ptr, postfix, postfix_len * v8::internal::kCharSize);

  // Copy the buffer into a heap-allocated string and return it.
  Local<String> result = v8::String::New(buf, buf_len);
  i::DeleteArray(buf);
  Local<String> result = v8::String::New(buf.start(), buf_len);
  return result;
}
}

@@ -2243,6 +2378,23 @@ bool v8::Object::Has(uint32_t index) {
}


bool Object::SetAccessor(Handle<String> name,
                         AccessorGetter getter,
                         AccessorSetter setter,
                         v8::Handle<Value> data,
                         AccessControl settings,
                         PropertyAttribute attributes) {
  ON_BAILOUT("v8::Object::SetAccessor()", return false);
  ENTER_V8;
  HandleScope scope;
  i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name,
                                                     getter, setter, data,
                                                     settings, attributes);
  i::Handle<i::Object> result = i::SetAccessor(Utils::OpenHandle(this), info);
  return !result.is_null() && !result->IsUndefined();
}


bool v8::Object::HasRealNamedProperty(Handle<String> key) {
  ON_BAILOUT("v8::Object::HasRealNamedProperty()", return false);
  return Utils::OpenHandle(this)->HasRealNamedProperty(

@@ -3881,10 +4033,40 @@ Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
// --- D e b u g   S u p p o r t ---

#ifdef ENABLE_DEBUGGER_SUPPORT

static v8::Debug::EventCallback event_callback = NULL;

static void EventCallbackWrapper(const v8::Debug::EventDetails& event_details) {
  if (event_callback) {
    event_callback(event_details.GetEvent(),
                   event_details.GetExecutionState(),
                   event_details.GetEventData(),
                   event_details.GetCallbackData());
  }
}


bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) {
  EnsureInitialized("v8::Debug::SetDebugEventListener()");
  ON_BAILOUT("v8::Debug::SetDebugEventListener()", return false);
  ENTER_V8;

  event_callback = that;

  HandleScope scope;
  i::Handle<i::Object> proxy = i::Factory::undefined_value();
  if (that != NULL) {
    proxy = i::Factory::NewProxy(FUNCTION_ADDR(EventCallbackWrapper));
  }
  i::Debugger::SetEventListener(proxy, Utils::OpenHandle(*data));
  return true;
}


bool Debug::SetDebugEventListener2(EventCallback2 that, Handle<Value> data) {
  EnsureInitialized("v8::Debug::SetDebugEventListener2()");
  ON_BAILOUT("v8::Debug::SetDebugEventListener2()", return false);
  ENTER_V8;
  HandleScope scope;
  i::Handle<i::Object> proxy = i::Factory::undefined_value();
  if (that != NULL) {

@@ -4139,15 +4321,23 @@ int CpuProfiler::GetProfilesCount() {
}


const CpuProfile* CpuProfiler::GetProfile(int index) {
const CpuProfile* CpuProfiler::GetProfile(int index,
                                          Handle<Value> security_token) {
  IsDeadCheck("v8::CpuProfiler::GetProfile");
  return reinterpret_cast<const CpuProfile*>(i::CpuProfiler::GetProfile(index));
  return reinterpret_cast<const CpuProfile*>(
      i::CpuProfiler::GetProfile(
          security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
          index));
}


const CpuProfile* CpuProfiler::FindProfile(unsigned uid) {
const CpuProfile* CpuProfiler::FindProfile(unsigned uid,
                                           Handle<Value> security_token) {
  IsDeadCheck("v8::CpuProfiler::FindProfile");
  return reinterpret_cast<const CpuProfile*>(i::CpuProfiler::FindProfile(uid));
  return reinterpret_cast<const CpuProfile*>(
      i::CpuProfiler::FindProfile(
          security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
          uid));
}


@@ -4157,10 +4347,13 @@ void CpuProfiler::StartProfiling(Handle<String> title) {
}


const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title) {
const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title,
                                             Handle<Value> security_token) {
  IsDeadCheck("v8::CpuProfiler::StopProfiling");
  return reinterpret_cast<const CpuProfile*>(
      i::CpuProfiler::StopProfiling(*Utils::OpenHandle(*title)));
  return reinterpret_cast<const CpuProfile*>(
      i::CpuProfiler::StopProfiling(
          security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token),
          *Utils::OpenHandle(*title)));
}

#endif  // ENABLE_LOGGING_AND_PROFILING
@@ -192,6 +192,10 @@ class Utils {
      v8::internal::Handle<v8::internal::Proxy> obj);
  static inline Local<Message> MessageToLocal(
      v8::internal::Handle<v8::internal::Object> obj);
  static inline Local<StackTrace> StackTraceToLocal(
      v8::internal::Handle<v8::internal::JSArray> obj);
  static inline Local<StackFrame> StackFrameToLocal(
      v8::internal::Handle<v8::internal::JSObject> obj);
  static inline Local<Number> NumberToLocal(
      v8::internal::Handle<v8::internal::Object> obj);
  static inline Local<Integer> IntegerToLocal(

@@ -227,6 +231,10 @@ class Utils {
      OpenHandle(const Function* data);
  static inline v8::internal::Handle<v8::internal::JSObject>
      OpenHandle(const Message* message);
  static inline v8::internal::Handle<v8::internal::JSArray>
      OpenHandle(const StackTrace* stack_trace);
  static inline v8::internal::Handle<v8::internal::JSObject>
      OpenHandle(const StackFrame* stack_frame);
  static inline v8::internal::Handle<v8::internal::Context>
      OpenHandle(const v8::Context* context);
  static inline v8::internal::Handle<v8::internal::SignatureInfo>

@@ -275,6 +283,8 @@ MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
MAKE_TO_LOCAL(ToLocal, TypeSwitchInfo, TypeSwitch)
MAKE_TO_LOCAL(MessageToLocal, Object, Message)
MAKE_TO_LOCAL(StackTraceToLocal, JSArray, StackTrace)
MAKE_TO_LOCAL(StackFrameToLocal, JSObject, StackFrame)
MAKE_TO_LOCAL(NumberToLocal, Object, Number)
MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)

@@ -305,6 +315,8 @@ MAKE_OPEN_HANDLE(Function, JSFunction)
MAKE_OPEN_HANDLE(Message, JSObject)
MAKE_OPEN_HANDLE(Context, Context)
MAKE_OPEN_HANDLE(External, Proxy)
MAKE_OPEN_HANDLE(StackTrace, JSArray)
MAKE_OPEN_HANDLE(StackFrame, JSObject)

#undef MAKE_OPEN_HANDLE
@ -39,6 +39,7 @@

#include "arm/assembler-arm.h"
#include "cpu.h"
#include "debug.h"


namespace v8 {
@ -73,6 +74,11 @@ Address RelocInfo::target_address_address() {
}


int RelocInfo::target_address_size() {
  return Assembler::kExternalTargetSize;
}


void RelocInfo::set_target_address(Address target) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
  Assembler::set_target_address_at(pc_, target);
@ -162,6 +168,26 @@ bool RelocInfo::IsPatchedReturnSequence() {
}


void RelocInfo::Visit(ObjectVisitor* visitor) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    visitor->VisitPointer(target_object_address());
  } else if (RelocInfo::IsCodeTarget(mode)) {
    visitor->VisitCodeTarget(this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    visitor->VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
  } else if (Debug::has_break_points() &&
             RelocInfo::IsJSReturn(mode) &&
             IsPatchedReturnSequence()) {
    visitor->VisitDebugTarget(this);
#endif
  } else if (mode == RelocInfo::RUNTIME_ENTRY) {
    visitor->VisitRuntimeEntry(this);
  }
}


Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
  rm_ = no_reg;
  imm32_ = immediate;
@ -169,13 +195,6 @@ Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
}


Operand::Operand(const char* s) {
  rm_ = no_reg;
  imm32_ = reinterpret_cast<int32_t>(s);
  rmode_ = RelocInfo::EMBEDDED_STRING;
}


Operand::Operand(const ExternalReference& f) {
  rm_ = no_reg;
  imm32_ = reinterpret_cast<int32_t>(f.address());
@ -36,6 +36,8 @@

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "arm/assembler-arm-inl.h"
#include "serialize.h"

@ -106,6 +108,15 @@ void CpuFeatures::Probe() {
const int RelocInfo::kApplyMask = 0;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. Being
  // specially coded on ARM means that it is a movw/movt instruction. We don't
  // generate those yet.
  return false;
}


void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  // Patch the code at the current address with the supplied instructions.
  Instr* pc = reinterpret_cast<Instr*>(pc_);
@ -268,6 +279,20 @@ const Instr kBlxRegMask =
    15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | 3 * B4;
// A mask for the Rd register for push, pop, ldr, str instructions.
const Instr kRdMask = 0x0000f000;
static const int kRdShift = 12;
static const Instr kLdrRegFpOffsetPattern =
    al | B26 | L | Offset | fp.code() * B16;
static const Instr kStrRegFpOffsetPattern =
    al | B26 | Offset | fp.code() * B16;
static const Instr kLdrRegFpNegOffsetPattern =
    al | B26 | L | NegOffset | fp.code() * B16;
static const Instr kStrRegFpNegOffsetPattern =
    al | B26 | NegOffset | fp.code() * B16;
static const Instr kLdrStrInstrTypeMask = 0xffff0000;
static const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
static const Instr kLdrStrOffsetMask = 0x00000fff;
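
A sketch of how these masks carve up an instruction word, using one illustrative encoding (the concrete instruction is an assumption for the example, not taken from this patch):

// For an instruction like  str r3, [fp, #-12]  :
// instr & kLdrStrInstrTypeMask      -> condition/opcode/base-register bits,
//                                      compared against kStrRegFpNegOffsetPattern.
// instr & kLdrStrInstrArgumentMask  -> Rd plus the 12-bit immediate offset.
// instr & kLdrStrOffsetMask         -> the 12-bit immediate offset alone.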

// Spare buffer.
static const int kMinimalBufferSize = 4*KB;
@ -395,6 +420,43 @@ Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
}


Register Assembler::GetRd(Instr instr) {
  Register reg;
  reg.code_ = ((instr & kRdMask) >> kRdShift);
  return reg;
}


bool Assembler::IsPush(Instr instr) {
  return ((instr & ~kRdMask) == kPushRegPattern);
}


bool Assembler::IsPop(Instr instr) {
  return ((instr & ~kRdMask) == kPopRegPattern);
}


bool Assembler::IsStrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
}


bool Assembler::IsLdrRegFpOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
}


bool Assembler::IsStrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
}


bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
  return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
@ -841,20 +903,6 @@ void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t

// Data-processing instructions.

// UBFX <Rd>,<Rn>,#<lsb>,#<width - 1>
// Instruction details available in ARM DDI 0406A, A8-464.
// cond(31-28) | 01111(27-23) | 1(22) | 1(21) | widthm1(20-16) |
//   Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0)
void Assembler::ubfx(Register dst, Register src1, const Operand& src2,
                     const Operand& src3, Condition cond) {
  ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid());
  ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f);
  ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f);
  emit(cond | 0x3F*B21 | src3.imm32_*B16 |
       dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code());
}


void Assembler::and_(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
  addrmod1(cond | 0*B21 | s, src1, dst, src2);
@ -887,15 +935,12 @@ void Assembler::add(Register dst, Register src1, const Operand& src2,
  // str(src, MemOperand(sp, 4, NegPreIndex), al);
  // add(sp, sp, Operand(kPointerSize));
  // Both instructions can be eliminated.
  int pattern_size = 2 * kInstrSize;
  if (FLAG_push_pop_elimination &&
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
  if (can_peephole_optimize(2) &&
      // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
      (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
    pc_ -= 2 * kInstrSize;
    if (FLAG_print_push_pop_elimination) {
    if (FLAG_print_peephole_optimization) {
      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
    }
  }
@ -1047,6 +1092,82 @@ void Assembler::clz(Register dst, Register src, Condition cond) {
}


// Bitfield manipulation instructions.

// Unsigned bit field extract.
// Extracts #width adjacent bits from position #lsb in a register, and
// writes them to the low bits of a destination register.
//   ubfx dst, src, #lsb, #width
void Assembler::ubfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}
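
As a plain-C model of what the emitted UBFX computes (hand-written for illustration, not part of the patch):

uint32_t ubfx_model(uint32_t src, int lsb, int width) {
  // Extract `width` bits starting at bit `lsb` into the low bits.
  uint32_t mask = (width == 32) ? 0xffffffffu : ((1u << width) - 1u);
  return (src >> lsb) & mask;
}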


// Signed bit field extract.
// Extracts #width adjacent bits from position #lsb in a register, and
// writes them to the low bits of a destination register. The extracted
// value is sign extended to fill the destination register.
//   sbfx dst, src, #lsb, #width
void Assembler::sbfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}
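
The signed variant differs only in the final sign extension; an illustrative C model (assuming the usual arithmetic right shift of signed values):

int32_t sbfx_model(uint32_t src, int lsb, int width) {
  // Move the field to the top, then arithmetic-shift it back down,
  // replicating the field's sign bit across the high bits.
  return static_cast<int32_t>(src << (32 - lsb - width)) >> (32 - width);
}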


// Bit field clear.
// Sets #width adjacent bits at position #lsb in the destination register
// to zero, preserving the value of the other bits.
//   bfc dst, #lsb, #width
void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  int msb = lsb + width - 1;
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
}
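
A C sketch of the BFC effect (illustrative model only):

uint32_t bfc_model(uint32_t dst, int lsb, int width) {
  // Zero `width` bits starting at `lsb`; all other bits are preserved.
  uint32_t field = (width == 32) ? 0xffffffffu : ((1u << width) - 1u);
  return dst & ~(field << lsb);
}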


// Bit field insert.
// Inserts #width adjacent bits from the low bits of the source register
// into position #lsb of the destination register.
//   bfi dst, src, #lsb, #width
void Assembler::bfi(Register dst,
                    Register src,
                    int lsb,
                    int width,
                    Condition cond) {
  // v7 and above.
  ASSERT(CpuFeatures::IsSupported(ARMv7));
  ASSERT(!dst.is(pc) && !src.is(pc));
  ASSERT((lsb >= 0) && (lsb <= 31));
  ASSERT((width >= 1) && (width <= (32 - lsb)));
  int msb = lsb + width - 1;
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
       src.code());
}
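
And the matching C model for BFI (again a hand-written illustration):

uint32_t bfi_model(uint32_t dst, uint32_t src, int lsb, int width) {
  // Replace `width` bits of dst at `lsb` with the low `width` bits of src.
  uint32_t field = (width == 32) ? 0xffffffffu : ((1u << width) - 1u);
  return (dst & ~(field << lsb)) | ((src & field) << lsb);
}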


// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
  ASSERT(!dst.is(pc));
@ -1086,20 +1207,171 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
  }
  addrmod2(cond | B26 | L, dst, src);

  // Eliminate pattern: push(r), pop(r)
  //   str(r, MemOperand(sp, 4, NegPreIndex), al)
  //   ldr(r, MemOperand(sp, 4, PostIndex), al)
  // Both instructions can be eliminated.
  int pattern_size = 2 * kInstrSize;
  if (FLAG_push_pop_elimination &&
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
      // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
      instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
    pc_ -= 2 * kInstrSize;
    if (FLAG_print_push_pop_elimination) {
      PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
  // Eliminate pattern: push(ry), pop(rx)
  //   str(ry, MemOperand(sp, 4, NegPreIndex), al)
  //   ldr(rx, MemOperand(sp, 4, PostIndex), al)
  // Both instructions can be eliminated if ry = rx.
  // If ry != rx, a register copy from ry to rx is inserted
  // after eliminating the push and the pop instructions.
  if (can_peephole_optimize(2)) {
    Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
    Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);

    if (IsPush(push_instr) && IsPop(pop_instr)) {
      if ((pop_instr & kRdMask) != (push_instr & kRdMask)) {
        // For consecutive push and pop on different registers,
        // we delete both the push & pop and insert a register move.
        //   push ry, pop rx  -->  mov rx, ry
        Register reg_pushed, reg_popped;
        reg_pushed = GetRd(push_instr);
        reg_popped = GetRd(pop_instr);
        pc_ -= 2 * kInstrSize;
        // Insert a mov instruction, which is better than a pair of push & pop.
        mov(reg_popped, reg_pushed);
        if (FLAG_print_peephole_optimization) {
          PrintF("%x push/pop (diff reg) replaced by a reg move\n",
                 pc_offset());
        }
      } else {
        // For consecutive push and pop on the same register,
        // both the push and the pop can be deleted.
        pc_ -= 2 * kInstrSize;
        if (FLAG_print_peephole_optimization) {
          PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
        }
      }
    }
  }

  if (can_peephole_optimize(2)) {
    Instr str_instr = instr_at(pc_ - 2 * kInstrSize);
    Instr ldr_instr = instr_at(pc_ - 1 * kInstrSize);

    if ((IsStrRegFpOffset(str_instr) &&
         IsLdrRegFpOffset(ldr_instr)) ||
        (IsStrRegFpNegOffset(str_instr) &&
         IsLdrRegFpNegOffset(ldr_instr))) {
      if ((ldr_instr & kLdrStrInstrArgumentMask) ==
          (str_instr & kLdrStrInstrArgumentMask)) {
        // Pattern: Ldr/str same fp+offset, same register.
        //
        // The following:
        //   str rx, [fp, #-12]
        //   ldr rx, [fp, #-12]
        //
        // Becomes:
        //   str rx, [fp, #-12]

        pc_ -= 1 * kInstrSize;
        if (FLAG_print_peephole_optimization) {
          PrintF("%x str/ldr (fp + same offset), same reg\n", pc_offset());
        }
      } else if ((ldr_instr & kLdrStrOffsetMask) ==
                 (str_instr & kLdrStrOffsetMask)) {
        // Pattern: Ldr/str same fp+offset, different register.
        //
        // The following:
        //   str rx, [fp, #-12]
        //   ldr ry, [fp, #-12]
        //
        // Becomes:
        //   str rx, [fp, #-12]
        //   mov ry, rx

        Register reg_stored, reg_loaded;
        reg_stored = GetRd(str_instr);
        reg_loaded = GetRd(ldr_instr);
        pc_ -= 1 * kInstrSize;
        // Insert a mov instruction, which is better than ldr.
        mov(reg_loaded, reg_stored);
        if (FLAG_print_peephole_optimization) {
          PrintF("%x str/ldr (fp + same offset), diff reg\n", pc_offset());
        }
      }
    }
  }

  if (can_peephole_optimize(3)) {
    Instr mem_write_instr = instr_at(pc_ - 3 * kInstrSize);
    Instr ldr_instr = instr_at(pc_ - 2 * kInstrSize);
    Instr mem_read_instr = instr_at(pc_ - 1 * kInstrSize);
    if (IsPush(mem_write_instr) &&
        IsPop(mem_read_instr)) {
      if ((IsLdrRegFpOffset(ldr_instr) ||
           IsLdrRegFpNegOffset(ldr_instr))) {
        if ((mem_write_instr & kRdMask) ==
            (mem_read_instr & kRdMask)) {
          // Pattern: push & pop from/to same register,
          // with a fp+offset ldr in between.
          //
          // The following:
          //   str rx, [sp, #-4]!
          //   ldr rz, [fp, #-24]
          //   ldr rx, [sp], #+4
          //
          // Becomes:
          //   if (rx == rz)
          //     delete all
          //   else
          //     ldr rz, [fp, #-24]

          if ((mem_write_instr & kRdMask) == (ldr_instr & kRdMask)) {
            pc_ -= 3 * kInstrSize;
          } else {
            pc_ -= 3 * kInstrSize;
            // Reinsert back the ldr rz.
            emit(ldr_instr);
          }
          if (FLAG_print_peephole_optimization) {
            PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
          }
        } else {
          // Pattern: push & pop from/to different registers
          // with a fp+offset ldr in between.
          //
          // The following:
          //   str rx, [sp, #-4]!
          //   ldr rz, [fp, #-24]
          //   ldr ry, [sp], #+4
          //
          // Becomes:
          //   if (ry == rz)
          //     mov ry, rx
          //   else if (rx != rz)
          //     ldr rz, [fp, #-24]
          //     mov ry, rx
          //   else if ((ry != rz) || (rx == rz))
          //     mov ry, rx
          //     ldr rz, [fp, #-24]

          Register reg_pushed, reg_popped;
          if ((mem_read_instr & kRdMask) == (ldr_instr & kRdMask)) {
            reg_pushed = GetRd(mem_write_instr);
            reg_popped = GetRd(mem_read_instr);
            pc_ -= 3 * kInstrSize;
            mov(reg_popped, reg_pushed);
          } else if ((mem_write_instr & kRdMask)
                     != (ldr_instr & kRdMask)) {
            reg_pushed = GetRd(mem_write_instr);
            reg_popped = GetRd(mem_read_instr);
            pc_ -= 3 * kInstrSize;
            emit(ldr_instr);
            mov(reg_popped, reg_pushed);
          } else if (((mem_read_instr & kRdMask)
                      != (ldr_instr & kRdMask)) ||
                     ((mem_write_instr & kRdMask)
                      == (ldr_instr & kRdMask))) {
            reg_pushed = GetRd(mem_write_instr);
            reg_popped = GetRd(mem_read_instr);
            pc_ -= 3 * kInstrSize;
            mov(reg_popped, reg_pushed);
            emit(ldr_instr);
          }
          if (FLAG_print_peephole_optimization) {
            PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
          }
        }
      }
    }
  }
}

@ -1111,16 +1383,13 @@ void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
  // Eliminate pattern: pop(), push(r)
  //   add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
  //   -> str r, [sp, 0], al
  int pattern_size = 2 * kInstrSize;
  if (FLAG_push_pop_elimination &&
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
  if (can_peephole_optimize(2) &&
      // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
      instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
    pc_ -= 2 * kInstrSize;
    emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
    if (FLAG_print_push_pop_elimination) {
    if (FLAG_print_peephole_optimization) {
      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
    }
  }
@ -1157,6 +1426,27 @@ void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
}


void Assembler::ldrd(Register dst1, Register dst2,
                     const MemOperand& src, Condition cond) {
  ASSERT(CpuFeatures::IsEnabled(ARMv7));
  ASSERT(src.rm().is(no_reg));
  ASSERT(!dst1.is(lr));  // r14.
  ASSERT_EQ(0, dst1.code() % 2);
  ASSERT_EQ(dst1.code() + 1, dst2.code());
  addrmod3(cond | B7 | B6 | B4, dst1, src);
}


void Assembler::strd(Register src1, Register src2,
                     const MemOperand& dst, Condition cond) {
  ASSERT(dst.rm().is(no_reg));
  ASSERT(!src1.is(lr));  // r14.
  ASSERT_EQ(0, src1.code() % 2);
  ASSERT_EQ(src1.code() + 1, src2.code());
  ASSERT(CpuFeatures::IsEnabled(ARMv7));
  addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
}
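
An illustrative call site (hypothetical registers and offset) that satisfies the ASSERTs above: an even/odd register pair and an immediate-offset MemOperand:

// Store r4:r5 as a doubleword, then load it back (sketch only).
strd(r4, r5, MemOperand(fp, -16));
ldrd(r4, r5, MemOperand(fp, -16));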

// Load/Store multiple instructions.
void Assembler::ldm(BlockAddrMode am,
                    Register base,
@ -1187,26 +1477,6 @@ void Assembler::stm(BlockAddrMode am,
}


// Semaphore instructions.
void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
  ASSERT(!dst.is(base) && !src.is(base));
  emit(cond | P | base.code()*B16 | dst.code()*B12 |
       B7 | B4 | src.code());
}


void Assembler::swpb(Register dst,
                     Register src,
                     Register base,
                     Condition cond) {
  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
  ASSERT(!dst.is(base) && !src.is(base));
  emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
       B7 | B4 | src.code());
}


// Exception-generating instructions and debugging support.
void Assembler::stop(const char* msg) {
#ifndef __arm__
@ -1750,34 +2020,6 @@ void Assembler::nop(int type) {
}


void Assembler::lea(Register dst,
                    const MemOperand& x,
                    SBit s,
                    Condition cond) {
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    if ((am & P) == 0)  // post indexing
      mov(dst, Operand(x.rn_), s, cond);
    else if ((am & U) == 0)  // negative indexing
      sub(dst, x.rn_, Operand(x.offset_), s, cond);
    else
      add(dst, x.rn_, Operand(x.offset_), s, cond);
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
    // register offset; the constructors make sure that both shift_imm_
    // and shift_op_ are initialized.
    ASSERT(!x.rm_.is(pc));
    if ((am & P) == 0)  // post indexing
      mov(dst, Operand(x.rn_), s, cond);
    else if ((am & U) == 0)  // negative indexing
      sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
    else
      add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
  }
}


bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
  uint32_t dummy1;
  uint32_t dummy2;
@ -2033,3 +2275,5 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {

} } // namespace v8::internal

#endif // V8_TARGET_ARCH_ARM
@ -80,6 +80,11 @@ struct Register {
    return 1 << code_;
  }

  void set_code(int code) {
    code_ = code;
    ASSERT(is_valid());
  }

  // Unfortunately we can't make this private in a struct.
  int code_;
};
@ -448,6 +453,19 @@ class MemOperand BASE_EMBEDDED {
  explicit MemOperand(Register rn, Register rm,
                      ShiftOp shift_op, int shift_imm, AddrMode am = Offset);

  void set_offset(int32_t offset) {
    ASSERT(rm_.is(no_reg));
    offset_ = offset;
  }

  uint32_t offset() {
    ASSERT(rm_.is(no_reg));
    return offset_;
  }

  Register rn() const { return rn_; }
  Register rm() const { return rm_; }

 private:
  Register rn_;  // base
  Register rm_;  // register offset
@ -653,8 +671,6 @@ class Assembler : public Malloced {
  void blx(Label* L) { blx(branch_offset(L, false)); }  // v5 and above

  // Data-processing instructions
  void ubfx(Register dst, Register src1, const Operand& src2,
            const Operand& src3, Condition cond = al);

  void and_(Register dst, Register src1, const Operand& src2,
            SBit s = LeaveCC, Condition cond = al);
@ -674,6 +690,10 @@ class Assembler : public Malloced {

  void add(Register dst, Register src1, const Operand& src2,
           SBit s = LeaveCC, Condition cond = al);
  void add(Register dst, Register src1, Register src2,
           SBit s = LeaveCC, Condition cond = al) {
    add(dst, src1, Operand(src2), s, cond);
  }

  void adc(Register dst, Register src1, const Operand& src2,
           SBit s = LeaveCC, Condition cond = al);
@ -741,6 +761,19 @@ class Assembler : public Malloced {

  void clz(Register dst, Register src, Condition cond = al);  // v5 and above

  // Bitfield manipulation instructions. v7 and above.

  void ubfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);

  void sbfx(Register dst, Register src, int lsb, int width,
            Condition cond = al);

  void bfc(Register dst, int lsb, int width, Condition cond = al);

  void bfi(Register dst, Register src, int lsb, int width,
           Condition cond = al);

  // Status register access instructions

  void mrs(Register dst, SRegister s, Condition cond = al);
@ -755,15 +788,17 @@ class Assembler : public Malloced {
  void strh(Register src, const MemOperand& dst, Condition cond = al);
  void ldrsb(Register dst, const MemOperand& src, Condition cond = al);
  void ldrsh(Register dst, const MemOperand& src, Condition cond = al);
  void ldrd(Register dst1,
            Register dst2,
            const MemOperand& src, Condition cond = al);
  void strd(Register src1,
            Register src2,
            const MemOperand& dst, Condition cond = al);

  // Load/Store multiple instructions
  void ldm(BlockAddrMode am, Register base, RegList dst, Condition cond = al);
  void stm(BlockAddrMode am, Register base, RegList src, Condition cond = al);

  // Semaphore instructions
  void swp(Register dst, Register src, Register base, Condition cond = al);
  void swpb(Register dst, Register src, Register base, Condition cond = al);

  // Exception-generating instructions and debugging support
  void stop(const char* msg);

@ -910,10 +945,6 @@ class Assembler : public Malloced {
    add(sp, sp, Operand(kPointerSize));
  }

  // Load effective address of memory operand x into register dst
  void lea(Register dst, const MemOperand& x,
           SBit s = LeaveCC, Condition cond = al);

  // Jump unconditionally to given label.
  void jmp(Label* L) { b(L, al); }

@ -962,6 +993,12 @@ class Assembler : public Malloced {
  int current_position() const { return current_position_; }
  int current_statement_position() const { return current_statement_position_; }

  bool can_peephole_optimize(int instructions) {
    if (!FLAG_peephole_optimization) return false;
    if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
    return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
  }
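
The emitters check this guard before pattern-matching the trailing instructions, so the peephole never reaches back across a bound label or into code that relocation info points at. A minimal sketch of the call pattern, modeled on the push/pop peephole in Assembler::ldr shown earlier:

// Illustrative call site:
if (can_peephole_optimize(2) &&
    IsPush(instr_at(pc_ - 2 * kInstrSize)) &&
    IsPop(instr_at(pc_ - 1 * kInstrSize))) {
  // Safe to rewrite the last two emitted instructions.
}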

  // Read/patch instructions
  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
  static void instr_at_put(byte* pc, Instr instr) {
@ -973,6 +1010,13 @@ class Assembler : public Malloced {
  static bool IsLdrRegisterImmediate(Instr instr);
  static int GetLdrRegisterImmediateOffset(Instr instr);
  static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
  static Register GetRd(Instr instr);
  static bool IsPush(Instr instr);
  static bool IsPop(Instr instr);
  static bool IsStrRegFpOffset(Instr instr);
  static bool IsLdrRegFpOffset(Instr instr);
  static bool IsStrRegFpNegOffset(Instr instr);
  static bool IsLdrRegFpNegOffset(Instr instr);


 protected:
@ -27,6 +27,8 @@

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "codegen-inl.h"
#include "debug.h"
#include "runtime.h"
@ -107,7 +109,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
  // Allocate the JSArray object together with space for a fixed array with the
  // requested elements.
  int size = JSArray::kSize + FixedArray::SizeFor(initial_capacity);
  __ AllocateInNewSpace(size / kPointerSize,
  __ AllocateInNewSpace(size,
                        result,
                        scratch2,
                        scratch3,
@ -130,13 +132,13 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
  // of the JSArray.
  // result: JSObject
  // scratch2: start of next object
  __ lea(scratch1, MemOperand(result, JSArray::kSize));
  __ add(scratch1, result, Operand(JSArray::kSize));
  __ str(scratch1, FieldMemOperand(result, JSArray::kElementsOffset));

  // Clear the heap tag on the elements array.
  __ and_(scratch1, scratch1, Operand(~kHeapObjectTagMask));

  // Initialize the FixedArray and fill it with holes. FixedArray length is not
  // Initialize the FixedArray and fill it with holes. FixedArray length is
  // stored as a smi.
  // result: JSObject
  // scratch1: elements array (untagged)

@ -144,7 +146,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
  __ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
  __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
  __ mov(scratch3, Operand(initial_capacity));
  __ mov(scratch3, Operand(Smi::FromInt(initial_capacity)));
  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
  __ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));

@ -191,7 +193,7 @@ static void AllocateJSArray(MacroAssembler* masm,
  // keeps the code below free of special casing for the empty array.
  int size = JSArray::kSize +
             FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
  __ AllocateInNewSpace(size / kPointerSize,
  __ AllocateInNewSpace(size,
                        result,
                        elements_array_end,
                        scratch1,
@ -208,12 +210,13 @@ static void AllocateJSArray(MacroAssembler* masm,
  __ add(elements_array_end,
         elements_array_end,
         Operand(array_size, ASR, kSmiTagSize));
  __ AllocateInNewSpace(elements_array_end,
                        result,
                        scratch1,
                        scratch2,
                        gc_required,
                        TAG_OBJECT);
  __ AllocateInNewSpace(
      elements_array_end,
      result,
      scratch1,
      scratch2,
      gc_required,
      static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));

  // Allocated the JSArray. Now initialize the fields except for the elements
  // array.
@ -240,23 +243,23 @@ static void AllocateJSArray(MacroAssembler* masm,
  __ and_(elements_array_storage,
          elements_array_storage,
          Operand(~kHeapObjectTagMask));
  // Initialize the fixed array and fill it with holes. FixedArray length is not
  // Initialize the fixed array and fill it with holes. FixedArray length is
  // stored as a smi.
  // result: JSObject
  // elements_array_storage: elements array (untagged)
  // array_size: size of array (smi)
  ASSERT(kSmiTag == 0);
  __ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
  ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
  __ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
  // Convert array_size from smi to value.
  __ mov(array_size,
         Operand(array_size, ASR, kSmiTagSize));
  ASSERT(kSmiTag == 0);
  __ tst(array_size, array_size);
  // Length of the FixedArray is the number of pre-allocated elements if
  // the actual JSArray has length 0 and the size of the JSArray for non-empty
  // JSArrays. The length of a FixedArray is not stored as a smi.
  __ mov(array_size, Operand(JSArray::kPreallocatedArrayElements), LeaveCC, eq);
  // JSArrays. The length of a FixedArray is stored as a smi.
  __ mov(array_size,
         Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)),
         LeaveCC,
         eq);
  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
  __ str(array_size,
         MemOperand(elements_array_storage, kPointerSize, PostIndex));

@ -264,10 +267,11 @@ static void AllocateJSArray(MacroAssembler* masm,
  // Calculate elements array and elements array end.
  // result: JSObject
  // elements_array_storage: elements array element storage
  // array_size: size of elements array
  // array_size: smi-tagged size of elements array
  ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
  __ add(elements_array_end,
         elements_array_storage,
         Operand(array_size, LSL, kPointerSizeLog2));
         Operand(array_size, LSL, kPointerSizeLog2 - kSmiTagSize));
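
The shortened shift folds the smi untag into the address scaling. A worked case, assuming the usual 32-bit constants kSmiTagSize == 1 and kPointerSizeLog2 == 2, with an illustrative length n == 8:

// Tagged length:       smi = n << kSmiTagSize = 16.
// Untag, then scale:   (16 >> 1) << 2                          = 32 bytes.
// Folded form above:   16 << (kPointerSizeLog2 - kSmiTagSize)  = 32 bytes.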

  // Fill the allocated FixedArray with the hole value if requested.
  // result: JSObject
@ -540,7 +544,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,

  // Load the initial map and verify that it is in fact a map.
  // r1: constructor function
  // r7: undefined
  // r7: undefined value
  __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
  __ tst(r2, Operand(kSmiTagMask));
  __ b(eq, &rt_call);
@ -552,16 +556,16 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
  // instance type would be JS_FUNCTION_TYPE.
  // r1: constructor function
  // r2: initial map
  // r7: undefined
  // r7: undefined value
  __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
  __ b(eq, &rt_call);

  // Now allocate the JSObject on the heap.
  // r1: constructor function
  // r2: initial map
  // r7: undefined
  // r7: undefined value
  __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
  __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, NO_ALLOCATION_FLAGS);
  __ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);

  // Allocated the JSObject, now initialize the fields. Map is set to initial
  // map and properties and elements are set to empty fixed array.
@ -569,7 +573,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
  // r2: initial map
  // r3: object size
  // r4: JSObject (not tagged)
  // r7: undefined
  // r7: undefined value
  __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
  __ mov(r5, r4);
  ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
@ -585,7 +589,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
  // r3: object size (in words)
  // r4: JSObject (not tagged)
  // r5: First in-object property of JSObject (not tagged)
  // r7: undefined
  // r7: undefined value
  __ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
  ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
  { Label loop, entry;
@ -608,7 +612,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
  // r1: constructor function
  // r4: JSObject
  // r5: start of next object (not tagged)
  // r7: undefined
  // r7: undefined value
  __ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
  // The field instance sizes contains both pre-allocated property fields and
  // in-object properties.
@ -630,27 +634,29 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
  // r3: number of elements in properties array
  // r4: JSObject
  // r5: start of next object
  // r7: undefined
  // r7: undefined value
  __ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
  __ AllocateInNewSpace(r0,
                        r5,
                        r6,
                        r2,
                        &undo_allocation,
                        RESULT_CONTAINS_TOP);
  __ AllocateInNewSpace(
      r0,
      r5,
      r6,
      r2,
      &undo_allocation,
      static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));

  // Initialize the FixedArray.
  // r1: constructor
  // r3: number of elements in properties array
  // r4: JSObject
  // r5: FixedArray (not tagged)
  // r7: undefined
  // r7: undefined value
  __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
  __ mov(r2, r5);
  ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
  __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
  ASSERT_EQ(1 * kPointerSize, Array::kLengthOffset);
  __ str(r3, MemOperand(r2, kPointerSize, PostIndex));
  ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
  __ mov(r0, Operand(r3, LSL, kSmiTagSize));
  __ str(r0, MemOperand(r2, kPointerSize, PostIndex));

  // Initialize the fields to undefined.
  // r1: constructor function
@ -1043,6 +1049,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
  __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(r2,
         FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
  __ mov(r2, Operand(r2, ASR, kSmiTagSize));
  __ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
  __ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ cmp(r2, r0);  // Check formal and actual parameter counts.
@ -1309,3 +1316,5 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
#undef __

} } // namespace v8::internal

#endif // V8_TARGET_ARCH_ARM
@ -36,30 +36,6 @@ namespace internal {

#define __ ACCESS_MASM(masm_)

void CodeGenerator::LoadConditionAndSpill(Expression* expression,
                                          JumpTarget* true_target,
                                          JumpTarget* false_target,
                                          bool force_control) {
  LoadCondition(expression, true_target, false_target, force_control);
}


void CodeGenerator::LoadAndSpill(Expression* expression) {
  ASSERT(VirtualFrame::SpilledScope::is_spilled());
  Load(expression);
}


void CodeGenerator::VisitAndSpill(Statement* statement) {
  Visit(statement);
}


void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
  VisitStatements(statements);
}


// Platform-specific inline functions.

void DeferredCode::Jump() { __ jmp(&entry_label_); }
File diff suppressed because it is too large
@ -29,6 +29,7 @@
#define V8_ARM_CODEGEN_ARM_H_

#include "ic-inl.h"
#include "ast.h"

namespace v8 {
namespace internal {
@ -36,6 +37,7 @@ namespace internal {
// Forward declarations
class CompilationInfo;
class DeferredCode;
class JumpTarget;
class RegisterAllocator;
class RegisterFile;

@ -99,6 +101,11 @@ class Reference BASE_EMBEDDED {
  // is popped from beneath it (unloaded).
  void SetValue(InitState init_state);

  // This is in preparation for something that uses the reference on the stack.
  // If we need this reference afterwards, then dup it now. Otherwise mark
  // it as used.
  inline void DupIfPersist();

 private:
  CodeGenerator* cgen_;
  Expression* expression_;
@ -217,6 +224,10 @@ class CodeGenerator: public AstVisitor {
  // expected arguments. Otherwise return -1.
  static int InlineRuntimeCallArgumentsCount(Handle<String> name);

  // Constants related to patching of inlined load/store.
  static const int kInlinedKeyedLoadInstructionsAfterPatch = 19;
  static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;

 private:
  // Construction/Destruction
  explicit CodeGenerator(MacroAssembler* masm);
@ -246,16 +257,6 @@ class CodeGenerator: public AstVisitor {
  AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT

  // Visit a statement and then spill the virtual frame if control flow can
  // reach the end of the statement (ie, it does not exit via break,
  // continue, return, or throw). This function is used temporarily while
  // the code generator is being transformed.
  inline void VisitAndSpill(Statement* statement);

  // Visit a list of statements and then spill the virtual frame if control
  // flow can reach the end of the list.
  inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);

  // Main code generation function
  void Generate(CompilationInfo* info);

@ -293,22 +294,10 @@ class CodeGenerator: public AstVisitor {
  void LoadGlobal();
  void LoadGlobalReceiver(Register scratch);

  // Generate code to push the value of an expression on top of the frame
  // and then spill the frame fully to memory. This function is used
  // temporarily while the code generator is being transformed.
  inline void LoadAndSpill(Expression* expression);

  // Call LoadCondition and then spill the virtual frame unless control flow
  // cannot reach the end of the expression (ie, by emitting only
  // unconditional jumps to the control targets).
  inline void LoadConditionAndSpill(Expression* expression,
                                    JumpTarget* true_target,
                                    JumpTarget* false_target,
                                    bool force_control);

  // Read a value from a slot and leave it on top of the expression stack.
  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);

  // Store the value on top of the stack to a slot.
  void StoreToSlot(Slot* slot, InitState init_state);
@ -338,6 +327,15 @@ class CodeGenerator: public AstVisitor {
                                       TypeofState typeof_state,
                                       JumpTarget* slow);

  // Support for loading from local/global variables and arguments
  // whose location is known unless they are shadowed by
  // eval-introduced bindings. Generates no code for unsupported slot
  // types and therefore expects to fall through to the slow jump target.
  void EmitDynamicLoadFromSlotFastCase(Slot* slot,
                                       TypeofState typeof_state,
                                       JumpTarget* slow,
                                       JumpTarget* done);

  // Special code for typeof expressions: Unfortunately, we must
  // be careful when loading the expression in 'typeof'
  // expressions. We are not allowed to throw reference errors for
@ -429,10 +427,13 @@ class CodeGenerator: public AstVisitor {
  void GenerateSetValueOf(ZoneList<Expression*>* args);

  // Fast support for charCodeAt(n).
  void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
  void GenerateStringCharCodeAt(ZoneList<Expression*>* args);

  // Fast support for string.charAt(n) and string[n].
  void GenerateCharFromCode(ZoneList<Expression*>* args);
  void GenerateStringCharFromCode(ZoneList<Expression*>* args);

  // Fast support for string.charAt(n) and string[n].
  void GenerateStringCharAt(ZoneList<Expression*>* args);

  // Fast support for object equality testing.
  void GenerateObjectEquals(ZoneList<Expression*>* args);
@ -677,38 +678,6 @@ class GenericBinaryOpStub : public CodeStub {

class StringHelper : public AllStatic {
 public:
  // Generates fast code for getting a char code out of a string
  // object at the given index. May bail out for four reasons (in the
  // listed order):
  //  * Receiver is not a string (receiver_not_string label).
  //  * Index is not a smi (index_not_smi label).
  //  * Index is out of range (index_out_of_range).
  //  * Some other reason (slow_case label). In this case it's
  //    guaranteed that the above conditions are not violated,
  //    e.g. it's safe to assume the receiver is a string and the
  //    index is a non-negative smi < length.
  // When successful, object, index, and scratch are clobbered.
  // Otherwise, scratch and result are clobbered.
  static void GenerateFastCharCodeAt(MacroAssembler* masm,
                                     Register object,
                                     Register index,
                                     Register scratch,
                                     Register result,
                                     Label* receiver_not_string,
                                     Label* index_not_smi,
                                     Label* index_out_of_range,
                                     Label* slow_case);

  // Generates code for creating a one-char string from the given char
  // code. May do a runtime call, so any register can be clobbered
  // and, if the given invoke flag specifies a call, an internal frame
  // is required. In tail call mode the result must be r0 register.
  static void GenerateCharFromCode(MacroAssembler* masm,
                                   Register code,
                                   Register scratch,
                                   Register result,
                                   InvokeFlag flag);

  // Generate code for copying characters using a simple loop. This should only
  // be used in places where the number of characters is small and the
  // additional setup and checking in GenerateCopyCharactersLong adds too much
@ -27,6 +27,8 @@

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "constants-arm.h"


@ -128,3 +130,5 @@ int Registers::Number(const char* name) {


} } // namespace assembler::arm

#endif // V8_TARGET_ARCH_ARM
@ -66,10 +66,19 @@
# define CAN_USE_THUMB_INSTRUCTIONS 1
#endif

// Simulator should support ARM5 instructions.
// Simulator should support ARM5 instructions and unaligned access by default.
#if !defined(__arm__)
# define CAN_USE_ARMV5_INSTRUCTIONS 1
# define CAN_USE_THUMB_INSTRUCTIONS 1

# ifndef CAN_USE_UNALIGNED_ACCESSES
#  define CAN_USE_UNALIGNED_ACCESSES 1
# endif

#endif

#if CAN_USE_UNALIGNED_ACCESSES
#define V8_TARGET_CAN_READ_UNALIGNED 1
#endif

// Using blx may yield better code, so use it when required or when available
@ -32,6 +32,8 @@

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "cpu.h"
#include "macro-assembler.h"

@ -136,3 +138,5 @@ void CPU::DebugBreak() {
}

} } // namespace v8::internal

#endif // V8_TARGET_ARCH_ARM
@ -27,6 +27,8 @@

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "codegen-inl.h"
#include "debug.h"

@ -170,10 +172,11 @@ void Debug::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {

void Debug::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r1     : key
  //  -- r2     : receiver
  //  -- lr     : return address
  //  -- sp[0]  : key
  //  -- sp[4]  : receiver
  Generate_DebugBreakCallHelper(masm, 0);
  Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit());
}

@ -237,3 +240,5 @@ const int Debug::kFrameDropperFrameSize = -1;
#endif  // ENABLE_DEBUGGER_SUPPORT

} } // namespace v8::internal

#endif // V8_TARGET_ARCH_ARM
@ -56,6 +56,8 @@

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "constants-arm.h"
#include "disasm.h"
#include "macro-assembler.h"
@ -399,6 +401,20 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
      PrintCondition(instr);
      return 4;
    }
    case 'f': {  // 'f: bitfield instructions - v7 and above.
      uint32_t lsbit = instr->Bits(11, 7);
      uint32_t width = instr->Bits(20, 16) + 1;
      if (instr->Bit(21) == 0) {
        // BFC/BFI:
        // Bits 20-16 represent most-significant bit. Convert to width.
        width -= lsbit;
        ASSERT(width > 0);
      }
      ASSERT((width + lsbit) <= 32);
      out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                           "#%d, #%d", lsbit, width);
      return 1;
    }
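
A worked decode under the encoding emitted by Assembler::ubfx earlier in this change (the concrete operand values are illustrative):

// ubfx r0, r1, #4, #8
//   Bits(11, 7)  == 4  -> lsbit = 4
//   Bits(20, 16) == 7  -> width = 7 + 1 = 8
//   Bit(21)      == 1  -> no BFC/BFI adjustment; the option prints "#4, #8".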
    case 'h': {  // 'h: halfword operation for extra loads and stores
      if (instr->HasH()) {
        Print("h");

@ -418,6 +434,12 @@
      ASSERT(STRING_STARTS_WITH(format, "memop"));
      if (instr->HasL()) {
        Print("ldr");
      } else if ((instr->Bits(27, 25) == 0) && (instr->Bit(20) == 0)) {
        if (instr->Bits(7, 4) == 0xf) {
          Print("strd");
        } else {
          Print("ldrd");
        }
      } else {
        Print("str");
      }
@ -438,16 +460,6 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
        out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                             "%d", instr->Offset12Field());
        return 5;
      } else if ((format[3] == '1') && (format[4] == '6')) {
        ASSERT(STRING_STARTS_WITH(format, "off16to20"));
        out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                             "%d", instr->Bits(20, 16) + 1);
        return 9;
      } else if (format[3] == '7') {
        ASSERT(STRING_STARTS_WITH(format, "off7to11"));
        out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
                                             "%d", instr->ShiftAmountField());
        return 8;
      } else if (format[3] == '0') {
        // 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0.
        ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19"));
@ -614,6 +626,47 @@ void Decoder::DecodeType01(Instr* instr) {
      } else {
        Unknown(instr);  // not used by V8
      }
    } else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xd) == 0xd)) {
      // ldrd, strd
      switch (instr->PUField()) {
        case 0: {
          if (instr->Bit(22) == 0) {
            Format(instr, "'memop'cond's 'rd, ['rn], -'rm");
          } else {
            Format(instr, "'memop'cond's 'rd, ['rn], #-'off8");
          }
          break;
        }
        case 1: {
          if (instr->Bit(22) == 0) {
            Format(instr, "'memop'cond's 'rd, ['rn], +'rm");
          } else {
            Format(instr, "'memop'cond's 'rd, ['rn], #+'off8");
          }
          break;
        }
        case 2: {
          if (instr->Bit(22) == 0) {
            Format(instr, "'memop'cond's 'rd, ['rn, -'rm]'w");
          } else {
            Format(instr, "'memop'cond's 'rd, ['rn, #-'off8]'w");
          }
          break;
        }
        case 3: {
          if (instr->Bit(22) == 0) {
            Format(instr, "'memop'cond's 'rd, ['rn, +'rm]'w");
          } else {
            Format(instr, "'memop'cond's 'rd, ['rn, #+'off8]'w");
          }
          break;
        }
        default: {
          // The PU field is a 2-bit field.
          UNREACHABLE();
          break;
        }
      }
    } else {
      // extra load/store instructions
      switch (instr->PUField()) {
@ -833,10 +886,26 @@ void Decoder::DecodeType3(Instr* instr) {
    case 3: {
      if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
        uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
        uint32_t lsbit = static_cast<uint32_t>(instr->ShiftAmountField());
        uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
        uint32_t msbit = widthminus1 + lsbit;
        if (msbit <= 31) {
          Format(instr, "ubfx'cond 'rd, 'rm, #'off7to11, #'off16to20");
          if (instr->Bit(22)) {
            Format(instr, "ubfx'cond 'rd, 'rm, 'f");
          } else {
            Format(instr, "sbfx'cond 'rd, 'rm, 'f");
          }
        } else {
          UNREACHABLE();
        }
      } else if (!instr->HasW() && (instr->Bits(6, 4) == 0x1)) {
        uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
        uint32_t msbit = static_cast<uint32_t>(instr->Bits(20, 16));
        if (msbit >= lsbit) {
          if (instr->RmField() == 15) {
            Format(instr, "bfc'cond 'rd, 'f");
          } else {
            Format(instr, "bfi'cond 'rd, 'rm, 'f");
          }
        } else {
          UNREACHABLE();
        }
@ -1309,3 +1378,5 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {


} // namespace disasm

#endif // V8_TARGET_ARCH_ARM
@ -27,6 +27,8 @@

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "codegen-inl.h"
#include "fast-codegen.h"
#include "scopes.h"

@ -236,3 +238,5 @@ void FastCodeGenerator::Generate(CompilationInfo* compilation_info) {


} } // namespace v8::internal

#endif // V8_TARGET_ARCH_ARM
@ -27,12 +27,10 @@

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "frames-inl.h"
#ifdef V8_ARM_VARIANT_THUMB
#include "arm/assembler-thumb2-inl.h"
#else
#include "arm/assembler-arm-inl.h"
#endif


namespace v8 {

@ -121,3 +119,5 @@ Address InternalFrame::GetCallerStackPointer() const {


} } // namespace v8::internal

#endif // V8_TARGET_ARCH_ARM
File diff suppressed because it is too large

File diff suppressed because it is too large
@ -27,6 +27,8 @@

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "codegen-inl.h"
#include "jump-target-inl.h"
#include "register-allocator-inl.h"

@ -47,28 +49,15 @@ void JumpTarget::DoJump() {
  // which are still live in the C++ code.
  ASSERT(cgen()->HasValidEntryRegisters());

  if (is_bound()) {
    // Backward jump. There is already a frame expectation at the target.
    ASSERT(direction_ == BIDIRECTIONAL);
    cgen()->frame()->MergeTo(entry_frame_);
  if (entry_frame_set_) {
    // There is already a frame expectation at the target.
    cgen()->frame()->MergeTo(&entry_frame_);
    cgen()->DeleteFrame();
  } else {
    // Use the current frame as the expected one at the target if necessary.
    if (entry_frame_ == NULL) {
      entry_frame_ = cgen()->frame();
      RegisterFile empty;
      cgen()->SetFrame(NULL, &empty);
    } else {
      cgen()->frame()->MergeTo(entry_frame_);
      cgen()->DeleteFrame();
    }

    // The predicate is_linked() should be made true. Its implementation
    // detects the presence of a frame pointer in the reaching_frames_ list.
    if (!is_linked()) {
      reaching_frames_.Add(NULL);
      ASSERT(is_linked());
    }
    // Clone the current frame to use as the expected one at the target.
    set_entry_frame(cgen()->frame());
    RegisterFile empty;
    cgen()->SetFrame(NULL, &empty);
  }
  __ jmp(&entry_label_);
}

@ -77,25 +66,18 @@ void JumpTarget::DoJump() {
void JumpTarget::DoBranch(Condition cc, Hint ignored) {
  ASSERT(cgen()->has_valid_frame());

  if (is_bound()) {
    ASSERT(direction_ == BIDIRECTIONAL);
  if (entry_frame_set_) {
    // Backward branch. We have an expected frame to merge to on the
    // backward edge.
    cgen()->frame()->MergeTo(entry_frame_);
    cgen()->frame()->MergeTo(&entry_frame_, cc);
  } else {
    // Clone the current frame to use as the expected one at the target if
    // necessary.
    if (entry_frame_ == NULL) {
      entry_frame_ = new VirtualFrame(cgen()->frame());
    }
    // The predicate is_linked() should be made true. Its implementation
    // detects the presence of a frame pointer in the reaching_frames_ list.
    if (!is_linked()) {
      reaching_frames_.Add(NULL);
      ASSERT(is_linked());
    }
    // Clone the current frame to use as the expected one at the target.
    set_entry_frame(cgen()->frame());
  }
  __ b(cc, &entry_label_);
  if (cc == al) {
    cgen()->DeleteFrame();
  }
}

@ -113,15 +95,10 @@ void JumpTarget::Call() {

  // Calls are always 'forward' so we use a copy of the current frame (plus
  // one for a return address) as the expected frame.
  ASSERT(entry_frame_ == NULL);
  VirtualFrame* target_frame = new VirtualFrame(cgen()->frame());
  target_frame->Adjust(1);
  entry_frame_ = target_frame;

  // The predicate is_linked() should now be made true. Its implementation
  // detects the presence of a frame pointer in the reaching_frames_ list.
  reaching_frames_.Add(NULL);
  ASSERT(is_linked());
  ASSERT(!entry_frame_set_);
  VirtualFrame target_frame = *cgen()->frame();
  target_frame.Adjust(1);
  set_entry_frame(&target_frame);

  __ bl(&entry_label_);
}
@ -136,77 +113,27 @@ void JumpTarget::DoBind() {

  if (cgen()->has_valid_frame()) {
    // If there is a current frame we can use it on the fall through.
    if (entry_frame_ == NULL) {
      entry_frame_ = new VirtualFrame(cgen()->frame());
    if (!entry_frame_set_) {
      entry_frame_ = *cgen()->frame();
      entry_frame_set_ = true;
    } else {
      ASSERT(cgen()->frame()->Equals(entry_frame_));
      cgen()->frame()->MergeTo(&entry_frame_);
    }
  } else {
    // If there is no current frame we must have an entry frame which we can
    // copy.
    ASSERT(entry_frame_ != NULL);
    ASSERT(entry_frame_set_);
    RegisterFile empty;
    cgen()->SetFrame(new VirtualFrame(entry_frame_), &empty);
  }

  // The predicate is_linked() should be made false. Its implementation
  // detects the presence (or absence) of frame pointers in the
  // reaching_frames_ list. If we inserted a bogus frame to make
  // is_linked() true, remove it now.
  if (is_linked()) {
    reaching_frames_.Clear();
    cgen()->SetFrame(new VirtualFrame(&entry_frame_), &empty);
  }

  __ bind(&entry_label_);
}


void BreakTarget::Jump() {
  // On ARM we do not currently emit merge code for jumps, so we need to do
  // it explicitly here. The only merging necessary is to drop extra
  // statement state from the stack.
  ASSERT(cgen()->has_valid_frame());
  int count = cgen()->frame()->height() - expected_height_;
  cgen()->frame()->Drop(count);
  DoJump();
}


void BreakTarget::Jump(Result* arg) {
  UNIMPLEMENTED();
}


void BreakTarget::Bind() {
#ifdef DEBUG
  // All the forward-reaching frames should have been adjusted at the
  // jumps to this target.
  for (int i = 0; i < reaching_frames_.length(); i++) {
    ASSERT(reaching_frames_[i] == NULL ||
           reaching_frames_[i]->height() == expected_height_);
  }
#endif
  // Drop leftover statement state from the frame before merging, even
  // on the fall through. This is so we can bind the return target
  // with state on the frame.
  if (cgen()->has_valid_frame()) {
    int count = cgen()->frame()->height() - expected_height_;
    // On ARM we do not currently emit merge code at binding sites, so we need
    // to do it explicitly here. The only merging necessary is to drop extra
    // statement state from the stack.
    cgen()->frame()->Drop(count);
  }

  DoBind();
}


void BreakTarget::Bind(Result* arg) {
  UNIMPLEMENTED();
}


#undef __

} } // namespace v8::internal

#endif // V8_TARGET_ARCH_ARM
@@ -27,6 +27,8 @@
#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"

@@ -181,15 +183,18 @@ void MacroAssembler::Drop(int count, Condition cond) {
}


void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
void MacroAssembler::Swap(Register reg1,
Register reg2,
Register scratch,
Condition cond) {
if (scratch.is(no_reg)) {
eor(reg1, reg1, Operand(reg2));
eor(reg2, reg2, Operand(reg1));
eor(reg1, reg1, Operand(reg2));
eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
} else {
mov(scratch, reg1);
mov(reg1, reg2);
mov(reg2, scratch);
mov(scratch, reg1, LeaveCC, cond);
mov(reg1, reg2, LeaveCC, cond);
mov(reg2, scratch, LeaveCC, cond);
}
}
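The scratch-free path above swaps two registers with three exclusive-ors, avoiding a temporary. A minimal standalone C++ sketch of the same identity (names here are illustrative, not from the V8 sources; note the trick would zero the value if both operands aliased the same storage):

#include <cassert>
#include <cstdint>

// Three XORs exchange a and b without a temporary, mirroring the three
// eor instructions emitted when no scratch register is available.
void XorSwap(uint32_t& a, uint32_t& b) {
  a ^= b;  // a = a0 ^ b0
  b ^= a;  // b = b0 ^ (a0 ^ b0) = a0
  a ^= b;  // a = (a0 ^ b0) ^ a0 = b0
}

int main() {
  uint32_t x = 1, y = 2;
  XorSwap(x, y);
  assert(x == 2 && y == 1);
  return 0;
}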
@@ -232,6 +237,13 @@ void MacroAssembler::LoadRoot(Register destination,
}


void MacroAssembler::StoreRoot(Register source,
Heap::RootListIndex index,
Condition cond) {
str(source, MemOperand(roots, index << kPointerSizeLog2), cond);
}


void MacroAssembler::RecordWriteHelper(Register object,
Register offset,
Register scratch) {

@@ -243,63 +255,21 @@ void MacroAssembler::RecordWriteHelper(Register object,
bind(&not_in_new_space);
}

// This is how much we shift the remembered set bit offset to get the
// offset of the word in the remembered set. We divide by kBitsPerInt (32,
// shift right 5) and then multiply by kIntSize (4, shift left 2).
const int kRSetWordShift = 3;
mov(ip, Operand(Page::kPageAlignmentMask)); // Load mask only once.

Label fast;
// Calculate region number.
add(offset, object, Operand(offset)); // Add offset into the object.
and_(offset, offset, Operand(ip)); // Offset into page of the object.
mov(offset, Operand(offset, LSR, Page::kRegionSizeLog2));

// Compute the bit offset in the remembered set.
// object: heap object pointer (with tag)
// offset: offset to store location from the object
mov(ip, Operand(Page::kPageAlignmentMask)); // load mask only once
and_(scratch, object, Operand(ip)); // offset into page of the object
add(offset, scratch, Operand(offset)); // add offset into the object
mov(offset, Operand(offset, LSR, kObjectAlignmentBits));

// Compute the page address from the heap object pointer.
// object: heap object pointer (with tag)
// offset: bit offset of store position in the remembered set
// Calculate page address.
bic(object, object, Operand(ip));

// If the bit offset lies beyond the normal remembered set range, it is in
// the extra remembered set area of a large object.
// object: page start
// offset: bit offset of store position in the remembered set
cmp(offset, Operand(Page::kPageSize / kPointerSize));
b(lt, &fast);

// Adjust the bit offset to be relative to the start of the extra
// remembered set and the start address to be the address of the extra
// remembered set.
sub(offset, offset, Operand(Page::kPageSize / kPointerSize));
// Load the array length into 'scratch' and multiply by four to get the
// size in bytes of the elements.
ldr(scratch, MemOperand(object, Page::kObjectStartOffset
+ FixedArray::kLengthOffset));
mov(scratch, Operand(scratch, LSL, kObjectAlignmentBits));
// Add the page header (including remembered set), array header, and array
// body size to the page address.
add(object, object, Operand(Page::kObjectStartOffset
+ FixedArray::kHeaderSize));
add(object, object, Operand(scratch));

bind(&fast);
// Get address of the rset word.
// object: start of the remembered set (page start for the fast case)
// offset: bit offset of store position in the remembered set
bic(scratch, offset, Operand(kBitsPerInt - 1)); // clear the bit offset
add(object, object, Operand(scratch, LSR, kRSetWordShift));
// Get bit offset in the rset word.
// object: address of remembered set word
// offset: bit offset of store position
and_(offset, offset, Operand(kBitsPerInt - 1));

ldr(scratch, MemOperand(object));
// Mark region dirty.
ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
mov(ip, Operand(1));
orr(scratch, scratch, Operand(ip, LSL, offset));
str(scratch, MemOperand(object));
str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
}
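The rewritten helper above replaces per-word remembered-set bits with per-page region marks: the store address is reduced to an offset within its page and shifted down by the region size to index a dirty bit. A hedged sketch of that address arithmetic; the two constants are assumptions standing in for V8's Page values:

#include <cassert>
#include <cstdint>

const uint32_t kPageAlignmentMask = (1u << 13) - 1;  // assume 8K pages
const int kRegionSizeLog2 = 8;                       // assume 256-byte regions

// Region index covering [object + offset]: add the offset, mask down to a
// page offset, then shift by the region size, as RecordWriteHelper now does.
uint32_t RegionNumber(uint32_t object, uint32_t offset) {
  uint32_t page_offset = (object + offset) & kPageAlignmentMask;
  return page_offset >> kRegionSizeLog2;
}

int main() {
  assert(RegionNumber(0x10000, 0x104) == 1);  // byte 260 falls in region 1
  return 0;
}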
@@ -327,7 +297,7 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
Label done;

// First, test that the object is not in the new space. We cannot set
// remembered set bits in the new space.
// region marks for new space pages.
InNewSpace(object, scratch, eq, &done);

// Record the actual write.

@@ -345,6 +315,51 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
}


void MacroAssembler::Ldrd(Register dst1, Register dst2,
const MemOperand& src, Condition cond) {
ASSERT(src.rm().is(no_reg));
ASSERT(!dst1.is(lr)); // r14.
ASSERT_EQ(0, dst1.code() % 2);
ASSERT_EQ(dst1.code() + 1, dst2.code());

// Generate two ldr instructions if ldrd is not available.
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
ldrd(dst1, dst2, src, cond);
} else {
MemOperand src2(src);
src2.set_offset(src2.offset() + 4);
if (dst1.is(src.rn())) {
ldr(dst2, src2, cond);
ldr(dst1, src, cond);
} else {
ldr(dst1, src, cond);
ldr(dst2, src2, cond);
}
}
}


void MacroAssembler::Strd(Register src1, Register src2,
const MemOperand& dst, Condition cond) {
ASSERT(dst.rm().is(no_reg));
ASSERT(!src1.is(lr)); // r14.
ASSERT_EQ(0, src1.code() % 2);
ASSERT_EQ(src1.code() + 1, src2.code());

// Generate two str instructions if strd is not available.
if (CpuFeatures::IsSupported(ARMv7)) {
CpuFeatures::Scope scope(ARMv7);
strd(src1, src2, dst, cond);
} else {
MemOperand dst2(dst);
dst2.set_offset(dst2.offset() + 4);
str(src1, dst, cond);
str(src2, dst2, cond);
}
}
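The pre-ARMv7 fallback above splits one doubleword load into two word loads, and it must read the high word first whenever dst1 aliases the base register, or the address would be clobbered before the second load. A small C++ model of that ordering concern (registers modeled as an array; all names are illustrative):

#include <cassert>
#include <cstdint>

// Emulate the Ldrd fallback: regs[rn] holds a word index into mem. If dst1
// is the base register, load the second word first so the address survives.
void LoadPair(uint32_t regs[], int dst1, int dst2, int rn,
              const uint32_t mem[]) {
  uint32_t base = regs[rn];
  if (dst1 == rn) {
    regs[dst2] = mem[base + 1];  // second word first
    regs[dst1] = mem[base];
  } else {
    regs[dst1] = mem[base];
    regs[dst2] = mem[base + 1];
  }
}

int main() {
  uint32_t mem[2] = {7, 9};
  uint32_t regs[16] = {0};  // r2 holds index 0 and is also dst1
  LoadPair(regs, 2, 3, 2, mem);
  assert(regs[2] == 7 && regs[3] == 9);
  return 0;
}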
void MacroAssembler::EnterFrame(StackFrame::Type type) {
// r0-r3: preserved
stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());

@@ -610,6 +625,7 @@ void MacroAssembler::InvokeFunction(Register fun,
ldr(expected_reg,
FieldMemOperand(code_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
ldr(code_reg,
MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));

@@ -926,6 +942,12 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
ASSERT(!result.is(scratch1));
ASSERT(!scratch1.is(scratch2));

// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
object_size *= kPointerSize;
}
ASSERT_EQ(0, object_size & kObjectAlignmentMask);

// Load address of new object into result and allocation top address into
// scratch1.
ExternalReference new_space_allocation_top =

@@ -948,23 +970,16 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
ExternalReference::new_space_allocation_limit_address();
mov(scratch2, Operand(new_space_allocation_limit));
ldr(scratch2, MemOperand(scratch2));
add(result, result, Operand(object_size * kPointerSize));
add(result, result, Operand(object_size));
cmp(result, Operand(scratch2));
b(hi, gc_required);

// Update allocation top. result temporarily holds the new top.
if (FLAG_debug_code) {
tst(result, Operand(kObjectAlignmentMask));
Check(eq, "Unaligned allocation in new space");
}
str(result, MemOperand(scratch1));

// Tag and adjust back to start of new object.
if ((flags & TAG_OBJECT) != 0) {
sub(result, result, Operand((object_size * kPointerSize) -
kHeapObjectTag));
sub(result, result, Operand(object_size - kHeapObjectTag));
} else {
sub(result, result, Operand(object_size * kPointerSize));
sub(result, result, Operand(object_size));
}
}
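With the new SIZE_IN_WORDS flag, the fixed-size allocator converts the request to bytes once, up front, and the final subtraction folds the heap-object tag into the same instruction. A sketch of both computations (the kPointerSize and kHeapObjectTag values are assumptions for a 32-bit target):

#include <cassert>
#include <cstdint>

const int kPointerSize = 4;    // assumed 32-bit target
const int kHeapObjectTag = 1;  // assumed V8 pointer tag

enum { SIZE_IN_WORDS_FLAG = 1 << 2 };  // stand-in for AllocationFlags

// Normalize the requested size to bytes, as the patched code does first.
int SizeInBytes(int object_size, int flags) {
  return (flags & SIZE_IN_WORDS_FLAG) != 0 ? object_size * kPointerSize
                                           : object_size;
}

// Step back from the bumped top to the object start, optionally tagging the
// pointer in the same subtraction: top - (size - tag).
uint32_t AdjustBack(uint32_t new_top, int size_in_bytes, bool tag) {
  return tag ? new_top - (size_in_bytes - kHeapObjectTag)
             : new_top - size_in_bytes;
}

int main() {
  assert(SizeInBytes(3, SIZE_IN_WORDS_FLAG) == 12);
  assert(AdjustBack(0x100c, 12, true) == 0x1001);  // tagged object start
  return 0;
}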
@@ -1001,7 +1016,11 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
ExternalReference::new_space_allocation_limit_address();
mov(scratch2, Operand(new_space_allocation_limit));
ldr(scratch2, MemOperand(scratch2));
add(result, result, Operand(object_size, LSL, kPointerSizeLog2));
if ((flags & SIZE_IN_WORDS) != 0) {
add(result, result, Operand(object_size, LSL, kPointerSizeLog2));
} else {
add(result, result, Operand(object_size));
}
cmp(result, Operand(scratch2));
b(hi, gc_required);

@@ -1013,7 +1032,11 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
str(result, MemOperand(scratch1));

// Adjust back to start of new object.
sub(result, result, Operand(object_size, LSL, kPointerSizeLog2));
if ((flags & SIZE_IN_WORDS) != 0) {
sub(result, result, Operand(object_size, LSL, kPointerSizeLog2));
} else {
sub(result, result, Operand(object_size));
}

// Tag object if requested.
if ((flags & TAG_OBJECT) != 0) {

@@ -1054,10 +1077,7 @@ void MacroAssembler::AllocateTwoByteString(Register result,
mov(scratch1, Operand(length, LSL, 1)); // Length in bytes, not chars.
add(scratch1, scratch1,
Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
// AllocateInNewSpace expects the size in words, so we can round down
// to kObjectAlignment and divide by kPointerSize in the same shift.
ASSERT_EQ(kPointerSize, kObjectAlignmentMask + 1);
mov(scratch1, Operand(scratch1, ASR, kPointerSizeLog2));
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));

// Allocate two-byte string in new space.
AllocateInNewSpace(scratch1,

@@ -1088,10 +1108,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
ASSERT(kCharSize == 1);
add(scratch1, length,
Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
// AllocateInNewSpace expects the size in words, so we can round down
// to kObjectAlignment and divide by kPointerSize in the same shift.
ASSERT_EQ(kPointerSize, kObjectAlignmentMask + 1);
mov(scratch1, Operand(scratch1, ASR, kPointerSizeLog2));
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));

// Allocate ASCII string in new space.
AllocateInNewSpace(scratch1,

@@ -1115,7 +1132,7 @@ void MacroAssembler::AllocateTwoByteConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
AllocateInNewSpace(ConsString::kSize / kPointerSize,
AllocateInNewSpace(ConsString::kSize,
result,
scratch1,
scratch2,

@@ -1135,7 +1152,7 @@ void MacroAssembler::AllocateAsciiConsString(Register result,
Register scratch1,
Register scratch2,
Label* gc_required) {
AllocateInNewSpace(ConsString::kSize / kPointerSize,
AllocateInNewSpace(ConsString::kSize,
result,
scratch1,
scratch2,

@@ -1273,7 +1290,7 @@ void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
if (CpuFeatures::IsSupported(ARMv7)) {
ubfx(dst, src, Operand(kSmiTagSize), Operand(num_least_bits - 1));
ubfx(dst, src, kSmiTagSize, num_least_bits);
} else {
mov(dst, Operand(src, ASR, kSmiTagSize));
and_(dst, dst, Operand((1 << num_least_bits) - 1));
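The ARMv7 path now passes ubfx a plain bit position and width, while the fallback shifts out the smi tag and masks. The same arithmetic in standalone C++ (a one-bit tag is an assumption matching 32-bit V8 smis):

#include <cassert>
#include <cstdint>

const int kSmiTagSize = 1;  // assumed: low bit is the smi tag

// Least-significant payload bits of a tagged smi: arithmetic shift out the
// tag, then mask, exactly as the non-ARMv7 fallback does.
uint32_t LeastBitsFromSmi(int32_t smi, int num_least_bits) {
  return static_cast<uint32_t>(smi >> kSmiTagSize) &
         ((1u << num_least_bits) - 1);
}

int main() {
  int32_t smi = 13 << kSmiTagSize;        // smi encoding of 13
  assert(LeastBitsFromSmi(smi, 3) == 5);  // 13 & 7
  return 0;
}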
@@ -1549,7 +1566,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
Label* gc_required) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
AllocateInNewSpace(HeapNumber::kSize,
result,
scratch1,
scratch2,

@@ -1717,3 +1734,5 @@ void CodePatcher::Emit(Address addr) {

} } // namespace v8::internal

#endif // V8_TARGET_ARCH_ARM
@@ -52,6 +52,21 @@ enum InvokeJSFlags {
};


// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
// No special flags.
NO_ALLOCATION_FLAGS = 0,
// Return the pointer to the allocated object already tagged as a heap
// object.
TAG_OBJECT = 1 << 0,
// The content of the result register already contains the allocation top in
// new space.
RESULT_CONTAINS_TOP = 1 << 1,
// Specify that the requested size of the space to allocate is specified in
// words instead of bytes.
SIZE_IN_WORDS = 1 << 2
};
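Since each flag occupies its own bit, callers can OR options together and the allocator can test them independently. A quick usage sketch mirroring the enum just introduced:

#include <cassert>

enum AllocationFlags {
  NO_ALLOCATION_FLAGS = 0,
  TAG_OBJECT = 1 << 0,
  RESULT_CONTAINS_TOP = 1 << 1,
  SIZE_IN_WORDS = 1 << 2
};

int main() {
  int flags = TAG_OBJECT | SIZE_IN_WORDS;      // combine independent options
  assert((flags & TAG_OBJECT) != 0);           // set
  assert((flags & RESULT_CONTAINS_TOP) == 0);  // not set
  return 0;
}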
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:

@@ -73,7 +88,10 @@ class MacroAssembler: public Assembler {
// Swap two registers. If the scratch register is omitted then a slightly
// less efficient form using xor instead of mov is emitted.
void Swap(Register reg1, Register reg2, Register scratch = no_reg);
void Swap(Register reg1,
Register reg2,
Register scratch = no_reg,
Condition cond = al);

void Call(Label* target);
void Move(Register dst, Handle<Object> value);

@@ -85,6 +103,10 @@ class MacroAssembler: public Assembler {
void LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond = al);
// Store an object to the root table.
void StoreRoot(Register source,
Heap::RootListIndex index,
Condition cond = al);


// Check if object is in new space.

@@ -95,16 +117,14 @@ class MacroAssembler: public Assembler {
Label* branch);


// Set the remembered set bit for an offset into an
// object. RecordWriteHelper only works if the object is not in new
// space.
void RecordWriteHelper(Register object, Register offset, Register scratch);
// For the page containing |object| mark the region covering [object+offset]
// dirty. The object address must be in the first 8K of an allocated page.
void RecordWriteHelper(Register object, Register offset, Register scratch);

// Sets the remembered set bit for [address+offset], where address is the
// address of the heap object 'object'. The address must be in the first 8K
// of an allocated page. The 'scratch' register is used in the
// implementation and all 3 registers are clobbered by the operation, as
// well as the ip register.
// For the page containing |object| mark the region covering [object+offset]
// dirty. The object address must be in the first 8K of an allocated page.
// The 'scratch' register is used in the implementation and all 3 registers
// are clobbered by the operation, as well as the ip register.
void RecordWrite(Register object, Register offset, Register scratch);

// Push two registers. Pushes leftmost register first (to highest address).

@@ -166,6 +186,18 @@ class MacroAssembler: public Assembler {
}
}

// Load two consecutive registers with two consecutive memory locations.
void Ldrd(Register dst1,
Register dst2,
const MemOperand& src,
Condition cond = al);

// Store two consecutive registers to two consecutive memory locations.
void Strd(Register src1,
Register src2,
const MemOperand& dst,
Condition cond = al);

// ---------------------------------------------------------------------------
// Stack limit support

@@ -280,7 +312,9 @@ class MacroAssembler: public Assembler {
// Allocate an object in new space. The object_size is specified in words (not
// bytes). If the new space is exhausted control continues at the gc_required
// label. The allocated object is returned in result. If the flag
// tag_allocated_object is true the result is tagged as a heap object.
// tag_allocated_object is true the result is tagged as a heap object. All
// registers are clobbered also when control continues at the gc_required
// label.
void AllocateInNewSpace(int object_size,
Register result,
Register scratch1,

@@ -324,8 +358,9 @@ class MacroAssembler: public Assembler {
Register scratch2,
Label* gc_required);

// Allocates a heap number or jumps to the need_gc label if the young space
// is full and a scavenge is needed.
// Allocates a heap number or jumps to the gc_required label if the young
// space is full and a scavenge is needed. All registers are clobbered also
// when control continues at the gc_required label.
void AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
@@ -26,6 +26,9 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "unicode.h"
#include "log.h"
#include "ast.h"

@@ -1210,14 +1213,31 @@ void RegExpMacroAssemblerARM::LoadCurrentCharacterUnchecked(int cp_offset,
__ add(r0, current_input_offset(), Operand(cp_offset * char_size()));
offset = r0;
}
// We assume that we cannot do unaligned loads on ARM, so this function
// must only be used to load a single character at a time.
// The ldr, str, ldrh, strh instructions can do unaligned accesses, if the CPU
// and the operating system running on the target allow it.
// If unaligned loads and stores are not supported then this function must
// only be used to load a single character at a time.
#if !V8_TARGET_CAN_READ_UNALIGNED
ASSERT(characters == 1);
#endif

if (mode_ == ASCII) {
__ ldrb(current_character(), MemOperand(end_of_input_address(), offset));
if (characters == 4) {
__ ldr(current_character(), MemOperand(end_of_input_address(), offset));
} else if (characters == 2) {
__ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
} else {
ASSERT(characters == 1);
__ ldrb(current_character(), MemOperand(end_of_input_address(), offset));
}
} else {
ASSERT(mode_ == UC16);
__ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
if (characters == 2) {
__ ldr(current_character(), MemOperand(end_of_input_address(), offset));
} else {
ASSERT(characters == 1);
__ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
}
}
}
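The rewritten loader picks the widest load that covers the requested characters: one word for four Latin-1 characters, a halfword for two, a byte for one. A host-side sketch of the same dispatch (memcpy stands in for the ldr/ldrh/ldrb forms; the low-byte check assumes a little-endian host):

#include <cassert>
#include <cstdint>
#include <cstring>

// Load 1, 2, or 4 Latin-1 characters in a single access, as the patched
// LoadCurrentCharacterUnchecked does for ASCII subjects.
uint32_t LoadChars(const uint8_t* input, int characters) {
  uint32_t value = 0;
  if (characters == 4) {
    memcpy(&value, input, 4);
  } else if (characters == 2) {
    memcpy(&value, input, 2);
  } else {
    value = input[0];
  }
  return value;
}

int main() {
  const uint8_t text[4] = {'a', 'b', 'c', 'd'};
  assert((LoadChars(text, 4) & 0xff) == 'a');  // little-endian low byte
  assert(LoadChars(text, 1) == 'a');
  return 0;
}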
@@ -1238,3 +1258,5 @@ void RegExpCEntryStub::Generate(MacroAssembler* masm_) {
#endif // V8_INTERPRETED_REGEXP

}} // namespace v8::internal

#endif // V8_TARGET_ARCH_ARM

@@ -27,6 +27,8 @@
#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "codegen-inl.h"
#include "register-allocator-inl.h"

@@ -57,3 +59,5 @@ Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {

} } // namespace v8::internal

#endif // V8_TARGET_ARCH_ARM
@@ -29,6 +29,8 @@
#include <cstdarg>
#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "disasm.h"
#include "assembler.h"
#include "arm/constants-arm.h"

@@ -728,6 +730,13 @@ int32_t Simulator::get_register(int reg) const {
}


void Simulator::set_dw_register(int dreg, const int* dbl) {
ASSERT((dreg >= 0) && (dreg < num_d_registers));
registers_[dreg] = dbl[0];
registers_[dreg + 1] = dbl[1];
}


// Raw access to the PC register.
void Simulator::set_pc(int32_t value) {
pc_modified_ = true;

@@ -864,27 +873,42 @@ void Simulator::TrashCallerSaveRegisters() {
registers_[12] = 0x50Bad4U;
}


// The ARM cannot do unaligned reads and writes. On some ARM platforms an
// interrupt is caused. On others it does a funky rotation thing. For now we
// simply disallow unaligned reads, but at some point we may want to move to
// emulating the rotate behaviour. Note that simulator runs have the runtime
// Some operating systems allow unaligned access on ARMv7 targets. We
// assume that unaligned accesses are not allowed unless the v8 build system
// defines the CAN_USE_UNALIGNED_ACCESSES macro to be non-zero.
// The statements below describe the behavior of the ARM CPUs
// that don't support unaligned access.
// Some ARM platforms raise an interrupt on detecting unaligned access.
// On others it does a funky rotation thing. For now we
// simply disallow unaligned reads. Note that simulator runs have the runtime
// system running directly on the host system and only generated code is
// executed in the simulator. Since the host is typically IA32 we will not
// get the correct ARM-like behaviour on unaligned accesses.
// get the correct ARM-like behaviour on unaligned accesses for those ARM
// targets that don't support unaligned loads and stores.


int Simulator::ReadW(int32_t addr, Instr* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
#else
if ((addr & 3) == 0) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
}
PrintF("Unaligned read at 0x%08x\n", addr);
PrintF("Unaligned read at 0x%08x, pc=%p\n", addr, instr);
UNIMPLEMENTED();
return 0;
#endif
}


void Simulator::WriteW(int32_t addr, int value, Instr* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
*ptr = value;
return;
#else
if ((addr & 3) == 0) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
*ptr = value;

@@ -892,10 +916,15 @@ void Simulator::WriteW(int32_t addr, int value, Instr* instr) {
}
PrintF("Unaligned write at 0x%08x, pc=%p\n", addr, instr);
UNIMPLEMENTED();
#endif
}
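Every accessor in this block guards with the same mask test: a word address is aligned when its two low bits are clear, a halfword when the low bit is clear. The predicates as plain C++:

#include <cassert>
#include <cstdint>

// Alignment tests used throughout the simulator's memory accessors.
bool WordAligned(uint32_t addr) { return (addr & 3) == 0; }
bool HalfwordAligned(uint32_t addr) { return (addr & 1) == 0; }

int main() {
  assert(WordAligned(0x1000) && !WordAligned(0x1002));
  assert(HalfwordAligned(0x1002) && !HalfwordAligned(0x1003));
  return 0;
}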
uint16_t Simulator::ReadHU(int32_t addr, Instr* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
#else
if ((addr & 1) == 0) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;

@@ -903,10 +932,15 @@ uint16_t Simulator::ReadHU(int32_t addr, Instr* instr) {
PrintF("Unaligned unsigned halfword read at 0x%08x, pc=%p\n", addr, instr);
UNIMPLEMENTED();
return 0;
#endif
}


int16_t Simulator::ReadH(int32_t addr, Instr* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
#else
if ((addr & 1) == 0) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;

@@ -914,10 +948,16 @@ int16_t Simulator::ReadH(int32_t addr, Instr* instr) {
PrintF("Unaligned signed halfword read at 0x%08x\n", addr);
UNIMPLEMENTED();
return 0;
#endif
}


void Simulator::WriteH(int32_t addr, uint16_t value, Instr* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
return;
#else
if ((addr & 1) == 0) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;

@@ -925,10 +965,16 @@ void Simulator::WriteH(int32_t addr, uint16_t value, Instr* instr) {
}
PrintF("Unaligned unsigned halfword write at 0x%08x, pc=%p\n", addr, instr);
UNIMPLEMENTED();
#endif
}


void Simulator::WriteH(int32_t addr, int16_t value, Instr* instr) {
#if V8_TARGET_CAN_READ_UNALIGNED
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
return;
#else
if ((addr & 1) == 0) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;

@@ -936,6 +982,7 @@ void Simulator::WriteH(int32_t addr, int16_t value, Instr* instr) {
}
PrintF("Unaligned halfword write at 0x%08x, pc=%p\n", addr, instr);
UNIMPLEMENTED();
#endif
}


@@ -963,6 +1010,41 @@ void Simulator::WriteB(int32_t addr, int8_t value) {
}


int32_t* Simulator::ReadDW(int32_t addr) {
#if V8_TARGET_CAN_READ_UNALIGNED
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
return ptr;
#else
if ((addr & 3) == 0) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
return ptr;
}
PrintF("Unaligned read at 0x%08x\n", addr);
UNIMPLEMENTED();
return 0;
#endif
}


void Simulator::WriteDW(int32_t addr, int32_t value1, int32_t value2) {
#if V8_TARGET_CAN_READ_UNALIGNED
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
*ptr++ = value1;
*ptr = value2;
return;
#else
if ((addr & 3) == 0) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
*ptr++ = value1;
*ptr = value2;
return;
}
PrintF("Unaligned write at 0x%08x\n", addr);
UNIMPLEMENTED();
#endif
}


// Returns the limit of the stack area to enable checking for stack overflows.
uintptr_t Simulator::StackLimit() const {
// Leave a safety margin of 256 bytes to prevent overrunning the stack when

@@ -1590,7 +1672,19 @@ void Simulator::DecodeType01(Instr* instr) {
}
}
}
if (instr->HasH()) {
if (((instr->Bits(7, 4) & 0xd) == 0xd) && (instr->Bit(20) == 0)) {
ASSERT((rd % 2) == 0);
if (instr->HasH()) {
// The strd instruction.
int32_t value1 = get_register(rd);
int32_t value2 = get_register(rd+1);
WriteDW(addr, value1, value2);
} else {
// The ldrd instruction.
int* rn_data = ReadDW(addr);
set_dw_register(rd, rn_data);
}
} else if (instr->HasH()) {
if (instr->HasSign()) {
if (instr->HasL()) {
int16_t val = ReadH(addr, instr);

@@ -1937,7 +2031,6 @@ void Simulator::DecodeType2(Instr* instr) {


void Simulator::DecodeType3(Instr* instr) {
ASSERT(instr->Bits(6, 4) == 0x5 || instr->Bit(4) == 0);
int rd = instr->RdField();
int rn = instr->RnField();
int32_t rn_val = get_register(rn);

@@ -1964,17 +2057,47 @@ void Simulator::DecodeType3(Instr* instr) {
break;
}
case 3: {
// UBFX.
if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
uint32_t lsbit = static_cast<uint32_t>(instr->ShiftAmountField());
uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
uint32_t msbit = widthminus1 + lsbit;
if (msbit <= 31) {
uint32_t rm_val =
static_cast<uint32_t>(get_register(instr->RmField()));
uint32_t extr_val = rm_val << (31 - msbit);
extr_val = extr_val >> (31 - widthminus1);
set_register(instr->RdField(), extr_val);
if (instr->Bit(22)) {
// ubfx - unsigned bitfield extract.
uint32_t rm_val =
static_cast<uint32_t>(get_register(instr->RmField()));
uint32_t extr_val = rm_val << (31 - msbit);
extr_val = extr_val >> (31 - widthminus1);
set_register(instr->RdField(), extr_val);
} else {
// sbfx - signed bitfield extract.
int32_t rm_val = get_register(instr->RmField());
int32_t extr_val = rm_val << (31 - msbit);
extr_val = extr_val >> (31 - widthminus1);
set_register(instr->RdField(), extr_val);
}
} else {
UNREACHABLE();
}
return;
} else if (!instr->HasW() && (instr->Bits(6, 4) == 0x1)) {
uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
uint32_t msbit = static_cast<uint32_t>(instr->Bits(20, 16));
if (msbit >= lsbit) {
// bfc or bfi - bitfield clear/insert.
uint32_t rd_val =
static_cast<uint32_t>(get_register(instr->RdField()));
uint32_t bitcount = msbit - lsbit + 1;
uint32_t mask = (1 << bitcount) - 1;
rd_val &= ~(mask << lsbit);
if (instr->RmField() != 15) {
// bfi - bitfield insert.
uint32_t rm_val =
static_cast<uint32_t>(get_register(instr->RmField()));
rm_val &= mask;
rd_val |= rm_val << lsbit;
}
set_register(instr->RdField(), rd_val);
} else {
UNREACHABLE();
}
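The decoder above implements the bitfield instructions with shift pairs: ubfx shifts the field to the top of the word and back down logically, sbfx does the same with an arithmetic right shift to sign-extend, and bfi masks the destination before OR-ing in the source bits. The same operations as standalone C++:

#include <cassert>
#include <cstdint>

// ubfx: zero-extend bits [lsb, lsb + width) of value.
uint32_t Ubfx(uint32_t value, int lsb, int width) {
  int msbit = lsb + width - 1;
  return (value << (31 - msbit)) >> (31 - (width - 1));
}

// sbfx: same field, sign-extended by the arithmetic right shift.
int32_t Sbfx(uint32_t value, int lsb, int width) {
  int msbit = lsb + width - 1;
  return static_cast<int32_t>(value << (31 - msbit)) >> (31 - (width - 1));
}

// bfi: insert the low 'width' bits of src into dst at lsb.
uint32_t Bfi(uint32_t dst, uint32_t src, int lsb, int width) {
  uint32_t mask = (1u << width) - 1;  // width < 32 assumed, as in the decoder
  return (dst & ~(mask << lsb)) | ((src & mask) << lsb);
}

int main() {
  assert(Ubfx(0xf0, 4, 4) == 0xf);
  assert(Sbfx(0xf0, 4, 4) == -1);      // field is all ones, sign-extends
  assert(Bfi(0, 0x5, 8, 4) == 0x500);
  return 0;
}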
@@ -2639,3 +2762,5 @@ uintptr_t Simulator::PopAddress() {
} } // namespace assembler::arm

#endif // __arm__

#endif // V8_TARGET_ARCH_ARM

@@ -159,6 +159,7 @@ class Simulator {
// instruction.
void set_register(int reg, int32_t value);
int32_t get_register(int reg) const;
void set_dw_register(int dreg, const int* dbl);

// Support for VFP.
void set_s_register(int reg, unsigned int value);

@@ -252,6 +253,9 @@ class Simulator {
inline int ReadW(int32_t addr, Instr* instr);
inline void WriteW(int32_t addr, int value, Instr* instr);

int32_t* ReadDW(int32_t addr);
void WriteDW(int32_t addr, int32_t value1, int32_t value2);

// Executing is handled based on the instruction type.
void DecodeType01(Instr* instr); // both type 0 and type 1 rolled into one
void DecodeType2(Instr* instr);
@@ -27,6 +27,8 @@
#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "ic-inl.h"
#include "codegen-inl.h"
#include "stub-cache.h"

@@ -424,177 +426,6 @@ static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
}


class LoadInterceptorCompiler BASE_EMBEDDED {
public:
explicit LoadInterceptorCompiler(Register name) : name_(name) {}

void CompileCacheable(MacroAssembler* masm,
StubCompiler* stub_compiler,
Register receiver,
Register holder,
Register scratch1,
Register scratch2,
JSObject* holder_obj,
LookupResult* lookup,
String* name,
Label* miss_label) {
AccessorInfo* callback = NULL;
bool optimize = false;
// So far the most popular follow-ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
// later.
if (lookup->type() == FIELD) {
optimize = true;
} else if (lookup->type() == CALLBACKS) {
Object* callback_object = lookup->GetCallbackObject();
if (callback_object->IsAccessorInfo()) {
callback = AccessorInfo::cast(callback_object);
optimize = callback->getter() != NULL;
}
}

if (!optimize) {
CompileRegular(masm, receiver, holder, scratch2, holder_obj, miss_label);
return;
}

// Note: starting a frame here makes GC aware of pointers pushed below.
__ EnterInternalFrame();

__ push(receiver);
__ Push(holder, name_);

CompileCallLoadPropertyWithInterceptor(masm,
receiver,
holder,
name_,
holder_obj);

Label interceptor_failed;
// Compare with no_interceptor_result_sentinel.
__ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
__ cmp(r0, scratch1);
__ b(eq, &interceptor_failed);
__ LeaveInternalFrame();
__ Ret();

__ bind(&interceptor_failed);
__ pop(name_);
__ pop(holder);
__ pop(receiver);

__ LeaveInternalFrame();

if (lookup->type() == FIELD) {
holder = stub_compiler->CheckPrototypes(holder_obj,
holder,
lookup->holder(),
scratch1,
scratch2,
name,
miss_label);
stub_compiler->GenerateFastPropertyLoad(masm,
r0,
holder,
lookup->holder(),
lookup->GetFieldIndex());
__ Ret();
} else {
ASSERT(lookup->type() == CALLBACKS);
ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
ASSERT(callback != NULL);
ASSERT(callback->getter() != NULL);

Label cleanup;
__ pop(scratch2);
__ Push(receiver, scratch2);

holder = stub_compiler->CheckPrototypes(holder_obj, holder,
lookup->holder(), scratch1,
scratch2,
name,
&cleanup);

__ push(holder);
__ Move(holder, Handle<AccessorInfo>(callback));
__ push(holder);
__ ldr(scratch1, FieldMemOperand(holder, AccessorInfo::kDataOffset));
__ Push(scratch1, name_);

ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallExternalReference(ref, 5, 1);

__ bind(&cleanup);
__ pop(scratch1);
__ pop(scratch2);
__ push(scratch1);
}
}


void CompileRegular(MacroAssembler* masm,
Register receiver,
Register holder,
Register scratch,
JSObject* holder_obj,
Label* miss_label) {
PushInterceptorArguments(masm, receiver, holder, name_, holder_obj);

ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
__ TailCallExternalReference(ref, 5, 1);
}

private:
Register name_;
};


static void CompileLoadInterceptor(LoadInterceptorCompiler* compiler,
StubCompiler* stub_compiler,
MacroAssembler* masm,
JSObject* object,
JSObject* holder,
String* name,
LookupResult* lookup,
Register receiver,
Register scratch1,
Register scratch2,
Label* miss) {
ASSERT(holder->HasNamedInterceptor());
ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());

// Check that the receiver isn't a smi.
__ BranchOnSmi(receiver, miss);

// Check that the maps haven't changed.
Register reg =
stub_compiler->CheckPrototypes(object, receiver, holder,
scratch1, scratch2, name, miss);

if (lookup->IsProperty() && lookup->IsCacheable()) {
compiler->CompileCacheable(masm,
stub_compiler,
receiver,
reg,
scratch1,
scratch2,
holder,
lookup,
name,
miss);
} else {
compiler->CompileRegular(masm,
receiver,
reg,
scratch2,
holder,
miss);
}
}


// Reserves space for the extra arguments to FastHandleApiCall in the
// caller's frame.
//

@@ -715,7 +546,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register receiver,
Register scratch1,
Register scratch2,
JSObject* holder_obj,
JSObject* interceptor_holder,
LookupResult* lookup,
String* name,
const CallOptimization& optimization,

@@ -728,10 +559,13 @@ class CallInterceptorCompiler BASE_EMBEDDED {
bool can_do_fast_api_call = false;
if (optimization.is_simple_api_call() &&
!lookup->holder()->IsGlobalObject()) {
depth1 = optimization.GetPrototypeDepthOfExpectedType(object, holder_obj);
depth1 =
optimization.GetPrototypeDepthOfExpectedType(object,
interceptor_holder);
if (depth1 == kInvalidProtoDepth) {
depth2 = optimization.GetPrototypeDepthOfExpectedType(holder_obj,
lookup->holder());
depth2 =
optimization.GetPrototypeDepthOfExpectedType(interceptor_holder,
lookup->holder());
}
can_do_fast_api_call = (depth1 != kInvalidProtoDepth) ||
(depth2 != kInvalidProtoDepth);

@@ -746,23 +580,39 @@ class CallInterceptorCompiler BASE_EMBEDDED {
ReserveSpaceForFastApiCall(masm, scratch1);
}

// Check that the maps from receiver to interceptor's holder
// haven't changed and thus we can invoke interceptor.
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, holder_obj, scratch1,
stub_compiler_->CheckPrototypes(object, receiver,
interceptor_holder, scratch1,
scratch2, name, depth1, miss);

// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
Label regular_invoke;
LoadWithInterceptor(masm, receiver, holder, holder_obj, scratch2,
LoadWithInterceptor(masm, receiver, holder, interceptor_holder, scratch2,
&regular_invoke);

// Generate code for the failed interceptor case.
// Interceptor returned nothing for this property. Try to use cached
// constant function.

// Check the lookup is still valid.
stub_compiler_->CheckPrototypes(holder_obj, receiver,
lookup->holder(), scratch1,
scratch2, name, depth2, miss);
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
if (interceptor_holder != lookup->holder()) {
stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
lookup->holder(), scratch1,
scratch2, name, depth2, miss);
} else {
// CheckPrototypes has a side effect of fetching a 'holder'
// for API (object which is instanceof for the signature). It's
// safe to omit it here, as if present, it should be fetched
// by the previous CheckPrototypes.
ASSERT(depth2 == kInvalidProtoDepth);
}

// Invoke function.
if (can_do_fast_api_call) {
GenerateFastApiCall(masm, optimization, arguments_.immediate());
} else {

@@ -770,12 +620,14 @@ class CallInterceptorCompiler BASE_EMBEDDED {
JUMP_FUNCTION);
}

// Deferred code for fast API call case---clean preallocated space.
if (can_do_fast_api_call) {
__ bind(&miss_cleanup);
FreeSpaceForFastApiCall(masm);
__ b(miss_label);
}

// Invoke a regular function.
__ bind(&regular_invoke);
if (can_do_fast_api_call) {
FreeSpaceForFastApiCall(masm);

@@ -788,10 +640,10 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Register scratch1,
Register scratch2,
String* name,
JSObject* holder_obj,
JSObject* interceptor_holder,
Label* miss_label) {
Register holder =
stub_compiler_->CheckPrototypes(object, receiver, holder_obj,
stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
scratch1, scratch2, name,
miss_label);

@@ -804,7 +656,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
receiver,
holder,
name_,
holder_obj);
interceptor_holder);

__ CallExternalReference(
ExternalReference(

@@ -986,7 +838,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,


void StubCompiler::GenerateLoadInterceptor(JSObject* object,
JSObject* holder,
JSObject* interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,

@@ -994,18 +846,133 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
Register scratch2,
String* name,
Label* miss) {
LoadInterceptorCompiler compiler(name_reg);
CompileLoadInterceptor(&compiler,
this,
masm(),
object,
holder,
name,
lookup,
receiver,
scratch1,
scratch2,
miss);
ASSERT(interceptor_holder->HasNamedInterceptor());
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());

// Check that the receiver isn't a smi.
__ BranchOnSmi(receiver, miss);

// So far the most popular follow-ups for interceptor loads are FIELD
// and CALLBACKS, so inline only them, other cases may be added
// later.
bool compile_followup_inline = false;
if (lookup->IsProperty() && lookup->IsCacheable()) {
if (lookup->type() == FIELD) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
lookup->GetCallbackObject()->IsAccessorInfo() &&
AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
compile_followup_inline = true;
}
}

if (compile_followup_inline) {
// Compile the interceptor call, followed by inline code to load the
// property from further up the prototype chain if the call fails.
// Check that the maps haven't changed.
Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
scratch1, scratch2, name, miss);
ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));

// Save necessary data before invoking an interceptor.
// Requires a frame to make GC aware of pushed pointers.
__ EnterInternalFrame();

if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
// CALLBACKS case needs a receiver to be passed into C++ callback.
__ Push(receiver, holder_reg, name_reg);
} else {
__ Push(holder_reg, name_reg);
}

// Invoke an interceptor. Note: map checks from receiver to
// interceptor's holder has been compiled before (see a caller
// of this method.)
CompileCallLoadPropertyWithInterceptor(masm(),
receiver,
holder_reg,
name_reg,
interceptor_holder);

// Check if interceptor provided a value for property. If it's
// the case, return immediately.
Label interceptor_failed;
__ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
__ cmp(r0, scratch1);
__ b(eq, &interceptor_failed);
__ LeaveInternalFrame();
__ Ret();

__ bind(&interceptor_failed);
__ pop(name_reg);
__ pop(holder_reg);
if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
__ pop(receiver);
}

__ LeaveInternalFrame();

// Check that the maps from interceptor's holder to lookup's holder
// haven't changed. And load lookup's holder into |holder| register.
if (interceptor_holder != lookup->holder()) {
holder_reg = CheckPrototypes(interceptor_holder,
holder_reg,
lookup->holder(),
scratch1,
scratch2,
name,
miss);
}

if (lookup->type() == FIELD) {
// We found FIELD property in prototype chain of interceptor's holder.
// Retrieve a field from field's holder.
GenerateFastPropertyLoad(masm(), r0, holder_reg,
lookup->holder(), lookup->GetFieldIndex());
__ Ret();
} else {
// We found CALLBACKS property in prototype chain of interceptor's
// holder.
ASSERT(lookup->type() == CALLBACKS);
ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
ASSERT(callback != NULL);
ASSERT(callback->getter() != NULL);

// Tail call to runtime.
// Important invariant in CALLBACKS case: the code above must be
// structured to never clobber |receiver| register.
__ Move(scratch2, Handle<AccessorInfo>(callback));
// holder_reg is either receiver or scratch1.
if (!receiver.is(holder_reg)) {
ASSERT(scratch1.is(holder_reg));
__ Push(receiver, holder_reg, scratch2);
__ ldr(scratch1,
FieldMemOperand(holder_reg, AccessorInfo::kDataOffset));
__ Push(scratch1, name_reg);
} else {
__ push(receiver);
__ ldr(scratch1,
FieldMemOperand(holder_reg, AccessorInfo::kDataOffset));
__ Push(holder_reg, scratch2, scratch1, name_reg);
}

ExternalReference ref =
ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
__ TailCallExternalReference(ref, 5, 1);
}
} else { // !compile_followup_inline
// Call the runtime system to load the interceptor.
// Check that the maps haven't changed.
Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
scratch1, scratch2, name, miss);
PushInterceptorArguments(masm(), receiver, holder_reg,
name_reg, interceptor_holder);

ExternalReference ref = ExternalReference(
IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
__ TailCallExternalReference(ref, 5, 1);
}
}
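The inlined follow-up above hinges on one convention: the interceptor call leaves a dedicated sentinel in r0 when it declines to produce a value, and only then does the stub fall back to the cached FIELD or CALLBACKS lookup. A sketch of that control flow with stand-in types (every name here is hypothetical, not V8's API):

#include <cassert>

struct Object {};
static Object kNoInterceptorResultSentinel;  // stand-in for the root sentinel

// An interceptor either produces a value or returns the sentinel.
Object* CallInterceptor(Object* value_or_null) {
  return value_or_null != nullptr ? value_or_null
                                  : &kNoInterceptorResultSentinel;
}

// Shape of GenerateLoadInterceptor: fast-exit with the interceptor's value,
// otherwise fall through to the slower cached lookup.
Object* LoadProperty(Object* interceptor_value, Object* cached) {
  Object* result = CallInterceptor(interceptor_value);
  if (result != &kNoInterceptorResultSentinel) return result;
  return cached;  // follow-up: FIELD or CALLBACKS load
}

int main() {
  Object v, c;
  assert(LoadProperty(&v, &c) == &v);       // interceptor provided a value
  assert(LoadProperty(nullptr, &c) == &c);  // fell back to cached lookup
  return 0;
}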
@@ -1121,11 +1088,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ Jump(ic, RelocInfo::CODE_TARGET);

// Return the generated code.
String* function_name = NULL;
if (function->shared()->name()->IsString()) {
function_name = String::cast(function->shared()->name());
}
return GetCode(CONSTANT_FUNCTION, function_name);
return GetCode(function);
}


@@ -1175,11 +1138,27 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
__ Jump(ic, RelocInfo::CODE_TARGET);

// Return the generated code.
String* function_name = NULL;
if (function->shared()->name()->IsString()) {
function_name = String::cast(function->shared()->name());
}
return GetCode(CONSTANT_FUNCTION, function_name);
return GetCode(function);
}


Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
JSObject* holder,
JSFunction* function,
String* name,
CheckType check) {
// TODO(722): implement this.
return Heap::undefined_value();
}


Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
JSObject* holder,
JSFunction* function,
String* name,
CheckType check) {
// TODO(722): implement this.
return Heap::undefined_value();
}


@@ -1194,9 +1173,9 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
// -----------------------------------
SharedFunctionInfo* function_info = function->shared();
if (function_info->HasCustomCallGenerator()) {
CustomCallGenerator generator =
ToCData<CustomCallGenerator>(function_info->function_data());
Object* result = generator(this, object, holder, function, name, check);
const int id = function_info->custom_call_generator_id();
Object* result =
CompileCustomCall(id, object, holder, function, name, check);
// undefined means bail out to regular compiler.
if (!result->IsUndefined()) {
return result;

@@ -1334,11 +1313,7 @@ Object* CallStubCompiler::CompileCallConstant(Object* object,
__ Jump(ic, RelocInfo::CODE_TARGET);

// Return the generated code.
String* function_name = NULL;
if (function->shared()->name()->IsString()) {
function_name = String::cast(function->shared()->name());
}
return GetCode(CONSTANT_FUNCTION, function_name);
return GetCode(function);
}


@@ -1630,15 +1605,11 @@ Object* LoadStubCompiler::CompileLoadNonexistent(String* name,
JSObject* object,
JSObject* last) {
// ----------- S t a t e -------------
// -- r2 : name
// -- r0 : receiver
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
Label miss;

// Load receiver.
__ ldr(r0, MemOperand(sp, 0));

// Check that receiver is not a smi.
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &miss);

@@ -1675,14 +1646,12 @@ Object* LoadStubCompiler::CompileLoadField(JSObject* object,
int index,
String* name) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
Label miss;

__ ldr(r0, MemOperand(sp, 0));

GenerateLoadField(object, holder, r0, r3, r1, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);

@@ -1697,13 +1666,12 @@ Object* LoadStubCompiler::CompileLoadCallback(String* name,
JSObject* holder,
AccessorInfo* callback) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
Label miss;

__ ldr(r0, MemOperand(sp, 0));
Failure* failure = Failure::InternalError();
bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1,
callback, name, &miss, &failure);

@@ -1722,14 +1690,12 @@ Object* LoadStubCompiler::CompileLoadConstant(JSObject* object,
Object* value,
String* name) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
Label miss;

__ ldr(r0, MemOperand(sp, 0));

GenerateLoadConstant(object, holder, r0, r3, r1, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::LOAD_IC);

@@ -1743,14 +1709,12 @@ Object* LoadStubCompiler::CompileLoadInterceptor(JSObject* object,
JSObject* holder,
String* name) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
// -- lr : return address
// -- [sp] : receiver
// -----------------------------------
Label miss;

__ ldr(r0, MemOperand(sp, 0));

LookupResult lookup;
LookupPostInterceptor(holder, name, &lookup);
GenerateLoadInterceptor(object,

@@ -1776,10 +1740,9 @@ Object* LoadStubCompiler::CompileLoadGlobal(JSObject* object,
String* name,
bool is_dont_delete) {
// ----------- S t a t e -------------
// -- r0 : receiver
// -- r2 : name
// -- lr : return address
// -- r0 : receiver
// -- sp[0] : receiver
// -----------------------------------
Label miss;

@@ -1825,8 +1788,7 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -- r1 : receiver
// -----------------------------------
Label miss;

@@ -1834,7 +1796,6 @@ Object* KeyedLoadStubCompiler::CompileLoadField(String* name,
__ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);

__ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver.
GenerateLoadField(receiver, holder, r1, r2, r3, index, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);

@@ -1850,8 +1811,7 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -- r1 : receiver
// -----------------------------------
Label miss;

@@ -1860,7 +1820,6 @@ Object* KeyedLoadStubCompiler::CompileLoadCallback(String* name,
__ b(ne, &miss);

Failure* failure = Failure::InternalError();
__ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver.
bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3,
callback, name, &miss, &failure);
if (!success) return failure;

@@ -1879,8 +1838,7 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -- r1 : receiver
// -----------------------------------
Label miss;

@@ -1888,7 +1846,6 @@ Object* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
__ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);

__ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver.
GenerateLoadConstant(receiver, holder, r1, r2, r3, value, name, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);

@@ -1904,8 +1861,7 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -- r1 : receiver
// -----------------------------------
Label miss;

@@ -1915,7 +1871,6 @@ Object* KeyedLoadStubCompiler::CompileLoadInterceptor(JSObject* receiver,
LookupResult lookup;
LookupPostInterceptor(holder, name, &lookup);
__ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver.
GenerateLoadInterceptor(receiver,
holder,
&lookup,

@@ -1936,8 +1891,7 @@ Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -- r1 : receiver
// -----------------------------------
Label miss;

@@ -1945,7 +1899,6 @@ Object* KeyedLoadStubCompiler::CompileLoadArrayLength(String* name) {
__ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);

__ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver.
GenerateLoadArrayLength(masm(), r1, r2, &miss);
__ bind(&miss);
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);

@@ -1958,8 +1911,7 @@ Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -- r1 : receiver
// -----------------------------------
Label miss;
__ IncrementCounter(&Counters::keyed_load_string_length, 1, r1, r3);

@@ -1968,7 +1920,6 @@ Object* KeyedLoadStubCompiler::CompileLoadStringLength(String* name) {
__ cmp(r0, Operand(Handle<String>(name)));
__ b(ne, &miss);

__ ldr(r1, MemOperand(sp, kPointerSize)); // Receiver.
GenerateLoadStringLength(masm(), r1, r2, r3, &miss);
__ bind(&miss);
__ DecrementCounter(&Counters::keyed_load_string_length, 1, r1, r3);

@@ -1984,8 +1935,7 @@ Object* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
// -- sp[0] : key
// -- sp[4] : receiver
// -- r1 : receiver
// -----------------------------------
GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);

@@ -1999,32 +1949,31 @@ Object* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
String* name) {
// ----------- S t a t e -------------
|
||||
// -- r0 : value
|
||||
// -- r2 : name
|
||||
// -- r1 : key
|
||||
// -- r2 : receiver
|
||||
// -- lr : return address
|
||||
// -- [sp] : receiver
|
||||
// -----------------------------------
|
||||
Label miss;
|
||||
|
||||
__ IncrementCounter(&Counters::keyed_store_field, 1, r1, r3);
|
||||
__ IncrementCounter(&Counters::keyed_store_field, 1, r3, r4);
|
||||
|
||||
// Check that the name has not changed.
|
||||
__ cmp(r2, Operand(Handle<String>(name)));
|
||||
__ cmp(r1, Operand(Handle<String>(name)));
|
||||
__ b(ne, &miss);
|
||||
|
||||
// Load receiver from the stack.
|
||||
__ ldr(r3, MemOperand(sp));
|
||||
// r1 is used as scratch register, r3 and r2 might be clobbered.
|
||||
// r3 is used as scratch register. r1 and r2 keep their values if a jump to
|
||||
// the miss label is generated.
|
||||
GenerateStoreField(masm(),
|
||||
object,
|
||||
index,
|
||||
transition,
|
||||
r3, r2, r1,
|
||||
r2, r1, r3,
|
||||
&miss);
|
||||
__ bind(&miss);
|
||||
|
||||
__ DecrementCounter(&Counters::keyed_store_field, 1, r1, r3);
|
||||
__ mov(r2, Operand(Handle<String>(name))); // restore name register.
|
||||
__ DecrementCounter(&Counters::keyed_store_field, 1, r3, r4);
|
||||
Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
|
||||
|
||||
__ Jump(ic, RelocInfo::CODE_TARGET);
|
||||
|
||||
// Return the generated code.
|
||||
|
@ -2085,7 +2034,7 @@ Object* ConstructStubCompiler::CompileConstructStub(
|
|||
r5,
|
||||
r6,
|
||||
&generic_stub_call,
|
||||
NO_ALLOCATION_FLAGS);
|
||||
SIZE_IN_WORDS);
|
||||
|
||||
// Allocated the JSObject, now initialize the fields. Map is set to initial
|
||||
// map and properties and elements are set to empty fixed array.
|
||||
|
@ -2178,3 +2127,5 @@ Object* ConstructStubCompiler::CompileConstructStub(
|
|||
#undef __
|
||||
|
||||
} } // namespace v8::internal
|
||||
|
||||
#endif // V8_TARGET_ARCH_ARM
|
|
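The hunks above capture a calling-convention change in the ARM load and keyed-load stubs: operands that used to be reloaded from the stack (the [sp], sp[0] and sp[4] slots in the state comments) are now passed in registers, r0 for the key and r1 for the receiver. As an orientation aid — a hedged sketch with illustrative names, not code from this diff — the old stack slots were addressed like this, assuming the 32-bit ARM pointer size of 4 bytes:

    // Sketch only: illustrative stand-in for the old stack-slot layout.
    #include <cassert>

    struct Slot { const char* base; int offset; };  // base register + byte offset

    int main() {
      const int kPointerSize = 4;                   // 32-bit ARM assumption
      Slot key      = {"sp", 0 * kPointerSize};     // sp[0] : key
      Slot receiver = {"sp", 1 * kPointerSize};     // sp[4] : receiver
      assert(key.offset == 0 && receiver.offset == 4);
      return 0;
    }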
@ -25,27 +25,29 @@
|
|||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
function get(){return x}
|
||||
function set(x){this.x=x};
|
||||
#ifndef V8_VIRTUAL_FRAME_ARM_INL_H_
|
||||
#define V8_VIRTUAL_FRAME_ARM_INL_H_
|
||||
|
||||
var obj = {x:1};
|
||||
obj.__defineGetter__("accessor", get);
|
||||
obj.__defineSetter__("accessor", set);
|
||||
#include "assembler-arm.h"
|
||||
#include "virtual-frame-arm.h"
|
||||
|
||||
namespace v8 {
|
||||
namespace internal {
|
||||
|
||||
var descIsData = Object.getOwnPropertyDescriptor(obj,'x');
|
||||
assertTrue(descIsData.enumerable);
|
||||
assertTrue(descIsData.writable);
|
||||
assertTrue(descIsData.configurable);
|
||||
// These VirtualFrame methods should actually be in a virtual-frame-arm-inl.h
|
||||
// file if such a thing existed.
|
||||
MemOperand VirtualFrame::ParameterAt(int index) {
|
||||
// Index -1 corresponds to the receiver.
|
||||
ASSERT(-1 <= index); // -1 is the receiver.
|
||||
ASSERT(index <= parameter_count());
|
||||
return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
|
||||
}
|
||||
|
||||
var descIsAccessor = Object.getOwnPropertyDescriptor(obj, 'accessor');
|
||||
assertTrue(descIsAccessor.enumerable);
|
||||
assertTrue(descIsAccessor.configurable);
|
||||
assertTrue(descIsAccessor.get == get);
|
||||
assertTrue(descIsAccessor.set == set);
|
||||
// The receiver frame slot.
|
||||
MemOperand VirtualFrame::Receiver() {
|
||||
return ParameterAt(-1);
|
||||
}
|
||||
|
||||
var descIsNotData = Object.getOwnPropertyDescriptor(obj, 'not-x');
|
||||
assertTrue(descIsNotData == undefined);
|
||||
} } // namespace v8::internal
|
||||
|
||||
var descIsNotAccessor = Object.getOwnPropertyDescriptor(obj, 'not-accessor');
|
||||
assertTrue(descIsNotAccessor == undefined);
|
||||
#endif // V8_VIRTUAL_FRAME_ARM_INL_H_
|
|
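ParameterAt() above turns a parameter index into an fp-relative word offset, with index -1 standing for the receiver. A small self-check of that formula — a sketch under the assumption of two parameters and a 4-byte pointer, not part of the diff:

    #include <cassert>

    int main() {
      const int kPointerSize = 4;      // 32-bit ARM assumption
      const int parameter_count = 2;   // hypothetical function arity
      // offset(index) = (1 + parameter_count - index) * kPointerSize
      assert((1 + parameter_count - (-1)) * kPointerSize == 16);  // receiver
      assert((1 + parameter_count - 0) * kPointerSize == 12);     // parameter 0
      assert((1 + parameter_count - 1) * kPointerSize == 8);      // parameter 1
      return 0;
    }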
@@ -27,6 +27,8 @@
#include "v8.h"

#if defined(V8_TARGET_ARCH_ARM)

#include "codegen-inl.h"
#include "register-allocator-inl.h"
#include "scopes.h"

@@ -38,10 +40,8 @@ namespace internal {
#define __ ACCESS_MASM(masm())

void VirtualFrame::PopToR1R0() {
  VirtualFrame where_to_go = *this;
  // Shuffle things around so the top of stack is in r0 and r1.
  where_to_go.top_of_stack_state_ = R0_R1_TOS;
  MergeTo(&where_to_go);
  MergeTOSTo(R0_R1_TOS);
  // Pop the two registers off the stack so they are detached from the frame.
  element_count_ -= 2;
  top_of_stack_state_ = NO_TOS_REGISTERS;

@@ -49,10 +49,8 @@ void VirtualFrame::PopToR1R0() {


void VirtualFrame::PopToR1() {
  VirtualFrame where_to_go = *this;
  // Shuffle things around so the top of stack is only in r1.
  where_to_go.top_of_stack_state_ = R1_TOS;
  MergeTo(&where_to_go);
  MergeTOSTo(R1_TOS);
  // Pop the register off the stack so it is detached from the frame.
  element_count_ -= 1;
  top_of_stack_state_ = NO_TOS_REGISTERS;

@@ -60,93 +58,98 @@ void VirtualFrame::PopToR1() {


void VirtualFrame::PopToR0() {
  VirtualFrame where_to_go = *this;
  // Shuffle things around so the top of stack only in r0.
  where_to_go.top_of_stack_state_ = R0_TOS;
  MergeTo(&where_to_go);
  MergeTOSTo(R0_TOS);
  // Pop the register off the stack so it is detached from the frame.
  element_count_ -= 1;
  top_of_stack_state_ = NO_TOS_REGISTERS;
}


void VirtualFrame::MergeTo(VirtualFrame* expected) {
void VirtualFrame::MergeTo(const VirtualFrame* expected, Condition cond) {
  if (Equals(expected)) return;
  MergeTOSTo(expected->top_of_stack_state_, cond);
  ASSERT(register_allocation_map_ == expected->register_allocation_map_);
}


void VirtualFrame::MergeTOSTo(
    VirtualFrame::TopOfStack expected_top_of_stack_state, Condition cond) {
#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))
  switch (CASE_NUMBER(top_of_stack_state_, expected->top_of_stack_state_)) {
  switch (CASE_NUMBER(top_of_stack_state_, expected_top_of_stack_state)) {
    case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS):
      break;
    case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS):
      __ pop(r0);
      __ pop(r0, cond);
      break;
    case CASE_NUMBER(NO_TOS_REGISTERS, R1_TOS):
      __ pop(r1);
      __ pop(r1, cond);
      break;
    case CASE_NUMBER(NO_TOS_REGISTERS, R0_R1_TOS):
      __ pop(r0);
      __ pop(r1);
      __ pop(r0, cond);
      __ pop(r1, cond);
      break;
    case CASE_NUMBER(NO_TOS_REGISTERS, R1_R0_TOS):
      __ pop(r1);
      __ pop(r0);
      __ pop(r1, cond);
      __ pop(r0, cond);
      break;
    case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS):
      __ push(r0);
      __ push(r0, cond);
      break;
    case CASE_NUMBER(R0_TOS, R0_TOS):
      break;
    case CASE_NUMBER(R0_TOS, R1_TOS):
      __ mov(r1, r0);
      __ mov(r1, r0, LeaveCC, cond);
      break;
    case CASE_NUMBER(R0_TOS, R0_R1_TOS):
      __ pop(r1);
      __ pop(r1, cond);
      break;
    case CASE_NUMBER(R0_TOS, R1_R0_TOS):
      __ mov(r1, r0);
      __ pop(r0);
      __ mov(r1, r0, LeaveCC, cond);
      __ pop(r0, cond);
      break;
    case CASE_NUMBER(R1_TOS, NO_TOS_REGISTERS):
      __ push(r1);
      __ push(r1, cond);
      break;
    case CASE_NUMBER(R1_TOS, R0_TOS):
      __ mov(r0, r1);
      __ mov(r0, r1, LeaveCC, cond);
      break;
    case CASE_NUMBER(R1_TOS, R1_TOS):
      break;
    case CASE_NUMBER(R1_TOS, R0_R1_TOS):
      __ mov(r0, r1);
      __ pop(r1);
      __ mov(r0, r1, LeaveCC, cond);
      __ pop(r1, cond);
      break;
    case CASE_NUMBER(R1_TOS, R1_R0_TOS):
      __ pop(r0);
      __ pop(r0, cond);
      break;
    case CASE_NUMBER(R0_R1_TOS, NO_TOS_REGISTERS):
      __ Push(r1, r0);
      __ Push(r1, r0, cond);
      break;
    case CASE_NUMBER(R0_R1_TOS, R0_TOS):
      __ push(r1);
      __ push(r1, cond);
      break;
    case CASE_NUMBER(R0_R1_TOS, R1_TOS):
      __ push(r1);
      __ mov(r1, r0);
      __ push(r1, cond);
      __ mov(r1, r0, LeaveCC, cond);
      break;
    case CASE_NUMBER(R0_R1_TOS, R0_R1_TOS):
      break;
    case CASE_NUMBER(R0_R1_TOS, R1_R0_TOS):
      __ Swap(r0, r1, ip);
      __ Swap(r0, r1, ip, cond);
      break;
    case CASE_NUMBER(R1_R0_TOS, NO_TOS_REGISTERS):
      __ Push(r0, r1);
      __ Push(r0, r1, cond);
      break;
    case CASE_NUMBER(R1_R0_TOS, R0_TOS):
      __ push(r0);
      __ mov(r0, r1);
      __ push(r0, cond);
      __ mov(r0, r1, LeaveCC, cond);
      break;
    case CASE_NUMBER(R1_R0_TOS, R1_TOS):
      __ push(r0);
      __ push(r0, cond);
      break;
    case CASE_NUMBER(R1_R0_TOS, R0_R1_TOS):
      __ Swap(r0, r1, ip);
      __ Swap(r0, r1, ip, cond);
      break;
    case CASE_NUMBER(R1_R0_TOS, R1_R0_TOS):
      break;

@@ -154,7 +157,16 @@ void VirtualFrame::MergeTo(VirtualFrame* expected) {
      UNREACHABLE();
#undef CASE_NUMBER
  }
  ASSERT(register_allocation_map_ == expected->register_allocation_map_);
  // A conditional merge will be followed by a conditional branch and the
  // fall-through code will have an unchanged virtual frame state. If the
  // merge is unconditional ('al'ways) then it might be followed by a fall
  // through. We need to update the virtual frame state to match the code we
  // are falling into. The final case is an unconditional merge followed by an
  // unconditional branch, in which case it doesn't matter what we do to the
  // virtual frame state, because the virtual frame will be invalidated.
  if (cond == al) {
    top_of_stack_state_ = expected_top_of_stack_state;
  }
}
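MergeTOSTo() above folds the pair (current TOS state, expected TOS state) into a single switch key with CASE_NUMBER(a, b) = a * TOS_STATES + b, and only commits the new state when the merge is unconditional (cond == al). A hedged sketch of the encoding; the enum values are assumed here, the real ones live in virtual-frame-arm.h:

    #include <cstdio>

    // Assumed ordering; only uniqueness of the keys matters.
    enum TopOfStack { NO_TOS_REGISTERS, R0_TOS, R1_TOS, R0_R1_TOS, R1_R0_TOS,
                      TOS_STATES };

    // Encode a (current, expected) pair into one switch key.
    int CaseNumber(TopOfStack a, TopOfStack b) { return a * TOS_STATES + b; }

    int main() {
      // The multiplier TOS_STATES keeps every pair distinct, so a single
      // switch statement can cover the whole state-transition matrix.
      printf("%d\n", CaseNumber(R0_TOS, R1_TOS));   // 7
      printf("%d\n", CaseNumber(R1_TOS, R0_TOS));   // 11
      return 0;
    }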
@@ -255,7 +267,8 @@ void VirtualFrame::PushTryHandler(HandlerType type) {

void VirtualFrame::CallJSFunction(int arg_count) {
  // InvokeFunction requires function in r1.
  EmitPop(r1);
  PopToR1();
  SpillAll();

  // +1 for receiver.
  Forget(arg_count + 1);

@@ -268,7 +281,7 @@ void VirtualFrame::CallJSFunction(int arg_count) {


void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
  ASSERT(SpilledScope::is_spilled());
  SpillAll();
  Forget(arg_count);
  ASSERT(cgen()->HasValidEntryRegisters());
  __ CallRuntime(f, arg_count);

@@ -276,6 +289,7 @@ void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {


void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
  SpillAll();
  Forget(arg_count);
  ASSERT(cgen()->HasValidEntryRegisters());
  __ CallRuntime(id, arg_count);

@@ -300,7 +314,8 @@ void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,

void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
  Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
  SpillAllButCopyTOSToR0();
  PopToR0();
  SpillAll();
  __ mov(r2, Operand(name));
  CallCodeObject(ic, mode, 0);
}

@@ -323,14 +338,17 @@ void VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {

void VirtualFrame::CallKeyedLoadIC() {
  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
  SpillAllButCopyTOSToR0();
  PopToR1R0();
  SpillAll();
  CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
}


void VirtualFrame::CallKeyedStoreIC() {
  ASSERT(SpilledScope::is_spilled());
  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
  PopToR1R0();
  SpillAll();
  EmitPop(r2);
  CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
}


@@ -417,7 +435,7 @@ void VirtualFrame::Pop() {


void VirtualFrame::EmitPop(Register reg) {
  ASSERT(!is_used(reg));
  ASSERT(!is_used(RegisterAllocator::ToNumber(reg)));
  if (top_of_stack_state_ == NO_TOS_REGISTERS) {
    __ pop(reg);
  } else {
@@ -497,42 +515,88 @@ Register VirtualFrame::Peek() {


void VirtualFrame::Dup() {
  AssertIsNotSpilled();
  switch (top_of_stack_state_) {
    case NO_TOS_REGISTERS:
      __ ldr(r0, MemOperand(sp, 0));
      top_of_stack_state_ = R0_TOS;
      break;
    case R0_TOS:
      __ mov(r1, r0);
      top_of_stack_state_ = R0_R1_TOS;
      break;
    case R1_TOS:
      __ mov(r0, r1);
      top_of_stack_state_ = R0_R1_TOS;
      break;
    case R0_R1_TOS:
      __ push(r1);
      __ mov(r1, r0);
      // No need to change state as r0 and r1 now contain the same value.
      break;
    case R1_R0_TOS:
      __ push(r0);
      __ mov(r0, r1);
      // No need to change state as r0 and r1 now contain the same value.
      break;
    default:
      UNREACHABLE();
  if (SpilledScope::is_spilled()) {
    __ ldr(ip, MemOperand(sp, 0));
    __ push(ip);
  } else {
    switch (top_of_stack_state_) {
      case NO_TOS_REGISTERS:
        __ ldr(r0, MemOperand(sp, 0));
        top_of_stack_state_ = R0_TOS;
        break;
      case R0_TOS:
        __ mov(r1, r0);
        // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
        top_of_stack_state_ = R0_R1_TOS;
        break;
      case R1_TOS:
        __ mov(r0, r1);
        // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
        top_of_stack_state_ = R0_R1_TOS;
        break;
      case R0_R1_TOS:
        __ push(r1);
        __ mov(r1, r0);
        // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
        top_of_stack_state_ = R0_R1_TOS;
        break;
      case R1_R0_TOS:
        __ push(r0);
        __ mov(r0, r1);
        // r0 and r1 contain the same value. Prefer state with r0 holding TOS.
        top_of_stack_state_ = R0_R1_TOS;
        break;
      default:
        UNREACHABLE();
    }
  }
  element_count_++;
}


void VirtualFrame::Dup2() {
  if (SpilledScope::is_spilled()) {
    __ ldr(ip, MemOperand(sp, kPointerSize));
    __ push(ip);
    __ ldr(ip, MemOperand(sp, kPointerSize));
    __ push(ip);
  } else {
    switch (top_of_stack_state_) {
      case NO_TOS_REGISTERS:
        __ ldr(r0, MemOperand(sp, 0));
        __ ldr(r1, MemOperand(sp, kPointerSize));
        top_of_stack_state_ = R0_R1_TOS;
        break;
      case R0_TOS:
        __ push(r0);
        __ ldr(r1, MemOperand(sp, kPointerSize));
        top_of_stack_state_ = R0_R1_TOS;
        break;
      case R1_TOS:
        __ push(r1);
        __ ldr(r0, MemOperand(sp, kPointerSize));
        top_of_stack_state_ = R1_R0_TOS;
        break;
      case R0_R1_TOS:
        __ Push(r1, r0);
        top_of_stack_state_ = R0_R1_TOS;
        break;
      case R1_R0_TOS:
        __ Push(r0, r1);
        top_of_stack_state_ = R1_R0_TOS;
        break;
      default:
        UNREACHABLE();
    }
  }
  element_count_ += 2;
}


Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
  ASSERT(but_not_to_this_one.is(r0) ||
         but_not_to_this_one.is(r1) ||
         but_not_to_this_one.is(no_reg));
  AssertIsNotSpilled();
  element_count_--;
  if (top_of_stack_state_ == NO_TOS_REGISTERS) {
    if (but_not_to_this_one.is(r0)) {

@@ -563,7 +627,17 @@ void VirtualFrame::EnsureOneFreeTOSRegister() {

void VirtualFrame::EmitPush(Register reg) {
  element_count_++;
  if (reg.is(cp)) {
    // If we are pushing cp then we are about to make a call and things have to
    // be pushed to the physical stack. There's nothing to be gained by moving
    // to a TOS register and then pushing that, we might as well push to the
    // physical stack immediately.
    MergeTOSTo(NO_TOS_REGISTERS);
    __ push(reg);
    return;
  }
  if (SpilledScope::is_spilled()) {
    ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
    __ push(reg);
    return;
  }

@@ -584,6 +658,39 @@ void VirtualFrame::EmitPush(Register reg) {
}


void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
  if (this_far_down == 0) {
    Pop();
    Register dest = GetTOSRegister();
    if (dest.is(reg)) {
      // We already popped one item off the top of the stack. If the only
      // free register is the one we were asked to push then we have been
      // asked to push a register that was already in use, which cannot
      // happen. It therefore follows that there are two free TOS registers:
      ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
      dest = dest.is(r0) ? r1 : r0;
    }
    __ mov(dest, reg);
    EmitPush(dest);
  } else if (this_far_down == 1) {
    int virtual_elements = kVirtualElements[top_of_stack_state_];
    if (virtual_elements < 2) {
      __ str(reg, ElementAt(this_far_down));
    } else {
      ASSERT(virtual_elements == 2);
      ASSERT(!reg.is(r0));
      ASSERT(!reg.is(r1));
      Register dest = kBottomRegister[top_of_stack_state_];
      __ mov(dest, reg);
    }
  } else {
    ASSERT(this_far_down >= 2);
    ASSERT(kVirtualElements[top_of_stack_state_] <= 2);
    __ str(reg, ElementAt(this_far_down));
  }
}


Register VirtualFrame::GetTOSRegister() {
  if (SpilledScope::is_spilled()) return r0;


@@ -666,3 +773,5 @@ void VirtualFrame::SpillAll() {
#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM
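Dup() and Dup2() above maintain the frame's register cache explicitly: every transition grows element_count_ and, in the unspilled case, normalizes toward R0_R1_TOS so that r0 holds the top of stack. A minimal model of that bookkeeping — an assumption-laden sketch, not V8 code:

    // Toy model of the Dup() bookkeeping above.
    #include <cassert>

    enum TopOfStack { NO_TOS_REGISTERS, R0_TOS, R1_TOS, R0_R1_TOS, R1_R0_TOS };

    struct FrameModel {
      int element_count = 0;
      TopOfStack state = NO_TOS_REGISTERS;

      void Dup() {
        // From memory the top goes into r0; from any register state the
        // duplicate ends up with r0 holding TOS (matching the diff above).
        state = (state == NO_TOS_REGISTERS) ? R0_TOS : R0_R1_TOS;
        element_count++;  // the virtual frame is one element taller
      }
    };

    int main() {
      FrameModel f;
      f.Dup();  assert(f.state == R0_TOS && f.element_count == 1);
      f.Dup();  assert(f.state == R0_R1_TOS && f.element_count == 2);
      return 0;
    }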
@@ -29,11 +29,14 @@
#define V8_ARM_VIRTUAL_FRAME_ARM_H_

#include "register-allocator.h"
#include "scopes.h"

namespace v8 {
namespace internal {

// This dummy class is only used to create invalid virtual frames.
extern class InvalidVirtualFrameInitializer {}* kInvalidVirtualFrameInitializer;


// -------------------------------------------------------------------------
// Virtual frames
//

@@ -82,26 +85,8 @@ class VirtualFrame : public ZoneObject {
  // is not spilled, ie. where register allocation occurs. Eventually
  // when RegisterAllocationScope is ubiquitous it can be removed
  // along with the (by then unused) SpilledScope class.
  explicit RegisterAllocationScope(CodeGenerator* cgen)
      : cgen_(cgen),
        old_is_spilled_(SpilledScope::is_spilled_) {
    SpilledScope::is_spilled_ = false;
    if (old_is_spilled_) {
      VirtualFrame* frame = cgen->frame();
      if (frame != NULL) {
        frame->AssertIsSpilled();
      }
    }
  }
  ~RegisterAllocationScope() {
    SpilledScope::is_spilled_ = old_is_spilled_;
    if (old_is_spilled_) {
      VirtualFrame* frame = cgen_->frame();
      if (frame != NULL) {
        frame->SpillAll();
      }
    }
  }
  inline explicit RegisterAllocationScope(CodeGenerator* cgen);
  inline ~RegisterAllocationScope();

 private:
  CodeGenerator* cgen_;

@@ -116,19 +101,20 @@ class VirtualFrame : public ZoneObject {
  // Construct an initial virtual frame on entry to a JS function.
  inline VirtualFrame();

  // Construct an invalid virtual frame, used by JumpTargets.
  inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);

  // Construct a virtual frame as a clone of an existing one.
  explicit inline VirtualFrame(VirtualFrame* original);

  CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
  MacroAssembler* masm() { return cgen()->masm(); }
  inline CodeGenerator* cgen() const;
  inline MacroAssembler* masm();

  // The number of elements on the virtual frame.
  int element_count() { return element_count_; }
  int element_count() const { return element_count_; }

  // The height of the virtual expression stack.
  int height() {
    return element_count() - expression_base_index();
  }
  inline int height() const;

  bool is_used(int num) {
    switch (num) {

@@ -160,10 +146,6 @@ class VirtualFrame : public ZoneObject {
    }
  }

  bool is_used(Register reg) {
    return is_used(RegisterAllocator::ToNumber(reg));
  }

  // Add extra in-memory elements to the top of the frame to match an actual
  // frame (eg, the frame after an exception handler is pushed). No code is
  // emitted.

@@ -180,7 +162,7 @@ class VirtualFrame : public ZoneObject {
  // Spill all values from the frame to memory.
  void SpillAll();

  void AssertIsSpilled() {
  void AssertIsSpilled() const {
    ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
    ASSERT(register_allocation_map_ == 0);
  }

@@ -202,7 +184,7 @@ class VirtualFrame : public ZoneObject {
  // Make this virtual frame have a state identical to an expected virtual
  // frame. As a side effect, code may be emitted to make this frame match
  // the expected one.
  void MergeTo(VirtualFrame* expected);
  void MergeTo(const VirtualFrame* expected, Condition cond = al);

  // Detach a frame from its code generator, perhaps temporarily. This
  // tells the register allocator that it is free to use frame-internal

@@ -247,16 +229,13 @@ class VirtualFrame : public ZoneObject {

  // An element of the expression stack as an assembly operand.
  MemOperand ElementAt(int index) {
    AssertIsSpilled();
    return MemOperand(sp, index * kPointerSize);
    int adjusted_index = index - kVirtualElements[top_of_stack_state_];
    ASSERT(adjusted_index >= 0);
    return MemOperand(sp, adjusted_index * kPointerSize);
  }

  // A frame-allocated local as an assembly operand.
  MemOperand LocalAt(int index) {
    ASSERT(0 <= index);
    ASSERT(index < local_count());
    return MemOperand(fp, kLocal0Offset - index * kPointerSize);
  }
  inline MemOperand LocalAt(int index);

  // Push the address of the receiver slot on the frame.
  void PushReceiverSlotAddress();

@@ -268,26 +247,17 @@ class VirtualFrame : public ZoneObject {
  MemOperand Context() { return MemOperand(fp, kContextOffset); }

  // A parameter as an assembly operand.
  MemOperand ParameterAt(int index) {
    // Index -1 corresponds to the receiver.
    ASSERT(-1 <= index);  // -1 is the receiver.
    ASSERT(index <= parameter_count());
    return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
  }
  inline MemOperand ParameterAt(int index);

  // The receiver frame slot.
  MemOperand Receiver() { return ParameterAt(-1); }
  inline MemOperand Receiver();

  // Push a try-catch or try-finally handler on top of the virtual frame.
  void PushTryHandler(HandlerType type);

  // Call stub given the number of arguments it expects on (and
  // removes from) the stack.
  void CallStub(CodeStub* stub, int arg_count) {
    if (arg_count != 0) Forget(arg_count);
    ASSERT(cgen()->HasValidEntryRegisters());
    masm()->CallStub(stub);
  }
  inline void CallStub(CodeStub* stub, int arg_count);

  // Call JS function from top of the stack with arguments
  // taken from the stack.

@@ -308,7 +278,8 @@ class VirtualFrame : public ZoneObject {
                    InvokeJSFlags flag,
                    int arg_count);

  // Call load IC. Receiver is on the stack. Result is returned in r0.
  // Call load IC. Receiver is on the stack and is consumed. Result is returned
  // in r0.
  void CallLoadIC(Handle<String> name, RelocInfo::Mode mode);

  // Call store IC. If the load is contextual, value is found on top of the

@@ -316,12 +287,12 @@ class VirtualFrame : public ZoneObject {
  // Result is returned in r0.
  void CallStoreIC(Handle<String> name, bool is_contextual);

  // Call keyed load IC. Key and receiver are on the stack. Result is returned
  // in r0.
  // Call keyed load IC. Key and receiver are on the stack. Both are consumed.
  // Result is returned in r0.
  void CallKeyedLoadIC();

  // Call keyed store IC. Key and receiver are on the stack and the value is in
  // r0. Result is returned in r0.
  // Call keyed store IC. Value, key and receiver are on the stack. All three
  // are consumed. Result is returned in r0.
  void CallKeyedStoreIC();

  // Call into an IC stub given the number of arguments it removes

@@ -355,6 +326,9 @@ class VirtualFrame : public ZoneObject {
  // Duplicate the top of stack.
  void Dup();

  // Duplicate the two elements on top of stack.
  void Dup2();

  // Flushes all registers, but it puts a copy of the top-of-stack in r0.
  void SpillAllButCopyTOSToR0();


@@ -383,6 +357,12 @@ class VirtualFrame : public ZoneObject {
  void EmitPush(MemOperand operand);
  void EmitPushRoot(Heap::RootListIndex index);

  // Overwrite the nth thing on the stack. If the nth position is in a
  // register then this turns into a mov, otherwise an str. Afterwards
  // you can still use the register even if it is a register that can be
  // used for TOS (r0 or r1).
  void SetElementAt(Register reg, int this_far_down);

  // Get a register which is free and which must be immediately used to
  // push on the top of the stack.
  Register GetTOSRegister();

@@ -446,13 +426,13 @@ class VirtualFrame : public ZoneObject {
  int stack_pointer() { return element_count_ - 1; }

  // The number of frame-allocated locals and parameters respectively.
  int parameter_count() { return cgen()->scope()->num_parameters(); }
  int local_count() { return cgen()->scope()->num_stack_slots(); }
  inline int parameter_count() const;
  inline int local_count() const;

  // The index of the element that is at the processor's frame pointer
  // (the fp register). The parameters, receiver, function, and context
  // are below the frame pointer.
  int frame_pointer() { return parameter_count() + 3; }
  inline int frame_pointer() const;

  // The index of the first parameter. The receiver lies below the first
  // parameter.

@@ -460,26 +440,22 @@ class VirtualFrame : public ZoneObject {

  // The index of the context slot in the frame. It is immediately
  // below the frame pointer.
  int context_index() { return frame_pointer() - 1; }
  inline int context_index();

  // The index of the function slot in the frame. It is below the frame
  // pointer and context slot.
  int function_index() { return frame_pointer() - 2; }
  inline int function_index();

  // The index of the first local. Between the frame pointer and the
  // locals lies the return address.
  int local0_index() { return frame_pointer() + 2; }
  inline int local0_index() const;

  // The index of the base of the expression stack.
  int expression_base_index() { return local0_index() + local_count(); }
  inline int expression_base_index() const;

  // Convert a frame index into a frame pointer relative offset into the
  // actual stack.
  int fp_relative(int index) {
    ASSERT(index < element_count());
    ASSERT(frame_pointer() < element_count());  // FP is on the frame.
    return (frame_pointer() - index) * kPointerSize;
  }
  inline int fp_relative(int index);

  // Spill all elements in registers. Spill the top spilled_args elements
  // on the frame. Sync all other frame elements.

@@ -491,10 +467,13 @@ class VirtualFrame : public ZoneObject {
  // onto the physical stack and made free.
  void EnsureOneFreeTOSRegister();

  inline bool Equals(VirtualFrame* other);
  // Emit instructions to get the top of stack state from where we are to where
  // we want to be.
  void MergeTOSTo(TopOfStack expected_state, Condition cond = al);

  inline bool Equals(const VirtualFrame* other);

  friend class JumpTarget;
  friend class DeferredCode;
};
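The index arithmetic in this header pins down the ARM frame layout: parameters, receiver, function and context sit below the frame pointer, so frame_pointer() lands at index parameter_count() + 3, and fp_relative() converts a frame index into a byte offset from fp. A worked check under assumed counts (two parameters, three locals) — a sketch, not part of the diff:

    #include <cassert>

    int main() {
      const int kPointerSize = 4;     // 32-bit ARM assumption
      const int parameter_count = 2;
      const int local_count = 3;

      const int frame_pointer = parameter_count + 3;            // index 5
      assert(frame_pointer - 1 == 4);                           // context slot
      assert(frame_pointer - 2 == 3);                           // function slot
      const int local0_index = frame_pointer + 2;               // return address between
      const int expression_base_index = local0_index + local_count;
      assert(local0_index == 7 && expression_base_index == 10);

      // fp_relative(index) = (frame_pointer - index) * kPointerSize:
      // locals and expression-stack slots come out at negative fp offsets.
      assert((frame_pointer - local0_index) * kPointerSize == -8);
      return 0;
    }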
@@ -424,8 +424,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
      return "no reloc";
    case RelocInfo::EMBEDDED_OBJECT:
      return "embedded object";
    case RelocInfo::EMBEDDED_STRING:
      return "embedded string";
    case RelocInfo::CONSTRUCT_CALL:
      return "code target (js construct call)";
    case RelocInfo::CODE_TARGET_CONTEXT:

@@ -508,7 +506,6 @@ void RelocInfo::Verify() {
      ASSERT(code->address() == HeapObject::cast(found)->address());
      break;
    }
    case RelocInfo::EMBEDDED_STRING:
    case RUNTIME_ENTRY:
    case JS_RETURN:
    case COMMENT:

@@ -670,16 +667,6 @@ ExternalReference ExternalReference::scheduled_exception_address() {
}


ExternalReference ExternalReference::compile_array_pop_call() {
  return ExternalReference(FUNCTION_ADDR(CompileArrayPopCall));
}


ExternalReference ExternalReference::compile_array_push_call() {
  return ExternalReference(FUNCTION_ADDR(CompileArrayPushCall));
}


#ifndef V8_INTERPRETED_REGEXP

ExternalReference ExternalReference::re_check_stack_guard_state() {

@@ -38,6 +38,7 @@
#include "runtime.h"
#include "top.h"
#include "token.h"
#include "objects.h"

namespace v8 {
namespace internal {

@@ -121,7 +122,6 @@ class RelocInfo BASE_EMBEDDED {
    DEBUG_BREAK,
    CODE_TARGET,  // code target which is not any of the above.
    EMBEDDED_OBJECT,
    EMBEDDED_STRING,

    // Everything after runtime_entry (inclusive) is not GC'ed.
    RUNTIME_ENTRY,

@@ -137,7 +137,7 @@ class RelocInfo BASE_EMBEDDED {
    NUMBER_OF_MODES,  // must be no greater than 14 - see RelocInfoWriter
    NONE,  // never recorded
    LAST_CODE_ENUM = CODE_TARGET,
    LAST_GCED_ENUM = EMBEDDED_STRING
    LAST_GCED_ENUM = EMBEDDED_OBJECT
  };

@@ -185,6 +185,11 @@ class RelocInfo BASE_EMBEDDED {
  // Apply a relocation by delta bytes
  INLINE(void apply(intptr_t delta));

  // Is the pointer this relocation info refers to coded like a plain pointer
  // or is it strange in some way (eg relative or patched into a series of
  // instructions).
  bool IsCodedSpecially();

  // Read/modify the code target in the branch/call instruction
  // this relocation applies to;
  // can only be called if IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY

@@ -195,9 +200,23 @@ class RelocInfo BASE_EMBEDDED {
  INLINE(Object** target_object_address());
  INLINE(void set_target_object(Object* target));

  // Read the address of the word containing the target_address. Can only
  // be called if IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY.
  // Read the address of the word containing the target_address in an
  // instruction stream. What this means exactly is architecture-dependent.
  // The only architecture-independent user of this function is the serializer.
  // The serializer uses it to find out how many raw bytes of instruction to
  // output before the next target. Architecture-independent code shouldn't
  // dereference the pointer it gets back from this.
  INLINE(Address target_address_address());
  // This indicates how much space a target takes up when deserializing a code
  // stream. For most architectures this is just the size of a pointer. For
  // an instruction like movw/movt where the target bits are mixed into the
  // instruction bits the size of the target will be zero, indicating that the
  // serializer should not step forwards in memory after a target is resolved
  // and written. In this case the target_address_address function above
  // should return the end of the instructions to be patched, allowing the
  // deserializer to deserialize the instructions as raw bytes and put them in
  // place, ready to be patched with the target.
  INLINE(int target_address_size());

  // Read/modify the reference in the instruction this relocation
  // applies to; can only be called if rmode_ is external_reference

@@ -212,6 +231,8 @@ class RelocInfo BASE_EMBEDDED {
  INLINE(Object** call_object_address());
  INLINE(void set_call_object(Object* target));

  inline void Visit(ObjectVisitor* v);

  // Patch the code with some other code.
  void PatchCode(byte* instructions, int instruction_count);


@@ -444,9 +465,6 @@ class ExternalReference BASE_EMBEDDED {

  static ExternalReference scheduled_exception_address();

  static ExternalReference compile_array_pop_call();
  static ExternalReference compile_array_push_call();

  Address address() const {return reinterpret_cast<Address>(address_);}

#ifdef ENABLE_DEBUGGER_SUPPORT
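The new comments above spell out a serializer contract: target_address_address() says where in the instruction stream a target is encoded, and target_address_size() says how many bytes to skip once the target is written — a pointer size for literal-word targets, zero for movw/movt-style targets whose bits are patched into the instructions. A rough sketch of how a serializer might consume the pair; the helper below is hypothetical, not V8's serializer:

    #include <cstring>

    // Hypothetical sketch: copy raw instruction bytes up to the target
    // slot, then advance by target_address_size (0 means the target is
    // patched in place and the raw copy already covered it).
    void EmitUpToTarget(const unsigned char* code,
                        const unsigned char* target_slot,
                        int target_address_size,
                        unsigned char* out, int* out_pos) {
      int raw_bytes = static_cast<int>(target_slot - code);
      memcpy(out + *out_pos, code, raw_bytes);  // raw instruction stream
      *out_pos += raw_bytes;
      // ...the target itself would be serialized by reference here...
      *out_pos += target_address_size;
    }

    int main() {
      unsigned char code[8] = {0}, out[8] = {0};
      int pos = 0;
      EmitUpToTarget(code, code + 4, 4, out, &pos);  // literal-word target
      return pos == 8 ? 0 : 1;
    }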
79  ext/v8/upstream/2.2.14/src/ast-inl.h  Normal file
@@ -0,0 +1,79 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "ast.h"

namespace v8 {
namespace internal {

BreakableStatement::BreakableStatement(ZoneStringList* labels, Type type)
    : labels_(labels), type_(type) {
  ASSERT(labels == NULL || labels->length() > 0);
}


SwitchStatement::SwitchStatement(ZoneStringList* labels)
    : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
      tag_(NULL), cases_(NULL) {
}


IterationStatement::IterationStatement(ZoneStringList* labels)
    : BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL) {
}


Block::Block(ZoneStringList* labels, int capacity, bool is_initializer_block)
    : BreakableStatement(labels, TARGET_FOR_NAMED_ONLY),
      statements_(capacity),
      is_initializer_block_(is_initializer_block) {
}


ForStatement::ForStatement(ZoneStringList* labels)
    : IterationStatement(labels),
      init_(NULL),
      cond_(NULL),
      next_(NULL),
      may_have_function_literal_(true),
      loop_variable_(NULL),
      peel_this_loop_(false) {
}


ForInStatement::ForInStatement(ZoneStringList* labels)
    : IterationStatement(labels), each_(NULL), enumerable_(NULL) {
}


DoWhileStatement::DoWhileStatement(ZoneStringList* labels)
    : IterationStatement(labels), cond_(NULL), condition_position_(-1) {
}

} }  // namespace v8::internal
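The new ast-inl.h moves these constructor bodies out of ast.h, which the ast.h hunks below shrink to bare inline declarations: the class header stays cheap to include, and only translation units that actually construct the objects pull in the definitions. A generic sketch of the header/-inl.h idiom with a hypothetical class:

    // foo.h -- declaration only; cheap to include everywhere (hypothetical).
    class Foo {
     public:
      inline explicit Foo(int x);
     private:
      int x_;
    };

    // foo-inl.h -- definitions; included only where Foo is constructed.
    inline Foo::Foo(int x) : x_(x) {}

    int main() { Foo f(42); (void)f; return 0; }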
@@ -32,6 +32,8 @@
#include "parser.h"
#include "scopes.h"
#include "string-stream.h"
#include "ast-inl.h"
#include "jump-target-inl.h"

namespace v8 {
namespace internal {

@@ -786,6 +788,13 @@ Block::Block(Block* other, ZoneList<Statement*>* statements)
}


WhileStatement::WhileStatement(ZoneStringList* labels)
    : IterationStatement(labels),
      cond_(NULL),
      may_have_function_literal_(true) {
}


ExpressionStatement::ExpressionStatement(ExpressionStatement* other,
                                         Expression* expression)
    : Statement(other), expression_(expression) {}

@@ -809,6 +818,11 @@ IterationStatement::IterationStatement(IterationStatement* other,
    : BreakableStatement(other), body_(body) {}


CaseClause::CaseClause(Expression* label, ZoneList<Statement*>* statements)
    : label_(label), statements_(statements) {
}


ForStatement::ForStatement(ForStatement* other,
                           Statement* init,
                           Expression* cond,
@@ -351,10 +351,7 @@ class BreakableStatement: public Statement {
  bool is_target_for_anonymous() const { return type_ == TARGET_FOR_ANONYMOUS; }

 protected:
  BreakableStatement(ZoneStringList* labels, Type type)
      : labels_(labels), type_(type) {
    ASSERT(labels == NULL || labels->length() > 0);
  }
  inline BreakableStatement(ZoneStringList* labels, Type type);

  explicit BreakableStatement(BreakableStatement* other);


@@ -367,10 +364,7 @@ class BreakableStatement: public Statement {

class Block: public BreakableStatement {
 public:
  Block(ZoneStringList* labels, int capacity, bool is_initializer_block)
      : BreakableStatement(labels, TARGET_FOR_NAMED_ONLY),
        statements_(capacity),
        is_initializer_block_(is_initializer_block) { }
  inline Block(ZoneStringList* labels, int capacity, bool is_initializer_block);

  // Construct a clone initialized from the original block and
  // a deep copy of all statements of the original block.

@@ -437,8 +431,7 @@ class IterationStatement: public BreakableStatement {
  BreakTarget* continue_target() { return &continue_target_; }

 protected:
  explicit IterationStatement(ZoneStringList* labels)
      : BreakableStatement(labels, TARGET_FOR_ANONYMOUS), body_(NULL) { }
  explicit inline IterationStatement(ZoneStringList* labels);

  // Construct a clone initialized from original and
  // a deep copy of the original body.

@@ -456,9 +449,7 @@ class IterationStatement: public BreakableStatement {

class DoWhileStatement: public IterationStatement {
 public:
  explicit DoWhileStatement(ZoneStringList* labels)
      : IterationStatement(labels), cond_(NULL), condition_position_(-1) {
  }
  explicit inline DoWhileStatement(ZoneStringList* labels);

  void Initialize(Expression* cond, Statement* body) {
    IterationStatement::Initialize(body);

@@ -482,11 +473,7 @@ class DoWhileStatement: public IterationStatement {

class WhileStatement: public IterationStatement {
 public:
  explicit WhileStatement(ZoneStringList* labels)
      : IterationStatement(labels),
        cond_(NULL),
        may_have_function_literal_(true) {
  }
  explicit WhileStatement(ZoneStringList* labels);

  void Initialize(Expression* cond, Statement* body) {
    IterationStatement::Initialize(body);

@@ -511,14 +498,7 @@ class WhileStatement: public IterationStatement {

class ForStatement: public IterationStatement {
 public:
  explicit ForStatement(ZoneStringList* labels)
      : IterationStatement(labels),
        init_(NULL),
        cond_(NULL),
        next_(NULL),
        may_have_function_literal_(true),
        loop_variable_(NULL),
        peel_this_loop_(false) {}
  explicit inline ForStatement(ZoneStringList* labels);

  // Construct a for-statement initialized from another for-statement
  // and deep copies of all parts of the original statement.

@@ -574,8 +554,7 @@ class ForStatement: public IterationStatement {

class ForInStatement: public IterationStatement {
 public:
  explicit ForInStatement(ZoneStringList* labels)
      : IterationStatement(labels), each_(NULL), enumerable_(NULL) { }
  explicit inline ForInStatement(ZoneStringList* labels);

  void Initialize(Expression* each, Expression* enumerable, Statement* body) {
    IterationStatement::Initialize(body);

@@ -691,8 +670,7 @@ class WithExitStatement: public Statement {

class CaseClause: public ZoneObject {
 public:
  CaseClause(Expression* label, ZoneList<Statement*>* statements)
      : label_(label), statements_(statements) { }
  CaseClause(Expression* label, ZoneList<Statement*>* statements);

  bool is_default() const { return label_ == NULL; }
  Expression* label() const {

@@ -711,9 +689,7 @@ class CaseClause: public ZoneObject {

class SwitchStatement: public BreakableStatement {
 public:
  explicit SwitchStatement(ZoneStringList* labels)
      : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
        tag_(NULL), cases_(NULL) { }
  explicit inline SwitchStatement(ZoneStringList* labels);

  void Initialize(Expression* tag, ZoneList<CaseClause*>* cases) {
    tag_ = tag;
@@ -37,6 +37,7 @@
#include "macro-assembler.h"
#include "natives.h"
#include "snapshot.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {

@@ -228,6 +229,7 @@ class Genesis BASE_EMBEDDED {
  // Used for creating a context from scratch.
  void InstallNativeFunctions();
  bool InstallNatives();
  void InstallCustomCallGenerators();
  void InstallJSFunctionResultCaches();
  // Used both for deserialized and from-scratch contexts to add the extensions
  // provided.

@@ -1229,6 +1231,8 @@ bool Genesis::InstallNatives() {

  InstallNativeFunctions();

  InstallCustomCallGenerators();

  // Install Function.prototype.call and apply.
  { Handle<String> key = Factory::function_class_symbol();
    Handle<JSFunction> function =

@@ -1326,6 +1330,29 @@ bool Genesis::InstallNatives() {
}


static void InstallCustomCallGenerator(Handle<JSFunction> holder_function,
                                       const char* function_name,
                                       int id) {
  Handle<JSObject> proto(JSObject::cast(holder_function->instance_prototype()));
  Handle<String> name = Factory::LookupAsciiSymbol(function_name);
  Handle<JSFunction> function(JSFunction::cast(proto->GetProperty(*name)));
  function->shared()->set_function_data(Smi::FromInt(id));
}


void Genesis::InstallCustomCallGenerators() {
  HandleScope scope;
#define INSTALL_CALL_GENERATOR(holder_fun, fun_name, name)                 \
  {                                                                        \
    Handle<JSFunction> holder(global_context()->holder_fun##_function());  \
    const int id = CallStubCompiler::k##name##CallGenerator;               \
    InstallCustomCallGenerator(holder, #fun_name, id);                     \
  }
  CUSTOM_CALL_IC_GENERATORS(INSTALL_CALL_GENERATOR)
#undef INSTALL_CALL_GENERATOR
}


// Do not forget to update macros.py with named constant
// of cache id.
#define JSFUNCTION_RESULT_CACHE_LIST(F) \

@@ -1726,8 +1753,8 @@ Genesis::Genesis(Handle<Object> global_object,
  CreateNewGlobals(global_template, global_object, &inner_global);
  HookUpGlobalProxy(inner_global, global_proxy);
  InitializeGlobal(inner_global, empty_function);
  if (!InstallNatives()) return;
  InstallJSFunctionResultCaches();
  if (!InstallNatives()) return;

  MakeFunctionInstancePrototypeWritable();
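InstallCustomCallGenerators() above tags selected builtins with a small integer id stored in their SharedFunctionInfo (via set_function_data), so the call IC can later dispatch to a hand-written stub; the CUSTOM_CALL_IC_GENERATORS list drives the macro. A sketch of the X-macro registration idea with a hypothetical list:

    #include <cstdio>

    // Hypothetical list; the real one is CUSTOM_CALL_IC_GENERATORS.
    #define MY_GENERATORS(V) \
      V(array, pop, ArrayPop) \
      V(array, push, ArrayPush)

    enum GeneratorId {
    #define DEFINE_ID(holder, fun, name) k##name##CallGenerator,
      MY_GENERATORS(DEFINE_ID)
    #undef DEFINE_ID
    };

    int main() {
    #define PRINT_ENTRY(holder, fun, name) \
      printf(#holder "." #fun " -> id %d\n", static_cast<int>(k##name##CallGenerator));
      MY_GENERATORS(PRINT_ENTRY)
    #undef PRINT_ENTRY
      return 0;
    }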
@@ -80,10 +80,6 @@ class Bootstrapper : public AllStatic {
  // Tells whether bootstrapping is active.
  static bool IsActive() { return BootstrapperActive::IsActive(); }

  // Encoding/decoding support for fixup flags.
  class FixupFlagsUseCodeObject: public BitField<bool, 0, 1> {};
  class FixupFlagsArgumentsCount: public BitField<uint32_t, 1, 32-1> {};

  // Support for thread preemption.
  static int ArchiveSpacePerThread();
  static char* ArchiveState(char* to);
@ -305,7 +305,7 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
|
|||
// In large object space the object's start must coincide with chunk
|
||||
// and thus the trick is just not applicable.
|
||||
// In old space we do not use this trick to avoid dealing with
|
||||
// remembered sets.
|
||||
// region dirty marks.
|
||||
ASSERT(Heap::new_space()->Contains(elms));
|
||||
|
||||
STATIC_ASSERT(FixedArray::kMapOffset == 0);
|
||||
|
@ -322,7 +322,7 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
|
|||
Heap::CreateFillerObjectAt(elms->address(), to_trim * kPointerSize);
|
||||
|
||||
former_start[to_trim] = Heap::fixed_array_map();
|
||||
former_start[to_trim + 1] = reinterpret_cast<Object*>(len - to_trim);
|
||||
former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
|
||||
|
||||
ASSERT_EQ(elms->address() + to_trim * kPointerSize,
|
||||
(elms + to_trim * kPointerSize)->address());
|
||||
|
@ -330,22 +330,19 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
|
|||
}
|
||||
|
||||
|
||||
static bool ArrayPrototypeHasNoElements() {
|
||||
static bool ArrayPrototypeHasNoElements(Context* global_context,
|
||||
JSObject* array_proto) {
|
||||
// This method depends on non writability of Object and Array prototype
|
||||
// fields.
|
||||
Context* global_context = Top::context()->global_context();
|
||||
// Array.prototype
|
||||
JSObject* proto =
|
||||
JSObject::cast(global_context->array_function()->prototype());
|
||||
if (proto->elements() != Heap::empty_fixed_array()) return false;
|
||||
if (array_proto->elements() != Heap::empty_fixed_array()) return false;
|
||||
// Hidden prototype
|
||||
proto = JSObject::cast(proto->GetPrototype());
|
||||
ASSERT(proto->elements() == Heap::empty_fixed_array());
|
||||
array_proto = JSObject::cast(array_proto->GetPrototype());
|
||||
ASSERT(array_proto->elements() == Heap::empty_fixed_array());
|
||||
// Object.prototype
|
||||
proto = JSObject::cast(proto->GetPrototype());
|
||||
if (proto != global_context->initial_object_prototype()) return false;
|
||||
if (proto->elements() != Heap::empty_fixed_array()) return false;
|
||||
ASSERT(proto->GetPrototype()->IsNull());
|
||||
array_proto = JSObject::cast(array_proto->GetPrototype());
|
||||
if (array_proto != global_context->initial_object_prototype()) return false;
|
||||
if (array_proto->elements() != Heap::empty_fixed_array()) return false;
|
||||
ASSERT(array_proto->GetPrototype()->IsNull());
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -368,6 +365,18 @@ static bool IsJSArrayWithFastElements(Object* receiver,
|
|||
}
|
||||
|
||||
|
||||
static bool IsFastElementMovingAllowed(Object* receiver,
|
||||
FixedArray** elements) {
|
||||
if (!IsJSArrayWithFastElements(receiver, elements)) return false;
|
||||
|
||||
Context* global_context = Top::context()->global_context();
|
||||
JSObject* array_proto =
|
||||
JSObject::cast(global_context->array_function()->prototype());
|
||||
if (JSArray::cast(receiver)->GetPrototype() != array_proto) return false;
|
||||
return ArrayPrototypeHasNoElements(global_context, array_proto);
|
||||
}
|
||||
|
||||
|
||||
static Object* CallJsBuiltin(const char* name,
|
||||
BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
|
||||
HandleScope handleScope;
|
||||
|
@ -377,7 +386,7 @@ static Object* CallJsBuiltin(const char* name,
|
|||
name);
|
||||
ASSERT(js_builtin->IsJSFunction());
|
||||
Handle<JSFunction> function(Handle<JSFunction>::cast(js_builtin));
|
||||
Vector<Object**> argv(Vector<Object**>::New(args.length() - 1));
|
||||
ScopedVector<Object**> argv(args.length() - 1);
|
||||
int n_args = args.length() - 1;
|
||||
for (int i = 0; i < n_args; i++) {
|
||||
argv[i] = args.at<Object>(i + 1).location();
|
||||
|
@ -388,7 +397,6 @@ static Object* CallJsBuiltin(const char* name,
|
|||
n_args,
|
||||
argv.start(),
|
||||
&pending_exception);
|
||||
argv.Dispose();
|
||||
if (pending_exception) return Failure::Exception();
|
||||
return *result;
|
||||
}
|
||||
|
@ -466,11 +474,7 @@ BUILTIN(ArrayPop) {
|
|||
return top;
|
||||
}
|
||||
|
||||
// Remember to check the prototype chain.
|
||||
JSFunction* array_function =
|
||||
Top::context()->global_context()->array_function();
|
||||
JSObject* prototype = JSObject::cast(array_function->prototype());
|
||||
top = prototype->GetElement(len - 1);
|
||||
top = array->GetPrototype()->GetElement(len - 1);
|
||||
|
||||
return top;
|
||||
}
|
||||
|
@ -479,8 +483,7 @@ BUILTIN(ArrayPop) {
|
|||
BUILTIN(ArrayShift) {
|
||||
Object* receiver = *args.receiver();
|
||||
FixedArray* elms = NULL;
|
||||
if (!IsJSArrayWithFastElements(receiver, &elms)
|
||||
|| !ArrayPrototypeHasNoElements()) {
|
||||
if (!IsFastElementMovingAllowed(receiver, &elms)) {
|
||||
return CallJsBuiltin("ArrayShift", args);
|
||||
}
|
||||
JSArray* array = JSArray::cast(receiver);
|
||||
|
@ -497,7 +500,7 @@ BUILTIN(ArrayShift) {
|
|||
|
||||
if (Heap::new_space()->Contains(elms)) {
|
||||
// As elms still in the same space they used to be (new space),
|
||||
// there is no need to update remembered set.
|
||||
// there is no need to update region dirty mark.
|
||||
array->set_elements(LeftTrimFixedArray(elms, 1), SKIP_WRITE_BARRIER);
|
||||
} else {
|
||||
// Shift the elements.
|
||||
|
@ -516,8 +519,7 @@ BUILTIN(ArrayShift) {
|
|||
BUILTIN(ArrayUnshift) {
|
||||
Object* receiver = *args.receiver();
|
||||
FixedArray* elms = NULL;
|
||||
if (!IsJSArrayWithFastElements(receiver, &elms)
|
||||
|| !ArrayPrototypeHasNoElements()) {
|
||||
if (!IsFastElementMovingAllowed(receiver, &elms)) {
|
||||
return CallJsBuiltin("ArrayUnshift", args);
|
||||
}
|
||||
JSArray* array = JSArray::cast(receiver);
|
||||
|
@ -566,8 +568,7 @@ BUILTIN(ArrayUnshift) {
|
|||
BUILTIN(ArraySlice) {
|
||||
Object* receiver = *args.receiver();
|
||||
FixedArray* elms = NULL;
|
||||
if (!IsJSArrayWithFastElements(receiver, &elms)
|
||||
|| !ArrayPrototypeHasNoElements()) {
|
||||
if (!IsFastElementMovingAllowed(receiver, &elms)) {
|
||||
return CallJsBuiltin("ArraySlice", args);
|
||||
}
|
||||
JSArray* array = JSArray::cast(receiver);
|
||||
|
@ -636,8 +637,7 @@ BUILTIN(ArraySlice) {
|
|||
BUILTIN(ArraySplice) {
|
||||
Object* receiver = *args.receiver();
|
||||
FixedArray* elms = NULL;
|
||||
if (!IsJSArrayWithFastElements(receiver, &elms)
|
||||
|| !ArrayPrototypeHasNoElements()) {
|
||||
if (!IsFastElementMovingAllowed(receiver, &elms)) {
|
||||
return CallJsBuiltin("ArraySplice", args);
|
||||
}
|
||||
JSArray* array = JSArray::cast(receiver);
|
||||
|
@ -789,7 +789,10 @@ BUILTIN(ArraySplice) {
|
|||
|
||||
|
||||
BUILTIN(ArrayConcat) {
|
||||
if (!ArrayPrototypeHasNoElements()) {
|
||||
Context* global_context = Top::context()->global_context();
|
||||
JSObject* array_proto =
|
||||
JSObject::cast(global_context->array_function()->prototype());
|
||||
if (!ArrayPrototypeHasNoElements(global_context, array_proto)) {
|
||||
return CallJsBuiltin("ArrayConcat", args);
|
||||
}
|
||||
|
||||
|
@@ -799,7 +802,8 @@ BUILTIN(ArrayConcat) {
  int result_len = 0;
  for (int i = 0; i < n_arguments; i++) {
    Object* arg = args[i];
    if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()) {
    if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements()
        || JSArray::cast(arg)->GetPrototype() != array_proto) {
      return CallJsBuiltin("ArrayConcat", args);
    }

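Concat's fast path now additionally requires every argument to still use the canonical Array prototype: an array whose prototype was swapped could expose indexed properties that a raw element copy would silently skip. A toy illustration of that extra identity check (hypothetical types, not V8's JSArray):

#include <cstddef>
#include <cstdio>

// Toy model: the fast concat path is safe only when every input is a
// "plain" array still on the canonical Array prototype (assumption:
// simplified stand-ins for V8's prototype identity check).
struct ToyArray {
  const void* prototype;
  bool has_fast_elements;
};

static bool FastConcatAllowed(const ToyArray* args, size_t n,
                              const void* array_proto) {
  for (size_t i = 0; i < n; i++) {
    if (!args[i].has_fast_elements) return false;
    // New in this patch: a swapped prototype forces the generic path.
    if (args[i].prototype != array_proto) return false;
  }
  return true;
}

int main() {
  int canonical_proto;  // address used as the prototype's identity
  ToyArray ok[2] = {{&canonical_proto, true}, {&canonical_proto, true}};
  std::printf("fast: %d\n", FastConcatAllowed(ok, 2, &canonical_proto));
}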
@@ -34,54 +34,6 @@ namespace v8 {
namespace internal {


template<typename Record>
CircularQueue<Record>::CircularQueue(int desired_buffer_size_in_bytes)
    : buffer_(NewArray<Record>(desired_buffer_size_in_bytes / sizeof(Record))),
      buffer_end_(buffer_ + desired_buffer_size_in_bytes / sizeof(Record)),
      enqueue_semaphore_(
          OS::CreateSemaphore(static_cast<int>(buffer_end_ - buffer_) - 1)),
      enqueue_pos_(buffer_),
      dequeue_pos_(buffer_) {
  // To be able to distinguish between a full and an empty queue
  // state, the queue must be capable of containing at least 2
  // records.
  ASSERT((buffer_end_ - buffer_) >= 2);
}


template<typename Record>
CircularQueue<Record>::~CircularQueue() {
  DeleteArray(buffer_);
  delete enqueue_semaphore_;
}


template<typename Record>
void CircularQueue<Record>::Dequeue(Record* rec) {
  ASSERT(!IsEmpty());
  *rec = *dequeue_pos_;
  dequeue_pos_ = Next(dequeue_pos_);
  // Tell we have a spare record.
  enqueue_semaphore_->Signal();
}


template<typename Record>
void CircularQueue<Record>::Enqueue(const Record& rec) {
  // Wait until we have at least one spare record.
  enqueue_semaphore_->Wait();
  ASSERT(Next(enqueue_pos_) != dequeue_pos_);
  *enqueue_pos_ = rec;
  enqueue_pos_ = Next(enqueue_pos_);
}


template<typename Record>
Record* CircularQueue<Record>::Next(Record* curr) {
  return ++curr != buffer_end_ ? curr : buffer_;
}


void* SamplingCircularQueue::Enqueue() {
  WrapPositionIfNeeded(&producer_pos_->enqueue_pos);
  void* result = producer_pos_->enqueue_pos;
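The deleted CircularQueue is a classic semaphore-gated single-producer/single-consumer ring that sacrifices one slot to tell a full queue from an empty one. A self-contained sketch of the same design using standard C++ synchronization in place of OS::CreateSemaphore (an assumption; this is not the removed V8 code):

#include <cassert>
#include <cstdio>
#include <condition_variable>
#include <mutex>
#include <vector>

// Sketch of a lock-based SPSC ring: Enqueue blocks while the buffer is
// full; one slot stays unused so head_ == tail_ can mean "empty" only.
template <typename Record>
class BlockingRing {
 public:
  explicit BlockingRing(size_t capacity)
      : buf_(capacity), head_(0), tail_(0) {
    assert(capacity >= 2);  // needed to distinguish full from empty
  }

  void Enqueue(const Record& rec) {
    std::unique_lock<std::mutex> lock(mu_);
    not_full_.wait(lock, [&] { return Next(head_) != tail_; });
    buf_[head_] = rec;
    head_ = Next(head_);
  }

  bool Dequeue(Record* rec) {
    std::unique_lock<std::mutex> lock(mu_);
    if (head_ == tail_) return false;  // empty
    *rec = buf_[tail_];
    tail_ = Next(tail_);
    lock.unlock();
    not_full_.notify_one();  // a spare slot is available again
    return true;
  }

 private:
  size_t Next(size_t i) const { return (i + 1) % buf_.size(); }
  std::vector<Record> buf_;
  size_t head_, tail_;
  std::mutex mu_;
  std::condition_variable not_full_;
};

int main() {
  BlockingRing<int> q(4);
  q.Enqueue(1);
  q.Enqueue(2);
  int v;
  while (q.Dequeue(&v)) std::printf("%d\n", v);
}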
@@ -32,32 +32,6 @@ namespace v8 {
namespace internal {


// Lock-based blocking circular queue for small records. Intended for
// transfer of small records between a single producer and a single
// consumer. Blocks on enqueue operation if the queue is full.
template<typename Record>
class CircularQueue {
 public:
  inline explicit CircularQueue(int desired_buffer_size_in_bytes);
  inline ~CircularQueue();

  INLINE(void Dequeue(Record* rec));
  INLINE(void Enqueue(const Record& rec));
  INLINE(bool IsEmpty()) { return enqueue_pos_ == dequeue_pos_; }

 private:
  INLINE(Record* Next(Record* curr));

  Record* buffer_;
  Record* const buffer_end_;
  Semaphore* enqueue_semaphore_;
  Record* enqueue_pos_;
  Record* dequeue_pos_;

  DISALLOW_COPY_AND_ASSIGN(CircularQueue);
};


// Lock-free cache-friendly sampling circular queue for large
// records. Intended for fast transfer of large records between a
// single producer and a single consumer. If the queue is full,
@@ -28,7 +28,6 @@
#ifndef V8_CODEGEN_H_
#define V8_CODEGEN_H_

#include "ast.h"
#include "code-stubs.h"
#include "runtime.h"
#include "type-info.h"

@@ -111,11 +110,12 @@ namespace internal {
  F(ClassOf, 1, 1) \
  F(ValueOf, 1, 1) \
  F(SetValueOf, 2, 1) \
  F(FastCharCodeAt, 2, 1) \
  F(CharFromCode, 1, 1) \
  F(StringCharCodeAt, 2, 1) \
  F(StringCharFromCode, 1, 1) \
  F(StringCharAt, 2, 1) \
  F(ObjectEquals, 2, 1) \
  F(Log, 3, 1) \
  F(RandomHeapNumber, 0, 1) \
  F(IsObject, 1, 1) \
  F(IsFunction, 1, 1) \
  F(IsUndetectableObject, 1, 1) \
@@ -180,6 +180,111 @@ class CodeGeneratorScope BASE_EMBEDDED {
};


#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64

// State of used registers in a virtual frame.
class FrameRegisterState {
 public:
  // Captures the current state of the given frame.
  explicit FrameRegisterState(VirtualFrame* frame);

  // Saves the state in the stack.
  void Save(MacroAssembler* masm) const;

  // Restores the state from the stack.
  void Restore(MacroAssembler* masm) const;

 private:
  // Constants indicating special actions. They should not be multiples
  // of kPointerSize so they will not collide with valid offsets from
  // the frame pointer.
  static const int kIgnore = -1;
  static const int kPush = 1;

  // This flag is ored with a valid offset from the frame pointer, so
  // it should fit in the low zero bits of a valid offset.
  static const int kSyncedFlag = 2;

  int registers_[RegisterAllocator::kNumRegisters];
};

#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS


class FrameRegisterState {
 public:
  inline FrameRegisterState(VirtualFrame frame) : frame_(frame) { }

  inline const VirtualFrame* frame() const { return &frame_; }

 private:
  VirtualFrame frame_;
};

#else

#error Unsupported target architecture.

#endif


// Helper interface to prepare to/restore after making runtime calls.
class RuntimeCallHelper {
 public:
  virtual ~RuntimeCallHelper() {}

  virtual void BeforeCall(MacroAssembler* masm) const = 0;

  virtual void AfterCall(MacroAssembler* masm) const = 0;

 protected:
  RuntimeCallHelper() {}

 private:
  DISALLOW_COPY_AND_ASSIGN(RuntimeCallHelper);
};


// RuntimeCallHelper implementation that saves/restores state of a
// virtual frame.
class VirtualFrameRuntimeCallHelper : public RuntimeCallHelper {
 public:
  // Does not take ownership of |frame_state|.
  explicit VirtualFrameRuntimeCallHelper(const FrameRegisterState* frame_state)
      : frame_state_(frame_state) {}

  virtual void BeforeCall(MacroAssembler* masm) const;

  virtual void AfterCall(MacroAssembler* masm) const;

 private:
  const FrameRegisterState* frame_state_;
};


// RuntimeCallHelper implementation used in IC stubs: enters/leaves a
// newly created internal frame before/after the runtime call.
class ICRuntimeCallHelper : public RuntimeCallHelper {
 public:
  ICRuntimeCallHelper() {}

  virtual void BeforeCall(MacroAssembler* masm) const;

  virtual void AfterCall(MacroAssembler* masm) const;
};


// Trivial RuntimeCallHelper implementation.
class NopRuntimeCallHelper : public RuntimeCallHelper {
 public:
  NopRuntimeCallHelper() {}

  virtual void BeforeCall(MacroAssembler* masm) const {}

  virtual void AfterCall(MacroAssembler* masm) const {}
};


// Deferred code objects are small pieces of code that are compiled
// out of line. They are used to defer the compilation of uncommon
// paths thereby avoiding expensive jumps around uncommon code parts.
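RuntimeCallHelper is a small strategy interface: a code generator brackets an emitted runtime call with BeforeCall/AfterCall so each call site chooses its own save/restore policy. A hedged sketch of the pattern, with a printf "assembler" standing in for MacroAssembler (simplified assumptions throughout):

#include <cstdio>

// Simplified stand-in for MacroAssembler (an assumption for this sketch).
struct Masm { void Emit(const char* s) { std::printf("%s\n", s); } };

// Mirrors the shape of the interface added in this diff.
class RuntimeCallHelper {
 public:
  virtual ~RuntimeCallHelper() {}
  virtual void BeforeCall(Masm* masm) const = 0;
  virtual void AfterCall(Masm* masm) const = 0;
};

// "Nop" variant: call sites with nothing live to preserve.
class NopRuntimeCallHelper : public RuntimeCallHelper {
 public:
  void BeforeCall(Masm*) const override {}
  void AfterCall(Masm*) const override {}
};

// Frame-saving variant, shaped like VirtualFrameRuntimeCallHelper.
class FrameSavingHelper : public RuntimeCallHelper {
 public:
  void BeforeCall(Masm* m) const override { m->Emit("save frame registers"); }
  void AfterCall(Masm* m) const override { m->Emit("restore frame registers"); }
};

// Shared emit path: the generator does not care which policy it received.
void EmitRuntimeCall(Masm* m, const RuntimeCallHelper& helper) {
  helper.BeforeCall(m);
  m->Emit("call runtime");
  helper.AfterCall(m);
}

int main() {
  Masm m;
  EmitRuntimeCall(&m, FrameSavingHelper());
  EmitRuntimeCall(&m, NopRuntimeCallHelper());
}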
@@ -210,6 +315,8 @@ class DeferredCode: public ZoneObject {
  inline void Branch(Condition cc);
  void BindExit() { masm_->bind(&exit_label_); }

  const FrameRegisterState* frame_state() const { return &frame_state_; }

  void SaveRegisters();
  void RestoreRegisters();

@@ -217,28 +324,13 @@ class DeferredCode: public ZoneObject {
  MacroAssembler* masm_;

 private:
  // Constants indicating special actions. They should not be multiples
  // of kPointerSize so they will not collide with valid offsets from
  // the frame pointer.
  static const int kIgnore = -1;
  static const int kPush = 1;

  // This flag is ored with a valid offset from the frame pointer, so
  // it should fit in the low zero bits of a valid offset.
  static const int kSyncedFlag = 2;

  int statement_position_;
  int position_;

  Label entry_label_;
  Label exit_label_;

  // C++ doesn't allow zero length arrays, so we make the array length 1 even
  // if we don't need it.
  static const int kRegistersArrayLength =
      (RegisterAllocator::kNumRegisters == 0) ?
      1 : RegisterAllocator::kNumRegisters;
  int registers_[kRegistersArrayLength];
  FrameRegisterState frame_state_;

#ifdef DEBUG
  const char* comment_;
@@ -612,6 +704,163 @@ class ToBooleanStub: public CodeStub {
};


enum StringIndexFlags {
  // Accepts smis or heap numbers.
  STRING_INDEX_IS_NUMBER,

  // Accepts smis or heap numbers that are valid array indices
  // (ECMA-262 15.4). Invalid indices are reported as being out of
  // range.
  STRING_INDEX_IS_ARRAY_INDEX
};


// Generates code implementing String.prototype.charCodeAt.
//
// Only supports the case when the receiver is a string and the index
// is a number (smi or heap number) that is a valid index into the
// string. Additional index constraints are specified by the
// flags. Otherwise, bails out to the provided labels.
//
// Register usage: |object| may be changed to another string in a way
// that doesn't affect charCodeAt/charAt semantics, |index| is
// preserved, |scratch| and |result| are clobbered.
class StringCharCodeAtGenerator {
 public:
  StringCharCodeAtGenerator(Register object,
                            Register index,
                            Register scratch,
                            Register result,
                            Label* receiver_not_string,
                            Label* index_not_number,
                            Label* index_out_of_range,
                            StringIndexFlags index_flags)
      : object_(object),
        index_(index),
        scratch_(scratch),
        result_(result),
        receiver_not_string_(receiver_not_string),
        index_not_number_(index_not_number),
        index_out_of_range_(index_out_of_range),
        index_flags_(index_flags) {
    ASSERT(!scratch_.is(object_));
    ASSERT(!scratch_.is(index_));
    ASSERT(!scratch_.is(result_));
    ASSERT(!result_.is(object_));
    ASSERT(!result_.is(index_));
  }

  // Generates the fast case code. On the fallthrough path |result|
  // register contains the result.
  void GenerateFast(MacroAssembler* masm);

  // Generates the slow case code. Must not be naturally
  // reachable. Expected to be put after a ret instruction (e.g., in
  // deferred code). Always jumps back to the fast case.
  void GenerateSlow(MacroAssembler* masm,
                    const RuntimeCallHelper& call_helper);

 private:
  Register object_;
  Register index_;
  Register scratch_;
  Register result_;

  Label* receiver_not_string_;
  Label* index_not_number_;
  Label* index_out_of_range_;

  StringIndexFlags index_flags_;

  Label call_runtime_;
  Label index_not_smi_;
  Label got_smi_index_;
  Label exit_;

  DISALLOW_COPY_AND_ASSIGN(StringCharCodeAtGenerator);
};


// Generates code for creating a one-char string from a char code.
class StringCharFromCodeGenerator {
 public:
  StringCharFromCodeGenerator(Register code,
                              Register result)
      : code_(code),
        result_(result) {
    ASSERT(!code_.is(result_));
  }

  // Generates the fast case code. On the fallthrough path |result|
  // register contains the result.
  void GenerateFast(MacroAssembler* masm);

  // Generates the slow case code. Must not be naturally
  // reachable. Expected to be put after a ret instruction (e.g., in
  // deferred code). Always jumps back to the fast case.
  void GenerateSlow(MacroAssembler* masm,
                    const RuntimeCallHelper& call_helper);

 private:
  Register code_;
  Register result_;

  Label slow_case_;
  Label exit_;

  DISALLOW_COPY_AND_ASSIGN(StringCharFromCodeGenerator);
};


// Generates code implementing String.prototype.charAt.
//
// Only supports the case when the receiver is a string and the index
// is a number (smi or heap number) that is a valid index into the
// string. Additional index constraints are specified by the
// flags. Otherwise, bails out to the provided labels.
//
// Register usage: |object| may be changed to another string in a way
// that doesn't affect charCodeAt/charAt semantics, |index| is
// preserved, |scratch1|, |scratch2|, and |result| are clobbered.
class StringCharAtGenerator {
 public:
  StringCharAtGenerator(Register object,
                        Register index,
                        Register scratch1,
                        Register scratch2,
                        Register result,
                        Label* receiver_not_string,
                        Label* index_not_number,
                        Label* index_out_of_range,
                        StringIndexFlags index_flags)
      : char_code_at_generator_(object,
                                index,
                                scratch1,
                                scratch2,
                                receiver_not_string,
                                index_not_number,
                                index_out_of_range,
                                index_flags),
        char_from_code_generator_(scratch2, result) {}

  // Generates the fast case code. On the fallthrough path |result|
  // register contains the result.
  void GenerateFast(MacroAssembler* masm);

  // Generates the slow case code. Must not be naturally
  // reachable. Expected to be put after a ret instruction (e.g., in
  // deferred code). Always jumps back to the fast case.
  void GenerateSlow(MacroAssembler* masm,
                    const RuntimeCallHelper& call_helper);

 private:
  StringCharCodeAtGenerator char_code_at_generator_;
  StringCharFromCodeGenerator char_from_code_generator_;

  DISALLOW_COPY_AND_ASSIGN(StringCharAtGenerator);
};


} // namespace internal
} // namespace v8

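Each generator emits an inline fast case plus an out-of-line slow case that jumps back when it finishes; the labels name the bailout conditions. A rough behavioral model of the charCodeAt contract in ordinary C++ (assumed simplifications; this is not the emitted machine code):

#include <cstdint>
#include <cstdio>
#include <string>

// Toy model of the StringCharCodeAtGenerator contract: take the fast path
// when the index is an in-range non-negative integer ("smi-like"), and
// report which slow-path label would otherwise be taken.
enum class Outcome { kFast, kSlowIndexNotNumber, kSlowOutOfRange };

static Outcome CharCodeAt(const std::string& s, double index,
                          uint16_t* result) {
  // Fast-path precondition: index is a non-negative integer value.
  if (index < 0 || index != static_cast<int64_t>(index))
    return Outcome::kSlowIndexNotNumber;  // handled by GenerateSlow
  size_t i = static_cast<size_t>(index);
  if (i >= s.size())
    return Outcome::kSlowOutOfRange;      // the index_out_of_range label
  *result = static_cast<uint16_t>(static_cast<unsigned char>(s[i]));
  return Outcome::kFast;                  // fallthrough: result is valid
}

int main() {
  uint16_t c;
  if (CharCodeAt("abc", 1, &c) == Outcome::kFast) std::printf("%u\n", c);
}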
@@ -44,6 +44,18 @@
namespace v8 {
namespace internal {

// For normal operation the syntax checker is used to determine whether to
// use the full compiler for top level code or not. However if the flag
// --always-full-compiler is specified or debugging is active the full
// compiler will be used for all code.
static bool AlwaysFullCompiler() {
#ifdef ENABLE_DEBUGGER_SUPPORT
  return FLAG_always_full_compiler || Debugger::IsDebuggerActive();
#else
  return FLAG_always_full_compiler;
#endif
}


static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
  FunctionLiteral* function = info->function();
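AlwaysFullCompiler centralizes a flag-plus-debugger test that the two hunks below previously duplicated at both compile entry points. A compact sketch of the resulting selection logic, with plain bools standing in for V8's flags and debugger query (an assumption, not the real compiler driver):

#include <cstdio>

// Stand-ins for FLAG_always_full_compiler and Debugger::IsDebuggerActive().
static bool FLAG_always_full_compiler = false;
static bool debugger_active = true;

static bool AlwaysFullCompiler() {
  return FLAG_always_full_compiler || debugger_active;
}

// The decision both MakeCode and BuildFunctionInfo now share: force the
// full compiler when flagged or debugging, otherwise use it only for
// run-once code whose syntax the full code generator supports.
static const char* ChooseCompiler(bool full_codegen_syntax_ok,
                                  bool is_run_once) {
  if (AlwaysFullCompiler()) return "full";
  if (is_run_once && full_codegen_syntax_ok) return "full";
  return "classic";
}

int main() { std::printf("%s\n", ChooseCompiler(true, false)); }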
@@ -120,7 +132,9 @@ static Handle<Code> MakeCode(Handle<Context> context, CompilationInfo* info) {
          ? info->scope()->is_global_scope()
          : (shared->is_toplevel() || shared->try_full_codegen());

  if (FLAG_always_full_compiler || (FLAG_full_compiler && is_run_once)) {
  if (AlwaysFullCompiler()) {
    return FullCodeGenerator::MakeCode(info);
  } else if (FLAG_full_compiler && is_run_once) {
    FullCodeGenSyntaxChecker checker;
    checker.Check(function);
    if (checker.has_supported_syntax()) {
@@ -507,7 +521,11 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
    CHECK(!FLAG_always_full_compiler || !FLAG_always_fast_compiler);
    bool is_run_once = literal->try_full_codegen();
    bool is_compiled = false;
    if (FLAG_always_full_compiler || (FLAG_full_compiler && is_run_once)) {

    if (AlwaysFullCompiler()) {
      code = FullCodeGenerator::MakeCode(&info);
      is_compiled = true;
    } else if (FLAG_full_compiler && is_run_once) {
      FullCodeGenSyntaxChecker checker;
      checker.Check(literal);
      if (checker.has_supported_syntax()) {