// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
#if V8_TARGET_ARCH_X64
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/cpu-profiler.h"
#include "src/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/x64/lithium-codegen-x64.h"
namespace v8 {
namespace internal {
// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator final : public CallWrapper {
public:
SafepointGenerator(LCodeGen* codegen,
LPointerMap* pointers,
Safepoint::DeoptMode mode)
: codegen_(codegen),
pointers_(pointers),
deopt_mode_(mode) { }
virtual ~SafepointGenerator() {}
void BeforeCall(int call_size) const override {}
void AfterCall() const override {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
}
private:
LCodeGen* codegen_;
LPointerMap* pointers_;
Safepoint::DeoptMode deopt_mode_;
};
#define __ masm()->
bool LCodeGen::GenerateCode() {
LPhase phase("Z_Code generation", chunk());
DCHECK(is_unused());
status_ = GENERATING;
// Open a frame scope to indicate that there is a frame on the stack. The
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done in GeneratePrologue).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
return GeneratePrologue() &&
GenerateBody() &&
GenerateDeferredCode() &&
GenerateJumpTable() &&
GenerateSafepointTable();
}
void LCodeGen::FinishCode(Handle<Code> code) {
DCHECK(is_done());
code->set_stack_slots(GetStackSlotCount());
code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
PopulateDeoptimizationData(code);
}
#ifdef _MSC_VER
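// Windows commits stack pages lazily behind a guard page, so after a large
// rsp adjustment every page of the new frame has to be touched in order
// (highest address first) before slots below it can be written. The stores
// of rax are dummy writes; the value stored does not matter.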
void LCodeGen::MakeSureStackPagesMapped(int offset) {
const int kPageSize = 4 * KB;
for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
__ movp(Operand(rsp, offset), rax);
}
}
#endif
void LCodeGen::SaveCallerDoubles() {
DCHECK(info()->saves_caller_doubles());
DCHECK(NeedsEagerFrame());
Comment(";;; Save clobbered callee double registers");
int count = 0;
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
while (!save_iterator.Done()) {
__ movsd(MemOperand(rsp, count * kDoubleSize),
XMMRegister::FromAllocationIndex(save_iterator.Current()));
save_iterator.Advance();
count++;
}
}
void LCodeGen::RestoreCallerDoubles() {
DCHECK(info()->saves_caller_doubles());
DCHECK(NeedsEagerFrame());
Comment(";;; Restore clobbered callee double registers");
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
int count = 0;
while (!save_iterator.Done()) {
__ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()),
MemOperand(rsp, count * kDoubleSize));
save_iterator.Advance();
count++;
}
}
bool LCodeGen::GeneratePrologue() {
DCHECK(is_generating());
if (info()->IsOptimizing()) {
ProfileEntryHookStub::MaybeCallEntryHook(masm_);
#ifdef DEBUG
if (strlen(FLAG_stop_at) > 0 &&
info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
__ int3();
}
#endif
// Sloppy mode functions need to replace the receiver with the global proxy
// when called as functions (without an explicit receiver object).
if (is_sloppy(info()->language_mode()) && info()->MayUseThis() &&
!info()->is_native() && info()->scope()->has_this_declaration()) {
Label ok;
StackArgumentsAccessor args(rsp, scope()->num_parameters());
__ movp(rcx, args.GetReceiverOperand());
__ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &ok, Label::kNear);
__ movp(rcx, GlobalObjectOperand());
__ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
__ movp(args.GetReceiverOperand(), rcx);
__ bind(&ok);
}
}
info()->set_prologue_offset(masm_->pc_offset());
if (NeedsEagerFrame()) {
DCHECK(!frame_is_built_);
frame_is_built_ = true;
if (info()->IsStub()) {
__ StubPrologue();
} else {
__ Prologue(info()->IsCodePreAgingActive());
}
info()->AddNoFrameRange(0, masm_->pc_offset());
}
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
__ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
MakeSureStackPagesMapped(slots * kPointerSize);
#endif
__ Push(rax);
__ Set(rax, slots);
__ Set(kScratchRegister, kSlotsZapValue);
Label loop;
__ bind(&loop);
__ movp(MemOperand(rsp, rax, times_pointer_size, 0),
kScratchRegister);
__ decl(rax);
__ j(not_zero, &loop);
__ Pop(rax);
} else {
__ subp(rsp, Immediate(slots * kPointerSize));
#ifdef _MSC_VER
MakeSureStackPagesMapped(slots * kPointerSize);
#endif
}
if (info()->saves_caller_doubles()) {
SaveCallerDoubles();
}
}
// Possibly allocate a local context.
int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
Comment(";;; Allocate local context");
bool need_write_barrier = true;
// Argument to NewContext is the function, which is still in rdi.
DCHECK(!info()->scope()->is_script_scope());
if (heap_slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), heap_slots);
__ CallStub(&stub);
// Result of FastNewContextStub is always in new space.
need_write_barrier = false;
} else {
__ Push(rdi);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in rax. It replaces the context passed to us.
    // It's saved on the stack and kept live in rsi.
__ movp(rsi, rax);
__ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);
// Copy any necessary parameters into the context.
int num_parameters = scope()->num_parameters();
int first_parameter = scope()->has_this_declaration() ? -1 : 0;
for (int i = first_parameter; i < num_parameters; i++) {
Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
// Load parameter from stack.
__ movp(rax, Operand(rbp, parameter_offset));
// Store it in the context.
int context_offset = Context::SlotOffset(var->index());
__ movp(Operand(rsi, context_offset), rax);
// Update the write barrier. This clobbers rax and rbx.
if (need_write_barrier) {
__ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
} else if (FLAG_debug_code) {
Label done;
__ JumpIfInNewSpace(rsi, rax, &done, Label::kNear);
__ Abort(kExpectedNewSpaceObject);
__ bind(&done);
}
}
}
Comment(";;; End allocate local context");
}
// Trace the call.
if (FLAG_trace && info()->IsOptimizing()) {
__ CallRuntime(Runtime::kTraceEnter, 0);
}
return !is_aborted();
}
void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or, if
  // there are none, at the OSR entrypoint instruction.
if (osr_pc_offset_ >= 0) return;
osr_pc_offset_ = masm()->pc_offset();
// Adjust the frame size, subsuming the unoptimized frame into the
// optimized frame.
int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
DCHECK(slots >= 0);
__ subp(rsp, Immediate(slots * kPointerSize));
}
void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
if (instr->IsCall()) {
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
}
if (!instr->IsLazyBailout() && !instr->IsGap()) {
safepoints_.BumpLastLazySafepointIndex();
}
}
void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
if (FLAG_debug_code && FLAG_enable_slow_asserts && instr->HasResult() &&
instr->hydrogen_value()->representation().IsInteger32() &&
instr->result()->IsRegister()) {
__ AssertZeroExtended(ToRegister(instr->result()));
}
if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
    // We sign extend the dehoisted key at the definition point when the
    // pointer size is 64-bit. For the x32 port, we sign extend the dehoisted
    // key at the use points and MustSignExtendResult is always false. We
    // can't use STATIC_ASSERT here as the pointer size is 32-bit for x32.
DCHECK(kPointerSize == kInt64Size);
if (instr->result()->IsRegister()) {
Register result_reg = ToRegister(instr->result());
__ movsxlq(result_reg, result_reg);
} else {
// Sign extend the 32bit result in the stack slots.
DCHECK(instr->result()->IsStackSlot());
Operand src = ToOperand(instr->result());
__ movsxlq(kScratchRegister, src);
__ movq(src, kScratchRegister);
}
}
}
bool LCodeGen::GenerateJumpTable() {
if (jump_table_.length() == 0) return !is_aborted();
Label needs_frame;
Comment(";;; -------------------- Jump table --------------------");
for (int i = 0; i < jump_table_.length(); i++) {
Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
__ bind(&table_entry->label);
Address entry = table_entry->address;
DeoptComment(table_entry->deopt_info);
if (table_entry->needs_frame) {
DCHECK(!info()->saves_caller_doubles());
__ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
__ call(&needs_frame);
} else {
if (info()->saves_caller_doubles()) {
DCHECK(info()->IsStub());
RestoreCallerDoubles();
}
__ call(entry, RelocInfo::RUNTIME_ENTRY);
}
info()->LogDeoptCallPosition(masm()->pc_offset(),
table_entry->deopt_info.inlining_id);
}
if (needs_frame.is_linked()) {
__ bind(&needs_frame);
/* stack layout
4: return address <-- rsp
3: garbage
2: garbage
1: garbage
0: garbage
*/
// Reserve space for context and stub marker.
__ subp(rsp, Immediate(2 * kPointerSize));
__ Push(MemOperand(rsp, 2 * kPointerSize)); // Copy return address.
__ Push(kScratchRegister); // Save entry address for ret(0)
/* stack layout
4: return address
3: garbage
2: garbage
1: return address
0: entry address <-- rsp
*/
// Remember context pointer.
__ movp(kScratchRegister,
MemOperand(rbp, StandardFrameConstants::kContextOffset));
// Save context pointer into the stack frame.
__ movp(MemOperand(rsp, 3 * kPointerSize), kScratchRegister);
// Create a stack frame.
__ movp(MemOperand(rsp, 4 * kPointerSize), rbp);
__ leap(rbp, MemOperand(rsp, 4 * kPointerSize));
// This variant of deopt can only be used with stubs. Since we don't
// have a function pointer to install in the stack frame that we're
// building, install a special marker there instead.
DCHECK(info()->IsStub());
__ Move(MemOperand(rsp, 2 * kPointerSize), Smi::FromInt(StackFrame::STUB));
/* stack layout
4: old rbp
3: context pointer
2: stub marker
1: return address
0: entry address <-- rsp
*/
__ ret(0);
}
return !is_aborted();
}
bool LCodeGen::GenerateDeferredCode() {
DCHECK(is_generating());
if (deferred_.length() > 0) {
for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
LDeferredCode* code = deferred_[i];
HValue* value =
instructions_->at(code->instruction_index())->hydrogen_value();
RecordAndWritePosition(
chunk()->graph()->SourcePositionToScriptPosition(value->position()));
Comment(";;; <@%d,#%d> "
"-------------------- Deferred %s --------------------",
code->instruction_index(),
code->instr()->hydrogen_value()->id(),
code->instr()->Mnemonic());
__ bind(code->entry());
if (NeedsDeferredFrame()) {
Comment(";;; Build frame");
DCHECK(!frame_is_built_);
DCHECK(info()->IsStub());
frame_is_built_ = true;
        // Build the frame in such a way that rsi isn't trashed.
__ pushq(rbp); // Caller's frame pointer.
__ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
__ Push(Smi::FromInt(StackFrame::STUB));
__ leap(rbp, Operand(rsp, 2 * kPointerSize));
Comment(";;; Deferred code");
}
code->Generate();
if (NeedsDeferredFrame()) {
__ bind(code->done());
Comment(";;; Destroy frame");
DCHECK(frame_is_built_);
frame_is_built_ = false;
__ movp(rsp, rbp);
__ popq(rbp);
}
__ jmp(code->exit());
}
}
// Deferred code is the last part of the instruction sequence. Mark
// the generated code as done unless we bailed out.
if (!is_aborted()) status_ = DONE;
return !is_aborted();
}
bool LCodeGen::GenerateSafepointTable() {
DCHECK(is_done());
safepoints_.Emit(masm(), GetStackSlotCount());
return !is_aborted();
}
Register LCodeGen::ToRegister(int index) const {
return Register::FromAllocationIndex(index);
}
XMMRegister LCodeGen::ToDoubleRegister(int index) const {
return XMMRegister::FromAllocationIndex(index);
}
Register LCodeGen::ToRegister(LOperand* op) const {
DCHECK(op->IsRegister());
return ToRegister(op->index());
}
XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
DCHECK(op->IsDoubleRegister());
return ToDoubleRegister(op->index());
}
bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}
bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
return op->IsConstantOperand() &&
chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
}
bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
return chunk_->LookupLiteralRepresentation(op).IsSmi();
}
int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
return ToRepresentation(op, Representation::Integer32());
}
int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
const Representation& r) const {
HConstant* constant = chunk_->LookupConstant(op);
int32_t value = constant->Integer32Value();
if (r.IsInteger32()) return value;
DCHECK(SmiValuesAre31Bits() && r.IsSmiOrTagged());
return static_cast<int32_t>(reinterpret_cast<intptr_t>(Smi::FromInt(value)));
}
Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
return Smi::FromInt(constant->Integer32Value());
}
double LCodeGen::ToDouble(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
DCHECK(constant->HasDoubleValue());
return constant->DoubleValue();
}
ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
DCHECK(constant->HasExternalReferenceValue());
return constant->ExternalReferenceValue();
}
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
HConstant* constant = chunk_->LookupConstant(op);
DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
return constant->handle(isolate());
}
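// Maps an argument slot index (always negative) to an offset relative to rsp
// when no frame has been built: index -1 is the first argument, located just
// above the return address.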
static int ArgumentsOffsetWithoutFrame(int index) {
DCHECK(index < 0);
return -(index + 1) * kPointerSize + kPCOnStackSize;
}
Operand LCodeGen::ToOperand(LOperand* op) const {
// Does not handle registers. In X64 assembler, plain registers are not
// representable as an Operand.
DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
if (NeedsEagerFrame()) {
return Operand(rbp, StackSlotOffset(op->index()));
} else {
// Retrieve parameter without eager stack-frame relative to the
// stack-pointer.
return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
}
}
void LCodeGen::WriteTranslation(LEnvironment* environment,
Translation* translation) {
if (environment == NULL) return;
// The translation includes one command per value in the environment.
int translation_size = environment->translation_size();
WriteTranslation(environment->outer(), translation);
WriteTranslationFrame(environment, translation);
int object_index = 0;
int dematerialized_index = 0;
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
AddToTranslation(
environment, translation, value, environment->HasTaggedValueAt(i),
environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
}
}
void LCodeGen::AddToTranslation(LEnvironment* environment,
Translation* translation,
LOperand* op,
bool is_tagged,
bool is_uint32,
int* object_index_pointer,
int* dematerialized_index_pointer) {
if (op == LEnvironment::materialization_marker()) {
int object_index = (*object_index_pointer)++;
if (environment->ObjectIsDuplicateAt(object_index)) {
int dupe_of = environment->ObjectDuplicateOfAt(object_index);
translation->DuplicateObject(dupe_of);
return;
}
int object_length = environment->ObjectLengthAt(object_index);
if (environment->ObjectIsArgumentsAt(object_index)) {
translation->BeginArgumentsObject(object_length);
} else {
translation->BeginCapturedObject(object_length);
}
int dematerialized_index = *dematerialized_index_pointer;
int env_offset = environment->translation_size() + dematerialized_index;
*dematerialized_index_pointer += object_length;
for (int i = 0; i < object_length; ++i) {
LOperand* value = environment->values()->at(env_offset + i);
AddToTranslation(environment,
translation,
value,
environment->HasTaggedValueAt(env_offset + i),
environment->HasUint32ValueAt(env_offset + i),
object_index_pointer,
dematerialized_index_pointer);
}
return;
}
if (op->IsStackSlot()) {
if (is_tagged) {
translation->StoreStackSlot(op->index());
} else if (is_uint32) {
translation->StoreUint32StackSlot(op->index());
} else {
translation->StoreInt32StackSlot(op->index());
}
} else if (op->IsDoubleStackSlot()) {
translation->StoreDoubleStackSlot(op->index());
} else if (op->IsRegister()) {
Register reg = ToRegister(op);
if (is_tagged) {
translation->StoreRegister(reg);
} else if (is_uint32) {
translation->StoreUint32Register(reg);
} else {
translation->StoreInt32Register(reg);
}
} else if (op->IsDoubleRegister()) {
XMMRegister reg = ToDoubleRegister(op);
translation->StoreDoubleRegister(reg);
} else if (op->IsConstantOperand()) {
HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
translation->StoreLiteral(src_index);
} else {
UNREACHABLE();
}
}
void LCodeGen::CallCodeGeneric(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr,
SafepointMode safepoint_mode,
int argc) {
DCHECK(instr != NULL);
__ call(code, mode);
RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
// Signal that we don't inline smi code before these stubs in the
// optimizing code generator.
if (code->kind() == Code::BINARY_OP_IC ||
code->kind() == Code::COMPARE_IC) {
__ nop();
}
}
void LCodeGen::CallCode(Handle<Code> code,
RelocInfo::Mode mode,
LInstruction* instr) {
CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
}
void LCodeGen::CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr,
SaveFPRegsMode save_doubles) {
DCHECK(instr != NULL);
DCHECK(instr->HasPointerMap());
__ CallRuntime(function, num_arguments, save_doubles);
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
}
void LCodeGen::LoadContextFromDeferred(LOperand* context) {
if (context->IsRegister()) {
if (!ToRegister(context).is(rsi)) {
__ movp(rsi, ToRegister(context));
}
} else if (context->IsStackSlot()) {
__ movp(rsi, ToOperand(context));
} else if (context->IsConstantOperand()) {
HConstant* constant =
chunk_->LookupConstant(LConstantOperand::cast(context));
__ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));
} else {
UNREACHABLE();
}
}
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
int argc,
LInstruction* instr,
LOperand* context) {
LoadContextFromDeferred(context);
__ CallRuntimeSaveDoubles(id);
RecordSafepointWithRegisters(
instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}
void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
Safepoint::DeoptMode mode) {
environment->set_has_been_used();
if (!environment->HasBeenRegistered()) {
// Physical stack frame layout:
// -x ............. -4 0 ..................................... y
// [incoming arguments] [spill slots] [pushed outgoing arguments]
// Layout of the environment:
// 0 ..................................................... size-1
// [parameters] [locals] [expression stack including arguments]
// Layout of the translation:
// 0 ........................................................ size - 1 + 4
// [expression stack including arguments] [locals] [4 words] [parameters]
// |>------------ translation_size ------------<|
int frame_count = 0;
int jsframe_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
if (e->frame_type() == JS_FUNCTION) {
++jsframe_count;
}
}
Translation translation(&translations_, frame_count, jsframe_count, zone());
WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
environment->Register(deoptimization_index,
translation.index(),
(mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
deoptimizations_.Add(environment, environment->zone());
}
}
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason,
Deoptimizer::BailoutType bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
DCHECK(environment->HasBeenRegistered());
int id = environment->deoptimization_index();
DCHECK(info()->IsOptimizing() || info()->IsStub());
Address entry =
Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
if (entry == NULL) {
Abort(kBailoutWasNotPrepared);
return;
}
if (DeoptEveryNTimes()) {
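    // Stress mode: decrement a global counter and force this deopt when it
    // reaches zero, then reset the counter to FLAG_deopt_every_n_times.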
ExternalReference count = ExternalReference::stress_deopt_count(isolate());
Label no_deopt;
__ pushfq();
__ pushq(rax);
Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
__ movl(rax, count_operand);
__ subl(rax, Immediate(1));
__ j(not_zero, &no_deopt, Label::kNear);
if (FLAG_trap_on_deopt) __ int3();
__ movl(rax, Immediate(FLAG_deopt_every_n_times));
__ movl(count_operand, rax);
__ popq(rax);
__ popfq();
DCHECK(frame_is_built_);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
__ bind(&no_deopt);
__ movl(count_operand, rax);
__ popq(rax);
__ popfq();
}
if (info()->ShouldTrapOnDeopt()) {
Label done;
if (cc != no_condition) {
__ j(NegateCondition(cc), &done, Label::kNear);
}
__ int3();
__ bind(&done);
}
Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
DCHECK(info()->IsStub() || frame_is_built_);
  // Go through the jump table if we need to handle a condition, build a
  // frame, or restore caller doubles.
if (cc == no_condition && frame_is_built_ &&
!info()->saves_caller_doubles()) {
DeoptComment(deopt_info);
__ call(entry, RelocInfo::RUNTIME_ENTRY);
info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
} else {
Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
!frame_is_built_);
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
jump_table_.is_empty() ||
!table_entry.IsEquivalentTo(jump_table_.last())) {
jump_table_.Add(table_entry, zone());
}
if (cc == no_condition) {
__ jmp(&jump_table_.last().label);
} else {
__ j(cc, &jump_table_.last().label);
}
}
}
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
Deoptimizer::DeoptReason deopt_reason) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
}
void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
int length = deoptimizations_.length();
if (length == 0) return;
Handle<DeoptimizationInputData> data =
DeoptimizationInputData::New(isolate(), length, TENURED);
Handle<ByteArray> translations =
translations_.CreateByteArray(isolate()->factory());
data->SetTranslationByteArray(*translations);
data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
if (info_->IsOptimizing()) {
// Reference to shared function info does not change between phases.
AllowDeferredHandleDereference allow_handle_dereference;
data->SetSharedFunctionInfo(*info_->shared_info());
} else {
data->SetSharedFunctionInfo(Smi::FromInt(0));
}
data->SetWeakCellCache(Smi::FromInt(0));
Handle<FixedArray> literals =
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
{ AllowDeferredHandleDereference copy_handles;
for (int i = 0; i < deoptimization_literals_.length(); i++) {
literals->set(i, *deoptimization_literals_[i]);
}
data->SetLiteralArray(*literals);
}
data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
// Populate the deoptimization entries.
for (int i = 0; i < length; i++) {
LEnvironment* env = deoptimizations_[i];
data->SetAstId(i, env->ast_id());
data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
data->SetArgumentsStackHeight(i,
Smi::FromInt(env->arguments_stack_height()));
data->SetPc(i, Smi::FromInt(env->pc_offset()));
}
code->set_deoptimization_data(*data);
}
void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
DCHECK_EQ(0, deoptimization_literals_.length());
for (auto function : chunk()->inlined_functions()) {
DefineDeoptimizationLiteral(function);
}
inlined_function_count_ = deoptimization_literals_.length();
}
void LCodeGen::RecordSafepointWithLazyDeopt(
LInstruction* instr, SafepointMode safepoint_mode, int argc) {
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
} else {
DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
RecordSafepointWithRegisters(
instr->pointer_map(), argc, Safepoint::kLazyDeopt);
}
}
void LCodeGen::RecordSafepoint(
LPointerMap* pointers,
Safepoint::Kind kind,
int arguments,
Safepoint::DeoptMode deopt_mode) {
DCHECK(kind == expected_safepoint_kind_);
const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
kind, arguments, deopt_mode);
for (int i = 0; i < operands->length(); i++) {
LOperand* pointer = operands->at(i);
if (pointer->IsStackSlot()) {
safepoint.DefinePointerSlot(pointer->index(), zone());
} else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
safepoint.DefinePointerRegister(ToRegister(pointer), zone());
}
}
}
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
Safepoint::DeoptMode deopt_mode) {
RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
LPointerMap empty_pointers(zone());
RecordSafepoint(&empty_pointers, deopt_mode);
}
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
int arguments,
Safepoint::DeoptMode deopt_mode) {
RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}
void LCodeGen::RecordAndWritePosition(int position) {
if (position == RelocInfo::kNoPosition) return;
masm()->positions_recorder()->RecordPosition(position);
masm()->positions_recorder()->WriteRecordedPositions();
}
static const char* LabelType(LLabel* label) {
if (label->is_loop_header()) return " (loop header)";
if (label->is_osr_entry()) return " (OSR entry)";
return "";
}
void LCodeGen::DoLabel(LLabel* label) {
Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
current_instruction_,
label->hydrogen_value()->id(),
label->block_id(),
LabelType(label));
__ bind(label->label());
current_block_ = label->block_id();
DoGap(label);
}
void LCodeGen::DoParallelMove(LParallelMove* move) {
resolver_.Resolve(move);
}
void LCodeGen::DoGap(LGap* gap) {
for (int i = LGap::FIRST_INNER_POSITION;
i <= LGap::LAST_INNER_POSITION;
i++) {
LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
LParallelMove* move = gap->GetParallelMove(inner_pos);
if (move != NULL) DoParallelMove(move);
}
}
void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
DoGap(instr);
}
void LCodeGen::DoParameter(LParameter* instr) {
// Nothing to do.
}
void LCodeGen::DoCallStub(LCallStub* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->result()).is(rax));
switch (instr->hydrogen()->major_key()) {
case CodeStub::RegExpExec: {
RegExpExecStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::SubString: {
SubStringStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
case CodeStub::StringCompare: {
StringCompareStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
break;
}
default:
UNREACHABLE();
}
}
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
GenerateOsrPrologue();
}
void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
Register dividend = ToRegister(instr->dividend());
int32_t divisor = instr->divisor();
DCHECK(dividend.is(ToRegister(instr->result())));
// Theoretically, a variation of the branch-free code for integer division by
// a power of 2 (calculating the remainder via an additional multiplication
// (which gets simplified to an 'and') and subtraction) should be faster, and
// this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
// indicate that positive dividends are heavily favored, so the branching
// version performs better.
HMod* hmod = instr->hydrogen();
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
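  // Abs(divisor) is a power of two, so mask == Abs(divisor) - 1 in both
  // cases (e.g. divisor == -8 yields mask == 7); dividend & mask then
  // computes dividend % Abs(divisor) for a non-negative dividend.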
Label dividend_is_not_negative, done;
if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
__ testl(dividend, dividend);
__ j(not_sign, &dividend_is_not_negative, Label::kNear);
// Note that this is correct even for kMinInt operands.
__ negl(dividend);
__ andl(dividend, Immediate(mask));
__ negl(dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
}
__ jmp(&done, Label::kNear);
}
__ bind(&dividend_is_not_negative);
__ andl(dividend, Immediate(mask));
__ bind(&done);
}
void LCodeGen::DoModByConstI(LModByConstI* instr) {
Register dividend = ToRegister(instr->dividend());
int32_t divisor = instr->divisor();
DCHECK(ToRegister(instr->result()).is(rax));
if (divisor == 0) {
DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
return;
}
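  // The remainder is dividend - Abs(divisor) * (dividend / Abs(divisor)):
  // a truncating remainder takes the sign of the dividend, so the divisor's
  // sign can be ignored. TruncatingDiv leaves the quotient in rdx.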
__ TruncatingDiv(dividend, Abs(divisor));
__ imull(rdx, rdx, Immediate(Abs(divisor)));
__ movl(rax, dividend);
__ subl(rax, rdx);
// Check for negative zero.
HMod* hmod = instr->hydrogen();
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ j(not_zero, &remainder_not_zero, Label::kNear);
__ cmpl(dividend, Immediate(0));
DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
__ bind(&remainder_not_zero);
}
}
void LCodeGen::DoModI(LModI* instr) {
HMod* hmod = instr->hydrogen();
Register left_reg = ToRegister(instr->left());
DCHECK(left_reg.is(rax));
Register right_reg = ToRegister(instr->right());
DCHECK(!right_reg.is(rax));
DCHECK(!right_reg.is(rdx));
Register result_reg = ToRegister(instr->result());
DCHECK(result_reg.is(rdx));
Label done;
  // Check for x % 0; idiv would signal a divide error. We have to
  // deopt in this case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(right_reg, right_reg);
DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
}
  // Check for kMinInt % -1; idiv would signal a divide error. We
  // have to deopt if we care about -0, because we can't return that.
if (hmod->CheckFlag(HValue::kCanOverflow)) {
Label no_overflow_possible;
__ cmpl(left_reg, Immediate(kMinInt));
__ j(not_zero, &no_overflow_possible, Label::kNear);
__ cmpl(right_reg, Immediate(-1));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
} else {
__ j(not_equal, &no_overflow_possible, Label::kNear);
__ Set(result_reg, 0);
__ jmp(&done, Label::kNear);
}
__ bind(&no_overflow_possible);
}
// Sign extend dividend in eax into edx:eax, since we are using only the low
// 32 bits of the values.
__ cdq();
// If we care about -0, test if the dividend is <0 and the result is 0.
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label positive_left;
__ testl(left_reg, left_reg);
__ j(not_sign, &positive_left, Label::kNear);
__ idivl(right_reg);
__ testl(result_reg, result_reg);
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
__ jmp(&done, Label::kNear);
__ bind(&positive_left);
}
__ idivl(right_reg);
__ bind(&done);
}
void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
Register dividend = ToRegister(instr->dividend());
int32_t divisor = instr->divisor();
DCHECK(dividend.is(ToRegister(instr->result())));
// If the divisor is positive, things are easy: There can be no deopts and we
// can simply do an arithmetic right shift.
if (divisor == 1) return;
int32_t shift = WhichPowerOf2Abs(divisor);
if (divisor > 1) {
__ sarl(dividend, Immediate(shift));
return;
}
// If the divisor is negative, we have to negate and handle edge cases.
__ negl(dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
return;
}
// If the negation could not overflow, simply shifting is OK.
if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
__ sarl(dividend, Immediate(shift));
return;
}
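  // negl set the overflow flag iff the dividend was kMinInt, whose negation
  // is not representable; in that case the result kMinInt / divisor is
  // loaded as a compile-time constant instead.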
Label not_kmin_int, done;
__ j(no_overflow, &not_kmin_int, Label::kNear);
__ movl(dividend, Immediate(kMinInt / divisor));
__ jmp(&done, Label::kNear);
__ bind(&not_kmin_int);
__ sarl(dividend, Immediate(shift));
__ bind(&done);
}
void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
Register dividend = ToRegister(instr->dividend());
int32_t divisor = instr->divisor();
DCHECK(ToRegister(instr->result()).is(rdx));
if (divisor == 0) {
DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
return;
}
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
// division is the same as the truncating division.
if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
(divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
__ TruncatingDiv(dividend, Abs(divisor));
if (divisor < 0) __ negl(rdx);
return;
}
// In the general case we may need to adjust before and after the truncating
// division to get a flooring division.
Register temp = ToRegister(instr->temp3());
DCHECK(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
Label needs_adjustment, done;
__ cmpl(dividend, Immediate(0));
__ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
__ TruncatingDiv(dividend, Abs(divisor));
if (divisor < 0) __ negl(rdx);
__ jmp(&done, Label::kNear);
__ bind(&needs_adjustment);
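  // Here the dividend and the divisor have opposite signs, so the truncating
  // quotient is one too large for flooring: bias the dividend by one towards
  // zero and decrement the quotient afterwards.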
__ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
__ TruncatingDiv(temp, Abs(divisor));
if (divisor < 0) __ negl(rdx);
__ decl(rdx);
__ bind(&done);
}
// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
HBinaryOperation* hdiv = instr->hydrogen();
Register dividend = ToRegister(instr->dividend());
Register divisor = ToRegister(instr->divisor());
Register remainder = ToRegister(instr->temp());
Register result = ToRegister(instr->result());
DCHECK(dividend.is(rax));
DCHECK(remainder.is(rdx));
DCHECK(result.is(rax));
DCHECK(!divisor.is(rax));
DCHECK(!divisor.is(rdx));
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(divisor, divisor);
DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label dividend_not_zero;
__ testl(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ testl(divisor, divisor);
DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
__ bind(&dividend_not_zero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow)) {
Label dividend_not_min_int;
__ cmpl(dividend, Immediate(kMinInt));
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmpl(divisor, Immediate(-1));
DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
__ bind(&dividend_not_min_int);
}
// Sign extend to rdx (= remainder).
__ cdq();
__ idivl(divisor);
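  // Turn the truncating quotient into a flooring one: if the remainder is
  // non-zero and its sign differs from the divisor's, the xorl/sarl sequence
  // turns the remainder into -1 and the addl decrements the result.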
Label done;
__ testl(remainder, remainder);
__ j(zero, &done, Label::kNear);
__ xorl(remainder, divisor);
__ sarl(remainder, Immediate(31));
__ addl(result, remainder);
__ bind(&done);
}
void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
Register dividend = ToRegister(instr->dividend());
int32_t divisor = instr->divisor();
Register result = ToRegister(instr->result());
DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
DCHECK(!result.is(dividend));
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmpl(dividend, Immediate(kMinInt));
DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ testl(dividend, Immediate(mask));
DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
}
__ Move(result, dividend);
int32_t shift = WhichPowerOf2Abs(divisor);
if (shift > 0) {
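    // This computes (dividend + (dividend < 0 ? 2^shift - 1 : 0)) >> shift,
    // i.e. a division rounded towards zero: sarl/shrl materialize the bias
    // 2^shift - 1 from the sign bit (for shift == 1 the shrl alone
    // suffices).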
    // The arithmetic shift is always OK; the 'if' is an optimization only.
if (shift > 1) __ sarl(result, Immediate(31));
__ shrl(result, Immediate(32 - shift));
__ addl(result, dividend);
__ sarl(result, Immediate(shift));
}
if (divisor < 0) __ negl(result);
}
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
Register dividend = ToRegister(instr->dividend());
int32_t divisor = instr->divisor();
DCHECK(ToRegister(instr->result()).is(rdx));
if (divisor == 0) {
DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
return;
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ testl(dividend, dividend);
DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
}
__ TruncatingDiv(dividend, Abs(divisor));
if (divisor < 0) __ negl(rdx);
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
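    // Verify that the division was exact: multiply the quotient back by the
    // divisor and compare with the dividend, deopting if a remainder was
    // lost.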
__ movl(rax, rdx);
__ imull(rax, rax, Immediate(divisor));
__ subl(rax, dividend);
DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
}
}
// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
HBinaryOperation* hdiv = instr->hydrogen();
Register dividend = ToRegister(instr->dividend());
Register divisor = ToRegister(instr->divisor());
Register remainder = ToRegister(instr->temp());
DCHECK(dividend.is(rax));
DCHECK(remainder.is(rdx));
DCHECK(ToRegister(instr->result()).is(rax));
DCHECK(!divisor.is(rax));
DCHECK(!divisor.is(rdx));
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ testl(divisor, divisor);
DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label dividend_not_zero;
__ testl(dividend, dividend);
__ j(not_zero, &dividend_not_zero, Label::kNear);
__ testl(divisor, divisor);
DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
__ bind(&dividend_not_zero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow)) {
Label dividend_not_min_int;
__ cmpl(dividend, Immediate(kMinInt));
__ j(not_zero, &dividend_not_min_int, Label::kNear);
__ cmpl(divisor, Immediate(-1));
DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
__ bind(&dividend_not_min_int);
}
// Sign extend to rdx (= remainder).
__ cdq();
__ idivl(divisor);
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
// Deoptimize if remainder is not 0.
__ testl(remainder, remainder);
DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
}
}
void LCodeGen::DoMulI(LMulI* instr) {
Register left = ToRegister(instr->left());
LOperand* right = instr->right();
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
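    // Keep a copy of the original left operand: the multiplication below
    // overwrites it, but its sign is still needed for the -0 check at the
    // end.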
if (instr->hydrogen_value()->representation().IsSmi()) {
__ movp(kScratchRegister, left);
} else {
__ movl(kScratchRegister, left);
}
}
bool can_overflow =
instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
if (right->IsConstantOperand()) {
int32_t right_value = ToInteger32(LConstantOperand::cast(right));
if (right_value == -1) {
__ negl(left);
} else if (right_value == 0) {
__ xorl(left, left);
} else if (right_value == 2) {
__ addl(left, left);
} else if (!can_overflow) {
      // If the multiplication is known not to overflow, we can use
      // strength-reduced operations (add, lea, shl) even though they don't
      // set the overflow flag correctly.
switch (right_value) {
case 1:
// Do nothing.
break;
case 3:
__ leal(left, Operand(left, left, times_2, 0));
break;
case 4:
__ shll(left, Immediate(2));
break;
case 5:
__ leal(left, Operand(left, left, times_4, 0));
break;
case 8:
__ shll(left, Immediate(3));
break;
case 9:
__ leal(left, Operand(left, left, times_8, 0));
break;
case 16:
__ shll(left, Immediate(4));
break;
default:
__ imull(left, left, Immediate(right_value));
break;
}
} else {
__ imull(left, left, Immediate(right_value));
}
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
__ SmiToInteger64(left, left);
__ imulp(left, ToOperand(right));
} else {
__ imull(left, ToOperand(right));
}
} else {
if (instr->hydrogen_value()->representation().IsSmi()) {
__ SmiToInteger64(left, left);
__ imulp(left, ToRegister(right));
} else {
__ imull(left, ToRegister(right));
}
}
if (can_overflow) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Bail out if the result is supposed to be negative zero.
Label done;
if (instr->hydrogen_value()->representation().IsSmi()) {
__ testp(left, left);
} else {
__ testl(left, left);
}
__ j(not_zero, &done, Label::kNear);
if (right->IsConstantOperand()) {
// Constant can't be represented as 32-bit Smi due to immediate size
// limit.
DCHECK(SmiValuesAre32Bits()
? !instr->hydrogen_value()->representation().IsSmi()
: SmiValuesAre31Bits());
if (ToInteger32(LConstantOperand::cast(right)) < 0) {
DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
} else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
__ cmpl(kScratchRegister, Immediate(0));
DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
}
} else if (right->IsStackSlot()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
__ orp(kScratchRegister, ToOperand(right));
} else {
__ orl(kScratchRegister, ToOperand(right));
}
DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
} else {
// Test the non-zero operand for negative sign.
if (instr->hydrogen_value()->representation().IsSmi()) {
__ orp(kScratchRegister, ToRegister(right));
} else {
__ orl(kScratchRegister, ToRegister(right));
}
DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
}
__ bind(&done);
}
}
void LCodeGen::DoBitI(LBitI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
DCHECK(left->Equals(instr->result()));
DCHECK(left->IsRegister());
if (right->IsConstantOperand()) {
int32_t right_operand =
ToRepresentation(LConstantOperand::cast(right),
instr->hydrogen()->right()->representation());
switch (instr->op()) {
case Token::BIT_AND:
__ andl(ToRegister(left), Immediate(right_operand));
break;
case Token::BIT_OR:
__ orl(ToRegister(left), Immediate(right_operand));
break;
case Token::BIT_XOR:
if (right_operand == int32_t(~0)) {
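          // xor-ing with all ones is a bitwise not, so emit notl instead.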
__ notl(ToRegister(left));
} else {
__ xorl(ToRegister(left), Immediate(right_operand));
}
break;
default:
UNREACHABLE();
break;
}
} else if (right->IsStackSlot()) {
switch (instr->op()) {
case Token::BIT_AND:
if (instr->IsInteger32()) {
__ andl(ToRegister(left), ToOperand(right));
} else {
__ andp(ToRegister(left), ToOperand(right));
}
break;
case Token::BIT_OR:
if (instr->IsInteger32()) {
__ orl(ToRegister(left), ToOperand(right));
} else {
__ orp(ToRegister(left), ToOperand(right));
}
break;
case Token::BIT_XOR:
if (instr->IsInteger32()) {
__ xorl(ToRegister(left), ToOperand(right));
} else {
__ xorp(ToRegister(left), ToOperand(right));
}
break;
default:
UNREACHABLE();
break;
}
} else {
DCHECK(right->IsRegister());
switch (instr->op()) {
case Token::BIT_AND:
if (instr->IsInteger32()) {
__ andl(ToRegister(left), ToRegister(right));
} else {
__ andp(ToRegister(left), ToRegister(right));
}
break;
case Token::BIT_OR:
if (instr->IsInteger32()) {
__ orl(ToRegister(left), ToRegister(right));
} else {
__ orp(ToRegister(left), ToRegister(right));
}
break;
case Token::BIT_XOR:
if (instr->IsInteger32()) {
__ xorl(ToRegister(left), ToRegister(right));
} else {
__ xorp(ToRegister(left), ToRegister(right));
}
break;
default:
UNREACHABLE();
break;
}
}
}
void LCodeGen::DoShiftI(LShiftI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
DCHECK(left->Equals(instr->result()));
DCHECK(left->IsRegister());
if (right->IsRegister()) {
DCHECK(ToRegister(right).is(rcx));
switch (instr->op()) {
case Token::ROR:
__ rorl_cl(ToRegister(left));
break;
case Token::SAR:
__ sarl_cl(ToRegister(left));
break;
case Token::SHR:
__ shrl_cl(ToRegister(left));
if (instr->can_deopt()) {
__ testl(ToRegister(left), ToRegister(left));
DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
}
break;
case Token::SHL:
__ shll_cl(ToRegister(left));
break;
default:
UNREACHABLE();
break;
}
} else {
int32_t value = ToInteger32(LConstantOperand::cast(right));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
case Token::ROR:
if (shift_count != 0) {
__ rorl(ToRegister(left), Immediate(shift_count));
}
break;
case Token::SAR:
if (shift_count != 0) {
__ sarl(ToRegister(left), Immediate(shift_count));
}
break;
case Token::SHR:
if (shift_count != 0) {
__ shrl(ToRegister(left), Immediate(shift_count));
} else if (instr->can_deopt()) {
__ testl(ToRegister(left), ToRegister(left));
DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
}
break;
case Token::SHL:
if (shift_count != 0) {
if (instr->hydrogen_value()->representation().IsSmi()) {
if (SmiValuesAre32Bits()) {
__ shlp(ToRegister(left), Immediate(shift_count));
} else {
DCHECK(SmiValuesAre31Bits());
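              // When the shift can deopt, the final shift is done by
              // Integer32ToSmi below so that its overflow flag catches a
              // result outside the 31-bit smi range; hence only
              // shift_count - 1 bits are shifted first.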
if (instr->can_deopt()) {
if (shift_count != 1) {
__ shll(ToRegister(left), Immediate(shift_count - 1));
}
__ Integer32ToSmi(ToRegister(left), ToRegister(left));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
} else {
__ shll(ToRegister(left), Immediate(shift_count));
}
}
} else {
__ shll(ToRegister(left), Immediate(shift_count));
}
}
break;
default:
UNREACHABLE();
break;
}
}
}
void LCodeGen::DoSubI(LSubI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
DCHECK(left->Equals(instr->result()));
if (right->IsConstantOperand()) {
int32_t right_operand =
ToRepresentation(LConstantOperand::cast(right),
instr->hydrogen()->right()->representation());
__ subl(ToRegister(left), Immediate(right_operand));
} else if (right->IsRegister()) {
if (instr->hydrogen_value()->representation().IsSmi()) {
__ subp(ToRegister(left), ToRegister(right));
} else {
__ subl(ToRegister(left), ToRegister(right));
}
} else {
if (instr->hydrogen_value()->representation().IsSmi()) {
__ subp(ToRegister(left), ToOperand(right));
} else {
__ subl(ToRegister(left), ToOperand(right));
}
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
}
void LCodeGen::DoConstantI(LConstantI* instr) {
Register dst = ToRegister(instr->result());
if (instr->value() == 0) {
__ xorl(dst, dst);
} else {
__ movl(dst, Immediate(instr->value()));
}
}
void LCodeGen::DoConstantS(LConstantS* instr) {
__ Move(ToRegister(instr->result()), instr->value());
}
void LCodeGen::DoConstantD(LConstantD* instr) {
__ Move(ToDoubleRegister(instr->result()), instr->bits());
}
void LCodeGen::DoConstantE(LConstantE* instr) {
__ LoadAddress(ToRegister(instr->result()), instr->value());
}
void LCodeGen::DoConstantT(LConstantT* instr) {
Handle<Object> object = instr->value(isolate());
AllowDeferredHandleDereference smi_check;
__ Move(ToRegister(instr->result()), object);
}
void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
Register result = ToRegister(instr->result());
Register map = ToRegister(instr->value());
__ EnumLength(result, map);
}
void LCodeGen::DoDateField(LDateField* instr) {
Register object = ToRegister(instr->date());
Register result = ToRegister(instr->result());
Smi* index = instr->index();
DCHECK(object.is(result));
DCHECK(object.is(rax));
if (FLAG_debug_code) {
__ AssertNotSmi(object);
__ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
__ Check(equal, kOperandIsNotADate);
}
if (index->value() == 0) {
__ movp(result, FieldOperand(object, JSDate::kValueOffset));
} else {
Label runtime, done;
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
Operand stamp_operand = __ ExternalOperand(stamp);
__ movp(kScratchRegister, stamp_operand);
__ cmpp(kScratchRegister, FieldOperand(object,
JSDate::kCacheStampOffset));
__ j(not_equal, &runtime, Label::kNear);
__ movp(result, FieldOperand(object, JSDate::kValueOffset +
kPointerSize * index->value()));
__ jmp(&done, Label::kNear);
}
__ bind(&runtime);
__ PrepareCallCFunction(2);
__ movp(arg_reg_1, object);
__ Move(arg_reg_2, index, Assembler::RelocInfoNone());
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
__ bind(&done);
}
}
Operand LCodeGen::BuildSeqStringOperand(Register string,
LOperand* index,
String::Encoding encoding) {
if (index->IsConstantOperand()) {
int offset = ToInteger32(LConstantOperand::cast(index));
if (encoding == String::TWO_BYTE_ENCODING) {
offset *= kUC16Size;
}
STATIC_ASSERT(kCharSize == 1);
return FieldOperand(string, SeqString::kHeaderSize + offset);
}
return FieldOperand(
string, ToRegister(index),
encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
SeqString::kHeaderSize);
}
void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
String::Encoding encoding = instr->hydrogen()->encoding();
Register result = ToRegister(instr->result());
Register string = ToRegister(instr->string());
if (FLAG_debug_code) {
__ Push(string);
__ movp(string, FieldOperand(string, HeapObject::kMapOffset));
__ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));
__ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
__ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type));
__ Check(equal, kUnexpectedStringType);
__ Pop(string);
}
Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
if (encoding == String::ONE_BYTE_ENCODING) {
__ movzxbl(result, operand);
} else {
__ movzxwl(result, operand);
}
}
void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
String::Encoding encoding = instr->hydrogen()->encoding();
Register string = ToRegister(instr->string());
if (FLAG_debug_code) {
Register value = ToRegister(instr->value());
Register index = ToRegister(instr->index());
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
int encoding_mask =
instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
? one_byte_seq_type : two_byte_seq_type;
__ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
}
Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
if (instr->value()->IsConstantOperand()) {
int value = ToInteger32(LConstantOperand::cast(instr->value()));
DCHECK_LE(0, value);
if (encoding == String::ONE_BYTE_ENCODING) {
DCHECK_LE(value, String::kMaxOneByteCharCode);
__ movb(operand, Immediate(value));
} else {
DCHECK_LE(value, String::kMaxUtf16CodeUnit);
__ movw(operand, Immediate(value));
}
} else {
Register value = ToRegister(instr->value());
if (encoding == String::ONE_BYTE_ENCODING) {
__ movb(operand, value);
} else {
__ movw(operand, value);
}
}
}
void LCodeGen::DoAddI(LAddI* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
Representation target_rep = instr->hydrogen()->representation();
bool is_p = target_rep.IsSmi() || target_rep.IsExternal();
if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
if (right->IsConstantOperand()) {
// No support for smi-immediates for 32-bit SMI.
DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
int32_t offset =
ToRepresentation(LConstantOperand::cast(right),
instr->hydrogen()->right()->representation());
if (is_p) {
__ leap(ToRegister(instr->result()),
MemOperand(ToRegister(left), offset));
} else {
__ leal(ToRegister(instr->result()),
MemOperand(ToRegister(left), offset));
}
} else {
Operand address(ToRegister(left), ToRegister(right), times_1, 0);
if (is_p) {
__ leap(ToRegister(instr->result()), address);
} else {
__ leal(ToRegister(instr->result()), address);
}
}
} else {
if (right->IsConstantOperand()) {
// No support for smi-immediates for 32-bit SMI.
DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
int32_t right_operand =
ToRepresentation(LConstantOperand::cast(right),
instr->hydrogen()->right()->representation());
if (is_p) {
__ addp(ToRegister(left), Immediate(right_operand));
} else {
__ addl(ToRegister(left), Immediate(right_operand));
}
} else if (right->IsRegister()) {
if (is_p) {
__ addp(ToRegister(left), ToRegister(right));
} else {
__ addl(ToRegister(left), ToRegister(right));
}
} else {
if (is_p) {
__ addp(ToRegister(left), ToOperand(right));
} else {
__ addl(ToRegister(left), ToOperand(right));
}
}
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
}
}
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
DCHECK(left->Equals(instr->result()));
HMathMinMax::Operation operation = instr->hydrogen()->operation();
if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
Label return_left;
Condition condition = (operation == HMathMinMax::kMathMin)
? less_equal
: greater_equal;
Register left_reg = ToRegister(left);
if (right->IsConstantOperand()) {
Immediate right_imm = Immediate(
ToRepresentation(LConstantOperand::cast(right),
instr->hydrogen()->right()->representation()));
DCHECK(SmiValuesAre32Bits()
? !instr->hydrogen()->representation().IsSmi()
: SmiValuesAre31Bits());
__ cmpl(left_reg, right_imm);
__ j(condition, &return_left, Label::kNear);
__ movp(left_reg, right_imm);
} else if (right->IsRegister()) {
Register right_reg = ToRegister(right);
if (instr->hydrogen_value()->representation().IsSmi()) {
__ cmpp(left_reg, right_reg);
} else {
__ cmpl(left_reg, right_reg);
}
__ j(condition, &return_left, Label::kNear);
__ movp(left_reg, right_reg);
} else {
Operand right_op = ToOperand(right);
if (instr->hydrogen_value()->representation().IsSmi()) {
__ cmpp(left_reg, right_op);
} else {
__ cmpl(left_reg, right_op);
}
__ j(condition, &return_left, Label::kNear);
__ movp(left_reg, right_op);
}
__ bind(&return_left);
} else {
DCHECK(instr->hydrogen()->representation().IsDouble());
Label check_nan_left, check_zero, return_left, return_right;
Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
XMMRegister left_reg = ToDoubleRegister(left);
XMMRegister right_reg = ToDoubleRegister(right);
__ ucomisd(left_reg, right_reg);
__ j(parity_even, &check_nan_left, Label::kNear); // At least one NaN.
__ j(equal, &check_zero, Label::kNear); // left == right.
__ j(condition, &return_left, Label::kNear);
__ jmp(&return_right, Label::kNear);
__ bind(&check_zero);
XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(left_reg, xmm_scratch);
__ j(not_equal, &return_left, Label::kNear); // left == right != 0.
// At this point, both left and right are either 0 or -0.
if (operation == HMathMinMax::kMathMin) {
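      // min(+0, -0) is -0: or-ing the bit patterns keeps the sign bit set if
      // either operand is -0.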
__ orps(left_reg, right_reg);
} else {
// Since we operate on +0 and/or -0, addsd and andsd have the same effect.
__ addsd(left_reg, right_reg);
}
__ jmp(&return_left, Label::kNear);
__ bind(&check_nan_left);
__ ucomisd(left_reg, left_reg); // NaN check.
__ j(parity_even, &return_left, Label::kNear);
__ bind(&return_right);
__ movaps(left_reg, right_reg);
__ bind(&return_left);
}
}
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
XMMRegister left = ToDoubleRegister(instr->left());
XMMRegister right = ToDoubleRegister(instr->right());
XMMRegister result = ToDoubleRegister(instr->result());
switch (instr->op()) {
case Token::ADD:
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(masm(), AVX);
__ vaddsd(result, left, right);
} else {
DCHECK(result.is(left));
__ addsd(left, right);
}
break;
case Token::SUB:
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(masm(), AVX);
__ vsubsd(result, left, right);
} else {
DCHECK(result.is(left));
__ subsd(left, right);
}
break;
case Token::MUL:
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(masm(), AVX);
__ vmulsd(result, left, right);
} else {
DCHECK(result.is(left));
__ mulsd(left, right);
}
break;
case Token::DIV:
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(masm(), AVX);
__ vdivsd(result, left, right);
} else {
DCHECK(result.is(left));
__ divsd(left, right);
}
// Don't delete this mov. It may improve performance on some CPUs
// when there is a (v)mulsd depending on the result.
__ movaps(result, result);
break;
case Token::MOD: {
XMMRegister xmm_scratch = double_scratch0();
__ PrepareCallCFunction(2);
__ movaps(xmm_scratch, left);
DCHECK(right.is(xmm1));
__ CallCFunction(
ExternalReference::mod_two_doubles_operation(isolate()), 2);
__ movaps(result, xmm_scratch);
break;
}
default:
UNREACHABLE();
break;
}
}
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->left()).is(rdx));
DCHECK(ToRegister(instr->right()).is(rax));
DCHECK(ToRegister(instr->result()).is(rax));
Handle<Code> code =
CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
CallCode(code, RelocInfo::CODE_TARGET, instr);
}
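// Emits a conditional branch to the true and false destinations of instr.
// Whenever one destination is the block emitted next, the corresponding
// jump is omitted and control simply falls through.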
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
int left_block = instr->TrueDestination(chunk_);
int right_block = instr->FalseDestination(chunk_);
int next_block = GetNextEmittedBlock();
if (right_block == left_block || cc == no_condition) {
EmitGoto(left_block);
} else if (left_block == next_block) {
__ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
} else if (right_block == next_block) {
__ j(cc, chunk_->GetAssemblyLabel(left_block));
} else {
__ j(cc, chunk_->GetAssemblyLabel(left_block));
if (cc != always) {
__ jmp(chunk_->GetAssemblyLabel(right_block));
}
}
}
template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
int false_block = instr->FalseDestination(chunk_);
__ j(cc, chunk_->GetAssemblyLabel(false_block));
}
void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
__ int3();
}
void LCodeGen::DoBranch(LBranch* instr) {
Representation r = instr->hydrogen()->value()->representation();
if (r.IsInteger32()) {
DCHECK(!info()->IsStub());
Register reg = ToRegister(instr->value());
__ testl(reg, reg);
EmitBranch(instr, not_zero);
} else if (r.IsSmi()) {
DCHECK(!info()->IsStub());
Register reg = ToRegister(instr->value());
__ testp(reg, reg);
EmitBranch(instr, not_zero);
} else if (r.IsDouble()) {
DCHECK(!info()->IsStub());
XMMRegister reg = ToDoubleRegister(instr->value());
XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(reg, xmm_scratch);
EmitBranch(instr, not_equal);
} else {
DCHECK(r.IsTagged());
Register reg = ToRegister(instr->value());
HType type = instr->hydrogen()->value()->type();
if (type.IsBoolean()) {
DCHECK(!info()->IsStub());
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
EmitBranch(instr, equal);
} else if (type.IsSmi()) {
DCHECK(!info()->IsStub());
__ SmiCompare(reg, Smi::FromInt(0));
EmitBranch(instr, not_equal);
} else if (type.IsJSArray()) {
DCHECK(!info()->IsStub());
EmitBranch(instr, no_condition);
} else if (type.IsHeapNumber()) {
DCHECK(!info()->IsStub());
XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
EmitBranch(instr, not_equal);
} else if (type.IsString()) {
DCHECK(!info()->IsStub());
__ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
EmitBranch(instr, not_equal);
} else {
ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
// Avoid deopts in the case where we've never executed this path before.
if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
if (expected.Contains(ToBooleanStub::UNDEFINED)) {
// undefined -> false.
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
__ j(equal, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::BOOLEAN)) {
// true -> true.
__ CompareRoot(reg, Heap::kTrueValueRootIndex);
__ j(equal, instr->TrueLabel(chunk_));
// false -> false.
__ CompareRoot(reg, Heap::kFalseValueRootIndex);
__ j(equal, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
// 'null' -> false.
__ CompareRoot(reg, Heap::kNullValueRootIndex);
__ j(equal, instr->FalseLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::SMI)) {
// Smis: 0 -> false, all other -> true.
__ Cmp(reg, Smi::FromInt(0));
__ j(equal, instr->FalseLabel(chunk_));
__ JumpIfSmi(reg, instr->TrueLabel(chunk_));
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ testb(reg, Immediate(kSmiTagMask));
DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
}
const Register map = kScratchRegister;
if (expected.NeedsMap()) {
__ movp(map, FieldOperand(reg, HeapObject::kMapOffset));
if (expected.CanBeUndetectable()) {
// Undetectable -> false.
__ testb(FieldOperand(map, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, instr->FalseLabel(chunk_));
}
}
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
// spec object -> true.
__ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
__ j(above_equal, instr->TrueLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::STRING)) {
// String value -> false iff empty.
Label not_string;
__ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
__ j(above_equal, &not_string, Label::kNear);
__ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
__ j(not_zero, instr->TrueLabel(chunk_));
__ jmp(instr->FalseLabel(chunk_));
__ bind(&not_string);
}
if (expected.Contains(ToBooleanStub::SYMBOL)) {
// Symbol value -> true.
__ CmpInstanceType(map, SYMBOL_TYPE);
__ j(equal, instr->TrueLabel(chunk_));
}
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
// heap number -> false iff +0, -0, or NaN.
Label not_heap_number;
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &not_heap_number, Label::kNear);
XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
__ j(zero, instr->FalseLabel(chunk_));
__ jmp(instr->TrueLabel(chunk_));
__ bind(&not_heap_number);
}
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
}
}
}
}
void LCodeGen::EmitGoto(int block) {
if (!IsNextEmittedBlock(block)) {
__ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
}
}
void LCodeGen::DoGoto(LGoto* instr) {
EmitGoto(instr->block_id());
}
inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
Condition cond = no_condition;
switch (op) {
case Token::EQ:
case Token::EQ_STRICT:
cond = equal;
break;
case Token::NE:
case Token::NE_STRICT:
cond = not_equal;
break;
case Token::LT:
cond = is_unsigned ? below : less;
break;
case Token::GT:
cond = is_unsigned ? above : greater;
break;
case Token::LTE:
cond = is_unsigned ? below_equal : less_equal;
break;
case Token::GTE:
cond = is_unsigned ? above_equal : greater_equal;
break;
case Token::IN:
case Token::INSTANCEOF:
default:
UNREACHABLE();
}
return cond;
}
void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
LOperand* left = instr->left();
LOperand* right = instr->right();
bool is_unsigned =
instr->is_double() ||
instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
Condition cc = TokenToCondition(instr->op(), is_unsigned);
if (left->IsConstantOperand() && right->IsConstantOperand()) {
// We can statically evaluate the comparison.
double left_val = ToDouble(LConstantOperand::cast(left));
double right_val = ToDouble(LConstantOperand::cast(right));
int next_block = EvalComparison(instr->op(), left_val, right_val) ?
instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
EmitGoto(next_block);
} else {
if (instr->is_double()) {
// Don't base result on EFLAGS when a NaN is involved. Instead
// jump to the false block.
__ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
__ j(parity_even, instr->FalseLabel(chunk_));
} else {
int32_t value;
if (right->IsConstantOperand()) {
value = ToInteger32(LConstantOperand::cast(right));
if (instr->hydrogen_value()->representation().IsSmi()) {
__ Cmp(ToRegister(left), Smi::FromInt(value));
} else {
__ cmpl(ToRegister(left), Immediate(value));
}
} else if (left->IsConstantOperand()) {
value = ToInteger32(LConstantOperand::cast(left));
if (instr->hydrogen_value()->representation().IsSmi()) {
if (right->IsRegister()) {
__ Cmp(ToRegister(right), Smi::FromInt(value));
} else {
__ Cmp(ToOperand(right), Smi::FromInt(value));
}
} else if (right->IsRegister()) {
__ cmpl(ToRegister(right), Immediate(value));
} else {
__ cmpl(ToOperand(right), Immediate(value));
}
// We commuted the operands, so commute the condition.
cc = CommuteCondition(cc);
} else if (instr->hydrogen_value()->representation().IsSmi()) {
if (right->IsRegister()) {
__ cmpp(ToRegister(left), ToRegister(right));
} else {
__ cmpp(ToRegister(left), ToOperand(right));
}
} else {
if (right->IsRegister()) {
__ cmpl(ToRegister(left), ToRegister(right));
} else {
__ cmpl(ToRegister(left), ToOperand(right));
}
}
}
EmitBranch(instr, cc);
}
}
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
Register left = ToRegister(instr->left());
if (instr->right()->IsConstantOperand()) {
Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
__ Cmp(left, right);
} else {
Register right = ToRegister(instr->right());
__ cmpp(left, right);
}
EmitBranch(instr, equal);
}
void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
if (instr->hydrogen()->representation().IsTagged()) {
Register input_reg = ToRegister(instr->object());
__ Cmp(input_reg, factory()->the_hole_value());
EmitBranch(instr, equal);
return;
}
XMMRegister input_reg = ToDoubleRegister(instr->object());
__ ucomisd(input_reg, input_reg);
EmitFalseBranch(instr, parity_odd);
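// Spill the double just below the stack pointer so its upper 32 bits can
// be compared against the hole NaN pattern. The slot is released again
// before the compare; this is safe because no instruction in between
// writes to the stack.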
__ subp(rsp, Immediate(kDoubleSize));
__ movsd(MemOperand(rsp, 0), input_reg);
__ addp(rsp, Immediate(kDoubleSize));
int offset = sizeof(kHoleNanUpper32);
__ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
EmitBranch(instr, equal);
}
void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
Representation rep = instr->hydrogen()->value()->representation();
DCHECK(!rep.IsInteger32());
if (rep.IsDouble()) {
XMMRegister value = ToDoubleRegister(instr->value());
XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(xmm_scratch, value);
EmitFalseBranch(instr, not_equal);
__ movmskpd(kScratchRegister, value);
__ testl(kScratchRegister, Immediate(1));
EmitBranch(instr, not_zero);
} else {
Register value = ToRegister(instr->value());
Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
__ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
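// The upper word of -0.0 is kMinInt (0x80000000), and subtracting 1 from
// kMinInt is the only case that sets the overflow flag, so no_overflow
// here means the value cannot be -0.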
__ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
Immediate(0x1));
EmitFalseBranch(instr, no_overflow);
__ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
Immediate(0x00000000));
EmitBranch(instr, equal);
}
}
Condition LCodeGen::EmitIsObject(Register input,
Label* is_not_object,
Label* is_object) {
DCHECK(!input.is(kScratchRegister));
__ JumpIfSmi(input, is_not_object);
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ j(equal, is_object);
__ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
// Undetectable objects behave like undefined.
__ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
__ j(not_zero, is_not_object);
__ movzxbl(kScratchRegister,
FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
__ cmpb(kScratchRegister, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(below, is_not_object);
__ cmpb(kScratchRegister, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
return below_equal;
}
void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
Register reg = ToRegister(instr->value());
Condition true_cond = EmitIsObject(
reg, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
EmitBranch(instr, true_cond);
}
Condition LCodeGen::EmitIsString(Register input,
Register temp1,
Label* is_not_string,
SmiCheck check_needed = INLINE_SMI_CHECK) {
if (check_needed == INLINE_SMI_CHECK) {
__ JumpIfSmi(input, is_not_string);
}
Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
return cond;
}
void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
Register reg = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
SmiCheck check_needed =
instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
Condition true_cond = EmitIsString(
reg, temp, instr->FalseLabel(chunk_), check_needed);
EmitBranch(instr, true_cond);
}
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
Condition is_smi;
if (instr->value()->IsRegister()) {
Register input = ToRegister(instr->value());
is_smi = masm()->CheckSmi(input);
} else {
Operand input = ToOperand(instr->value());
is_smi = masm()->CheckSmi(input);
}
EmitBranch(instr, is_smi);
}
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ JumpIfSmi(input, instr->FalseLabel(chunk_));
}
__ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
__ testb(FieldOperand(temp, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
EmitBranch(instr, not_zero);
}
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
Token::Value op = instr->op();
Handle<Code> ic =
CodeFactory::CompareIC(isolate(), op, Strength::WEAK).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
__ testp(rax, rax);
EmitBranch(instr, condition);
}
static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == FIRST_TYPE) return to;
DCHECK(from == to || to == LAST_TYPE);
return from;
}
static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
InstanceType from = instr->from();
InstanceType to = instr->to();
if (from == to) return equal;
if (to == LAST_TYPE) return above_equal;
if (from == FIRST_TYPE) return below_equal;
UNREACHABLE();
return equal;
}
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
Register input = ToRegister(instr->value());
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ JumpIfSmi(input, instr->FalseLabel(chunk_));
}
__ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
EmitBranch(instr, BranchCondition(instr->hydrogen()));
}
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
__ AssertString(input);
__ movl(result, FieldOperand(input, String::kHashFieldOffset));
DCHECK(String::kHashShift >= kSmiTagSize);
__ IndexFromHash(result, result);
}
void LCodeGen::DoHasCachedArrayIndexAndBranch(
LHasCachedArrayIndexAndBranch* instr) {
Register input = ToRegister(instr->value());
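// The hash field holds a cached array index iff none of the bits in
// kContainsCachedArrayIndexMask are set, so a zero test result (equal)
// means the index is cached.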
__ testl(FieldOperand(input, String::kHashFieldOffset),
Immediate(String::kContainsCachedArrayIndexMask));
EmitBranch(instr, equal);
}
// Branches to a label or falls through with the answer in the z flag.
// Trashes the temp register.
void LCodeGen::EmitClassOfTest(Label* is_true,
Label* is_false,
Handle<String> class_name,
Register input,
Register temp,
Register temp2) {
DCHECK(!input.is(temp));
DCHECK(!input.is(temp2));
DCHECK(!temp.is(temp2));
__ JumpIfSmi(input, is_false);
if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
// Assuming the following assertions, we can use the same compares to test
// for both being a function type and being in the object type range.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
FIRST_SPEC_OBJECT_TYPE + 1);
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
LAST_SPEC_OBJECT_TYPE - 1);
STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
__ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
__ j(below, is_false);
__ j(equal, is_true);
__ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
__ j(equal, is_true);
} else {
// Faster code path to avoid two compares: subtract lower bound from the
// actual type and do a signed compare with the width of the type range.
__ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
__ movzxbl(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
__ subp(temp2, Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ cmpp(temp2, Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
__ j(above, is_false);
}
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
// Check if the constructor in the map is a function.
__ GetMapConstructor(temp, temp, kScratchRegister);
// Objects with a non-function constructor have class 'Object'.
__ CmpInstanceType(kScratchRegister, JS_FUNCTION_TYPE);
if (String::Equals(class_name, isolate()->factory()->Object_string())) {
__ j(not_equal, is_true);
} else {
__ j(not_equal, is_false);
}
// temp now contains the constructor function. Grab the
// instance class name from there.
__ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
__ movp(temp, FieldOperand(temp,
SharedFunctionInfo::kInstanceClassNameOffset));
// The class name we are testing against is internalized since it's a literal.
// The name in the constructor is internalized because of the way the context
// is booted. This routine isn't expected to work for random API-created
// classes and it doesn't have to because you can't access it with natives
// syntax. Since both sides are internalized it is sufficient to use an
// identity comparison.
DCHECK(class_name->IsInternalizedString());
__ Cmp(temp, class_name);
// End with the answer in the z flag.
}
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
Register input = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
Register temp2 = ToRegister(instr->temp2());
Handle<String> class_name = instr->hydrogen()->class_name();
EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
class_name, input, temp, temp2);
EmitBranch(instr, equal);
}
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
Register reg = ToRegister(instr->value());
__ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
EmitBranch(instr, equal);
}
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
__ Push(ToRegister(instr->left()));
__ Push(ToRegister(instr->right()));
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
Label true_value, done;
__ testp(rax, rax);
__ j(zero, &true_value, Label::kNear);
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
__ jmp(&done, Label::kNear);
__ bind(&true_value);
__ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
class DeferredInstanceOfKnownGlobal final : public LDeferredCode {
public:
DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
LInstanceOfKnownGlobal* instr)
: LDeferredCode(codegen), instr_(instr) { }
void Generate() override {
codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
}
LInstruction* instr() override { return instr_; }
Label* map_check() { return &map_check_; }
private:
LInstanceOfKnownGlobal* instr_;
Label map_check_;
};
DCHECK(ToRegister(instr->context()).is(rsi));
DeferredInstanceOfKnownGlobal* deferred;
deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
Label done, false_result;
Register object = ToRegister(instr->value());
// A Smi is not an instance of anything.
__ JumpIfSmi(object, &false_result, Label::kNear);
// This is the inlined call site instanceof cache. The two occurrences of the
// hole value will be patched to the last map/result pair generated by the
// instanceof stub.
Label cache_miss;
// Use a temp register to avoid memory operands with variable lengths.
Register map = ToRegister(instr->temp());
__ movp(map, FieldOperand(object, HeapObject::kMapOffset));
__ bind(deferred->map_check()); // Label for calculating code patching.
Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
__ Move(kScratchRegister, cache_cell, RelocInfo::CELL);
__ cmpp(map, Operand(kScratchRegister, 0));
__ j(not_equal, &cache_miss, Label::kNear);
// Patched to load either true or false.
__ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
#ifdef DEBUG
// Check that the code size between patch label and patch sites is invariant.
Label end_of_patched_code;
__ bind(&end_of_patched_code);
DCHECK(true);
#endif
__ jmp(&done, Label::kNear);
// The inlined call site cache did not match. Check for null and string
// before calling the deferred code.
__ bind(&cache_miss); // Null is not an instance of anything.
__ CompareRoot(object, Heap::kNullValueRootIndex);
__ j(equal, &false_result, Label::kNear);
// String values are not instances of anything.
__ JumpIfNotString(object, kScratchRegister, deferred->entry());
__ bind(&false_result);
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
__ bind(deferred->exit());
__ bind(&done);
}
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check) {
{
PushSafepointRegistersScope scope(this);
InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
InstanceofStub::kNoFlags | InstanceofStub::kCallSiteInlineCheck);
InstanceofStub stub(isolate(), flags);
__ Push(ToRegister(instr->value()));
__ Push(instr->function());
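// delta is the code size from the patchable map check to the return
// address of the stub call; kAdditionalDelta covers the PushImm32 and
// the call sequence emitted below, as checked by the DCHECK after the
// call.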
static const int kAdditionalDelta = kPointerSize == kInt64Size ? 10 : 16;
int delta =
masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
DCHECK(delta >= 0);
__ PushImm32(delta);
// We are pushing three values on the stack but recording a
// safepoint with two arguments because the stub is going to
// remove the third argument from the stack before jumping
// to the instanceof builtin on the slow path.
CallCodeGeneric(stub.GetCode(),
RelocInfo::CODE_TARGET,
instr,
RECORD_SAFEPOINT_WITH_REGISTERS,
2);
DCHECK(delta == masm_->SizeOfCodeGeneratedSince(map_check));
LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
// Move result to a register that survives the end of the
// PushSafepointRegisterScope.
__ movp(kScratchRegister, rax);
}
__ testp(kScratchRegister, kScratchRegister);
Label load_false;
Label done;
__ j(not_zero, &load_false, Label::kNear);
__ LoadRoot(rax, Heap::kTrueValueRootIndex);
__ jmp(&done, Label::kNear);
__ bind(&load_false);
__ LoadRoot(rax, Heap::kFalseValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoCmpT(LCmpT* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
Token::Value op = instr->op();
Handle<Code> ic =
CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
Condition condition = TokenToCondition(op, false);
Label true_value, done;
__ testp(rax, rax);
__ j(condition, &true_value, Label::kNear);
__ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
__ jmp(&done, Label::kNear);
__ bind(&true_value);
__ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
__ bind(&done);
}
void LCodeGen::DoReturn(LReturn* instr) {
if (FLAG_trace && info()->IsOptimizing()) {
// Preserve the return value on the stack and rely on the runtime call
// to return the value in the same register. We're leaving the code
// managed by the register allocator and tearing down the frame, it's
// safe to write to the context register.
__ Push(rax);
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (info()->saves_caller_doubles()) {
RestoreCallerDoubles();
}
int no_frame_start = -1;
if (NeedsEagerFrame()) {
__ movp(rsp, rbp);
__ popq(rbp);
no_frame_start = masm_->pc_offset();
}
if (instr->has_constant_parameter_count()) {
__ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
rcx);
} else {
DCHECK(info()->IsStub()); // Functions would need to drop one more value.
Register reg = ToRegister(instr->parameter_count());
// The argument count parameter is a smi.
__ SmiToInteger32(reg, reg);
Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
__ PopReturnAddressTo(return_addr_reg);
__ shlp(reg, Immediate(kPointerSizeLog2));
__ addp(rsp, reg);
__ jmp(return_addr_reg);
}
if (no_frame_start != -1) {
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
}
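// Loads the type feedback vector and the smi-tagged slot index into the
// registers expected by the vector load IC calling convention.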
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
Register vector_register = ToRegister(instr->temp_vector());
Register slot_register = LoadWithVectorDescriptor::SlotRegister();
DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
DCHECK(slot_register.is(rax));
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Move(vector_register, vector);
// No need to allocate this register.
FeedbackVectorICSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ Move(slot_register, Smi::FromInt(index));
}
template <class T>
void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
Register vector_register = ToRegister(instr->temp_vector());
Register slot_register = ToRegister(instr->temp_slot());
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
__ Move(vector_register, vector);
FeedbackVectorICSlot slot = instr->hydrogen()->slot();
int index = vector->GetIndex(slot);
__ Move(slot_register, Smi::FromInt(index));
}
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->global_object())
.is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(rax));
__ Move(LoadDescriptor::NameRegister(), instr->name());
EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
SLOPPY, PREMONOMORPHIC).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ movp(result, ContextOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIf(equal, instr, Deoptimizer::kHole);
} else {
Label is_not_hole;
__ j(not_equal, &is_not_hole, Label::kNear);
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ bind(&is_not_hole);
}
}
}
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
Operand target = ContextOperand(context, instr->slot_index());
Label skip_assignment;
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(target, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
DeoptimizeIf(equal, instr, Deoptimizer::kHole);
} else {
__ j(not_equal, &skip_assignment);
}
}
__ movp(target, value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
SmiCheck check_needed =
instr->hydrogen()->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
int offset = Context::SlotOffset(instr->slot_index());
Register scratch = ToRegister(instr->temp());
__ RecordWriteContextSlot(context,
offset,
value,
scratch,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed);
}
__ bind(&skip_assignment);
}
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
HObjectAccess access = instr->hydrogen()->access();
int offset = access.offset();
if (access.IsExternalMemory()) {
Register result = ToRegister(instr->result());
if (instr->object()->IsConstantOperand()) {
DCHECK(result.is(rax));
__ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
} else {
Register object = ToRegister(instr->object());
__ Load(result, MemOperand(object, offset), access.representation());
}
return;
}
Register object = ToRegister(instr->object());
if (instr->hydrogen()->representation().IsDouble()) {
DCHECK(access.IsInobject());
XMMRegister result = ToDoubleRegister(instr->result());
__ movsd(result, FieldOperand(object, offset));
return;
}
Register result = ToRegister(instr->result());
if (!access.IsInobject()) {
__ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
object = result;
}
Representation representation = access.representation();
if (representation.IsSmi() && SmiValuesAre32Bits() &&
instr->hydrogen()->representation().IsInteger32()) {
if (FLAG_debug_code) {
Register scratch = kScratchRegister;
__ Load(scratch, FieldOperand(object, offset), representation);
__ AssertSmi(scratch);
}
// Read int value directly from upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
DCHECK(kSmiTagSize + kSmiShiftSize == 32);
offset += kPointerSize / 2;
representation = Representation::Integer32();
}
__ Load(result, FieldOperand(object, offset), representation);
}
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->result()).is(rax));
__ Move(LoadDescriptor::NameRegister(), instr->name());
EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
Handle<Code> ic =
CodeFactory::LoadICInOptimizedCode(
isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
Register function = ToRegister(instr->function());
Register result = ToRegister(instr->result());
// Get the prototype or initial map from the function.
__ movp(result,
FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
// Check that the function has a prototype or an initial map.
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr, Deoptimizer::kHole);
// If the function does not have an initial map, we're done.
Label done;
__ CmpObjectType(result, MAP_TYPE, kScratchRegister);
__ j(not_equal, &done, Label::kNear);
// Get the prototype from the initial map.
__ movp(result, FieldOperand(result, Map::kPrototypeOffset));
// All done.
__ bind(&done);
}
void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
Register result = ToRegister(instr->result());
__ LoadRoot(result, instr->index());
}
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
Register arguments = ToRegister(instr->arguments());
Register result = ToRegister(instr->result());
if (instr->length()->IsConstantOperand() &&
instr->index()->IsConstantOperand()) {
int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
if (const_index >= 0 && const_index < const_length) {
StackArgumentsAccessor args(arguments, const_length,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ movp(result, args.GetArgumentOperand(const_index));
} else if (FLAG_debug_code) {
__ int3();
}
} else {
Register length = ToRegister(instr->length());
// There are two words between the frame pointer and the last argument.
// Subtracting the index from the length accounts for one of them; add
// one more.
if (instr->index()->IsRegister()) {
__ subl(length, ToRegister(instr->index()));
} else {
__ subl(length, ToOperand(instr->index()));
}
StackArgumentsAccessor args(arguments, length,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ movp(result, args.GetArgumentOperand(0));
}
}
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
Register key_reg = ToRegister(key);
Representation key_representation =
instr->hydrogen()->key()->representation();
if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
__ SmiToInteger64(key_reg, key_reg);
} else if (instr->hydrogen()->IsDehoisted()) {
// Sign-extend the key because it could be a 32-bit negative value and
// the dehoisted address computation happens in 64 bits.
__ movsxlq(key_reg, key_reg);
}
}
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
instr->hydrogen()->key()->representation(),
elements_kind,
instr->base_offset()));
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
elements_kind == FLOAT32_ELEMENTS) {
XMMRegister result(ToDoubleRegister(instr->result()));
__ movss(result, operand);
__ cvtss2sd(result, result);
} else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
elements_kind == FLOAT64_ELEMENTS) {
__ movsd(ToDoubleRegister(instr->result()), operand);
} else {
Register result(ToRegister(instr->result()));
switch (elements_kind) {
case EXTERNAL_INT8_ELEMENTS:
case INT8_ELEMENTS:
__ movsxbl(result, operand);
break;
case EXTERNAL_UINT8_ELEMENTS:
case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
__ movzxbl(result, operand);
break;
case EXTERNAL_INT16_ELEMENTS:
case INT16_ELEMENTS:
__ movsxwl(result, operand);
break;
case EXTERNAL_UINT16_ELEMENTS:
case UINT16_ELEMENTS:
__ movzxwl(result, operand);
break;
case EXTERNAL_INT32_ELEMENTS:
case INT32_ELEMENTS:
__ movl(result, operand);
break;
case EXTERNAL_UINT32_ELEMENTS:
case UINT32_ELEMENTS:
__ movl(result, operand);
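// A loaded uint32 with the sign bit set does not fit in an int32.
// Unless the value is known to be used as unsigned, deoptimize so it
// can be represented as a heap number instead.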
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ testl(result, result);
DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
}
break;
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
}
}
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
XMMRegister result(ToDoubleRegister(instr->result()));
LOperand* key = instr->key();
if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
instr->hydrogen()->IsDehoisted()) {
// Sign-extend the key because it could be a 32-bit negative value and
// the dehoisted address computation happens in 64 bits.
__ movsxlq(ToRegister(key), ToRegister(key));
}
if (instr->hydrogen()->RequiresHoleCheck()) {
Operand hole_check_operand = BuildFastArrayOperand(
instr->elements(),
key,
instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
instr->base_offset() + sizeof(kHoleNanLower32));
__ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
DeoptimizeIf(equal, instr, Deoptimizer::kHole);
}
Operand double_load_operand = BuildFastArrayOperand(
instr->elements(),
key,
instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
instr->base_offset());
__ movsd(result, double_load_operand);
}
void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
HLoadKeyed* hinstr = instr->hydrogen();
Register result = ToRegister(instr->result());
LOperand* key = instr->key();
bool requires_hole_check = hinstr->RequiresHoleCheck();
Representation representation = hinstr->representation();
int offset = instr->base_offset();
if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
instr->hydrogen()->IsDehoisted()) {
// Sign-extend the key because it could be a 32-bit negative value and
// the dehoisted address computation happens in 64 bits.
__ movsxlq(ToRegister(key), ToRegister(key));
}
if (representation.IsInteger32() && SmiValuesAre32Bits() &&
hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
DCHECK(!requires_hole_check);
if (FLAG_debug_code) {
Register scratch = kScratchRegister;
__ Load(scratch,
BuildFastArrayOperand(instr->elements(),
key,
instr->hydrogen()->key()->representation(),
FAST_ELEMENTS,
offset),
Representation::Smi());
__ AssertSmi(scratch);
}
// Read int value directly from upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
DCHECK(kSmiTagSize + kSmiShiftSize == 32);
offset += kPointerSize / 2;
}
__ Load(result,
BuildFastArrayOperand(instr->elements(), key,
instr->hydrogen()->key()->representation(),
FAST_ELEMENTS, offset),
representation);
// Check for the hole value.
if (requires_hole_check) {
if (IsFastSmiElementsKind(hinstr->elements_kind())) {
Condition smi = __ CheckSmi(result);
DeoptimizeIf(NegateCondition(smi), instr, Deoptimizer::kNotASmi);
} else {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr, Deoptimizer::kHole);
}
} else if (hinstr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(hinstr->elements_kind() == FAST_HOLEY_ELEMENTS);
Label done;
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
__ j(not_equal, &done);
if (info()->IsStub()) {
// A stub can safely convert the hole to undefined only if the array
// protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
// it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ Cmp(FieldOperand(result, Cell::kValueOffset),
Smi::FromInt(Isolate::kArrayProtectorValid));
DeoptimizeIf(not_equal, instr, Deoptimizer::kHole);
}
__ Move(result, isolate()->factory()->undefined_value());
__ bind(&done);
}
}
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
if (instr->is_typed_elements()) {
DoLoadKeyedExternalArray(instr);
} else if (instr->hydrogen()->representation().IsDouble()) {
DoLoadKeyedFixedDoubleArray(instr);
} else {
DoLoadKeyedFixedArray(instr);
}
}
Operand LCodeGen::BuildFastArrayOperand(
LOperand* elements_pointer,
LOperand* key,
Representation key_representation,
ElementsKind elements_kind,
uint32_t offset) {
Register elements_pointer_reg = ToRegister(elements_pointer);
int shift_size = ElementsKindToShiftSize(elements_kind);
if (key->IsConstantOperand()) {
int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
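// A constant key with any of the top four bits set could overflow the
// 32-bit displacement once shifted by the element size, so give up on
// generating this code.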
if (constant_value & 0xF0000000) {
Abort(kArrayIndexConstantValueTooBig);
}
return Operand(elements_pointer_reg,
(constant_value << shift_size) + offset);
} else {
// Guaranteed by ArrayInstructionInterface::KeyedAccessIndexRequirement().
DCHECK(key_representation.IsInteger32());
ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
return Operand(elements_pointer_reg,
ToRegister(key),
scale_factor,
offset);
}
}
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
if (instr->hydrogen()->HasVectorAndSlot()) {
EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
}
Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
isolate(), instr->hydrogen()->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
Register result = ToRegister(instr->result());
if (instr->hydrogen()->from_inlined()) {
__ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
} else {
// Check for arguments adapter frame.
Label done, adapted;
__ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(equal, &adapted, Label::kNear);
// No arguments adaptor frame.
__ movp(result, rbp);
__ jmp(&done, Label::kNear);
// Arguments adaptor frame present.
__ bind(&adapted);
__ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
// Result is the frame pointer for the frame if not adapted and for the real
// frame below the adaptor frame if adapted.
__ bind(&done);
}
}
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
Register result = ToRegister(instr->result());
Label done;
// If there is no arguments adaptor frame, the number of arguments is fixed.
if (instr->elements()->IsRegister()) {
__ cmpp(rbp, ToRegister(instr->elements()));
} else {
__ cmpp(rbp, ToOperand(instr->elements()));
}
__ movl(result, Immediate(scope()->num_parameters()));
__ j(equal, &done, Label::kNear);
// Arguments adaptor frame present. Get argument length from there.
__ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ SmiToInteger32(result,
Operand(result,
ArgumentsAdaptorFrameConstants::kLengthOffset));
// Argument length is in result register.
__ bind(&done);
}
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
// If the receiver is null or undefined, we have to pass the global
// object as a receiver to normal functions. Values have to be
// passed unchanged to builtins and strict-mode functions.
Label global_object, receiver_ok;
Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
if (!instr->hydrogen()->known_function()) {
// Do not transform the receiver to object for strict mode
// functions.
__ movp(kScratchRegister,
FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
__ testb(FieldOperand(kScratchRegister,
SharedFunctionInfo::kStrictModeByteOffset),
Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
__ j(not_equal, &receiver_ok, dist);
// Do not transform the receiver to object for builtins.
__ testb(FieldOperand(kScratchRegister,
SharedFunctionInfo::kNativeByteOffset),
Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
__ j(not_equal, &receiver_ok, dist);
}
// Normal function. Replace undefined or null with global receiver.
__ CompareRoot(receiver, Heap::kNullValueRootIndex);
__ j(equal, &global_object, Label::kNear);
__ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
__ j(equal, &global_object, Label::kNear);
// The receiver should be a JS object.
Condition is_smi = __ CheckSmi(receiver);
DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
__ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
__ jmp(&receiver_ok, Label::kNear);
__ bind(&global_object);
__ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
__ movp(receiver,
Operand(receiver,
Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
__ movp(receiver, FieldOperand(receiver, GlobalObject::kGlobalProxyOffset));
__ bind(&receiver_ok);
}
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
Register receiver = ToRegister(instr->receiver());
Register function = ToRegister(instr->function());
Register length = ToRegister(instr->length());
Register elements = ToRegister(instr->elements());
DCHECK(receiver.is(rax)); // Used for parameter count.
DCHECK(function.is(rdi)); // Required by InvokeFunction.
DCHECK(ToRegister(instr->result()).is(rax));
// Copy the arguments to this function possibly from the
// adaptor frame below it.
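// Bound the number of applied arguments to keep the pushed frame small;
// longer argument lists deoptimize instead.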
const uint32_t kArgumentsLimit = 1 * KB;
__ cmpp(length, Immediate(kArgumentsLimit));
DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
__ Push(receiver);
__ movp(receiver, length);
// Loop through the arguments pushing them onto the execution
// stack.
Label invoke, loop;
// length is a small non-negative integer due to the test above.
__ testl(length, length);
__ j(zero, &invoke, Label::kNear);
__ bind(&loop);
StackArgumentsAccessor args(elements, length,
ARGUMENTS_DONT_CONTAIN_RECEIVER);
__ Push(args.GetArgumentOperand(0));
__ decl(length);
__ j(not_zero, &loop);
// Invoke the function.
__ bind(&invoke);
DCHECK(instr->HasPointerMap());
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator safepoint_generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount actual(rax);
__ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
}
void LCodeGen::DoPushArgument(LPushArgument* instr) {
LOperand* argument = instr->value();
EmitPushTaggedOperand(argument);
}
void LCodeGen::DoDrop(LDrop* instr) {
__ Drop(instr->count());
}
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
__ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
}
void LCodeGen::DoContext(LContext* instr) {
Register result = ToRegister(instr->result());
if (info()->IsOptimizing()) {
__ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
} else {
// If there is no frame, the context must be in rsi.
DCHECK(result.is(rsi));
}
}
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
__ Push(rsi); // The context is the first argument.
__ Push(instr->hydrogen()->pairs());
__ Push(Smi::FromInt(instr->hydrogen()->flags()));
CallRuntime(Runtime::kDeclareGlobals, 3, instr);
}
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int formal_parameter_count, int arity,
LInstruction* instr) {
bool dont_adapt_arguments =
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
bool can_invoke_directly =
dont_adapt_arguments || formal_parameter_count == arity;
Register function_reg = rdi;
LPointerMap* pointers = instr->pointer_map();
if (can_invoke_directly) {
// Change context.
__ movp(rsi, FieldOperand(function_reg, JSFunction::kContextOffset));
// Set rax to the arguments count if adaptation is not needed. Assumes
// that rax is available to write to at this point.
if (dont_adapt_arguments) {
__ Set(rax, arity);
}
// Invoke function.
if (function.is_identical_to(info()->closure())) {
__ CallSelf();
} else {
__ Call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
}
// Set up deoptimization.
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
} else {
// We need to adapt arguments.
SafepointGenerator generator(
this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(arity);
ParameterCount expected(formal_parameter_count);
__ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
}
}
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
DCHECK(ToRegister(instr->result()).is(rax));
if (instr->hydrogen()->IsTailCall()) {
if (NeedsEagerFrame()) __ leave();
if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
__ jmp(code, RelocInfo::CODE_TARGET);
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
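// The target register holds a Code object; skip past the header to
// reach its first instruction.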
__ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(target);
}
} else {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
if (instr->target()->IsConstantOperand()) {
LConstantOperand* target = LConstantOperand::cast(instr->target());
Handle<Code> code = Handle<Code>::cast(ToHandle(target));
generator.BeforeCall(__ CallSize(code));
__ call(code, RelocInfo::CODE_TARGET);
} else {
DCHECK(instr->target()->IsRegister());
Register target = ToRegister(instr->target());
generator.BeforeCall(__ CallSize(target));
__ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(target);
}
generator.AfterCall();
}
}
void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
DCHECK(ToRegister(instr->function()).is(rdi));
DCHECK(ToRegister(instr->result()).is(rax));
if (instr->hydrogen()->pass_argument_count()) {
__ Set(rax, instr->arity());
}
// Change context.
__ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
bool is_self_call = false;
if (instr->hydrogen()->function()->IsConstant()) {
Handle<JSFunction> jsfun = Handle<JSFunction>::null();
HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
jsfun = Handle<JSFunction>::cast(fun_const->handle(isolate()));
is_self_call = jsfun.is_identical_to(info()->closure());
}
if (is_self_call) {
__ CallSelf();
} else {
Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
generator.BeforeCall(__ CallSize(target));
__ Call(target);
}
generator.AfterCall();
}
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
Label slow, allocated, done;
Register tmp = input_reg.is(rax) ? rcx : rax;
Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
__ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
// Check the sign of the argument. If the argument is positive, just
// return it. We do not need to patch the stack since |input| and
// |result| are the same register and |input| will be restored
// unchanged by popping safepoint registers.
__ testl(tmp, Immediate(HeapNumber::kSignMask));
__ j(zero, &done);
__ AllocateHeapNumber(tmp, tmp2, &slow);
__ jmp(&allocated, Label::kNear);
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
CallRuntimeFromDeferred(
Runtime::kAllocateHeapNumber, 0, instr, instr->context());
// Set the pointer to the new heap number in tmp.
if (!tmp.is(rax)) __ movp(tmp, rax);
// Restore input_reg after call to runtime.
__ LoadFromSafepointRegisterSlot(input_reg, input_reg);
__ bind(&allocated);
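// Clear the sign bit of the double by shifting it out and back in, then
// store the result into the destination heap number.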
__ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ shlq(tmp2, Immediate(1));
__ shrq(tmp2, Immediate(1));
__ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
__ StoreToSafepointRegisterSlot(input_reg, tmp);
__ bind(&done);
}
void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ testl(input_reg, input_reg);
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
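// negl leaves kMinInt unchanged with the sign flag set, so a negative
// result means the input was kMinInt: deoptimize on overflow.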
__ negl(input_reg); // Sets flags.
DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
__ bind(&is_positive);
}
void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ testp(input_reg, input_reg);
Label is_positive;
__ j(not_sign, &is_positive, Label::kNear);
__ negp(input_reg); // Sets flags.
DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
__ bind(&is_positive);
}
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
void Generate() override {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
LInstruction* instr() override { return instr_; }
private:
LMathAbs* instr_;
};
DCHECK(instr->value()->Equals(instr->result()));
Representation r = instr->hydrogen()->value()->representation();
if (r.IsDouble()) {
XMMRegister scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ xorps(scratch, scratch);
__ subsd(scratch, input_reg);
__ andps(input_reg, scratch);
} else if (r.IsInteger32()) {
EmitIntegerMathAbs(instr);
} else if (r.IsSmi()) {
EmitSmiMathAbs(instr);
} else { // Tagged case.
DeferredMathAbsTaggedHeapNumber* deferred =
new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
Register input_reg = ToRegister(instr->value());
// Smi check.
__ JumpIfNotSmi(input_reg, deferred->entry());
EmitSmiMathAbs(instr);
__ bind(deferred->exit());
}
}
void LCodeGen::DoMathFloor(LMathFloor* instr) {
XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
if (CpuFeatures::IsSupported(SSE4_1)) {
CpuFeatureScope scope(masm(), SSE4_1);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Deoptimize if minus zero.
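// -0.0 is the only double whose bit pattern is kMinInt64
// (0x8000000000000000), so subtracting 1 sets the overflow flag exactly
// when the input is -0.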
__ movq(output_reg, input_reg);
__ subq(output_reg, Immediate(1));
DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero);
}
__ roundsd(xmm_scratch, input_reg, kRoundDown);
__ cvttsd2si(output_reg, xmm_scratch);
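// cvttsd2si signals a failed conversion by producing kMinInt
// (0x80000000); comparing with 1 sets the overflow flag in exactly that
// case.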
__ cmpl(output_reg, Immediate(0x1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
} else {
Label negative_sign, done;
// Deoptimize on unordered.
__ xorps(xmm_scratch, xmm_scratch); // Zero the register.
__ ucomisd(input_reg, xmm_scratch);
DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
__ j(below, &negative_sign, Label::kNear);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Check for negative zero.
Label positive_sign;
__ j(above, &positive_sign, Label::kNear);
__ movmskpd(output_reg, input_reg);
__ testq(output_reg, Immediate(1));
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
__ Set(output_reg, 0);
__ jmp(&done);
__ bind(&positive_sign);
}
// Use truncating instruction (OK because input is positive).
__ cvttsd2si(output_reg, input_reg);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
__ jmp(&done, Label::kNear);
// Non-zero negative values reach here.
__ bind(&negative_sign);
// Truncate, then compare and compensate.
__ cvttsd2si(output_reg, input_reg);
__ Cvtlsi2sd(xmm_scratch, output_reg);
__ ucomisd(input_reg, xmm_scratch);
__ j(equal, &done, Label::kNear);
__ subl(output_reg, Immediate(1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
__ bind(&done);
}
}
void LCodeGen::DoMathRound(LMathRound* instr) {
const XMMRegister xmm_scratch = double_scratch0();
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
XMMRegister input_temp = ToDoubleRegister(instr->temp());
static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5
static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5
Label done, round_to_zero, below_one_half;
Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
__ movq(kScratchRegister, one_half);
__ movq(xmm_scratch, kScratchRegister);
__ ucomisd(xmm_scratch, input_reg);
__ j(above, &below_one_half, Label::kNear);
// CVTTSD2SI rounds towards zero; since 0.5 <= x, we use floor(0.5 + x).
__ addsd(xmm_scratch, input_reg);
__ cvttsd2si(output_reg, xmm_scratch);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
__ jmp(&done, dist);
__ bind(&below_one_half);
__ movq(kScratchRegister, minus_one_half);
__ movq(xmm_scratch, kScratchRegister);
__ ucomisd(xmm_scratch, input_reg);
__ j(below_equal, &round_to_zero, Label::kNear);
// CVTTSD2SI rounds towards zero; we use ceil(x - (-0.5)) and then
// compare and compensate.
__ movq(input_temp, input_reg); // Do not alter input_reg.
__ subsd(input_temp, xmm_scratch);
__ cvttsd2si(output_reg, input_temp);
// Catch minint due to overflow, and to prevent overflow when compensating.
__ cmpl(output_reg, Immediate(0x1));
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
__ Cvtlsi2sd(xmm_scratch, output_reg);
__ ucomisd(xmm_scratch, input_temp);
__ j(equal, &done, dist);
__ subl(output_reg, Immediate(1));
// No overflow because we already ruled out minint.
__ jmp(&done, dist);
__ bind(&round_to_zero);
// We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
// we can ignore the difference between a result of -0 and +0.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ movq(output_reg, input_reg);
__ testq(output_reg, output_reg);
DeoptimizeIf(negative, instr, Deoptimizer::kMinusZero);
}
__ Set(output_reg, 0);
__ bind(&done);
}
void LCodeGen::DoMathFround(LMathFround* instr) {
XMMRegister input_reg = ToDoubleRegister(instr->value());
XMMRegister output_reg = ToDoubleRegister(instr->result());
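// Round to float32 precision by converting to single precision and back
// to double.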
__ cvtsd2ss(output_reg, input_reg);
__ cvtss2sd(output_reg, output_reg);
}
void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
XMMRegister output = ToDoubleRegister(instr->result());
if (instr->value()->IsDoubleRegister()) {
XMMRegister input = ToDoubleRegister(instr->value());
__ sqrtsd(output, input);
} else {
Operand input = ToOperand(instr->value());
__ sqrtsd(output, input);
}
}
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
XMMRegister xmm_scratch = double_scratch0();
XMMRegister input_reg = ToDoubleRegister(instr->value());
DCHECK(ToDoubleRegister(instr->result()).is(input_reg));
// Note that according to ECMA-262 15.8.2.13:
// Math.pow(-Infinity, 0.5) == Infinity
// Math.sqrt(-Infinity) == NaN
Label done, sqrt;
// Check base for -Infinity. According to IEEE-754, double-precision
// -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
__ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
__ movq(xmm_scratch, kScratchRegister);
__ ucomisd(xmm_scratch, input_reg);
// Comparing -Infinity with NaN results in "unordered", which sets the
// zero flag as if both were equal. However, it also sets the carry flag.
__ j(not_equal, &sqrt, Label::kNear);
__ j(carry, &sqrt, Label::kNear);
// If input is -Infinity, return Infinity.
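// 0.0 - (-Infinity) = +Infinity, so clear input_reg and subtract.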
__ xorps(input_reg, input_reg);
__ subsd(input_reg, xmm_scratch);
__ jmp(&done, Label::kNear);
// Square root.
__ bind(&sqrt);
__ xorps(xmm_scratch, xmm_scratch);
__ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
__ sqrtsd(input_reg, input_reg);
__ bind(&done);
}
void LCodeGen::DoPower(LPower* instr) {
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
Register tagged_exponent = MathPowTaggedDescriptor::exponent();
DCHECK(!instr->right()->IsRegister() ||
ToRegister(instr->right()).is(tagged_exponent));
DCHECK(!instr->right()->IsDoubleRegister() ||
ToDoubleRegister(instr->right()).is(xmm1));
DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
DCHECK(ToDoubleRegister(instr->result()).is(xmm3));
if (exponent_type.IsSmi()) {
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsTagged()) {
Label no_deopt;
__ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear);
__ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx);
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
} else if (exponent_type.IsInteger32()) {
MathPowStub stub(isolate(), MathPowStub::INTEGER);
__ CallStub(&stub);
} else {
DCHECK(exponent_type.IsDouble());
MathPowStub stub(isolate(), MathPowStub::DOUBLE);
__ CallStub(&stub);
}
}
void LCodeGen::DoMathExp(LMathExp* instr) {
XMMRegister input = ToDoubleRegister(instr->value());
XMMRegister result = ToDoubleRegister(instr->result());
XMMRegister temp0 = double_scratch0();
Register temp1 = ToRegister(instr->temp1());
Register temp2 = ToRegister(instr->temp2());
MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
}
void LCodeGen::DoMathLog(LMathLog* instr) {
DCHECK(instr->value()->Equals(instr->result()));
XMMRegister input_reg = ToDoubleRegister(instr->value());
XMMRegister xmm_scratch = double_scratch0();
Label positive, done, zero;
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(input_reg, xmm_scratch);
__ j(above, &positive, Label::kNear);
__ j(not_carry, &zero, Label::kNear);
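// Negative input (or NaN): the result is NaN. pcmpeqd against itself sets
// all bits, which is a quiet-NaN bit pattern.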
__ pcmpeqd(input_reg, input_reg);
__ jmp(&done, Label::kNear);
__ bind(&zero);
ExternalReference ninf =
ExternalReference::address_of_negative_infinity();
Operand ninf_operand = masm()->ExternalOperand(ninf);
__ movsd(input_reg, ninf_operand);
__ jmp(&done, Label::kNear);
__ bind(&positive);
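// Compute ln(x) on the x87 stack: fyl2x calculates ST(1) * log2(ST(0)),
// so with ST(1) = ln(2) the result is ln(2) * log2(x) == ln(x).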
__ fldln2();
__ subp(rsp, Immediate(kDoubleSize));
__ movsd(Operand(rsp, 0), input_reg);
__ fld_d(Operand(rsp, 0));
__ fyl2x();
__ fstp_d(Operand(rsp, 0));
__ movsd(input_reg, Operand(rsp, 0));
__ addp(rsp, Immediate(kDoubleSize));
__ bind(&done);
}
void LCodeGen::DoMathClz32(LMathClz32* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
__ Lzcntl(result, input);
}
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->function()).is(rdi));
DCHECK(instr->HasPointerMap());
Handle<JSFunction> known_function = instr->hydrogen()->known_function();
if (known_function.is_null()) {
LPointerMap* pointers = instr->pointer_map();
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
ParameterCount count(instr->arity());
__ InvokeFunction(rdi, count, CALL_FUNCTION, generator);
} else {
CallKnownFunction(known_function,
instr->hydrogen()->formal_parameter_count(),
instr->arity(), instr);
}
}
void LCodeGen::DoCallFunction(LCallFunction* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->function()).is(rdi));
DCHECK(ToRegister(instr->result()).is(rax));
int arity = instr->arity();
CallFunctionFlags flags = instr->hydrogen()->function_flags();
if (instr->hydrogen()->HasVectorAndSlot()) {
Register slot_register = ToRegister(instr->temp_slot());
Register vector_register = ToRegister(instr->temp_vector());
DCHECK(slot_register.is(rdx));
DCHECK(vector_register.is(rbx));
AllowDeferredHandleDereference vector_structure_check;
Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
int index = vector->GetIndex(instr->hydrogen()->slot());
__ Move(vector_register, vector);
__ Move(slot_register, Smi::FromInt(index));
CallICState::CallType call_type =
(flags & CALL_AS_METHOD) ? CallICState::METHOD : CallICState::FUNCTION;
Handle<Code> ic =
CodeFactory::CallICInOptimizedCode(isolate(), arity, call_type).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
} else {
CallFunctionStub stub(isolate(), arity, flags);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
void LCodeGen::DoCallNew(LCallNew* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->constructor()).is(rdi));
DCHECK(ToRegister(instr->result()).is(rax));
__ Set(rax, instr->arity());
// No cell in rbx for construct type feedback in optimized code.
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->constructor()).is(rdi));
DCHECK(ToRegister(instr->result()).is(rax));
__ Set(rax, instr->arity());
if (instr->arity() == 1) {
// We only need the allocation site when there is a length argument; that
// case may bail out to the runtime, which determines the correct elements
// kind from the site.
__ Move(rbx, instr->hydrogen()->site());
} else {
__ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
}
ElementsKind kind = instr->hydrogen()->elements_kind();
AllocationSiteOverrideMode override_mode =
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
? DISABLE_ALLOCATION_SITES
: DONT_OVERRIDE;
if (instr->arity() == 0) {
ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
} else if (instr->arity() == 1) {
Label done;
if (IsFastPackedElementsKind(kind)) {
Label packed_case;
// We might need the holey elements kind; check whether the first
// argument (the array length) is zero.
__ movp(rcx, Operand(rsp, 0));
__ testp(rcx, rcx);
__ j(zero, &packed_case, Label::kNear);
ElementsKind holey_kind = GetHoleyElementsKind(kind);
ArraySingleArgumentConstructorStub stub(isolate(),
holey_kind,
override_mode);
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
__ jmp(&done, Label::kNear);
__ bind(&packed_case);
}
ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
__ bind(&done);
} else {
ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
}
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}
void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
Register function = ToRegister(instr->function());
Register code_object = ToRegister(instr->code_object());
__ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
__ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
}
void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
Register result = ToRegister(instr->result());
Register base = ToRegister(instr->base_object());
if (instr->offset()->IsConstantOperand()) {
LConstantOperand* offset = LConstantOperand::cast(instr->offset());
__ leap(result, Operand(base, ToInteger32(offset)));
} else {
Register offset = ToRegister(instr->offset());
__ leap(result, Operand(base, offset, times_1, 0));
}
}
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
HStoreNamedField* hinstr = instr->hydrogen();
Representation representation = instr->representation();
HObjectAccess access = hinstr->access();
int offset = access.offset();
if (access.IsExternalMemory()) {
DCHECK(!hinstr->NeedsWriteBarrier());
Register value = ToRegister(instr->value());
if (instr->object()->IsConstantOperand()) {
DCHECK(value.is(rax));
LConstantOperand* object = LConstantOperand::cast(instr->object());
__ store_rax(ToExternalReference(object));
} else {
Register object = ToRegister(instr->object());
__ Store(MemOperand(object, offset), value, representation);
}
return;
}
Register object = ToRegister(instr->object());
__ AssertNotSmi(object);
DCHECK(!representation.IsSmi() ||
!instr->value()->IsConstantOperand() ||
IsInteger32Constant(LConstantOperand::cast(instr->value())));
if (!FLAG_unbox_double_fields && representation.IsDouble()) {
DCHECK(access.IsInobject());
DCHECK(!hinstr->has_transition());
DCHECK(!hinstr->NeedsWriteBarrier());
XMMRegister value = ToDoubleRegister(instr->value());
__ movsd(FieldOperand(object, offset), value);
return;
}
if (hinstr->has_transition()) {
Handle<Map> transition = hinstr->transition_map();
AddDeprecationDependency(transition);
if (!hinstr->NeedsWriteBarrierForMap()) {
__ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
} else {
Register temp = ToRegister(instr->temp());
__ Move(kScratchRegister, transition);
__ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
// Update the write barrier for the map field.
__ RecordWriteForMap(object,
kScratchRegister,
temp,
kSaveFPRegs);
}
}
// Do the store.
Register write_register = object;
if (!access.IsInobject()) {
write_register = ToRegister(instr->temp());
__ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
}
if (representation.IsSmi() && SmiValuesAre32Bits() &&
hinstr->value()->representation().IsInteger32()) {
DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
if (FLAG_debug_code) {
Register scratch = kScratchRegister;
__ Load(scratch, FieldOperand(write_register, offset), representation);
__ AssertSmi(scratch);
}
// Store int value directly to upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
DCHECK(kSmiTagSize + kSmiShiftSize == 32);
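// With 32-bit smis the payload lives in the upper 32 bits of the word
// (Smi(5) == 0x0000000500000000); on little-endian x64 those are the four
// bytes at offset + 4, so a 32-bit store updates the payload while the
// zero tag half stays untouched.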
offset += kPointerSize / 2;
representation = Representation::Integer32();
}
Operand operand = FieldOperand(write_register, offset);
if (FLAG_unbox_double_fields && representation.IsDouble()) {
DCHECK(access.IsInobject());
XMMRegister value = ToDoubleRegister(instr->value());
__ movsd(operand, value);
} else if (instr->value()->IsRegister()) {
Register value = ToRegister(instr->value());
__ Store(operand, value, representation);
} else {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
if (IsInteger32Constant(operand_value)) {
DCHECK(!hinstr->NeedsWriteBarrier());
int32_t value = ToInteger32(operand_value);
if (representation.IsSmi()) {
__ Move(operand, Smi::FromInt(value));
} else {
__ movl(operand, Immediate(value));
}
} else {
Handle<Object> handle_value = ToHandle(operand_value);
DCHECK(!hinstr->NeedsWriteBarrier());
__ Move(operand, handle_value);
}
}
if (hinstr->NeedsWriteBarrier()) {
Register value = ToRegister(instr->value());
Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
// Update the write barrier for the object for in-object properties.
__ RecordWriteField(write_register,
offset,
value,
temp,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
hinstr->SmiCheckForWriteBarrier(),
hinstr->PointersToHereCheckForValue());
}
}
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
if (instr->hydrogen()->HasVectorAndSlot()) {
EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
}
__ Move(StoreDescriptor::NameRegister(), instr->hydrogen()->name());
Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
Representation representation = instr->hydrogen()->length()->representation();
DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
DCHECK(representation.IsSmiOrInteger32());
Condition cc = instr->hydrogen()->allow_equality() ? below : below_equal;
if (instr->length()->IsConstantOperand()) {
int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
Register index = ToRegister(instr->index());
if (representation.IsSmi()) {
__ Cmp(index, Smi::FromInt(length));
} else {
__ cmpl(index, Immediate(length));
}
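// The comparison was emitted as (index, length) rather than
// (length, index), so the condition must be commuted.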
cc = CommuteCondition(cc);
} else if (instr->index()->IsConstantOperand()) {
int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
if (instr->length()->IsRegister()) {
Register length = ToRegister(instr->length());
if (representation.IsSmi()) {
__ Cmp(length, Smi::FromInt(index));
} else {
__ cmpl(length, Immediate(index));
}
} else {
Operand length = ToOperand(instr->length());
if (representation.IsSmi()) {
__ Cmp(length, Smi::FromInt(index));
} else {
__ cmpl(length, Immediate(index));
}
}
} else {
Register index = ToRegister(instr->index());
if (instr->length()->IsRegister()) {
Register length = ToRegister(instr->length());
if (representation.IsSmi()) {
__ cmpp(length, index);
} else {
__ cmpl(length, index);
}
} else {
Operand length = ToOperand(instr->length());
if (representation.IsSmi()) {
__ cmpp(length, index);
} else {
__ cmpl(length, index);
}
}
}
if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
Label done;
__ j(NegateCondition(cc), &done, Label::kNear);
__ int3();
__ bind(&done);
} else {
DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
}
}
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
Register key_reg = ToRegister(key);
Representation key_representation =
instr->hydrogen()->key()->representation();
if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
__ SmiToInteger64(key_reg, key_reg);
} else if (instr->hydrogen()->IsDehoisted()) {
// Sign-extend the key because it could be a 32-bit negative value and
// the dehoisted address computation happens in 64 bits.
__ movsxlq(key_reg, key_reg);
}
}
Operand operand(BuildFastArrayOperand(
instr->elements(),
key,
instr->hydrogen()->key()->representation(),
elements_kind,
instr->base_offset()));
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
elements_kind == FLOAT32_ELEMENTS) {
XMMRegister value(ToDoubleRegister(instr->value()));
__ cvtsd2ss(value, value);
__ movss(operand, value);
} else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
elements_kind == FLOAT64_ELEMENTS) {
__ movsd(operand, ToDoubleRegister(instr->value()));
} else {
Register value(ToRegister(instr->value()));
switch (elements_kind) {
case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
case EXTERNAL_INT8_ELEMENTS:
case EXTERNAL_UINT8_ELEMENTS:
case INT8_ELEMENTS:
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
__ movb(operand, value);
break;
case EXTERNAL_INT16_ELEMENTS:
case EXTERNAL_UINT16_ELEMENTS:
case INT16_ELEMENTS:
case UINT16_ELEMENTS:
__ movw(operand, value);
break;
case EXTERNAL_INT32_ELEMENTS:
case EXTERNAL_UINT32_ELEMENTS:
case INT32_ELEMENTS:
case UINT32_ELEMENTS:
__ movl(operand, value);
break;
case EXTERNAL_FLOAT32_ELEMENTS:
case EXTERNAL_FLOAT64_ELEMENTS:
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
}
}
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
XMMRegister value = ToDoubleRegister(instr->value());
LOperand* key = instr->key();
if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
instr->hydrogen()->IsDehoisted()) {
// Sign-extend the key because it could be a 32-bit negative value and
// the dehoisted address computation happens in 64 bits.
__ movsxlq(ToRegister(key), ToRegister(key));
}
if (instr->NeedsCanonicalization()) {
XMMRegister xmm_scratch = double_scratch0();
// Turn potential sNaN value into qNaN.
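// Subtracting +0 quiets a signaling NaN (any arithmetic on an sNaN yields
// the corresponding qNaN) and leaves all other values, including -0,
// unchanged.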
__ xorps(xmm_scratch, xmm_scratch);
__ subsd(value, xmm_scratch);
}
Operand double_store_operand = BuildFastArrayOperand(
instr->elements(),
key,
instr->hydrogen()->key()->representation(),
FAST_DOUBLE_ELEMENTS,
instr->base_offset());
__ movsd(double_store_operand, value);
}
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
HStoreKeyed* hinstr = instr->hydrogen();
LOperand* key = instr->key();
int offset = instr->base_offset();
Representation representation = hinstr->value()->representation();
if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
instr->hydrogen()->IsDehoisted()) {
// Sign-extend the key because it could be a 32-bit negative value and
// the dehoisted address computation happens in 64 bits.
__ movsxlq(ToRegister(key), ToRegister(key));
}
if (representation.IsInteger32() && SmiValuesAre32Bits()) {
DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
if (FLAG_debug_code) {
Register scratch = kScratchRegister;
__ Load(scratch,
BuildFastArrayOperand(instr->elements(),
key,
instr->hydrogen()->key()->representation(),
FAST_ELEMENTS,
offset),
Representation::Smi());
__ AssertSmi(scratch);
}
// Store int value directly to upper half of the smi.
STATIC_ASSERT(kSmiTag == 0);
DCHECK(kSmiTagSize + kSmiShiftSize == 32);
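// Same trick as in DoStoreNamedField: with 32-bit smis the int32 payload
// occupies the upper half of the word, at offset + kPointerSize / 2.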
offset += kPointerSize / 2;
}
Operand operand =
BuildFastArrayOperand(instr->elements(),
key,
instr->hydrogen()->key()->representation(),
FAST_ELEMENTS,
offset);
if (instr->value()->IsRegister()) {
__ Store(operand, ToRegister(instr->value()), representation);
} else {
LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
if (IsInteger32Constant(operand_value)) {
int32_t value = ToInteger32(operand_value);
if (representation.IsSmi()) {
__ Move(operand, Smi::FromInt(value));
} else {
__ movl(operand, Immediate(value));
}
} else {
Handle<Object> handle_value = ToHandle(operand_value);
__ Move(operand, handle_value);
}
}
if (hinstr->NeedsWriteBarrier()) {
Register elements = ToRegister(instr->elements());
DCHECK(instr->value()->IsRegister());
Register value = ToRegister(instr->value());
DCHECK(!key->IsConstantOperand());
SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
// Compute address of modified element and store it into key register.
Register key_reg(ToRegister(key));
__ leap(key_reg, operand);
__ RecordWrite(elements,
key_reg,
value,
kSaveFPRegs,
EMIT_REMEMBERED_SET,
check_needed,
hinstr->PointersToHereCheckForValue());
}
}
void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
if (instr->is_typed_elements()) {
DoStoreKeyedExternalArray(instr);
} else if (instr->hydrogen()->value()->representation().IsDouble()) {
DoStoreKeyedFixedDoubleArray(instr);
} else {
DoStoreKeyedFixedArray(instr);
}
}
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
if (instr->hydrogen()->HasVectorAndSlot()) {
EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
}
Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
isolate(), instr->language_mode(),
instr->hydrogen()->initialization_state()).code();
CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
class DeferredMaybeGrowElements final : public LDeferredCode {
public:
DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
: LDeferredCode(codegen), instr_(instr) {}
void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
LInstruction* instr() override { return instr_; }
private:
LMaybeGrowElements* instr_;
};
Register result = rax;
DeferredMaybeGrowElements* deferred =
new (zone()) DeferredMaybeGrowElements(this, instr);
LOperand* key = instr->key();
LOperand* current_capacity = instr->current_capacity();
DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
DCHECK(key->IsConstantOperand() || key->IsRegister());
DCHECK(current_capacity->IsConstantOperand() ||
current_capacity->IsRegister());
if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
int32_t constant_capacity =
ToInteger32(LConstantOperand::cast(current_capacity));
if (constant_key >= constant_capacity) {
// Deferred case.
__ jmp(deferred->entry());
}
} else if (key->IsConstantOperand()) {
int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
__ cmpl(ToRegister(current_capacity), Immediate(constant_key));
__ j(less_equal, deferred->entry());
} else if (current_capacity->IsConstantOperand()) {
int32_t constant_capacity =
ToInteger32(LConstantOperand::cast(current_capacity));
__ cmpl(ToRegister(key), Immediate(constant_capacity));
__ j(greater_equal, deferred->entry());
} else {
__ cmpl(ToRegister(key), ToRegister(current_capacity));
__ j(greater_equal, deferred->entry());
}
if (instr->elements()->IsRegister()) {
__ movp(result, ToRegister(instr->elements()));
} else {
__ movp(result, ToOperand(instr->elements()));
}
__ bind(deferred->exit());
}
void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
Register result = rax;
__ Move(result, Smi::FromInt(0));
// We have to call a stub.
{
PushSafepointRegistersScope scope(this);
if (instr->object()->IsConstantOperand()) {
LConstantOperand* constant_object =
LConstantOperand::cast(instr->object());
if (IsSmiConstant(constant_object)) {
Smi* immediate = ToSmi(constant_object);
__ Move(result, immediate);
} else {
Handle<Object> handle_value = ToHandle(constant_object);
__ Move(result, handle_value);
}
} else if (instr->object()->IsRegister()) {
__ Move(result, ToRegister(instr->object()));
} else {
__ movp(result, ToOperand(instr->object()));
}
LOperand* key = instr->key();
if (key->IsConstantOperand()) {
__ Move(rbx, ToSmi(LConstantOperand::cast(key)));
} else {
__ Move(rbx, ToRegister(key));
__ Integer32ToSmi(rbx, rbx);
}
GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
instr->hydrogen()->kind());
__ CallStub(&stub);
RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
__ StoreToSafepointRegisterSlot(result, result);
}
// Deopt on smi, which means the elements array changed to dictionary mode.
Condition is_smi = __ CheckSmi(result);
DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
}
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
Handle<Map> from_map = instr->original_map();
Handle<Map> to_map = instr->transitioned_map();
ElementsKind from_kind = instr->from_kind();
ElementsKind to_kind = instr->to_kind();
Label not_applicable;
__ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
__ j(not_equal, &not_applicable);
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
Register new_map_reg = ToRegister(instr->new_map_temp());
__ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
__ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
// Write barrier.
__ RecordWriteForMap(object_reg, new_map_reg, ToRegister(instr->temp()),
kDontSaveFPRegs);
} else {
DCHECK(object_reg.is(rax));
DCHECK(ToRegister(instr->context()).is(rsi));
PushSafepointRegistersScope scope(this);
__ Move(rbx, to_map);
bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
__ CallStub(&stub);
RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
}
__ bind(&not_applicable);
}
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register object = ToRegister(instr->object());
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
__ bind(&no_memento_found);
}
void LCodeGen::DoStringAdd(LStringAdd* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->left()).is(rdx));
DCHECK(ToRegister(instr->right()).is(rax));
StringAddStub stub(isolate(),
instr->hydrogen()->flags(),
instr->hydrogen()->pretenure_flag());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
class DeferredStringCharCodeAt final : public LDeferredCode {
public:
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
: LDeferredCode(codegen), instr_(instr) { }
void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
LInstruction* instr() override { return instr_; }
private:
LStringCharCodeAt* instr_;
};
DeferredStringCharCodeAt* deferred =
new(zone()) DeferredStringCharCodeAt(this, instr);
StringCharLoadGenerator::Generate(masm(),
ToRegister(instr->string()),
ToRegister(instr->index()),
ToRegister(instr->result()),
deferred->entry());
__ bind(deferred->exit());
}
void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
Register string = ToRegister(instr->string());
Register result = ToRegister(instr->result());
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
__ Set(result, 0);
PushSafepointRegistersScope scope(this);
__ Push(string);
// Push the index as a smi. This is safe because of the checks in
// DoStringCharCodeAt above.
STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
if (instr->index()->IsConstantOperand()) {
int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
__ Push(Smi::FromInt(const_index));
} else {
Register index = ToRegister(instr->index());
__ Integer32ToSmi(index, index);
__ Push(index);
}
CallRuntimeFromDeferred(
Runtime::kStringCharCodeAtRT, 2, instr, instr->context());
__ AssertSmi(rax);
__ SmiToInteger32(rax, rax);
__ StoreToSafepointRegisterSlot(result, rax);
}
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
class DeferredStringCharFromCode final : public LDeferredCode {
public:
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
: LDeferredCode(codegen), instr_(instr) { }
void Generate() override {
codegen()->DoDeferredStringCharFromCode(instr_);
}
LInstruction* instr() override { return instr_; }
private:
LStringCharFromCode* instr_;
};
DeferredStringCharFromCode* deferred =
new(zone()) DeferredStringCharFromCode(this, instr);
DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
Register char_code = ToRegister(instr->char_code());
Register result = ToRegister(instr->result());
DCHECK(!char_code.is(result));
__ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
__ j(above, deferred->entry());
__ movsxlq(char_code, char_code);
__ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
__ movp(result, FieldOperand(result,
char_code, times_pointer_size,
FixedArray::kHeaderSize));
__ CompareRoot(result, Heap::kUndefinedValueRootIndex);
__ j(equal, deferred->entry());
__ bind(deferred->exit());
}
void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
Register char_code = ToRegister(instr->char_code());
Register result = ToRegister(instr->result());
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
__ Set(result, 0);
PushSafepointRegistersScope scope(this);
__ Integer32ToSmi(char_code, char_code);
__ Push(char_code);
CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, rax);
}
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
LOperand* input = instr->value();
DCHECK(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
DCHECK(output->IsDoubleRegister());
if (input->IsRegister()) {
__ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
} else {
__ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
}
}
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
LOperand* input = instr->value();
LOperand* output = instr->result();
__ LoadUint32(ToDoubleRegister(output), ToRegister(input));
}
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
class DeferredNumberTagI final : public LDeferredCode {
public:
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
: LDeferredCode(codegen), instr_(instr) { }
void Generate() override {
codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
instr_->temp2(), SIGNED_INT32);
}
LInstruction* instr() override { return instr_; }
private:
LNumberTagI* instr_;
};
LOperand* input = instr->value();
DCHECK(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
if (SmiValuesAre32Bits()) {
__ Integer32ToSmi(reg, reg);
} else {
DCHECK(SmiValuesAre31Bits());
DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
__ Integer32ToSmi(reg, reg);
__ j(overflow, deferred->entry());
__ bind(deferred->exit());
}
}
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
class DeferredNumberTagU final : public LDeferredCode {
public:
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
: LDeferredCode(codegen), instr_(instr) { }
void Generate() override {
codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
instr_->temp2(), UNSIGNED_INT32);
}
LInstruction* instr() override { return instr_; }
private:
LNumberTagU* instr_;
};
LOperand* input = instr->value();
DCHECK(input->IsRegister() && input->Equals(instr->result()));
Register reg = ToRegister(input);
DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
__ cmpl(reg, Immediate(Smi::kMaxValue));
__ j(above, deferred->entry());
__ Integer32ToSmi(reg, reg);
__ bind(deferred->exit());
}
void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
LOperand* value,
LOperand* temp1,
LOperand* temp2,
IntegerSignedness signedness) {
Label done, slow;
Register reg = ToRegister(value);
Register tmp = ToRegister(temp1);
XMMRegister temp_xmm = ToDoubleRegister(temp2);
// Load the value into temp_xmm, which will be preserved across a potential
// call to the runtime (MacroAssembler::EnterExitFrameEpilogue preserves
// only allocatable XMM registers on x64).
if (signedness == SIGNED_INT32) {
DCHECK(SmiValuesAre31Bits());
// There was overflow, so bits 30 and 31 of the original integer
// disagree. Try to allocate a heap number in new space and store
// the value there. If that fails, call the runtime system.
__ SmiToInteger32(reg, reg);
__ xorl(reg, Immediate(0x80000000));
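// With 31-bit smis (shift of 1): 0x40000000 tags to 0x80000000 with
// overflow; the arithmetic untag gives 0xC0000000, and flipping bit 31
// restores the original 0x40000000.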
__ cvtlsi2sd(temp_xmm, reg);
} else {
DCHECK(signedness == UNSIGNED_INT32);
__ LoadUint32(temp_xmm, reg);
}
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, &slow);
__ jmp(&done, kPointerSize == kInt64Size ? Label::kNear : Label::kFar);
}
// Slow case: Call the runtime system to do the number allocation.
__ bind(&slow);
{
// Put a valid pointer value in the stack slot where the result
// register is stored, as this register is in the pointer map, but contains
// an integer value.
__ Set(reg, 0);
// Preserve the value of all registers.
PushSafepointRegistersScope scope(this);
// NumberTagIU uses the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
// It only calls Runtime::kAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(reg, rax);
}
// Done. Put the value in temp_xmm into the value of the allocated heap
// number.
__ bind(&done);
__ movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
}
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
class DeferredNumberTagD final : public LDeferredCode {
public:
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
: LDeferredCode(codegen), instr_(instr) { }
void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
LInstruction* instr() override { return instr_; }
private:
LNumberTagD* instr_;
};
XMMRegister input_reg = ToDoubleRegister(instr->value());
Register reg = ToRegister(instr->result());
Register tmp = ToRegister(instr->temp());
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ AllocateHeapNumber(reg, tmp, deferred->entry());
} else {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
__ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}
void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
Register reg = ToRegister(instr->result());
__ Move(reg, Smi::FromInt(0));
{
PushSafepointRegistersScope scope(this);
// NumberTagD uses the context from the frame, rather than
// the environment's HContext or HInlinedContext value.
// It only calls Runtime::kAllocateHeapNumber.
// The corresponding HChange instructions are added in a phase that does
// not have easy access to the local context.
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
RecordSafepointWithRegisters(
instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
__ movp(kScratchRegister, rax);
}
__ movp(reg, kScratchRegister);
}
void LCodeGen::DoSmiTag(LSmiTag* instr) {
HChange* hchange = instr->hydrogen();
Register input = ToRegister(instr->value());
Register output = ToRegister(instr->result());
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kOverflow);
}
__ Integer32ToSmi(output, input);
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
}
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
DCHECK(instr->value()->Equals(instr->result()));
Register input = ToRegister(instr->value());
if (instr->needs_check()) {
Condition is_smi = __ CheckSmi(input);
DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kNotASmi);
} else {
__ AssertSmi(input);
}
__ SmiToInteger32(input, input);
}
void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
XMMRegister result_reg, NumberUntagDMode mode) {
bool can_convert_undefined_to_nan =
instr->hydrogen()->can_convert_undefined_to_nan();
bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
Label convert, load_smi, done;
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
// Smi check.
__ JumpIfSmi(input_reg, &load_smi, Label::kNear);
// Heap number map check.
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
// On x64 it is safe to load at heap number offset before evaluating the map
// check, since all heap objects are at least two words long.
__ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
if (can_convert_undefined_to_nan) {
__ j(not_equal, &convert, Label::kNear);
} else {
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
}
if (deoptimize_on_minus_zero) {
XMMRegister xmm_scratch = double_scratch0();
__ xorps(xmm_scratch, xmm_scratch);
__ ucomisd(xmm_scratch, result_reg);
__ j(not_equal, &done, Label::kNear);
__ movmskpd(kScratchRegister, result_reg);
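// movmskpd collects the sign bits of the packed doubles into the low bits
// of the destination; bit 0 set means the value is -0.0 (it already
// compared equal to +0 above).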
__ testq(kScratchRegister, Immediate(1));
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
}
__ jmp(&done, Label::kNear);
if (can_convert_undefined_to_nan) {
__ bind(&convert);
// Convert undefined (and hole) to NaN: pcmpeqd against itself sets all
// bits, a quiet-NaN bit pattern.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
__ pcmpeqd(result_reg, result_reg);
__ jmp(&done, Label::kNear);
}
} else {
DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
}
// Smi to XMM conversion
__ bind(&load_smi);
__ SmiToInteger32(kScratchRegister, input_reg);
__ Cvtlsi2sd(result_reg, kScratchRegister);
__ bind(&done);
}
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
Register input_reg = ToRegister(instr->value());
if (instr->truncating()) {
Label no_heap_number, check_bools, check_false;
// Heap number map check.
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
__ j(not_equal, &no_heap_number, Label::kNear);
__ TruncateHeapNumberToI(input_reg, input_reg);
__ jmp(done);
__ bind(&no_heap_number);
// Check for oddballs: undefined and false convert to zero, and true to
// one, for truncating conversions.
__ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &check_bools, Label::kNear);
__ Set(input_reg, 0);
__ jmp(done);
__ bind(&check_bools);
__ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
__ j(not_equal, &check_false, Label::kNear);
__ Set(input_reg, 1);
__ jmp(done);
__ bind(&check_false);
__ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
DeoptimizeIf(not_equal, instr,
Deoptimizer::kNotAHeapNumberUndefinedBoolean);
__ Set(input_reg, 0);
} else {
XMMRegister scratch = ToDoubleRegister(instr->temp());
DCHECK(!scratch.is(xmm0));
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
__ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ cvttsd2si(input_reg, xmm0);
__ Cvtlsi2sd(scratch, input_reg);
__ ucomisd(xmm0, scratch);
DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
__ testl(input_reg, input_reg);
__ j(not_zero, done);
__ movmskpd(input_reg, xmm0);
__ andl(input_reg, Immediate(1));
DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
}
}
}
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
class DeferredTaggedToI final : public LDeferredCode {
public:
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
: LDeferredCode(codegen), instr_(instr) { }
void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); }
LInstruction* instr() override { return instr_; }
private:
LTaggedToI* instr_;
};
LOperand* input = instr->value();
DCHECK(input->IsRegister());
DCHECK(input->Equals(instr->result()));
Register input_reg = ToRegister(input);
if (instr->hydrogen()->value()->representation().IsSmi()) {
__ SmiToInteger32(input_reg, input_reg);
} else {
DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
__ JumpIfNotSmi(input_reg, deferred->entry());
__ SmiToInteger32(input_reg, input_reg);
__ bind(deferred->exit());
}
}
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
LOperand* input = instr->value();
DCHECK(input->IsRegister());
LOperand* result = instr->result();
DCHECK(result->IsDoubleRegister());
Register input_reg = ToRegister(input);
XMMRegister result_reg = ToDoubleRegister(result);
HValue* value = instr->hydrogen()->value();
NumberUntagDMode mode = value->representation().IsSmi()
? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
EmitNumberUntagD(instr, input_reg, result_reg, mode);
}
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
LOperand* input = instr->value();
DCHECK(input->IsDoubleRegister());
LOperand* result = instr->result();
DCHECK(result->IsRegister());
XMMRegister input_reg = ToDoubleRegister(input);
Register result_reg = ToRegister(result);
if (instr->truncating()) {
__ TruncateDoubleToI(result_reg, input_reg);
} else {
Label lost_precision, is_nan, minus_zero, done;
XMMRegister xmm_scratch = double_scratch0();
Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
__ DoubleToI(result_reg, input_reg, xmm_scratch,
instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
&is_nan, &minus_zero, dist);
__ jmp(&done, dist);
__ bind(&lost_precision);
DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
__ bind(&is_nan);
DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
__ bind(&minus_zero);
DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
__ bind(&done);
}
}
void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
LOperand* input = instr->value();
DCHECK(input->IsDoubleRegister());
LOperand* result = instr->result();
DCHECK(result->IsRegister());
XMMRegister input_reg = ToDoubleRegister(input);
Register result_reg = ToRegister(result);
Label lost_precision, is_nan, minus_zero, done;
XMMRegister xmm_scratch = double_scratch0();
Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
__ DoubleToI(result_reg, input_reg, xmm_scratch,
instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
&minus_zero, dist);
__ jmp(&done, dist);
__ bind(&lost_precision);
DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
__ bind(&is_nan);
DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
__ bind(&minus_zero);
DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
__ bind(&done);
__ Integer32ToSmi(result_reg, result_reg);
DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
DeoptimizeIf(NegateCondition(cc), instr, Deoptimizer::kNotASmi);
}
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
Condition cc = masm()->CheckSmi(ToRegister(input));
DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
}
}
void LCodeGen::DoCheckArrayBufferNotNeutered(
LCheckArrayBufferNotNeutered* instr) {
Register view = ToRegister(instr->view());
__ movp(kScratchRegister,
FieldOperand(view, JSArrayBufferView::kBufferOffset));
__ testb(FieldOperand(kScratchRegister, JSArrayBuffer::kBitFieldOffset),
Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
}
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
Register input = ToRegister(instr->value());
__ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
if (instr->hydrogen()->is_interval_check()) {
InstanceType first;
InstanceType last;
instr->hydrogen()->GetCheckInterval(&first, &last);
__ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
Immediate(static_cast<int8_t>(first)));
// If there is only one type in the interval, check for equality.
if (first == last) {
DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
} else {
DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
Immediate(static_cast<int8_t>(last)));
DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
}
}
} else {
uint8_t mask;
uint8_t tag;
instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
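// When the mask selects a single bit, one testb suffices: with tag == 0 we
// deopt if the bit is set, otherwise (tag == mask) if it is clear.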
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
Immediate(mask));
DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
Deoptimizer::kWrongInstanceType);
} else {
__ movzxbl(kScratchRegister,
FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
__ andb(kScratchRegister, Immediate(mask));
__ cmpb(kScratchRegister, Immediate(tag));
DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
}
}
}
void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
__ Cmp(reg, instr->hydrogen()->object().handle());
DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
}
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
{
PushSafepointRegistersScope scope(this);
__ Push(object);
__ Set(rsi, 0);
__ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
RecordSafepointWithRegisters(
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ testp(rax, Immediate(kSmiTagMask));
}
DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
}
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
class DeferredCheckMaps final : public LDeferredCode {
public:
DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
: LDeferredCode(codegen), instr_(instr), object_(object) {
SetExit(check_maps());
}
void Generate() override {
codegen()->DoDeferredInstanceMigration(instr_, object_);
}
Label* check_maps() { return &check_maps_; }
LInstruction* instr() override { return instr_; }
private:
LCheckMaps* instr_;
Label check_maps_;
Register object_;
};
if (instr->hydrogen()->IsStabilityCheck()) {
const UniqueSet<Map>* maps = instr->hydrogen()->maps();
for (int i = 0; i < maps->size(); ++i) {
AddStabilityDependency(maps->at(i).handle());
}
return;
}
LOperand* input = instr->value();
DCHECK(input->IsRegister());
Register reg = ToRegister(input);
DeferredCheckMaps* deferred = NULL;
if (instr->hydrogen()->HasMigrationTarget()) {
deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
__ bind(deferred->check_maps());
}
const UniqueSet<Map>* maps = instr->hydrogen()->maps();
Label success;
for (int i = 0; i < maps->size() - 1; i++) {
Handle<Map> map = maps->at(i).handle();
__ CompareMap(reg, map);
__ j(equal, &success, Label::kNear);
}
Handle<Map> map = maps->at(maps->size() - 1).handle();
__ CompareMap(reg, map);
if (instr->hydrogen()->HasMigrationTarget()) {
__ j(not_equal, deferred->entry());
} else {
DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
}
__ bind(&success);
}
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
XMMRegister xmm_scratch = double_scratch0();
Register result_reg = ToRegister(instr->result());
__ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}
void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
DCHECK(instr->unclamped()->Equals(instr->result()));
Register value_reg = ToRegister(instr->result());
__ ClampUint8(value_reg);
}
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
DCHECK(instr->unclamped()->Equals(instr->result()));
Register input_reg = ToRegister(instr->unclamped());
XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
XMMRegister xmm_scratch = double_scratch0();
Label is_smi, done, heap_number;
Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
__ JumpIfSmi(input_reg, &is_smi, dist);
// Check for heap number
__ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
__ j(equal, &heap_number, Label::kNear);
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ Cmp(input_reg, factory()->undefined_value());
DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
__ xorl(input_reg, input_reg);
__ jmp(&done, Label::kNear);
// Heap number
__ bind(&heap_number);
__ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
__ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
__ jmp(&done, Label::kNear);
// smi
__ bind(&is_smi);
__ SmiToInteger32(input_reg, input_reg);
__ ClampUint8(input_reg);
__ bind(&done);
}
void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
XMMRegister value_reg = ToDoubleRegister(instr->value());
Register result_reg = ToRegister(instr->result());
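// Extract one 32-bit half of the double's bit pattern: the high word via a
// 64-bit move plus shift, the low word via movd, which copies the low
// 32 bits.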
if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
__ movq(result_reg, value_reg);
__ shrq(result_reg, Immediate(32));
} else {
__ movd(result_reg, value_reg);
}
}
void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
Register hi_reg = ToRegister(instr->hi());
Register lo_reg = ToRegister(instr->lo());
XMMRegister result_reg = ToDoubleRegister(instr->result());
XMMRegister xmm_scratch = double_scratch0();
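// Assemble the 64-bit pattern (hi << 32) | lo in an XMM register.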
__ movd(result_reg, hi_reg);
__ psllq(result_reg, 32);
__ movd(xmm_scratch, lo_reg);
__ orps(result_reg, xmm_scratch);
}
void LCodeGen::DoAllocate(LAllocate* instr) {
class DeferredAllocate final : public LDeferredCode {
public:
DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
: LDeferredCode(codegen), instr_(instr) { }
void Generate() override { codegen()->DoDeferredAllocate(instr_); }
LInstruction* instr() override { return instr_; }
private:
LAllocate* instr_;
};
DeferredAllocate* deferred =
new(zone()) DeferredAllocate(this, instr);
Register result = ToRegister(instr->result());
Register temp = ToRegister(instr->temp());
// Allocate memory for the object.
AllocationFlags flags = TAG_OBJECT;
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = static_cast<AllocationFlags>(flags | PRETENURE);
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
if (size <= Page::kMaxRegularHeapObjectSize) {
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
} else {
__ jmp(deferred->entry());
}
} else {
Register size = ToRegister(instr->size());
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
}
__ bind(deferred->exit());
if (instr->hydrogen()->MustPrefillWithFiller()) {
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
__ movl(temp, Immediate((size / kPointerSize) - 1));
} else {
temp = ToRegister(instr->size());
__ sarp(temp, Immediate(kPointerSizeLog2));
__ decl(temp);
}
Label loop;
__ bind(&loop);
__ Move(FieldOperand(result, temp, times_pointer_size, 0),
isolate()->factory()->one_pointer_filler_map());
__ decl(temp);
__ j(not_zero, &loop);
}
}
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
Register result = ToRegister(instr->result());
// TODO(3095996): Get rid of this. For now, we need to make the
// result register contain a valid pointer because it is already
// contained in the register pointer map.
__ Move(result, Smi::FromInt(0));
PushSafepointRegistersScope scope(this);
if (instr->size()->IsRegister()) {
Register size = ToRegister(instr->size());
DCHECK(!size.is(result));
__ Integer32ToSmi(size, size);
__ Push(size);
} else {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
__ Push(Smi::FromInt(size));
}
int flags = 0;
if (instr->hydrogen()->IsOldSpaceAllocation()) {
DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
flags = AllocateTargetSpace::update(flags, OLD_SPACE);
} else {
flags = AllocateTargetSpace::update(flags, NEW_SPACE);
}
__ Push(Smi::FromInt(flags));
CallRuntimeFromDeferred(
Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
__ StoreToSafepointRegisterSlot(result, rax);
}
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
DCHECK(ToRegister(instr->value()).is(rax));
__ Push(rax);
CallRuntime(Runtime::kToFastProperties, 1, instr);
}
void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
Label materialized;
// Registers will be used as follows:
// rcx = literals array.
// rbx = regexp literal.
// rax = regexp literal clone.
int literal_offset =
FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
__ Move(rcx, instr->hydrogen()->literals());
__ movp(rbx, FieldOperand(rcx, literal_offset));
__ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &materialized, Label::kNear);
// Create the regexp literal using the runtime function.
// The result will be in rax.
__ Push(rcx);
__ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
__ Push(instr->hydrogen()->pattern());
__ Push(instr->hydrogen()->flags());
CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
__ movp(rbx, rax);
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
__ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated, Label::kNear);
__ bind(&runtime_allocate);
__ Push(rbx);
__ Push(Smi::FromInt(size));
CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
__ Pop(rbx);
__ bind(&allocated);
// Copy the content into the newly allocated memory.
// (Unroll copy loop once for better throughput).
for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
__ movp(rdx, FieldOperand(rbx, i));
__ movp(rcx, FieldOperand(rbx, i + kPointerSize));
__ movp(FieldOperand(rax, i), rdx);
__ movp(FieldOperand(rax, i + kPointerSize), rcx);
}
if ((size % (2 * kPointerSize)) != 0) {
__ movp(rdx, FieldOperand(rbx, size - kPointerSize));
__ movp(FieldOperand(rax, size - kPointerSize), rdx);
}
}
void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
// Use the fast-case closure allocation code that allocates in new
// space for nested functions that don't need literal cloning.
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && instr->hydrogen()->has_no_literals()) {
FastNewClosureStub stub(isolate(), instr->hydrogen()->language_mode(),
instr->hydrogen()->kind());
__ Move(rbx, instr->hydrogen()->shared_info());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
} else {
__ Push(rsi);
__ Push(instr->hydrogen()->shared_info());
__ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
Heap::kFalseValueRootIndex);
CallRuntime(Runtime::kNewClosure, 3, instr);
}
}
void LCodeGen::DoTypeof(LTypeof* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
DCHECK(ToRegister(instr->value()).is(rbx));
Label end, do_call;
Register value_register = ToRegister(instr->value());
__ JumpIfNotSmi(value_register, &do_call);
__ Move(rax, isolate()->factory()->number_string());
__ jmp(&end);
__ bind(&do_call);
TypeofStub stub(isolate());
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
__ bind(&end);
}
void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
DCHECK(!operand->IsDoubleRegister());
if (operand->IsConstantOperand()) {
__ Push(ToHandle(LConstantOperand::cast(operand)));
} else if (operand->IsRegister()) {
__ Push(ToRegister(operand));
} else {
__ Push(ToOperand(operand));
}
}
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
Register input = ToRegister(instr->value());
Condition final_branch_condition = EmitTypeofIs(instr, input);
if (final_branch_condition != no_condition) {
EmitBranch(instr, final_branch_condition);
}
}
Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
Label* true_label = instr->TrueLabel(chunk_);
Label* false_label = instr->FalseLabel(chunk_);
Handle<String> type_name = instr->type_literal();
int left_block = instr->TrueDestination(chunk_);
int right_block = instr->FalseDestination(chunk_);
int next_block = GetNextEmittedBlock();
Label::Distance true_distance = left_block == next_block ? Label::kNear
: Label::kFar;
Label::Distance false_distance = right_block == next_block ? Label::kNear
: Label::kFar;
Condition final_branch_condition = no_condition;
Factory* factory = isolate()->factory();
if (String::Equals(type_name, factory->number_string())) {
__ JumpIfSmi(input, true_label, true_distance);
__ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
final_branch_condition = equal;
} else if (String::Equals(type_name, factory->string_string())) {
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
__ j(above_equal, false_label, false_distance);
__ testb(FieldOperand(input, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = zero;
} else if (String::Equals(type_name, factory->symbol_string())) {
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, SYMBOL_TYPE, input);
final_branch_condition = equal;
} else if (String::Equals(type_name, factory->boolean_string())) {
__ CompareRoot(input, Heap::kTrueValueRootIndex);
__ j(equal, true_label, true_distance);
__ CompareRoot(input, Heap::kFalseValueRootIndex);
final_branch_condition = equal;
} else if (String::Equals(type_name, factory->undefined_string())) {
__ CompareRoot(input, Heap::kUndefinedValueRootIndex);
__ j(equal, true_label, true_distance);
__ JumpIfSmi(input, false_label, false_distance);
// Check for undetectable objects => true.
__ movp(input, FieldOperand(input, HeapObject::kMapOffset));
__ testb(FieldOperand(input, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = not_zero;
} else if (String::Equals(type_name, factory->function_string())) {
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
__ JumpIfSmi(input, false_label, false_distance);
__ CmpObjectType(input, JS_FUNCTION_TYPE, input);
__ j(equal, true_label, true_distance);
__ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
final_branch_condition = equal;
} else if (String::Equals(type_name, factory->object_string())) {
__ JumpIfSmi(input, false_label, false_distance);
__ CompareRoot(input, Heap::kNullValueRootIndex);
__ j(equal, true_label, true_distance);
__ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
__ j(below, false_label, false_distance);
__ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
__ j(above, false_label, false_distance);
// Check for undetectable objects => false.
__ testb(FieldOperand(input, Map::kBitFieldOffset),
Immediate(1 << Map::kIsUndetectable));
final_branch_condition = zero;
} else {
__ jmp(false_label, false_distance);
}
return final_branch_condition;
}
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
Register temp = ToRegister(instr->temp());
EmitIsConstructCall(temp);
EmitBranch(instr, equal);
}
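
// Compares the marker of the calling frame (skipping over an arguments
// adaptor frame if one is present) against StackFrame::CONSTRUCT. The
// caller branches on the resulting "equal" condition. Clobbers |temp|.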
void LCodeGen::EmitIsConstructCall(Register temp) {
// Get the frame pointer for the calling frame.
__ movp(temp, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
Label check_frame_marker;
__ Cmp(Operand(temp, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
__ j(not_equal, &check_frame_marker, Label::kNear);
__ movp(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
__ bind(&check_frame_marker);
__ Cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
Smi::FromInt(StackFrame::CONSTRUCT));
}
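
// Pads the code with nops when the current pc is too close to the last
// lazy-deopt point, so that patching a call sequence there cannot
// overwrite the code emitted after this point.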
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
if (!info()->IsStub()) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
if (current_pc < last_lazy_deopt_pc_ + space_needed) {
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
__ Nop(padding_size);
}
}
last_lazy_deopt_pc_ = masm()->pc_offset();
}
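
// A lazy bailout emits no code of its own; it only records the
// environment and the deoptimization index so the deoptimizer can enter
// at this pc later.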
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
last_lazy_deopt_pc_ = masm()->pc_offset();
DCHECK(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
Deoptimizer::BailoutType type = instr->hydrogen()->type();
// TODO(danno): Stubs expect all deopts to be lazy for historical reasons (they
// need a return address), even though the implementation of LAZY and EAGER is
// now identical. When LAZY is eventually completely folded into EAGER, remove
// the special case below.
if (info()->IsStub() && type == Deoptimizer::EAGER) {
type = Deoptimizer::LAZY;
}
DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
}
void LCodeGen::DoDummy(LDummy* instr) {
// Nothing to see here, move on!
}
void LCodeGen::DoDummyUse(LDummyUse* instr) {
// Nothing to see here, move on!
}
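
// Deferred part of DoStackCheck: reloads the context and calls the
// runtime stack guard, recording a safepoint that allows lazy
// deoptimization on return.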
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
PushSafepointRegistersScope scope(this);
__ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
__ CallRuntimeSaveDoubles(Runtime::kStackGuard);
RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
DCHECK(instr->HasEnvironment());
LEnvironment* env = instr->environment();
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
void LCodeGen::DoStackCheck(LStackCheck* instr) {
class DeferredStackCheck final : public LDeferredCode {
public:
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
: LDeferredCode(codegen), instr_(instr) { }
void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
LInstruction* instr() override { return instr_; }
private:
LStackCheck* instr_;
};
DCHECK(instr->HasEnvironment());
LEnvironment* env = instr->environment();
// There is no LLazyBailout instruction for stack-checks. We have to
// prepare for lazy deoptimization explicitly here.
if (instr->hydrogen()->is_function_entry()) {
// Perform stack overflow check.
Label done;
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(above_equal, &done, Label::kNear);
DCHECK(instr->context()->IsRegister());
DCHECK(ToRegister(instr->context()).is(rsi));
CallCode(isolate()->builtins()->StackCheck(),
RelocInfo::CODE_TARGET,
instr);
__ bind(&done);
} else {
DCHECK(instr->hydrogen()->is_backwards_branch());
// Perform stack overflow check if this goto needs it before jumping.
DeferredStackCheck* deferred_stack_check =
new(zone()) DeferredStackCheck(this, instr);
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(below, deferred_stack_check->entry());
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
// Don't record a deoptimization index for the safepoint here.
// This will be done explicitly when emitting the call and the safepoint
// in the deferred code.
}
}
void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
// This is a pseudo-instruction that ensures that the environment here is
// properly registered for deoptimization and records the assembler's PC
// offset.
LEnvironment* environment = instr->environment();
// If the environment were already registered, we would have no way of
// backpatching it with the spill slot operands.
DCHECK(!environment->HasBeenRegistered());
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
GenerateOsrPrologue();
}
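
// Prepares the receiver's map for a for-in loop. Deoptimizes on smis and
// proxies; uses the map's enum cache when it is valid for the entire
// prototype chain, and otherwise asks the runtime for the property names,
// deoptimizing if the runtime does not return a map with a usable cache.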
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(ToRegister(instr->context()).is(rsi));
Condition cc = masm()->CheckSmi(rax);
DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
Label use_cache, call_runtime;
Register null_value = rdi;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ CheckEnumCache(null_value, &call_runtime);
__ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
__ jmp(&use_cache, Label::kNear);
// Get the set of properties to enumerate.
__ bind(&call_runtime);
__ Push(rax);
CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
__ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
Heap::kMetaMapRootIndex);
DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
__ bind(&use_cache);
}
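
// Loads the array of enumerable keys: the empty fixed array when the
// map's enum length is zero, otherwise the enum cache held by the map's
// instance descriptors. Deoptimizes if the load yields a smi, i.e. no
// cache is present.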
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
Register map = ToRegister(instr->map());
Register result = ToRegister(instr->result());
Label load_cache, done;
__ EnumLength(result, map);
__ Cmp(result, Smi::FromInt(0));
__ j(not_equal, &load_cache, Label::kNear);
__ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
__ jmp(&done, Label::kNear);
__ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
__ movp(result,
FieldOperand(result, DescriptorArray::kEnumCacheOffset));
__ movp(result,
FieldOperand(result, FixedArray::SizeFor(instr->idx())));
__ bind(&done);
Condition cc = masm()->CheckSmi(result);
DeoptimizeIf(cc, instr, Deoptimizer::kNoCache);
}
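
// Deoptimizes when |object|'s map is not the expected map.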
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
__ cmpp(ToRegister(instr->map()),
FieldOperand(object, HeapObject::kMapOffset));
DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
}
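
// Deferred part of DoLoadFieldByIndex: loads the field through the
// runtime. The context register is zeroed here, which suggests that
// Runtime::kLoadMutableDouble does not depend on the current context.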
void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
Register object,
Register index) {
PushSafepointRegistersScope scope(this);
__ Push(object);
__ Push(index);
__ xorp(rsi, rsi);
__ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
RecordSafepointWithRegisters(
instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(object, rax);
}
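
// The field index is a smi encoding two pieces of information: bit 0 set
// means the field holds a mutable heap number and must be loaded through
// the deferred runtime path; the remaining (signed) bits select an
// in-object field when non-negative and an out-of-object property when
// negative. For example, an index value of 4 (binary 100) selects
// in-object field 2, while -2 selects the first out-of-object property.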
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
class DeferredLoadMutableDouble final : public LDeferredCode {
public:
DeferredLoadMutableDouble(LCodeGen* codegen,
LLoadFieldByIndex* instr,
Register object,
Register index)
: LDeferredCode(codegen),
instr_(instr),
object_(object),
index_(index) {
}
void Generate() override {
codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
}
LInstruction* instr() override { return instr_; }
private:
LLoadFieldByIndex* instr_;
Register object_;
Register index_;
};
Register object = ToRegister(instr->object());
Register index = ToRegister(instr->index());
DeferredLoadMutableDouble* deferred =
new(zone()) DeferredLoadMutableDouble(this, instr, object, index);
Label out_of_object, done;
__ Move(kScratchRegister, Smi::FromInt(1));
__ testp(index, kScratchRegister);
__ j(not_zero, deferred->entry());
__ sarp(index, Immediate(1));
__ SmiToInteger32(index, index);
__ cmpl(index, Immediate(0));
__ j(less, &out_of_object, Label::kNear);
__ movp(object, FieldOperand(object,
index,
times_pointer_size,
JSObject::kHeaderSize));
__ jmp(&done, Label::kNear);
__ bind(&out_of_object);
__ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
__ negl(index);
// Index is now equal to the out-of-object property index plus 1.
__ movp(object, FieldOperand(object,
index,
times_pointer_size,
FixedArray::kHeaderSize - kPointerSize));
__ bind(deferred->exit());
__ bind(&done);
}
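
// Writes the given context into the context slot of the current frame.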
void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
Register context = ToRegister(instr->context());
__ movp(Operand(rbp, StandardFrameConstants::kContextOffset), context);
}
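
// Pushes the scope info and the function, then calls the runtime to
// allocate and enter a new block context.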
void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
Handle<ScopeInfo> scope_info = instr->scope_info();
__ Push(scope_info);
__ Push(ToRegister(instr->function()));
CallRuntime(Runtime::kPushBlockContext, 2, instr);
RecordSafepoint(Safepoint::kNoLazyDeopt);
}
#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_X64