File:        platform/mac/avmshell/../../../core/CodegenLIR.cpp
Location:    line 4211, column 13
Description: Value stored to 'objType' is never read
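
A dead-store finding of this kind means the value assigned to 'objType' at the
flagged line (4211, which lies outside the excerpt below) is overwritten or
goes out of scope before it is ever read. A hypothetical illustration of the
pattern, not the actual code at line 4211:

    Traits* objType = obj->traits();   // value stored here ...
    objType = other->traits();         // ... is overwritten before any read,
                                       // so the first store is dead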
1 | /* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */ |
2 | /* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */ |
3 | /* ***** BEGIN LICENSE BLOCK ***** |
4 | * Version: MPL 1.1/GPL 2.0/LGPL 2.1 |
5 | * |
6 | * The contents of this file are subject to the Mozilla Public License Version |
7 | * 1.1 (the "License"); you may not use this file except in compliance with |
8 | * the License. You may obtain a copy of the License at |
9 | * http://www.mozilla.org/MPL/ |
10 | * |
11 | * Software distributed under the License is distributed on an "AS IS" basis, |
12 | * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License |
13 | * for the specific language governing rights and limitations under the |
14 | * License. |
15 | * |
16 | * The Original Code is [Open Source Virtual Machine.]. |
17 | * |
18 | * The Initial Developer of the Original Code is |
19 | * Adobe System Incorporated. |
20 | * Portions created by the Initial Developer are Copyright (C) 2004-2006 |
21 | * the Initial Developer. All Rights Reserved. |
22 | * |
23 | * Contributor(s): |
24 | * Adobe AS3 Team |
25 | * leon.sha@sun.com |
26 | * |
27 | * Alternatively, the contents of this file may be used under the terms of |
28 | * either the GNU General Public License Version 2 or later (the "GPL"), or |
29 | * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), |
30 | * in which case the provisions of the GPL or the LGPL are applicable instead |
31 | * of those above. If you wish to allow use of your version of this file only |
32 | * under the terms of either the GPL or the LGPL, and not to allow others to |
33 | * use your version of this file under the terms of the MPL, indicate your |
34 | * decision by deleting the provisions above and replace them with the notice |
35 | * and other provisions required by the GPL or the LGPL. If you do not delete |
36 | * the provisions above, a recipient may use your version of this file under |
37 | * the terms of any one of the MPL, the GPL or the LGPL. |
38 | * |
39 | * ***** END LICENSE BLOCK ***** */ |
40 | |
41 | |
42 | #include "avmplus.h" |
43 | |
44 | #ifdef VMCFG_NANOJIT |
45 | |
46 | #include "CodegenLIR.h" |
47 | #include "exec-osr.h" |
48 | |
49 | #if defined(WIN32) && defined(AVMPLUS_ARM) |
50 | #include <cmnintrin.h> |
51 | #endif |
52 | |
53 | #ifdef VMCFG_VTUNE |
54 | namespace vtune { |
55 | using namespace avmplus; |
56 | void* vtuneInit(String*); |
57 | void vtuneCleanup(void*); |
58 | } |
59 | using namespace vtune; |
60 | #endif // VMCFG_VTUNE |
61 | |
62 | // Sparc, ARM and SH4 have buggy LIR_d2i support (bug 613189) |
63 | #if (defined(NANOJIT_SPARC) || defined(NANOJIT_SH4) || defined(NANOJIT_ARM)) |
64 | # undef NJ_F2I_SUPPORTED
65 | # define NJ_F2I_SUPPORTED 0
66 | #endif |
67 | |
68 | #ifdef _MSC_VER |
69 | #if !defined (AVMPLUS_ARM) |
70 | extern "C" |
71 | { |
72 | int __cdecl _setjmp3(jmp_buf jmpbuf, int arg); |
73 | } |
74 | #else |
75 | #include <setjmp.h> |
76 | #undef setjmp |
77 | extern "C" |
78 | { |
79 | int __cdecl setjmp(jmp_buf jmpbuf); |
80 | } |
81 | #endif // AVMPLUS_ARM |
82 | #endif // _MSC_VER |
83 | |
84 | #ifdef AVMPLUS_ARM |
85 | #ifdef _MSC_VER |
86 | #define RETURN_METHOD_PTR(_class, _method) \
87 | return *((int*)&_method); |
88 | #else |
89 | #define RETURN_METHOD_PTR(_class, _method) \
90 | union { \ |
91 | int (_class::*bar)(); \ |
92 | int foo[2]; \ |
93 | }; \ |
94 | bar = _method; \ |
95 | return foo[0]; |
96 | #endif |
97 | |
98 | #elif defined __GNUC__
99 | #define RETURN_METHOD_PTR(_class, _method) \
100 | union { \ |
101 | int (_class::*bar)(); \ |
102 | intptr_t foo; \ |
103 | }; \ |
104 | bar = _method; \ |
105 | return foo; |
106 | #else |
107 | #define RETURN_METHOD_PTR(_class, _method) \
108 | return *((intptr_t*)&_method); |
109 | #endif |
110 | |
111 | #ifdef AVMPLUS_ARM |
112 | #ifdef _MSC_VER |
113 | #define RETURN_METHOD_PTR_F(_class, _method) \
114 | return *((int*)&_method); |
115 | #else |
116 | #define RETURN_METHOD_PTR_F(_class, _method) \
117 | union { \ |
118 | double (_class::*bar)(); \ |
119 | int foo[2]; \ |
120 | }; \ |
121 | bar = _method; \ |
122 | return foo[0]; |
123 | #endif |
124 | |
125 | #elif defined __GNUC__
126 | #define RETURN_METHOD_PTR_F(_class, _method) \
127 | union { \ |
128 | double (_class::*bar)(); \ |
129 | intptr_t foo; \ |
130 | }; \ |
131 | bar = _method; \ |
132 | return foo; |
133 | #else |
134 | #define RETURN_METHOD_PTR_F(_class, _method) \
135 | return *((intptr_t*)&_method); |
136 | #endif |
137 | |
138 | #ifdef PERFM |
139 | #define DOPROF |
140 | #endif /* PERFM */ |
141 | |
142 | //#define DOPROF |
143 | #include "../vprof/vprof.h" |
144 | |
145 | // Profiling generated code. |
146 | |
147 | #ifdef DOPROF |
148 | #define JIT_EVENT(id) \
149 | do { \
150 | static void* id; \
151 | _jnvprof_init(&id, #id, NULL); \
152 | callIns(FUNCTIONID(jitProfileEvent), 1, InsConstPtr(id)); \
153 | } while (0)
154 | #define JIT_VALUE(id, val) \
155 | do { \
156 | static void* id; \
157 | _jnvprof_init(&id, #id, NULL); \
158 | callIns(FUNCTIONID(jitProfileValue32), 2, InsConstPtr(id), val); \
159 | } while (0)
160 | #define JIT_TAGVAL(id, val) \
161 | do { \
162 | static void* id; \
163 | _jnhprof_init(&id, #id, 8, 1, 2, 3, 4, 5, 6, 7, 8); \
164 | LIns* jit_tagval_tag = p2i(andp((val), 7)); \
165 | callIns(FUNCTIONID(jitProfileHist32), 2, InsConstPtr(id), jit_tagval_tag); \
166 | } while (0)
167 | #else
168 | #define JIT_EVENT(id) do { } while (0)
169 | #define JIT_VALUE(id, val) do { } while (0)
170 | #define JIT_TAGVAL(id, val) do { } while (0)
171 | #endif |
172 | |
173 | #ifdef AVMPLUS_64BIT |
174 | #define PTR_SCALE 3
175 | #else |
176 | #define PTR_SCALE 2
177 | #endif |
178 | |
179 | #define IS_ALIGNED(x, size) ((uintptr_t(x) & ((size)-1)) == 0)
180 | |
181 | namespace avmplus |
182 | { |
183 | #define COREADDR(f) coreAddr((int (AvmCore::*)())(&f))
184 | #define GCADDR(f) gcAddr((int (MMgc::GC::*)())(&f))
185 | #define ENVADDR(f) envAddr((int (MethodEnv::*)())(&f))
186 | #define ARRAYADDR(f) arrayAddr((int (ArrayObject::*)())(&f))
187 | #define STRINGADDR(f) stringAddr((int (String::*)())(&f))
188 | #define VECTORINTADDR(f) vectorIntAddr((int (IntVectorObject::*)())(&f))
189 | #define VECTORUINTADDR(f) vectorUIntAddr((int (UIntVectorObject::*)())(&f))
190 | #define VECTORDOUBLEADDR(f) vectorDoubleAddr((int (DoubleVectorObject::*)())(&f))
191 | #define VECTORDOUBLEADDRF(f) vectorDoubleAddrF((double (DoubleVectorObject::*)())(&f))
192 | #define VECTOROBJADDR(f) vectorObjAddr((int (ObjectVectorObject::*)())(&f))
193 | #define EFADDR(f) efAddr((int (ExceptionFrame::*)())(&f))
194 | #define DEBUGGERADDR(f) debuggerAddr((int (Debugger::*)())(&f))
195 | #define FUNCADDR(addr) (uintptr_t)addr
196 | |
197 | intptr_t coreAddr( int (AvmCore::*f)() ) |
198 | { |
199 | RETURN_METHOD_PTR(AvmCore, f);
200 | } |
201 | |
202 | intptr_t gcAddr( int (MMgc::GC::*f)() ) |
203 | { |
204 | RETURN_METHOD_PTR(MMgc::GC, f);
205 | } |
206 | |
207 | intptr_t envAddr( int (MethodEnv::*f)() ) |
208 | { |
209 | RETURN_METHOD_PTR(MethodEnv, f);
210 | } |
211 | |
212 | #ifdef DEBUGGER |
213 | intptr_t debuggerAddr( int (Debugger::*f)() ) |
214 | { |
215 | RETURN_METHOD_PTR(Debugger, f);
216 | } |
217 | #endif /* DEBUGGER */ |
218 | |
219 | intptr_t arrayAddr(int (ArrayObject::*f)()) |
220 | { |
221 | RETURN_METHOD_PTR(ArrayObject, f);
222 | } |
223 | |
224 | intptr_t stringAddr(int (String::*f)()) |
225 | { |
226 | RETURN_METHOD_PTR(String, f);
227 | } |
228 | |
229 | intptr_t vectorIntAddr(int (IntVectorObject::*f)()) |
230 | { |
231 | RETURN_METHOD_PTR(IntVectorObject, f);
232 | } |
233 | |
234 | intptr_t vectorUIntAddr(int (UIntVectorObject::*f)()) |
235 | { |
236 | RETURN_METHOD_PTR(UIntVectorObject, f);
237 | } |
238 | |
239 | intptr_t vectorDoubleAddr(int (DoubleVectorObject::*f)()) |
240 | { |
241 | RETURN_METHOD_PTR(DoubleVectorObject, f);
242 | } |
243 | |
244 | intptr_t vectorDoubleAddrF(double (DoubleVectorObject::*f)()) |
245 | { |
246 | RETURN_METHOD_PTR_F(DoubleVectorObject, f);
247 | } |
248 | |
249 | intptr_t vectorObjAddr(int (ObjectVectorObject::*f)()) |
250 | { |
251 | RETURN_METHOD_PTR(ObjectVectorObject, f);
252 | } |
253 | intptr_t efAddr( int (ExceptionFrame::*f)() ) |
254 | { |
255 | RETURN_METHOD_PTR(ExceptionFrame, f);
256 | } |
257 | |
258 | using namespace MMgc; |
259 | using namespace nanojit; |
260 | |
261 | #if defined _MSC_VER && !defined AVMPLUS_ARM |
262 | # define SETJMP ((uintptr_t)_setjmp3)
263 | #elif defined AVMPLUS_MAC_CARBON
264 | # define SETJMP setjmpAddress
265 | #else
266 | # define SETJMP ((uintptr_t)VMPI_setjmpNoUnwind)
267 | #endif // _MSC_VER |
268 | |
269 | #include "../core/jit-calls.h" |
270 | |
271 | #if NJ_EXPANDED_LOADSTORE_SUPPORTED && defined(VMCFG_UNALIGNED_INT_ACCESS) && defined(VMCFG_LITTLE_ENDIAN)
272 | #define VMCFG_MOPS_USE_EXPANDED_LOADSTORE_INT |
273 | #endif |
274 | |
275 | #if NJ_EXPANDED_LOADSTORE_SUPPORTED && defined(VMCFG_UNALIGNED_FP_ACCESS) && defined(VMCFG_LITTLE_ENDIAN)
276 | #define VMCFG_MOPS_USE_EXPANDED_LOADSTORE_FP |
277 | #endif |
278 | |
279 | // source of entropy for Assembler |
280 | JITNoise::JITNoise() |
281 | { |
282 | MathUtils::initRandom(&randomSeed); |
283 | } |
284 | |
285 | // produce a random number from 0-maxValue for the JIT to use in attack mitigation |
286 | uint32_t JITNoise::getValue(uint32_t maxValue) |
287 | { |
288 | int32_t v = MathUtils::Random(maxValue, &randomSeed); |
289 | AvmAssert(v >= 0);
290 | return (uint32_t)v; |
291 | } |
292 | |
293 | struct MopsInfo |
294 | { |
295 | uint32_t size; |
296 | LOpcode op; |
297 | const CallInfo* call; |
298 | }; |
299 | |
300 | static const MopsInfo kMopsLoadInfo[7] = { |
301 | { 1, LIR_ldc2i, FUNCTIONID(mop_lix8) },
302 | { 2, LIR_lds2i, FUNCTIONID(mop_lix16) },
303 | { 1, LIR_lduc2ui, FUNCTIONID(mop_liz8) },
304 | { 2, LIR_ldus2ui, FUNCTIONID(mop_liz16) },
305 | { 4, LIR_ldi, FUNCTIONID(mop_li32) },
306 | { 4, LIR_ldf2d, FUNCTIONID(mop_lf32) },
307 | { 8, LIR_ldd, FUNCTIONID(mop_lf64) }
308 | }; |
309 | |
310 | static const MopsInfo kMopsStoreInfo[5] = { |
311 | { 1, LIR_sti2c, FUNCTIONID(mop_si8) },
312 | { 2, LIR_sti2s, FUNCTIONID(mop_si16) },
313 | { 4, LIR_sti, FUNCTIONID(mop_si32) },
314 | { 4, LIR_std2f, FUNCTIONID(mop_sf32) },
315 | { 8, LIR_std, FUNCTIONID(mop_sf64) }
316 | }; |
317 | |
318 | class MopsRangeCheckFilter: public LirWriter |
319 | { |
320 | private: |
321 | LirWriter* const prolog_out; |
322 | LIns* const env_domainenv; |
323 | LIns* curMemBase; |
324 | LIns* curMemSize; |
325 | LIns* curMopAddr; |
326 | LIns* curRangeCheckLHS; |
327 | LIns* curRangeCheckRHS; |
328 | int32_t curRangeCheckMinValue; |
329 | int32_t curRangeCheckMaxValue; |
330 | |
331 | private: |
332 | void clearMemBaseAndSize(); |
333 | |
334 | static void extractConstantDisp(LIns*& mopAddr, int32_t& curDisp); |
335 | LIns* safeIns2(LOpcode op, LIns*, int32_t); |
336 | void safeRewrite(LIns* ins, int32_t); |
337 | |
338 | public: |
339 | MopsRangeCheckFilter(LirWriter* out, LirWriter* prolog_out, LIns* env_domainenv); |
340 | |
341 | LIns* emitRangeCheck(LIns*& mopAddr, int32_t const size, int32_t* disp, LIns*& br); |
342 | void flushRangeChecks(); |
343 | |
344 | // overrides from LirWriter |
345 | LIns* ins0(LOpcode v); |
346 | LIns* insCall(const CallInfo* call, LIns* args[]); |
347 | }; |
348 | |
349 | inline MopsRangeCheckFilter::MopsRangeCheckFilter(LirWriter* out, LirWriter* prolog_out, LIns* env_domainenv) : |
350 | LirWriter(out), |
351 | prolog_out(prolog_out), |
352 | env_domainenv(env_domainenv), |
353 | curMemBase(NULL),
354 | curMemSize(NULL),
355 | curMopAddr(NULL),
356 | curRangeCheckLHS(NULL),
357 | curRangeCheckRHS(NULL),
358 | curRangeCheckMinValue(int32_t(0x7fffffff)), |
359 | curRangeCheckMaxValue(int32_t(0x80000000)) |
360 | { |
361 | clearMemBaseAndSize(); |
362 | } |
363 | |
364 | void MopsRangeCheckFilter::clearMemBaseAndSize() |
365 | { |
366 | curMemBase = curMemSize = NULL;
367 | } |
368 | |
369 | void MopsRangeCheckFilter::flushRangeChecks() |
370 | { |
371 | AvmAssert((curRangeCheckLHS != NULL) == (curRangeCheckRHS != NULL));
372 | if (curRangeCheckLHS)
373 | {
374 | curRangeCheckLHS = curRangeCheckRHS = curMopAddr = NULL;
375 | curRangeCheckMinValue = int32_t(0x7fffffff); |
376 | curRangeCheckMaxValue = int32_t(0x80000000); |
377 | // but don't clearMemBaseAndSize()! |
378 | } |
379 | else |
380 | { |
381 | AvmAssert(curMopAddr == NULL);
382 | AvmAssert(curRangeCheckMinValue == int32_t(0x7fffffff));
383 | AvmAssert(curRangeCheckMaxValue == int32_t(0x80000000));
384 | } |
385 | } |
386 | |
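// (Added note: the helper below detects 32-bit overflow by comparing the
// exact 64-bit sum with the wrapped 32-bit sum; e.g. for a = 0x7fffffff,
// b = 1 the 32-bit sum wraps negative, the comparison fails, and the
// displacement folding in extractConstantDisp() is abandoned.)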
387 | static bool sumFitsInInt32(int32_t a, int32_t b)
388 | { |
389 | return int64_t(a) + int64_t(b) == int64_t(a + b); |
390 | } |
391 | |
392 | /*static*/ void MopsRangeCheckFilter::extractConstantDisp(LIns*& mopAddr, int32_t& curDisp) |
393 | { |
394 | // mopAddr is an int (an offset from globalMemoryBase) on all archs. |
395 | // if mopAddr is an expression of the form |
396 | // expr+const |
397 | // const+expr |
398 | // expr-const |
399 | // (but not const-expr) |
400 | // then try to pull the constant out and return it as a displacement to |
401 | // be used in the instruction as an addressing-mode offset. |
402 | // (but only if caller requests it that way.) |
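// (Added example: for mopAddr == (expr + 16) - 4, the loop below folds both
// constants, leaving mopAddr == expr and curDisp increased by 12.)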
403 | for (;;) |
404 | { |
405 | LOpcode const op = mopAddr->opcode(); |
406 | if (op != LIR_addi && op != LIR_subi) |
407 | break; |
408 | |
409 | int32_t imm; |
410 | LIns* nonImm; |
411 | if (mopAddr->oprnd2()->isImmI()) |
412 | { |
413 | imm = mopAddr->oprnd2()->immI(); |
414 | nonImm = mopAddr->oprnd1(); |
415 | |
416 | if (op == LIR_subi) |
417 | imm = -imm; |
418 | } |
419 | else if (mopAddr->oprnd1()->isImmI()) |
420 | { |
421 | // don't try to optimize const-expr |
422 | if (op == LIR_subi) |
423 | break; |
424 | |
425 | imm = mopAddr->oprnd1()->immI(); |
426 | nonImm = mopAddr->oprnd2(); |
427 | } |
428 | else |
429 | { |
430 | break; |
431 | } |
432 | |
433 | if (!sumFitsInInt32(curDisp, imm)) |
434 | break; |
435 | |
436 | curDisp += imm; |
437 | mopAddr = nonImm; |
438 | } |
439 | } |
440 | |
441 | LIns* MopsRangeCheckFilter::emitRangeCheck(LIns*& mopAddr, int32_t const size, int32_t* disp, LIns*& br) |
442 | { |
443 | int32_t offsetMin = 0; |
444 | if (disp != NULL)
445 | { |
446 | *disp = 0; |
447 | extractConstantDisp(mopAddr, *disp); |
448 | offsetMin = *disp; |
449 | } |
450 | |
451 | int32_t offsetMax = offsetMin + size; |
452 | |
453 | AvmAssert((curRangeCheckLHS != NULL) == (curRangeCheckRHS != NULL));
454 | 
455 | AvmAssert(mopAddr != NULL);
456 | if (curRangeCheckLHS != NULL && curMopAddr == mopAddr)
457 | { |
458 | int32_t n_curRangeCheckMin = curRangeCheckMinValue; |
459 | if (n_curRangeCheckMin > offsetMin) |
460 | n_curRangeCheckMin = offsetMin; |
461 | int32_t n_curRangeCheckMax = curRangeCheckMaxValue; |
462 | if (n_curRangeCheckMax < offsetMax) |
463 | n_curRangeCheckMax = offsetMax; |
464 | |
465 | if ((n_curRangeCheckMax - n_curRangeCheckMin) <= DomainEnv::GLOBAL_MEMORY_MIN_SIZE) |
466 | { |
467 | if (curRangeCheckMinValue != n_curRangeCheckMin) |
468 | safeRewrite(curRangeCheckLHS, curRangeCheckMinValue); |
469 | |
470 | if ((n_curRangeCheckMax - n_curRangeCheckMin) != (curRangeCheckMaxValue - curRangeCheckMinValue)) |
471 | safeRewrite(curRangeCheckRHS, curRangeCheckMaxValue - curRangeCheckMinValue); |
472 | |
473 | curRangeCheckMinValue = n_curRangeCheckMin; |
474 | curRangeCheckMaxValue = n_curRangeCheckMax; |
475 | } |
476 | else |
477 | { |
478 | // if collapsed ranges get too large, pre-emptively flush, so that the |
479 | // range-checking code can always assume the range is within minsize |
480 | flushRangeChecks(); |
481 | } |
482 | } |
483 | else |
484 | { |
485 | flushRangeChecks(); |
486 | } |
487 | |
488 | if (!curMemBase) |
489 | { |
490 | //AvmAssert(curMemSize == NULL); |
491 | curMemBase = out->insLoad(LIR_ldp, env_domainenv, offsetof(DomainEnv,m_globalMemoryBase), ACCSET_OTHER);
492 | curMemSize = out->insLoad(LIR_ldi, env_domainenv, offsetof(DomainEnv,m_globalMemorySize), ACCSET_OTHER);
493 | } |
494 | |
495 | AvmAssert((curRangeCheckLHS != NULL) == (curRangeCheckRHS != NULL));
496 | 
497 | if (!curRangeCheckLHS)
498 | {
499 | AvmAssert(!curMopAddr);
500 | curMopAddr = mopAddr;
501 | curRangeCheckMinValue = offsetMin;
502 | curRangeCheckMaxValue = offsetMax;
503 | 
504 | AvmAssert(env_domainenv != NULL);
505 | |
506 | // we want to pass range-check if |
507 | // |
508 | // (curMopAddr+curRangeCheckMin >= 0 && curMopAddr+curRangeCheckMax <= mopsMemorySize) |
509 | // |
510 | // which is the same as |
511 | // |
512 | // (curMopAddr >= -curRangeCheckMin && curMopAddr <= mopsMemorySize - curRangeCheckMax) |
513 | // |
514 | // which is the same as |
515 | // |
516 | // (curMopAddr >= -curRangeCheckMin && curMopAddr < mopsMemorySize - curRangeCheckMax + 1) |
517 | // |
518 | // and since (x >= min && x < max) is equivalent to (unsigned)(x-min) < (unsigned)(max-min) |
519 | // |
520 | // (unsigned(curMopAddr + curRangeCheckMin) < unsigned(mopsMemorySize - curRangeCheckMax + 1 + curRangeCheckMin)) |
521 | // |
522 | // from there, you'd think you could do |
523 | // |
524 | // (curMopAddr < mopsMemorySize - curRangeCheckMax + 1)) |
525 | // |
526 | // but that is only valid if you are certain that curMopAddr>0, due to the unsigned casting... |
527 | // and curMopAddr could be anything, which is really the point of this whole exercise. Instead, settle for |
528 | // |
529 | // (unsigned(curMopAddr + curRangeCheckMin) <= unsigned(mopsMemorySize - (curRangeCheckMax-curRangeCheckMin))) |
530 | // |
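// (Added worked example, with an assumed mopsMemorySize of 1024: a 4-byte
// access at constant offset 0 gives curRangeCheckMin = 0 and
// curRangeCheckMax = 4, so the emitted test is
// unsigned(curMopAddr + 0) <= unsigned(1024 - 4), which accepts exactly the
// addresses 0..1020 whose accessed bytes [addr, addr+4) lie inside the
// 1024-byte global memory.)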
531 | |
532 | AvmAssert(curRangeCheckMaxValue > curRangeCheckMinValue);
533 | AvmAssert(curRangeCheckMaxValue - curRangeCheckMinValue <= DomainEnv::GLOBAL_MEMORY_MIN_SIZE);
534 | |
535 | curRangeCheckLHS = safeIns2(LIR_addi, curMopAddr, curRangeCheckMinValue); |
536 | curRangeCheckRHS = safeIns2(LIR_subi, curMemSize, curRangeCheckMaxValue - curRangeCheckMinValue); |
537 | |
538 | LIns* cond = this->ins2(LIR_leui, curRangeCheckLHS, curRangeCheckRHS); |
539 | br = this->insBranch(LIR_jf, cond, NULL);
540 | } |
541 | |
542 | return curMemBase; |
543 | } |
544 | |
545 | // workaround for WE2569232: don't let these adds get specialized or CSE'd. |
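// (Added note: the constant is emitted via prolog_out so it dominates every
// body instruction; safeRewrite() may later splice a fresh prolog constant
// into an add/sub emitted earlier in the body. The assert below verifies the
// downstream writers returned a fresh, unshared instruction.)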
546 | LIns* MopsRangeCheckFilter::safeIns2(LOpcode op, LIns* lhs, int32_t rhsConst) |
547 | { |
548 | LIns* rhs = prolog_out->insImmI(rhsConst); |
549 | LIns* ins = out->ins2(op, lhs, rhs); |
550 | AvmAssert(ins->isop(op) && ins->oprnd1() == lhs && ins->oprnd2() == rhs);
551 | return ins; |
552 | } |
553 | |
554 | // rewrite the instruction with a new rhs constant |
555 | void MopsRangeCheckFilter::safeRewrite(LIns* ins, int32_t rhsConst) |
556 | { |
557 | LIns* rhs = prolog_out->insImmI(rhsConst); |
558 | AvmAssert(ins->isop(LIR_addi) || ins->isop(LIR_subi));
559 | ins->initLInsOp2(ins->opcode(), ins->oprnd1(), rhs); |
560 | } |
561 | |
562 | LIns* MopsRangeCheckFilter::ins0(LOpcode v) |
563 | { |
564 | if (v == LIR_label) |
565 | { |
566 | flushRangeChecks(); |
567 | clearMemBaseAndSize(); |
568 | } |
569 | return LirWriter::ins0(v); |
570 | } |
571 | |
572 | LIns* MopsRangeCheckFilter::insCall(const CallInfo *ci, LIns* args[]) |
573 | { |
574 | // calls could potentially resize globalMemorySize, so we |
575 | // can't collapse range checks across them |
576 | if (!ci->_isPure) |
577 | { |
578 | flushRangeChecks(); |
579 | clearMemBaseAndSize(); |
580 | } |
581 | return LirWriter::insCall(ci, args); |
582 | } |
583 | |
584 | /** |
585 | * --------------------------------- |
586 | * Instruction convenience functions |
587 | * --------------------------------- |
588 | */ |
589 | |
590 | // address calc instruction |
591 | LIns* CodegenLIR::leaIns(int32_t disp, LIns* base) { |
592 | return lirout->ins2(LIR_addp, base, InsConstPtr((void*)disp)); |
593 | } |
594 | |
595 | LIns* CodegenLIR::localCopy(int i) |
596 | { |
597 | switch (bt(state->value(i).traits)) { |
598 | case BUILTIN_number: |
599 | return localGetf(i); |
600 | case BUILTIN_boolean: |
601 | case BUILTIN_int: |
602 | case BUILTIN_uint: |
603 | return localGet(i); |
604 | default: |
605 | return localGetp(i); |
606 | } |
607 | } |
608 | |
609 | // returns true if mask has exactly one bit set |
610 | // see http://aggregate.org/MAGIC/#Is%20Power%20of%202 |
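// (added example: m == 0x10 -> 0x10 & 0x0f == 0, true; m == 0x18 -> 0x18 & 0x17 == 0x10, false)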
611 | REALLY_INLINE bool exactlyOneBit(uint32_t m)
612 | {
613 | AvmAssert(m != 0);
614 | return (m & (m-1)) == 0; |
615 | } |
616 | |
617 | void CodegenLIR::localSet(int i, LIns* o, Traits* type) |
618 | { |
619 | BuiltinType tag = bt(type); |
620 | SlotStorageType sst = valueStorageType(tag); |
621 | #ifdef DEBUG |
622 | jit_sst[i] = uint8_t(1 << sst); |
623 | #endif |
624 | lirout->insStore(o, vars, i * VARSIZE, ACCSET_VARS); |
625 | lirout->insStore(LIR_sti2c, InsConst(sst), tags, i, ACCSET_TAGS); |
626 | } |
627 | |
628 | LIns* CodegenLIR::atomToNativeRep(int i, LIns* atom) |
629 | { |
630 | return atomToNativeRep(state->value(i).traits, atom); |
631 | } |
632 | |
633 | LIns* CodegenLIR::ptrToNativeRep(Traits*t, LIns* ptr) |
634 | { |
635 | return t->isMachineType() ? addp(ptr, kObjectType) : ptr; |
636 | } |
637 | |
638 | #ifdef _DEBUG |
639 | bool CodegenLIR::isPointer(int i) {
640 | return !state->value(i).traits->isMachineType(); |
641 | } |
642 | #endif |
643 | |
644 | LIns* CodegenLIR::loadAtomRep(int i) |
645 | { |
646 | return nativeToAtom(localCopy(i), state->value(i).traits); |
647 | } |
648 | |
649 | LIns* CodegenLIR::storeAtomArgs(int count, int index) |
650 | { |
651 | LIns* ap = insAlloc(sizeof(Atom)*count); |
652 | for (int i=0; i < count; i++) |
653 | stp(loadAtomRep(index++), ap, i * sizeof(Atom), ACCSET_OTHER); |
654 | return ap; |
655 | } |
656 | |
657 | LIns* CodegenLIR::storeAtomArgs(LIns* receiver, int count, int index) |
658 | { |
659 | #ifdef NJ_VERBOSE |
660 | if (verbose()) |
661 | core->console << " store args\n"; |
662 | #endif |
663 | LIns* ap = insAlloc(sizeof(Atom)*(count+1)); |
664 | stp(receiver, ap, 0, ACCSET_OTHER); |
665 | for (int i=1; i <= count; i++) |
666 | { |
667 | LIns* v = loadAtomRep(index++); |
668 | stp(v, ap, sizeof(Atom)*i, ACCSET_OTHER); |
669 | } |
670 | return ap; |
671 | } |
672 | |
673 | CodegenLIR::CodegenLIR(MethodInfo* i, MethodSignaturep ms, Toplevel* toplevel, |
674 | OSR* osr_state) : |
675 | LirHelper(i->pool()), |
676 | info(i), |
677 | ms(ms), |
678 | toplevel(toplevel), |
679 | pool(i->pool()), |
680 | osr(osr_state), |
681 | driver(NULL),
682 | state(NULL),
683 | mopsRangeCheckFilter(NULL),
684 | restArgc(NULL),
685 | restLocal(-1),
686 | interruptable(true),
687 | npe_label("npe"), |
688 | upe_label("upe"), |
689 | interrupt_label("interrupt"), |
690 | mop_rangeCheckFailed_label("mop_rangeCheckFailed"), |
691 | catch_label("catch"), |
692 | inlineFastpath(false),
693 | call_cache_builder(*alloc1, *initCodeMgr(pool)), |
694 | get_cache_builder(*alloc1, *pool->codeMgr), |
695 | set_cache_builder(*alloc1, *pool->codeMgr), |
696 | prolog(NULL),
697 | skip_ins(NULL),
698 | specializedCallHashMap(NULL),
699 | builtinFunctionOptimizerHashMap(NULL),
700 | blockLabels(NULL),
701 | cseFilter(NULL),
702 | noise() |
703 | DEBUGGER_ONLY(, haveDebugger(core->debugger() != NULL) ) |
704 | { |
705 | #ifdef AVMPLUS_MAC_CARBON |
706 | setjmpInit(); |
707 | #endif |
708 | |
709 | verbose_only( |
710 | if (pool->isVerbose(VB_jit, i)) { |
711 | core->console << "codegen " << i; |
712 | core->console << |
713 | " required=" << ms->requiredParamCount() << |
714 | " optional=" << (ms->param_count() - ms->requiredParamCount()) << "\n"; |
715 | }) |
716 | } |
717 | |
718 | CodegenLIR::~CodegenLIR() { |
719 | cleanup(); |
720 | } |
721 | |
722 | void CodegenLIR::cleanup() |
723 | { |
724 | finddef_cache_builder.cleanup(); |
725 | LirHelper::cleanup(); |
726 | } |
727 | |
728 | #ifdef AVMPLUS_MAC_CARBON |
729 | int CodegenLIR::setjmpAddress = 0; |
730 | |
731 | extern "C" int __setjmp(); |
732 | |
733 | asm int CodegenLIR::setjmpDummy(jmp_buf buf) |
734 | { |
735 | b __setjmp; |
736 | } |
737 | |
738 | void CodegenLIR::setjmpInit() |
739 | { |
740 | // CodeWarrior defies all reasonable efforts to get |
741 | // the address of __vec_setjmp. So, we resort to |
742 | // a crude hack: We'll search the actual code |
743 | // of setjmpDummy for the branch instruction. |
744 | if (setjmpAddress == 0) |
745 | { |
746 | setjmpAddress = *((int*)&setjmpDummy); |
747 | } |
748 | } |
749 | #endif |
750 | |
751 | void CodegenLIR::suspendCSE() |
752 | { |
753 | if (cseFilter) cseFilter->suspend(); |
754 | } |
755 | |
756 | void CodegenLIR::resumeCSE() |
757 | { |
758 | if (cseFilter) cseFilter->resume(); |
759 | } |
760 | |
761 | LIns* CodegenLIR::atomToNativeRep(Traits* t, LIns* atom) |
762 | { |
763 | return atomToNative(bt(t), atom); |
764 | } |
765 | |
766 | bool isNullable(Traits* t) {
767 | BuiltinType bt = Traits::getBuiltinType(t); |
768 | return bt != BUILTIN_int && bt != BUILTIN_uint && bt != BUILTIN_boolean && bt != BUILTIN_number; |
769 | } |
770 | |
771 | /** |
772 | * Eliminates redundant loads within a block, and tracks the nullability of pointers |
773 | * within blocks and across edges. CodegenLIR will inform VarTracker that a |
774 | * pointer is not null by calling setNotNull(ptr, type) either when the Verifier's |
775 | * FrameState.FrameValue is not null, in localGetp(), or after a null check in emitNullCheck(). |
776 | * |
777 | * Within a block, we track nullability of references to instructions; when references |
778 | * are copied, we know the copies are not null. |
779 | * |
780 | * At block boundaries, different values may flow together, so we track nullability |
781 | * in variable slots instead of specific instruction references. |
782 | */ |
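// (Added sketch of the intent, assuming AS3 source like
//     if (o != null) { o.x; o.x; }
// the first access emits a null check and setNotNull() records the LIns*;
// the second access of the same var in the same block finds that LIns* in
// the checked set, so its null check can be elided. Across a label the
// knowledge is kept per variable slot in the notnull BitSet instead, since
// different LIns* values may merge there.)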
783 | class VarTracker: public LirWriter |
784 | { |
785 | Allocator &alloc; // Allocator for the lifetime of this filter |
786 | LIns** varTracker; // remembers the last value stored in each var |
787 | LIns** tagTracker; // remembers the last tag stored in each var |
788 | HashMap<LIns*, bool> *checked; // pointers we know are not null.
789 | nanojit::BitSet *notnull; // stack locations we know are not null |
790 | LIns* vars; // LIns that defines the vars[] array |
791 | LIns* tags; // LIns that defines the tags[] array |
792 | const int nvar; // this method's frame size. |
793 | const int scopeBase; // index of first local scope |
794 | const int stackBase; // index of first stack slot |
795 | int restLocal; // -1 or, if it's lazily allocated, the local holding the rest array |
796 | |
797 | // false after an unconditional control flow instruction (jump, throw, return), |
798 | // true from the start and after we start a block via trackLabel() |
799 | bool reachable;
800 | 
801 | // true if any backedges exist in LIR, false otherwise.
802 | bool has_backedges;
803 | 
804 | #ifdef DEBUGGER
805 | bool haveDebugger; // true if debugger is currently enabled
806 | #else
807 | static const bool haveDebugger = false;
808 | #endif
809 | #ifdef AVMPLUS_VERBOSE
810 | bool verbose; // true when generating verbose output
811 | #else
812 | static const bool verbose = false;
813 | #endif |
814 | |
815 | public: |
816 | VarTracker(MethodInfo* info, Allocator& alloc, LirWriter *out, |
817 | int nvar, int scopeBase, int stackBase, int restLocal, |
818 | uint32_t code_len) |
819 | : LirWriter(out), alloc(alloc), |
820 | vars(NULL), tags(NULL),
821 | nvar(nvar), scopeBase(scopeBase), stackBase(stackBase),
822 | restLocal(restLocal), reachable(true),
823 | has_backedges(false)
824 | #ifdef DEBUGGER
825 | , haveDebugger(info->pool()->core->debugger() != NULL)
826 | #endif |
827 | #ifdef AVMPLUS_VERBOSE |
828 | , verbose(info->pool()->isVerbose(VB_jit, info)) |
829 | #endif |
830 | { |
831 | (void) info; // suppress warning if !DEBUGGER && !AVMPLUS_VERBOSE |
832 | varTracker = new (alloc) LIns*[nvar]; |
833 | tagTracker = new (alloc) LIns*[nvar]; |
834 | // allocate a large value until https://bugzilla.mozilla.org/show_bug.cgi?id=565489 is resolved |
835 | checked = new (alloc) InsSet(alloc, code_len < 16700 ? code_len : 16700); |
836 | notnull = new (alloc) nanojit::BitSet(alloc, nvar); |
837 | clearState(); |
838 | } |
839 | |
840 | void init(LIns *vars, LIns* tags) { |
841 | this->vars = vars; |
842 | this->tags = tags; |
843 | } |
844 | |
845 | bool hasBackedges() const {
846 | return has_backedges;
847 | }
848 | 
849 | void setNotNull(LIns* ins, Traits* t) {
850 | if (isNullable(t))
851 | checked->put(ins, true);
852 | }
853 | 
854 | bool isNotNull(LIns* ins) {
855 | return checked->containsKey(ins); |
856 | } |
857 | |
858 | void initNotNull(const FrameState* state) { |
859 | syncNotNull(notnull, state); |
860 | } |
861 | |
862 | // We're at the start of an AS3 basic block; synchronize our
863 | // notnull bits for that block with ones from the driver. |
864 | void syncNotNull(nanojit::BitSet* bits, const FrameState* state) { |
865 | int scopeTop = scopeBase + state->scopeDepth; |
866 | int stackTop = stackBase + state->stackDepth; |
867 | if (state->targetOfBackwardsBranch) { |
868 | // Clear any notNull bits that are not set in FrameState. |
869 | for (int i=0, n=nvar; i < n; i++) { |
870 | const FrameValue& v = state->value(i); |
871 | bool stateNotNull = v.notNull && isNullable(v.traits);
872 | if (!stateNotNull || (i >= scopeTop && i < stackBase) || (i >= stackTop)) |
873 | bits->clear(i); |
874 | else |
875 | bits->set(i); |
876 | } |
877 | printNotNull(bits, "loop label", state); |
878 | } else { |
879 | // Set any notNull bits that are set in FrameState. |
880 | // If we are tracking an expression for a non-null variable, |
881 | // add it to the set of checked expressions. |
882 | for (int i=0, n=nvar; i < n; i++) { |
883 | const FrameValue& v = state->value(i); |
884 | bool stateNotNull = v.notNull && isNullable(v.traits);
885 | if ((i >= scopeTop && i < stackBase) || (i >= stackTop)) { |
886 | bits->clear(i); |
887 | } else if (stateNotNull) { |
888 | bits->set(i); |
889 | if (varTracker[i]) |
890 | checked->put(varTracker[i], true);
891 | } else if (bits->get(i) && varTracker[i])
892 | checked->put(varTracker[i], true);
893 | } |
894 | printNotNull(bits, "forward label", state); |
895 | } |
896 | } |
897 | |
898 | // Model a control flow edge by merging our current state with the state |
899 | // saved at the target. Used for forward branches and exception edges. |
900 | void trackForwardEdge(CodegenLabel& target, bool isExceptionEdge) {
901 | AvmAssert(target.labelIns == NULL); // illegal to call trackEdge on backedge
902 | |
903 | // Merge varTracker/tagTracker state with state at target label. |
904 | // Due to hidden internal control flow in exception dispatch, state may not be |
905 | // propagated across exception edges. Thus, if a label may be reached from such |
906 | // an edge, we cannot determine accurate state at the label, and must clear it. |
907 | // The state will remain cleared due to the monotonicity of the merge. |
908 | if (!target.varTracker) { |
909 | // Allocate state vectors for target label upon first encounter. |
910 | target.varTracker = new (alloc) LIns*[nvar]; |
911 | target.tagTracker = new (alloc) LIns*[nvar]; |
912 | if (isExceptionEdge) { |
913 | VMPI_memset(target.varTracker, 0, nvar*sizeof(LIns*));
914 | VMPI_memset(target.tagTracker, 0, nvar*sizeof(LIns*));
915 | } else {
916 | VMPI_memcpy(target.varTracker, varTracker, nvar*sizeof(LIns*));
917 | VMPI_memcpy(target.tagTracker, tagTracker, nvar*sizeof(LIns*));
918 | }
919 | } else if (isExceptionEdge) {
920 | VMPI_memset(target.varTracker, 0, nvar*sizeof(LIns*));
921 | VMPI_memset(target.tagTracker, 0, nvar*sizeof(LIns*));
922 | } else { |
923 | for (int i=0, n=nvar; i < n; i++) { |
924 | if (varTracker[i] != target.varTracker[i]) |
925 | target.varTracker[i] = NULL;
926 | if (tagTracker[i] != target.tagTracker[i])
927 | target.tagTracker[i] = NULL;
928 | } |
929 | } |
930 | |
931 | for (int i=0, n=nvar; i < n; i++) { |
932 | if (varTracker[i]) { |
933 | if (checked->containsKey(varTracker[i])) |
934 | notnull->set(i); |
935 | else |
936 | AvmAssert(!notnull->get(i));
937 | } |
938 | } |
939 | if (!target.notnull) { |
940 | //printf("save state\n"); |
941 | target.notnull = new (alloc) nanojit::BitSet(alloc, nvar); |
942 | target.notnull->setFrom(*notnull); |
943 | } else { |
944 | // target.notnull &= notnull |
945 | for (int i=0, n=nvar; i < n; i++) |
946 | if (!notnull->get(i)) |
947 | target.notnull->clear(i); |
948 | } |
949 | } |
950 | |
951 | #ifdef AVMPLUS_VERBOSE |
952 | void printNotNull(nanojit::BitSet* bits, const char* title, |
953 | const FrameState* state) { |
954 | if (0 && verbose) { |
955 | if (bits) { |
956 | int max_scope = scopeBase + state->scopeDepth - 1; |
957 | int max_stack = stackBase + state->stackDepth; |
958 | printf("%s [0-%d,%d-%d] notnull = ", title, max_scope+1, stackBase, max_stack-1); |
959 | for (int i=0; i < max_stack; i = i != max_scope ? i + 1 : stackBase) |
960 | if (bits->get(i)) |
961 | printf("%d ", i); |
962 | printf("\n"); |
963 | } else { |
964 | printf("%s notnull = null\n", title); |
965 | } |
966 | } |
967 | } |
968 | #else |
969 | void printNotNull(nanojit::BitSet*, const char*, const FrameState*) |
970 | {} |
971 | #endif |
972 | |
973 | void checkBackEdge(CodegenLabel& target, const FrameState* state) { |
974 | has_backedges = true;
975 | #ifdef DEBUG
976 | AvmAssert(target.labelIns != NULL);
977 | if (target.notnull) { |
978 | printNotNull(notnull, "current", state); |
979 | printNotNull(target.notnull, "target", state); |
980 | int scopeTop = scopeBase + state->scopeDepth; |
981 | int stackTop = stackBase + state->stackDepth; |
982 | // make sure our notnull bits at the target of the backedge were safe. |
983 | for (int i=0, n=nvar; i < n; i++) { |
984 | if ((i >= scopeTop && i < stackBase) || i >= stackTop) |
985 | continue; // skip empty locations |
986 | if (!isNullable(state->value(i).traits)) |
987 | continue; // skip non-nullable types in current state |
988 | // current target assert(!target || current) |
989 | // ------- ------ ------ |
990 | // false false true |
991 | // false true false (assertion fires) |
992 | // true false true |
993 | // true true true |
994 | bool currentNotNull = (varTracker[i] ? isNotNull(varTracker[i]) : false) || notnull->get(i);
995 | AvmAssert(!target.notnull->get(i) || currentNotNull);
996 | } |
997 | } |
998 | #else |
999 | (void) target; |
1000 | (void) state; |
1001 | #endif // DEBUG |
1002 | } |
1003 | |
1004 | // starts a new block. if the new label is reachable from here, |
1005 | // merge our state with it. then initialize from the new merged state. |
1006 | void trackLabel(CodegenLabel& label, const FrameState* state) { |
1007 | if (reachable) |
1008 | trackForwardEdge(label, false); // model the fall-through path as an edge
1009 | clearState(); |
1010 | label.labelIns = out->ins0(LIR_label); |
1011 | |
1012 | // Load varTracker/tagTracker state accumulated from forward branches. |
1013 | // Do not load if there are any backward branches, as the tracker state may |
1014 | // not be accurate. Just switch the pointers -- no need to copy the arrays. |
1015 | if (!state->targetOfBackwardsBranch && label.varTracker) { |
1016 | varTracker = label.varTracker; |
1017 | tagTracker = label.tagTracker; |
1018 | } |
1019 | |
1020 | // load state saved at label |
1021 | if (label.notnull) { |
1022 | syncNotNull(label.notnull, state); |
1023 | notnull->setFrom(*label.notnull); |
1024 | printNotNull(notnull, "merged label", state); |
1025 | } else { |
1026 | syncNotNull(notnull, state); |
1027 | printNotNull(notnull, "first-time label", state); |
1028 | } |
1029 | reachable = true;
1030 | } |
1031 | |
1032 | // Clear the var and tag expression states, but do not clear the nullability |
1033 | // state. Called around debugger safe points to ensure that we reload values |
1034 | // that are possibly modified by the debugger. Clearing the nullability state |
1035 | // correctly must be done at the verifier level, and at that level, it must always |
1036 | // be done or never be done (can't be conditional on debugging). |
1037 | // FIXME: bug 544238: clearing only the var state has questionable validity |
1038 | void clearVarState() { |
1039 | VMPI_memset(varTracker, 0, nvar*sizeof(LIns*));
1040 | VMPI_memset(tagTracker, 0, nvar*sizeof(LIns*));
1041 | } |
1042 | |
1043 | // clear all nullability and var/tag tracking state at branch targets |
1044 | void clearState() { |
1045 | clearVarState(); |
1046 | checked->clear(); |
1047 | notnull->reset(); |
1048 | } |
1049 | |
1050 | REALLY_INLINE int varOffsetToIndex(int offset) {
1051 | AvmAssert(IS_ALIGNED(offset, VARSIZE));
1052 | return offset / VARSIZE; |
1053 | } |
1054 | |
1055 | // keep track of the value stored in var d and update notnull |
1056 | void trackVarStore(LIns *value, int i) { |
1057 | varTracker[i] = value; |
1058 | if (checked->containsKey(value)) |
1059 | notnull->set(i); |
1060 | else |
1061 | notnull->clear(i); |
1062 | } |
1063 | |
1064 | // keep track of the tag stored in var i. |
1065 | void trackTagStore(LIns *value, int i) { |
1066 | tagTracker[i] = value; |
1067 | } |
1068 | |
1069 | // The first time we see a load from variable i, remember it, |
1070 | // and if we know that slot is nonnull, add the load instruction to the nonnull set. |
1071 | void trackVarLoad(LIns* value, int i) { |
1072 | varTracker[i] = value; |
1073 | if (notnull->get(i)) |
1074 | checked->put(value, true);
1075 | } |
1076 | |
1077 | // first time we read a tag for variable i, remember it. |
1078 | void trackTagLoad(LIns* value, int i) { |
1079 | tagTracker[i] = value; |
1080 | } |
1081 | |
1082 | // monitor loads emitted by the LIR generator, track access to vars and tags |
1083 | LIns *insLoad(LOpcode op, LIns *base, int32_t d, AccSet accSet, LoadQual loadQual) { |
1084 | if (base == vars) { |
1085 | int i = varOffsetToIndex(d); |
1086 | LIns *val = varTracker[i]; |
1087 | if (!val) { |
1088 | val = out->insLoad(op, base, d, accSet, loadQual); |
1089 | trackVarLoad(val, i); |
1090 | } |
1091 | return val; |
1092 | } |
1093 | if (base == tags) { |
1094 | int i = d; // 1 byte per tag |
1095 | LIns *tag = tagTracker[i]; |
1096 | if (!tag) { |
1097 | tag = out->insLoad(op, base, d, accSet, loadQual); |
1098 | trackTagLoad(tag, i); |
1099 | } |
1100 | return tag; |
1101 | } |
1102 | return out->insLoad(op, base, d, accSet, loadQual); |
1103 | } |
1104 | |
1105 | // monitor all stores emitted by LIR generator, update our tracking state |
1106 | // when we see stores to vars or tags. |
1107 | LIns *insStore(LOpcode op, LIns *value, LIns *base, int32_t d, AccSet accSet) { |
1108 | if (base == vars) |
1109 | trackVarStore(value, varOffsetToIndex(d)); |
1110 | else if (base == tags) |
1111 | trackTagStore(value, d); |
1112 | return out->insStore(op, value, base, d, accSet); |
1113 | } |
1114 | |
1115 | // we expect the frontend to use CodegenLabels and call trackLabel for all |
1116 | // LIR label creation. Assert to prevent unknown label generation. |
1117 | LIns *ins0(LOpcode op) { |
1118 | AvmAssert(op != LIR_label); // trackState must be called directly to generate a label.
1119 | return out->ins0(op); |
1120 | } |
1121 | |
1122 | // set reachable = false after return instructions |
1123 | LIns* ins1(LOpcode op, LIns* a) { |
1124 | if (isRetOpcode(op)) |
1125 | reachable = false;
1126 | return out->ins1(op, a); |
1127 | } |
1128 | |
1129 | // set reachable = false after unconditional jumps |
1130 | LIns *insBranch(LOpcode v, LIns* cond, LIns* to) { |
1131 | if (v == LIR_j) |
1132 | reachable = false;
1133 | return out->insBranch(v, cond, to); |
1134 | } |
1135 | |
1136 | // set reachable = false after LIR_jtbl which has explicit targets for all cases |
1137 | LIns *insJtbl(LIns* index, uint32_t size) { |
1138 | reachable = false;
1139 | return out->insJtbl(index, size); |
1140 | } |
1141 | |
1142 | // assume any non-pure function can throw an exception, and that pure functions cannot. |
1143 | bool canThrow(const CallInfo* call)
1144 | { |
1145 | return !call->_isPure; |
1146 | } |
1147 | |
1148 | // if a debugger is attached, clear our tracking state when calling side-effect
1149 | // functions, which are effectively debugger safe points.
1150 | // also set reachable = false if the function is known to always throw, and never return. |
1151 | LIns *insCall(const CallInfo *call, LIns* args[]) { |
1152 | if (haveDebugger && canThrow(call)) |
1153 | clearVarState(); // debugger might have modified locals, so make sure we reload after call. |
1154 | if (neverReturns(call)) |
1155 | reachable = false;
1156 | if (call->_address == (uintptr_t)&restargHelper) { |
1157 | // That helper has a by-reference argument which points into the vars array |
1158 | AvmAssert(restLocal != -1);
1159 | varTracker[restLocal] = 0; |
1160 | } |
1161 | return out->insCall(call, args); |
1162 | } |
1163 | }; |
1164 | |
1165 | LIns* CodegenLIR::localGet(int i) { |
1166 | #ifdef DEBUG |
1167 | const FrameValue& v = state->value(i); |
1168 | AvmAssert((v.sst_mask == (1 << SST_int32) && v.traits == INT_TYPE) ||
1169 | (v.sst_mask == (1 << SST_uint32) && v.traits == UINT_TYPE) ||
1170 | (v.sst_mask == (1 << SST_bool32) && v.traits == BOOLEAN_TYPE));
1171 | #endif |
1172 | return lirout->insLoad(LIR_ldi, vars, i * VARSIZE, ACCSET_VARS); |
1173 | } |
1174 | |
1175 | LIns* CodegenLIR::localGetf(int i) { |
1176 | #ifdef DEBUG |
1177 | const FrameValue& v = state->value(i); |
1178 | AvmAssert(v.sst_mask == (1<<SST_double) && v.traits == NUMBER_TYPE);
1179 | #endif |
1180 | return lirout->insLoad(LIR_ldd, vars, i * VARSIZE, ACCSET_VARS); |
1181 | } |
1182 | |
1183 | // Load a pointer-sized var, and update null tracking state if the driver |
1184 | // informs us that it is not null via FrameState.value. |
1185 | LIns* CodegenLIR::localGetp(int i) |
1186 | { |
1187 | const FrameValue& v = state->value(i); |
1188 | LIns* ins; |
1189 | if (exactlyOneBit(v.sst_mask)) { |
1190 | // pointer or atom |
1191 | AvmAssert(!(v.sst_mask == (1 << SST_int32) && v.traits == INT_TYPE) &&
1192 | !(v.sst_mask == (1 << SST_uint32) && v.traits == UINT_TYPE) &&
1193 | !(v.sst_mask == (1 << SST_bool32) && v.traits == BOOLEAN_TYPE) &&
1194 | !(v.sst_mask == (1 << SST_double) && v.traits == NUMBER_TYPE));
1195 | ins = lirout->insLoad(LIR_ldp, vars, i * VARSIZE, ACCSET_VARS);
1196 | } else {
1197 | // more than one representation is possible: convert to atom using tag found at runtime.
1198 | AvmAssert(bt(v.traits) == BUILTIN_any || bt(v.traits) == BUILTIN_object);
1199 | LIns* tag = lirout->insLoad(LIR_lduc2ui, tags, i, ACCSET_TAGS);
1200 | LIns* varAddr = leaIns(i * VARSIZE, vars);
1201 | ins = callIns(FUNCTIONID(makeatom), 3, coreAddr, varAddr, tag);
1202 | } |
1203 | if (v.notNull) |
1204 | varTracker->setNotNull(ins, v.traits); |
1205 | return ins; |
1206 | } |
1207 | |
1208 | LIns* CodegenLIR::callIns(const CallInfo *ci, uint32_t argc, ...) |
1209 | { |
1210 | const uint8_t* pc = state->abc_pc; |
1211 | |
1212 | // Each exception edge needs to be tracked to make sure we correctly |
1213 | // model the notnull state at the starts of catch blocks. Treat any function |
1214 | // with side effects as possibly throwing an exception. |
1215 | |
1216 | // We must ignore catch blocks that the driver has determined are not reachable, |
1217 | // because we emit a call to debugExit (modeled as possibly throwing) as part of |
1218 | // OP_returnvoid/returnvalue, which ordinarily don't throw. |
1219 | if (!ci->_isPure && pc >= try_from && pc < try_to) { |
1220 | // inside exception handler range, calling a function that could throw |
1221 | ExceptionHandlerTable *exTable = info->abc_exceptions(); |
1222 | for (int i=0, n=exTable->exception_count; i < n; i++) { |
1223 | ExceptionHandler* handler = &exTable->exceptions[i]; |
1224 | const uint8_t* from = code_pos + handler->from; |
1225 | const uint8_t* to = code_pos + handler->to; |
1226 | const uint8_t* target = code_pos + handler->target; |
1227 | if (pc >= from && pc < to && driver->hasFrameState(target)) |
1228 | varTracker->trackForwardEdge(getCodegenLabel(target), true);
1229 | } |
1230 | } |
1231 | |
1232 | va_list ap; |
1233 | va_start(ap, argc);
1234 | LIns* ins = LirHelper::vcallIns(ci, argc, ap);
1235 | va_end(ap);
1236 | return ins; |
1237 | } |
1238 | |
1239 | #if defined(DEBUGGER) && defined(_DEBUG) |
1240 | // The AS debugger requires type information for variables contained |
1241 | // in the AS frame regions (i.e. 'vars'). In the interpreter this |
1242 | // is not an issue, since the region contains boxed values (i.e. Atoms)
1243 | // and so the type information is self-contained. With the jit, this is |
1244 | // not the case, and thus 'tags' is used to track the type of each |
1245 | // variable in 'vars'. |
1246 | // This filter watches stores to 'vars' and 'tags' and upon encountering |
1247 | // debugline (i.e. place where debugger can halt), it ensures that the |
1248 | // tags entry is consistent with the value stored in 'vars' |
1249 | class DebuggerCheck : public LirWriter |
1250 | { |
1251 | AvmCore* core; |
1252 | LIns** varTracker; |
1253 | LIns** tagTracker; |
1254 | LIns *vars; |
1255 | LIns *tags; |
1256 | int nvar; |
1257 | public: |
1258 | DebuggerCheck(AvmCore* core, Allocator& alloc, LirWriter *out, int nvar) |
1259 | : LirWriter(out), core(core), vars(NULL), tags(NULL), nvar(nvar)
1260 | { |
1261 | varTracker = new (alloc) LIns*[nvar]; |
1262 | tagTracker = new (alloc) LIns*[nvar]; |
1263 | clearState(); |
1264 | } |
1265 | |
1266 | void init(LIns *vars, LIns *tags) { |
1267 | this->vars = vars; |
1268 | this->tags = tags; |
1269 | } |
1270 | |
1271 | void trackVarStore(LIns *value, int d) { |
1272 | AvmAssert(IS_ALIGNED(d, VARSIZE));
1273 | int i = d / VARSIZE; |
1274 | if (i >= nvar) |
1275 | return; |
1276 | varTracker[i] = value; |
1277 | } |
1278 | |
1279 | void trackTagStore(LIns *value, int d) { |
1280 | int i = d; // 1 byte per tag |
1281 | if (i >= nvar) |
1282 | return; |
1283 | tagTracker[i] = value; |
1284 | checkValid(i); |
1285 | tagTracker[i] = (LIns*)((intptr_t)value|1); // lower bit => validated; |
1286 | } |
1287 | |
1288 | void clearState() { |
1289 | VMPI_memset(varTracker, 0, nvar * sizeof(LIns*));
1290 | VMPI_memset(tagTracker, 0, nvar * sizeof(LIns*));
1291 | } |
1292 | |
1293 | void checkValid(int i) { |
1294 | // @pre tagTracker[i] has been previously filled |
1295 | LIns* val = varTracker[i]; |
1296 | LIns* tra = tagTracker[i]; |
1297 | NanoAssert(val && tra);
1298 | 
1299 | switch ((SlotStorageType) tra->immI()) {
1300 | case SST_double:
1301 | AvmAssert(val->isQorD());
1302 | break;
1303 | case SST_int32:
1304 | case SST_uint32:
1305 | case SST_bool32:
1306 | AvmAssert(val->isI());
1307 | break;
1308 | default:
1309 | AvmAssert(val->isP());
1310 | break; |
1311 | } |
1312 | } |
1313 | |
1314 | void checkState() { |
1315 | for (int i=0; i < this->nvar; i++) { |
1316 | LIns* val = varTracker[i]; |
1317 | LIns* tra = tagTracker[i]; |
1318 | AvmAssert(val && tra);
1319 | 
1320 | // isValid should have already been called on everything
1321 | AvmAssert(((intptr_t)tra&0x1) == 1);
1322 | } |
1323 | } |
1324 | |
1325 | LIns *insCall(const CallInfo *call, LIns* args[]) { |
1326 | if (call == FUNCTIONID(debugLine))
1327 | checkState(); |
1328 | return out->insCall(call,args); |
1329 | } |
1330 | |
1331 | LIns *insStore(LOpcode op, LIns *value, LIns *base, int32_t d, AccSet accSet) { |
1332 | if (base == vars) |
1333 | trackVarStore(value, d); |
1334 | else if (base == tags) |
1335 | trackTagStore(value, d); |
1336 | return out->insStore(op, value, base, d, accSet); |
1337 | } |
1338 | |
1339 | }; |
1340 | #endif |
1341 | |
1342 | // writer for the prolog. instructions written here dominate code in the |
1343 | // body, and so are added to the body's CseFilter |
1344 | class PrologWriter : public LirWriter |
1345 | { |
1346 | public: |
1347 | LIns* lastIns; |
1348 | LIns* env_scope; |
1349 | LIns* env_vtable; |
1350 | LIns* env_abcenv; |
1351 | LIns* env_domainenv; |
1352 | LIns* env_toplevel; |
1353 | |
1354 | PrologWriter(LirWriter *out): |
1355 | LirWriter(out), |
1356 | lastIns(NULL),
1357 | env_scope(NULL),
1358 | env_vtable(NULL),
1359 | env_abcenv(NULL),
1360 | env_domainenv(NULL),
1361 | env_toplevel(NULL)
1362 | {} |
1363 | |
1364 | virtual LIns* ins0(LOpcode v) { |
1365 | return lastIns = out->ins0(v); |
1366 | } |
1367 | virtual LIns* ins1(LOpcode v, LIns* a) { |
1368 | return lastIns = out->ins1(v, a); |
1369 | } |
1370 | virtual LIns* ins2(LOpcode v, LIns* a, LIns* b) { |
1371 | return lastIns = out->ins2(v, a, b); |
1372 | } |
1373 | virtual LIns* ins3(LOpcode v, LIns* a, LIns* b, LIns* c) { |
1374 | return lastIns = out->ins3(v, a, b, c); |
1375 | } |
1376 | virtual LIns* insGuard(LOpcode v, LIns *c, GuardRecord *gr) { |
1377 | return lastIns = out->insGuard(v, c, gr); |
1378 | } |
1379 | virtual LIns* insBranch(LOpcode v, LIns* condition, LIns* to) { |
1380 | return lastIns = out->insBranch(v, condition, to); |
1381 | } |
1382 | // arg: 0=first, 1=second, ... |
1383 | // kind: 0=arg 1=saved-reg |
1384 | virtual LIns* insParam(int32_t arg, int32_t kind) { |
1385 | return lastIns = out->insParam(arg, kind); |
1386 | } |
1387 | virtual LIns* insImmI(int32_t imm) { |
1388 | return lastIns = out->insImmI(imm); |
1389 | } |
1390 | #ifdef AVMPLUS_64BIT |
1391 | virtual LIns* insImmQ(uint64_t imm) { |
1392 | return lastIns = out->insImmQ(imm); |
1393 | } |
1394 | #endif |
1395 | virtual LIns* insImmD(double d) { |
1396 | return lastIns = out->insImmD(d); |
1397 | } |
1398 | virtual LIns* insLoad(LOpcode op, LIns* base, int32_t d, AccSet accSet, LoadQual loadQual) { |
1399 | return lastIns = out->insLoad(op, base, d, accSet, loadQual); |
1400 | } |
1401 | virtual LIns* insStore(LOpcode op, LIns* value, LIns* base, int32_t d, AccSet accSet) { |
1402 | return lastIns = out->insStore(op, value, base, d, accSet); |
1403 | } |
1404 | // args[] is in reverse order, ie. args[0] holds the rightmost arg. |
1405 | virtual LIns* insCall(const CallInfo *call, LIns* args[]) { |
1406 | return lastIns = out->insCall(call, args); |
1407 | } |
1408 | virtual LIns* insAlloc(int32_t size) { |
1409 | NanoAssert(size != 0);
1410 | return lastIns = out->insAlloc(size); |
1411 | } |
1412 | virtual LIns* insJtbl(LIns* index, uint32_t size) { |
1413 | return lastIns = out->insJtbl(index, size); |
1414 | } |
1415 | |
1416 | }; |
1417 | |
1418 | #if defined(VMCFG_OSR) && defined(DEBUG) |
1419 | FUNCTION(FUNCADDR(OSR::checkBugCompatibility), SIG1(V, P), osr_check_bugcompatibility)
1420 | #endif |
1421 | |
1422 | // Generate the prolog for a function with this C++ signature: |
1423 | // |
1424 | // <return-type> f(MethodEnv* env, int argc, void* args) |
1425 | // |
1426 | // argc is the number of arguments, not counting the receiver |
1427 | // (aka "this"). args points to the arguments in memory: |
1428 | // |
1429 | // [receiver] [arg1, arg2, ... ] |
1430 | // |
1431 | // the arguments in memory are typed according to the AS3 method |
1432 | // signature. types * and Object are represented as Atom, and all |
1433 | // other types are native pointers or values. return-type is whatever |
1434 | // the native type is for the AS3 return type; one of double, int32_t, |
1435 | // uint32_t, ScriptObject*, String*, Namespace*, Atom, or void. |
1436 | // |
1437 | // The stack frame layout of a jit-compiled function is determined by |
1438 | // the jit backend. Stack-allocated structs are declared in LIR with |
1439 | // a LIR_allocp instruction. Incoming parameters are declared with LIR_paramp |
1440 | // instructions, and any other local variables with function-body scope |
1441 | // and lifetime are declared with the expressions that compute them. |
1442 | // The backend will also allocate additional stack space for spilled values |
1443 | // and callee-saved registers. The VM and LIR do not currently depend on how |
1444 | // the backend organizes the stack frame. |
1445 | // |
1446 | // Incoming parameters: |
1447 | // |
1448 | // env_param (LIR_paramp, MethodEnv*) is the incoming MethodEnv* parameter |
1449 | // that provides access to the environment for this function and all vm services. |
1450 | // |
1451 | // argc_param (LIR_paramp, int32_t) the # of arguments that follow. Ignored |
1452 | // when the # of args is fixed, but otherwise used for optional arg processing |
1453 | // and/or creating the rest[] or arguments[] arrays for undeclared varargs. |
1454 | // |
1455 | // ap_param (LIR_paramp, uint32_t*) pointer to (argc+1) incoming arguments. |
1456 | // arguments are packed. doubles are sizeof(double), everything else is sizeof(Atom). |
1457 | // |
1458 | // Distinguished locals: |
1459 | // |
1460 | // methodFrame (LIR_allocp, MethodFrame*) is the current MethodFrame. in the prolog |
1461 | // we push this onto the call stack pointed to by AvmCore::currentMethodFrame, and |
1462 | // in the epilog we pop it back off. |
1463 | // |
1464 | // coreAddr (LIR_immi|LIR_immq) constant address of AvmCore*. used in lots of places. |
1465 | // undefConst (LIR_immi|LIR_immq) constant value = undefinedAtom. used all over. |
1466 | // |
1467 | // vars (LIR_allocp) storage for ABC stack frame variables. 8 bytes per variable, |
1468 | // always, laid out according to ABC param/local var numbering. The total number |
1469 | // is local_count + scope_depth + stack_depth, i.e. enough for the whole ABC frame. |
1470 | // values at any given point in the jit code are represented according to the
1471 | // statically known type of the variable at that point in the code. (The type and
1472 | // representation may change at different points.) CodegenDriver maintains
1473 | // the known static types of variables and exposes them via FrameState.
1474 | // |
1475 | // tags (LIR_allocp) SlotStorageType of each var in vars, one byte per variable. |
1476 | // |
1477 | // The contents of vars+tags are up-to-date at all labels and debugging safe points. |
1478 | // In between those points, the contents are stale; the JIT optimizes away
1479 | // stores and loads in straightline code. Additional dead stores |
1480 | // are elided by deadvars_analyze() and deadvars_kill(). |
1481 | // |
1482 | // Locals for Debugger use, only present when Debugger is in use: |
1483 | // |
1484 | // csn (LIR_allocp, CallStackNode). extra information about this call frame |
1485 | // used by the debugger and also used for constructing human-readable stack traces. |
1486 | // |
1487 | // Locals for Exception-handling, only present when method has try/catch blocks: |
1488 | // |
1489 | // _save_eip (LIR_allocp, intptr_t) storage for the current ABC-based "pc", used by exception |
1490 | // handling to determine which catch blocks are in scope. The value is an ABC |
1491 | // instruction offset, which is how catch handler records are indexed. |
1492 | // |
1493 | // _ef (LIR_allocp, ExceptionFrame) an instance of struct ExceptionFrame, including |
1494 | // a jmp_buf holding our setjmp() state, a pointer to the next outer ExceptionFrame, |
1495 | // and other junk. |
1496 | // |
1497 | // setjmpResult (LIR_call, int) result from calling setjmp; feeds a conditional branch |
1498 | // that surrounds the whole function body; logic to pick a catch handler and jump to it |
1499 | // is compiled after the function body. if setjmp returns a nonzero result then we |
1500 | // jump forward, pick a catch block, then jump backwards to the catch block. |
1501 | // |
1502 | |
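
// [Editor's sketch -- illustration only, not original AVM code.] The entry
// shape documented above, modeled for an AS3 method "function f(x:Number):int":
// the args area holds [receiver:Atom][x:double], argc == 1 (the receiver is
// not counted), and the return type is the native int32_t. VMPI_memcpy and
// the Atom/MethodEnv types are assumed to be available via avmplus.h.
typedef int32_t (*JittedIntFnSketch)(MethodEnv* env, int argc, void* args);
static int32_t callJittedSketch(JittedIntFnSketch fn, MethodEnv* env,
                                Atom receiver, double x)
{
    uint8_t args[sizeof(Atom) + sizeof(double)];          // packed: [receiver][x]
    VMPI_memcpy(args, &receiver, sizeof(Atom));           // receiver first
    VMPI_memcpy(args + sizeof(Atom), &x, sizeof(double)); // then arg1, 8 bytes
    return fn(env, /*argc=*/1, args);
}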
1503 | void CodegenLIR::writePrologue(const FrameState* state, const uint8_t* pc, |
1504 | CodegenDriver* driver) |
1505 | { |
1506 | this->state = state; |
1507 | this->driver = driver; |
1508 | this->code_pos = pc; |
1509 | this->try_from = driver->getTryFrom(); |
1510 | this->try_to = driver->getTryTo(); |
1511 | framesize = ms->frame_size(); |
1512 | |
1513 | int32_t codeLength = info->parse_code_length(); |
1514 | |
1515 | // Enable inline fastpath optimizations for sufficiently small methods. |
1516 | // |
1517 | // An overwhelming majority of methods in the Brightspot collection |
1518 | // are less than 50K bytes of ABC in length, and we see that JIT |
1519 | // times for methods of this length consistently fall in the 10ms |
1520 | // or less range with no outliers to suggest algorithmic pathologies. |
1521 | // The framesize limit is rather arbitrary, and is set to keep within |
1522 | // the boundaries of available Brightspot data. Note that large methods
1523 | // appear to be more common in Alchemy-generated code, with a 500KB method
1524 | // in Quake, for example. We are admittedly conservative here. |
1525 | // We see only a modest increase in native code size with the present |
1526 | // suite of inlining optimizations and opcode mix in the code we examined. |
1527 | // |
1528 | // The issue we are attempting to guard against is non-linear blowup of |
1529 | // analysis algorithms such as deadvars() that are stressed by the introduction |
1530 | // of many new labels and branches. Our heuristic seeks to confine the fastpath |
1531 | // optimization to an "envelope" for which we have good empirical data, without
1532 | // attempting to establish its true outer limits. Additionally, from first |
1533 | // principles, we know that specific characteristics of the flow graph are |
1534 | // more relevant than length alone, and even a count of the labels and branches |
1535 | // would likely give a more precise result than overall code length.
1536 | // The preferred long-term solution is to perform actual accounting of JIT
1537 | // resource consumption (memory and time), aborting when reasonable limits
1538 | // are exceeded. (A sketch of such accounting follows the block below.)
1539 | // |
1540 | if (core->config.jitconfig.opt_inline) { |
1541 | if (codeLength < 50000 && framesize < 1000) { |
1542 | inlineFastpath = true;
1543 | } else { |
1544 | //core->console << "disabling inline fastpaths, frame size " << framesize << " code length " << codeLength << "\n"; |
1545 | } |
1546 | } |
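// [Editor's sketch, hypothetical.] The "actual accounting" alternative
// mentioned in the comment above, as a C++11 lambda so it is legal at this
// point inside the function; a real implementation would thread the budget
// through each analysis pass and abort compilation when it is exhausted.
int64_t jitBudgetSketch = int64_t(1) << 20;      // assumed work-unit limit
auto chargeJitBudgetSketch = [&](int64_t cost) -> bool {
    jitBudgetSketch -= cost;                     // debit this pass's work
    return jitBudgetSketch >= 0;                 // false => bail to interpreter
};
(void)chargeJitBudgetSketch;                     // illustration only, never called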
1547 | |
1548 | if (info->needRestOrArguments() && info->lazyRest()) |
1549 | restLocal = ms->param_count()+1; |
1550 | |
1551 | frag = new (*lir_alloc) Fragment(pc verbose_only(, 0)); |
1552 | LirBuffer *prolog_buf = frag->lirbuf = new (*lir_alloc) LirBuffer(*lir_alloc); |
1553 | prolog_buf->abi = ABI_CDECL; |
1554 | |
1555 | lirout = new (*alloc1) LirBufWriter(prolog_buf, core->config.njconfig); |
1556 | |
1557 | verbose_only( |
1558 | vbNames = 0; |
1559 | if (verbose()) { |
1560 | vbNames = new (*lir_alloc) LInsPrinter(*lir_alloc, TR_NUM_USED_ACCS); |
1561 | vbNames->addrNameMap->addAddrRange(pool->core, sizeof(AvmCore), 0, "core"); |
1562 | prolog_buf->printer = vbNames; |
1563 | } |
1564 | ) |
1565 | debug_only( |
1566 | lirout = validate2 = new (*alloc1) ValidateWriter(lirout, prolog_buf->printer, |
1567 | "writePrologue(prologue)"); |
1568 | ) |
1569 | verbose_only( |
1570 | vbWriter = 0; |
1571 | if (verbose()) |
1572 | lirout = vbWriter = new (*alloc1) VerboseWriter(*alloc1, lirout, vbNames, &pool->codeMgr->log, "PROLOG"); |
1573 | ) |
1574 | prolog = new (*alloc1) PrologWriter(lirout); |
1575 | redirectWriter = lirout = new (*lir_alloc) LirWriter(prolog); |
1576 | if (core->config.njconfig.cseopt) |
1577 | lirout = cseFilter = new (*alloc1) CseFilter(lirout, TR_NUM_USED_ACCS, *alloc1); |
1578 | #if defined(NANOJIT_ARM) |
1579 | if (core->config.njconfig.soft_float) |
1580 | lirout = new (*alloc1) SoftFloatFilter(lirout); |
1581 | #endif |
1582 | lirout = new (*alloc1) ExprFilter(lirout); |
1583 | |
1584 | #ifdef DEBUGGER |
1585 | dbg_framesize = ms->local_count() + ms->max_scope(); |
1586 | #ifdef DEBUG |
1587 | DebuggerCheck *checker = NULL;
1588 | if (haveDebugger) { |
1589 | checker = new (*alloc1) DebuggerCheck(core, *alloc1, lirout, dbg_framesize); |
1590 | lirout = checker; |
1591 | } |
1592 | #endif // DEBUG |
1593 | #endif // DEBUGGER |
1594 | |
1595 | emitStart(*alloc1, prolog_buf, lirout); |
1596 | |
1597 | // add the VarTracker filter last because we want it to be first in line. |
1598 | lirout = varTracker = new (*alloc1) VarTracker(info, *alloc1, lirout, |
1599 | framesize, ms->scope_base(), ms->stack_base(), restLocal, codeLength); |
1600 | |
1601 | // last pc value that we generated a store for |
1602 | lastPcSave = NULL;
1603 | |
1604 | // |
1605 | // generate lir to define incoming method arguments. Stack |
1606 | // frame allocations follow. |
1607 | // |
1608 | |
1609 | env_param = lirout->insParam(0, 0); |
1610 | argc_param = lirout->insParam(1, 0); |
1611 | #ifdef AVMPLUS_64BIT |
1612 | argc_param = lirout->ins1(LIR_q2i, argc_param); |
1613 | #endif |
1614 | ap_param = lirout->insParam(2, 0); |
1615 | |
1616 | // allocate room for a MethodFrame structure |
1617 | methodFrame = insAlloc(sizeof(MethodFrame)); |
1618 | verbose_only( if (vbNames) { |
1619 | vbNames->lirNameMap->addName(methodFrame, "methodFrame"); |
1620 | }) |
1621 | |
1622 | coreAddr = InsConstPtr(core); |
1623 | |
1624 | // replicate MethodFrame ctor inline |
1625 | LIns* currentMethodFrame = loadIns(LIR_ldp, offsetof(AvmCore,currentMethodFrame), coreAddr, ACCSET_OTHER);
1626 | // save env in MethodFrame.envOrCodeContext |
1627 | // explicitly leave IS_EXPLICIT_CODECONTEXT clear |
1628 | // explicitly leave DXNS_NOT_NULL clear, dxns is effectively null without doing the store here. |
1629 | stp(env_param, methodFrame, offsetof(MethodFrame,envOrCodeContext), ACCSET_OTHER);
1630 | stp(currentMethodFrame, methodFrame, offsetof(MethodFrame,next), ACCSET_OTHER);
1631 | stp(methodFrame, coreAddr, offsetof(AvmCore,currentMethodFrame), ACCSET_OTHER);
1632 | #ifdef _DEBUG |
1633 | // poison MethodFrame.dxns since it's uninitialized by default |
1634 | stp(InsConstPtr((void*)(uintptr_t)0xdeadbeef), methodFrame, offsetof(MethodFrame,dxns), ACCSET_OTHER);
1635 | #endif |
1636 | |
1637 | #if defined(VMCFG_OSR) && defined(DEBUG) |
1638 | // Check that the BugCompatibility that would be used to OSR this function, |
1639 | // whether or not we actually did so, agrees with the value returned from |
1640 | // currentBugCompatibility() every time the function is executed. Note that |
1641 | // it would be preferable to modify currentBugCompatibility() such that it is |
1642 | // structurally impossible for this test to fail. |
1643 | callIns(FUNCTIONID(osr_check_bugcompatibility), 1, env_param);
1644 | #endif |
1645 | |
1646 | // allocate room for our local variables |
1647 | vars = insAlloc(framesize * VARSIZE); // room for double|Atom|int|pointer |
1648 | tags = insAlloc(framesize); // one tag byte per var |
1649 | prolog_buf->sp = vars; |
1650 | varTracker->init(vars, tags); |
1651 | |
1652 | verbose_only( if (prolog_buf->printer) { |
1653 | prolog_buf->printer->lirNameMap->addName(env_param, "env"); |
1654 | prolog_buf->printer->lirNameMap->addName(argc_param, "argc"); |
1655 | prolog_buf->printer->lirNameMap->addName(ap_param, "ap"); |
1656 | prolog_buf->printer->lirNameMap->addName(vars, "vars"); |
1657 | prolog_buf->printer->lirNameMap->addName(tags, "tags"); |
1658 | }) |
1659 | |
1660 | debug_only( |
1661 | void** extras = new (*alloc1) void*[2]; |
1662 | extras[0] = vars; |
1663 | extras[1] = tags; |
1664 | validate1->setCheckAccSetExtras(extras); |
1665 | validate2->setCheckAccSetExtras(extras); |
1666 | ) |
1667 | |
1668 | // stack overflow check - use methodFrame address as comparison |
1669 | LIns *d = loadIns(LIR_ldp, offsetof(AvmCore, minstack), coreAddr, ACCSET_OTHER);
1670 | LIns *c = binaryIns(LIR_ltup, methodFrame, d); |
1671 | CodegenLabel &begin_label = createLabel("begin"); |
1672 | branchToLabel(LIR_jf, c, begin_label); |
1673 | callIns(FUNCTIONID(handleStackOverflowMethodEnv), 1, env_param);
1674 | emitLabel(begin_label); |
1675 | |
1676 | // we emit the undefined constant here since we use it so often and |
1677 | // to ensure it dominates all uses. |
1678 | undefConst = InsConstAtom(undefinedAtom); |
1679 | |
1680 | // whether this sequence is interruptable or not. |
1681 | interruptable = ! info->isNonInterruptible(); |
1682 | |
1683 | // then space for the exception frame; be safe if it's an init stub
1684 | if (driver->hasReachableExceptions()) { |
1685 | // [_save_eip][ExceptionFrame] |
1686 | // offsets of local vars, rel to current ESP |
1687 | _save_eip = insAlloc(sizeof(intptr_t)); |
1688 | _ef = insAlloc(sizeof(ExceptionFrame)); |
1689 | verbose_only( if (vbNames) { |
1690 | vbNames->lirNameMap->addName(_save_eip, "_save_eip"); |
1691 | vbNames->lirNameMap->addName(_ef, "_ef"); |
1692 | }) |
1693 | } else { |
1694 | _save_eip = NULL;
1695 | _ef = NULL;
1696 | } |
1697 | |
1698 | #ifdef DEBUGGER |
1699 | if (haveDebugger) { |
1700 | // tell the sanity checker about vars and tags |
1701 | debug_only( checker->init(vars, tags); ) |
1702 | |
1703 | // Allocate space for the call stack |
1704 | csn = insAlloc(sizeof(CallStackNode)); |
1705 | verbose_only( if (vbNames) { |
1706 | vbNames->lirNameMap->addName(csn, "csn"); |
1707 | }) |
1708 | } |
1709 | #endif |
1710 | |
1711 | #ifdef DEBUG |
1712 | jit_sst = new (*alloc1) uint8_t[framesize]; |
1713 | memset(jit_sst, 0, framesize); |
1714 | #endif |
1715 | |
1716 | // |
1717 | // copy args to local frame |
1718 | // |
1719 | |
1720 | // copy required args, and initialize optional args. |
1721 | // this whole section only applies to functions that actually |
1722 | // have arguments. |
1723 | |
1724 | const int param_count = ms->param_count(); |
1725 | const int optional_count = ms->optional_count(); |
1726 | const int required_count = param_count - optional_count; |
1727 | |
1728 | LIns* apArg = ap_param; |
1729 | if (info->hasOptional()) |
1730 | { |
1731 | // compute offset of first optional arg |
1732 | int offset = 0; |
1733 | for (int i=0, n=required_count; i <= n; i++) |
1734 | offset += argSize(ms, i); |
1735 | |
1736 | // now copy the default optional values |
1737 | LIns* argcarg = argc_param; |
1738 | for (int i=0, n=optional_count; i < n; i++) |
1739 | { |
1740 | // first set the local[p+1] = defaultvalue |
1741 | int param = i + required_count; // 0..N |
1742 | int loc = param+1; |
1743 | |
1744 | LIns* defaultVal = InsConstAtom(ms->getDefaultValue(i)); |
1745 | defaultVal = atomToNativeRep(loc, defaultVal); |
1746 | localSet(loc, defaultVal, state->value(loc).traits); |
1747 | |
1748 | // then generate: if (argc > p) local[p+1] = arg[p+1] |
1749 | LIns* cmp = binaryIns(LIR_lei, argcarg, InsConst(param)); |
1750 | CodegenLabel& optional_label = createLabel("param_", i); |
1751 | branchToLabel(LIR_jt, cmp, optional_label); // will patch |
1752 | copyParam(loc, offset); |
1753 | emitLabel(optional_label); |
1754 | } |
1755 | } |
1756 | else |
1757 | { |
1758 | // !info->hasOptional() |
1759 | AvmAssert(optional_count == 0);
1760 | } |
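// [Editor's sketch, hypothetical.] Plain-C++ equivalent of the LIR emitted
// above for optional arguments (a C++11 lambda keeps the sketch legal
// mid-function): the default is stored unconditionally, then overwritten
// only when the caller actually supplied the argument.
auto initOptionalsSketch = [](intptr_t* locals, int argc, int required,
                              int optionals, const intptr_t* defaults,
                              const intptr_t* incoming) {
    for (int k = 0; k < optionals; k++) {
        int param = required + k;                    // 0-based parameter index
        locals[param + 1] = defaults[k];             // local[p+1] = defaultvalue
        if (argc > param)                            // if (argc > p)
            locals[param + 1] = incoming[param + 1]; //   local[p+1] = arg[p+1]
    }
};
(void)initOptionalsSketch;                           // illustration only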
1761 | |
1762 | // now set up the required args (we can ignore argc) |
1763 | // for (int i=0, n=param_count; i <= n; i++) |
1764 | // framep[i] = argv[i]; |
1765 | int offset = 0; |
1766 | for (int i=0, n=required_count; i <= n; i++) |
1767 | copyParam(i, offset); |
1768 | |
1769 | if (info->unboxThis()) |
1770 | { |
1771 | localSet(0, atomToNativeRep(0, localGet(0)), state->value(0).traits); |
1772 | } |
1773 | |
1774 | int firstLocal = 1+param_count; |
1775 | |
1776 | // Capture remaining args. |
1777 | // |
1778 | // Optimized ...rest and 'arguments': |
1779 | // |
1780 | // We avoid constructing the rest array if possible. An analysis in the |
1781 | // verifier sets the _lazyRest bit if access patterns to the rest argument |
1782 | // or the arguments array are only OBJ.length or OBJ[prop] for an arbitrary
1783 | // property; see comments in Verifier::verify. The former pattern
1784 | // results in an OP_restargc instruction, while the latter results in an |
1785 | // OP_restarg instruction. Those instructions will access an unconsed |
1786 | // rest array or arguments array when possible, and otherwise access a |
1787 | // constructed rest array. (For example, if prop turns out to be "slice" |
1788 | // we must construct the array. This will almost never happen.) They're |
1789 | // implemented via helper functions restargcHelper and restargHelper. |
1790 | // |
1791 | // The unconsed rest or arguments array, the argument count, the consed array, |
1792 | // and the flag that determines whether to use the unconsed or the consed array, |
1793 | // are represented as follows: |
1794 | // |
1795 | // - The unconsed array restArg is represented indirectly via ap_param and |
1796 | // rest_offset. |
1797 | // - The argument count restArgc is a LIR expression of type uint32 computed from |
1798 | // argc_param and param_count. |
1799 | // - The rest parameter local is an array; it is either null (no rest array
1800 | // consed yet) or a raw array pointer, so it doubles as the flag. The offset |
1801 | // of this variable is 1+param_count. |
1802 | // |
1803 | // The rest parameter local is passed by reference to restargHelper, which |
1804 | // may update it. |
1805 | // |
1806 | // The difference between a ...rest argument and an arguments array is that in
1807 | // the former case,
1808 | //
1809 | // restArgc = MAX(argc_param - param_count, 0)
1810 | // restArg = ap_param + rest_offset
1811 | // |
1812 | // while in the latter case |
1813 | // |
1814 | // restArgc = argc_param |
1815 | // restArg = ap_param + 1 |
1816 | // |
1817 | // restArg is computed in the code generation case for OP_restarg. |
1818 | |
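// [Editor's sketch, hypothetical.] The two restArgc computations described
// above, as C++11 lambdas (legal at this point mid-function): the ...rest
// case clamps at zero, the 'arguments' case does not.
auto restArgcForRestSketch = [](int32_t argc, int32_t params) -> int32_t {
    int32_t n = argc - params;
    return n < 0 ? 0 : n;                        // MAX(argc_param - param_count, 0)
};
auto restArgcForArgumentsSketch = [](int32_t argc) -> int32_t {
    return argc;                                 // arguments[] sees every argument
};
(void)restArgcForRestSketch;                     // illustration only
(void)restArgcForArgumentsSketch;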
1819 | if (info->needRest()) |
1820 | { |
1821 | if (info->lazyRest()) |
1822 | { |
1823 | LIns* x0 = binaryIns(LIR_subi, argc_param, InsConst(param_count)); |
1824 | LIns* x1 = binaryIns(LIR_lti, x0, InsConst(0)); |
1825 | restArgc = lirout->insChoose(x1, InsConst(0), x0, use_cmov); |
1826 | |
1827 | // Store a NULL array pointer |
1828 | localSet(firstLocal, InsConstPtr(0), ARRAY_TYPE);
1829 | } |
1830 | else |
1831 | { |
1832 | //framep[info->param_count+1] = createRest(env, argv, argc); |
1833 | // use csop so if rest value never used, we don't bother creating array |
1834 | LIns* rest = callIns(FUNCTIONID(createRestHelper), 3,
1835 | env_param, argc_param, apArg);
1836 | localSet(firstLocal, rest, ARRAY_TYPE);
1837 | } |
1838 | firstLocal++; |
1839 | } |
1840 | else if (info->needArguments()) |
1841 | { |
1842 | if (info->lazyRest()) |
1843 | { |
1844 | restArgc = argc_param; |
1845 | |
1846 | // Store a NULL array pointer |
1847 | localSet(firstLocal, InsConstPtr(0), ARRAY_TYPE);
1848 | } |
1849 | else { |
1850 | //framep[info->param_count+1] = createArguments(env, argv, argc); |
1851 | // use csop so if arguments never used, we don't create it |
1852 | LIns* arguments = callIns(FUNCTIONID(createArgumentsHelper), 3,
1853 | env_param, argc_param, apArg);
1854 | localSet(firstLocal, arguments, ARRAY_TYPE);
1855 | } |
1856 | firstLocal++; |
1857 | } |
1858 | |
1859 | // set remaining locals to undefined |
1860 | for (int i=firstLocal, n = ms->local_count(); i < n; i++) { |
1861 | AvmAssert(state->value(i).traits == NULL);
1862 | localSet(i, undefConst, NULL); // void would be more precise
1863 | } |
1864 | |
1865 | /// SWITCH PIPELINE FROM PROLOG TO BODY |
1866 | verbose_only( if (vbWriter) { vbWriter->flush();} ) |
1867 | // we have written the prolog to prolog_buf, now create a new |
1868 | // LirBuffer to hold the body, and redirect further output to the body. |
1869 | LirBuffer *body_buf = new (*lir_alloc) LirBuffer(*lir_alloc); |
1870 | LirWriter *body = new (*alloc1) LirBufWriter(body_buf, core->config.njconfig); |
1871 | skip_ins = body->insSkip(prolog->lastIns); |
1872 | debug_only( |
1873 | body = validate3 = new (*alloc1) ValidateWriter(body, vbNames, "writePrologue(body)"); |
1874 | validate3->setCheckAccSetExtras(extras); |
1875 | ) |
1876 | verbose_only( |
1877 | if (verbose()) { |
1878 | AvmAssert(vbNames != NULL); |
1879 | body_buf->printer = vbNames; |
1880 | body = vbWriter = new (*alloc1) VerboseWriter(*alloc1, body, vbNames, &pool->codeMgr->log); |
1881 | } |
1882 | ) |
1883 | redirectWriter->out = body; |
1884 | /// END SWITCH CODE |
1885 | |
1886 | varTracker->initNotNull(state); |
1887 | |
1888 | if (osr) |
1889 | emitOsrBranch(); |
1890 | |
1891 | // Generate code to initialize the object, if we are compiling an initializer. |
1892 | // This is intentionally before debugEnter(), to match interpreter behavior. |
1893 | if (info->isConstructor()) |
1894 | emitInitializers(); |
1895 | |
1896 | if (haveDebugger) |
1897 | emitDebugEnter(); |
1898 | |
1899 | if (driver->hasReachableExceptions()) { |
1900 | // _ef.beginTry(core); |
1901 | callIns(FUNCTIONID(beginTry), 2, _ef, coreAddr);
1902 | |
1903 | // Exception* setjmpResult = setjmp(_ef.jmpBuf); |
1904 | // ISSUE this needs to be a cdecl call |
1905 | LIns* jmpbuf = leaIns(offsetof(ExceptionFrame, jmpbuf), _ef);
1906 | setjmpResult = callIns(FUNCTIONID(fsetjmp), 2, jmpbuf, InsConst(0));
1907 | |
1908 | // If (setjmp() != 0) goto catch dispatcher, which we generate in the epilog. |
1909 | // Note that register contents following setjmp return via longjmp are not predictable. |
1910 | branchToLabel(LIR_jf, eqi0(setjmpResult), catch_label); |
1911 | } |
1912 | verbose_only( if (vbWriter) { vbWriter->flush();} ) |
1913 | } |
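
// [Editor's sketch, hypothetical -- not the VM's ExceptionFrame machinery.]
// The setjmp-based try/catch shape that writePrologue emits, as plain C++:
// a zero return from setjmp is the normal path; a nonzero return means a
// throw has longjmp'd back to this frame and a catch handler must be chosen.
#include <setjmp.h>   // harmless if already pulled in via avmplus.h
static int runWithCatchSketch(void (*body)(jmp_buf), int (*chooseHandler)())
{
    jmp_buf ef;                        // models ExceptionFrame.jmpbuf
    if (setjmp(ef) == 0) {             // normal entry: setjmp returned directly
        body(ef);                      // body may longjmp(ef, 1) to "throw"
        return -1;                     // fell off the end: no exception here
    }
    return chooseHandler();            // longjmp landed: pick a catch block
}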
1914 | |
1915 | #ifdef VMCFG_OSR |
1916 | FUNCTION(FUNCADDR(OSR::adjustFrame), SIG4(B, P, P, P, P), osr_adjust_frame)
1917 | |
1918 | // Emit code to call OSR::adjust_frame and conditionally enter loop. |
1919 | // Note that adjust_frame will call debugEnter if necessary, since |
1920 | // the loop-branch will skip the normal call to debugEnter. |
1921 | void CodegenLIR::emitOsrBranch() |
1922 | { |
1923 | // Compiling an OSR entry point; save FrameState at the OSR loop header, |
1924 | // for later use by adjust_frame(). |
1925 | FrameState* osr_state = mmfx_new(FrameState(ms));
1926 | const FrameState* loop_state = driver->getFrameState(osr->osrPc()); |
1927 | AvmAssert(loop_state->targetOfBackwardsBranch);
1928 | osr_state->init(loop_state); |
1929 | osr->setFrameState(osr_state); |
1930 | LIns *isOSR = callIns(FUNCTIONID(osr_adjust_frame), 4,
1931 | methodFrame, |
1932 | haveDebugger ? csn : InsConstPtr(0), |
1933 | vars, tags); |
1934 | branchToAbcPos(LIR_jt, isOSR, osr->osrPc()); |
1935 | } |
1936 | #else |
1937 | void CodegenLIR::emitOsrBranch() { } |
1938 | #endif |
1939 | |
1940 | void CodegenLIR::emitInitializers() |
1941 | { |
1942 | struct JitInitVisitor: public InitVisitor { |
1943 | CodegenLIR *jit; |
1944 | JitInitVisitor(CodegenLIR *jit) : jit(jit) {} |
1945 | virtual ~JitInitVisitor() {} |
1946 | void defaultVal(Atom value, uint32_t slot, Traits* slotType) { |
1947 | #ifdef NJ_VERBOSE |
1948 | if (jit->verbose()) { |
1949 | jit->vbWriter->flush(); |
1950 | jit->core->console << "init [" << slot << "] = " << asAtom(value) << "\n"; |
1951 | } |
1952 | #endif |
1953 | LIns* defaultVal = jit->InsConstAtom(value); |
1954 | defaultVal = jit->atomToNativeRep(slotType, defaultVal); |
1955 | jit->emitSetslot(OP_setslot, slot, 0, defaultVal); |
1956 | } |
1957 | }; |
1958 | JitInitVisitor visitor(this); |
1959 | Traits* t = info->declaringTraits(); |
1960 | const TraitsBindings *tb = t->getTraitsBindings(); |
1961 | t->visitInitBody(&visitor, toplevel, tb); |
1962 | } |
1963 | |
1964 | void CodegenLIR::emitDebugEnter() |
1965 | { |
1966 | #ifdef DEBUGGER |
1967 | for (int i = ms->scope_base(), n = ms->stack_base(); i < n; ++i) |
1968 | localSet(i, undefConst, VOID_TYPE);
1969 | |
1970 | callIns(FUNCTIONID(debugEnter), 5,
1971 | env_param, |
1972 | tags, |
1973 | csn, |
1974 | vars, |
1975 | driver->hasReachableExceptions() ? _save_eip : InsConstPtr(0)); |
1976 | #endif // DEBUGGER |
1977 | } |
1978 | |
1979 | void CodegenLIR::copyParam(int i, int& offset) { |
1980 | LIns* apArg = ap_param; |
1981 | Traits* type = ms->paramTraits(i); |
1982 | LIns *arg; |
1983 | switch (bt(type)) { |
1984 | case BUILTIN_number: |
1985 | arg = loadIns(LIR_ldd, offset, apArg, ACCSET_OTHER, LOAD_CONST); |
1986 | offset += sizeof(double); |
1987 | break; |
1988 | case BUILTIN_int: |
1989 | case BUILTIN_uint: |
1990 | case BUILTIN_boolean: |
1991 | // in the args these are widened to intptr_t or uintptr_t, so truncate here. |
1992 | arg = p2i(loadIns(LIR_ldp, offset, apArg, ACCSET_OTHER, LOAD_CONST)); |
1993 | offset += sizeof(Atom); |
1994 | break; |
1995 | default: |
1996 | arg = loadIns(LIR_ldp, offset, apArg, ACCSET_OTHER, LOAD_CONST); |
1997 | offset += sizeof(Atom); |
1998 | break; |
1999 | } |
2000 | localSet(i, arg, type); |
2001 | } |
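
// [Editor's sketch, hypothetical.] What copyParam reads, in plain C++: in the
// args area every non-double value is widened to a full word, so int/uint/bool
// are loaded word-sized and then truncated (the p2i step above), while doubles
// occupy their natural 8 bytes. The area is assumed word-aligned, as the VM
// guarantees for ap.
static int32_t loadIntParamSketch(const uint8_t* ap, int32_t offset)
{
    intptr_t wide = *(const intptr_t*)(ap + offset);  // LIR_ldp: word-sized load
    return (int32_t)wide;                             // p2i: truncate to 32 bits
}
static double loadDoubleParamSketch(const uint8_t* ap, int32_t offset)
{
    return *(const double*)(ap + offset);             // LIR_ldd: 8-byte load
}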
2002 | |
2003 | void CodegenLIR::emitCopy(int src, int dest) { |
2004 | localSet(dest, localCopy(src), state->value(src).traits); |
2005 | } |
2006 | |
2007 | void CodegenLIR::emitGetscope(int scope_index, int dest) |
2008 | { |
2009 | Traits* t = info->declaringScope()->getScopeTraitsAt(scope_index); |
2010 | LIns* scope = loadEnvScope(); |
2011 | LIns* scopeobj = loadIns(LIR_ldp, offsetof(ScopeChain,_scopes) + scope_index*sizeof(Atom), scope, ACCSET_OTHER, LOAD_CONST);
2012 | localSet(dest, atomToNativeRep(t, scopeobj), t); |
2013 | } |
2014 | |
2015 | void CodegenLIR::emitSwap(int i, int j) { |
2016 | LIns* t = localCopy(i); |
2017 | localSet(i, localCopy(j), state->value(j).traits); |
2018 | localSet(j, t, state->value(i).traits); |
2019 | } |
2020 | |
2021 | void CodegenLIR::emitKill(int i) |
2022 | { |
2023 | localSet(i, undefConst, NULL);
2024 | } |
2025 | |
2026 | #ifdef VMCFG_FASTPATH_ADD |
2027 | |
2028 | #ifdef VMCFG_FASTPATH_ADD_INLINE |
2029 | |
2030 | // Emit code for the fastpath for int + intptr => intptr addition. |
2031 | // Branch to fallback label if rhs is not intptr, or result cannot be so represented due to overflow. |
2032 | // |
2033 | // 64-bit: |
2034 | // |
2035 | // FAST_ADD_INT_TO_ATOM(lhs, rhs, result, fallback) |
2036 | // if ((rhs & kAtomTypeMask) != kIntptrType) goto fallback; # punt if atom argument rhs is not intptr |
2037 | // intptr_t lhsExtended = i; # extend int argument lhs to atom size |
2038 | // intptr_t lhsShifted = lhsExtended << (kAtomTypeSize+8); # align lhs with 53-bit payload of rhs (see next step) |
2039 | // intptr_t rhsShifted = rhs << 8; # left-justify 53-bit payload of rhs (note rhs is tagged) |
2040 | // intptr_t sumShifted = lhsShifted + rhsShifted; # add aligned values, producing 56-bit tagged sum, left-justified |
2041 | // if (OVERFLOW) goto fallback; # punt on overflow, as sum will not fit in 53-bit field allotted |
2042 | // intptr_t sum = sumShifted >> 8; # right-justify 56-bit tagged value (53 payload bits + 3 tag bits) |
2043 | // result = sum; # result is intptr atom, properly tagged |
2044 | // |
2045 | // 32-bit: |
2046 | // |
2047 | // FAST_ADD_INT_TO_ATOM(lhs, rhs, result, fallback) |
2048 | // if ((rhs & kAtomTypeMask) != kIntptrType) goto fallback; # punt if atom argument rhs is not intptr |
2049 | // intptr_t lhsExtended = i; # extend int argument lhs to atom size (a nop on 32-bit platforms) |
2050 | // intptr_t lhsShifted = lhsExtended << kAtomTypeSize; # left-justify 29-bit payload |
2051 | // intptr_t lhsRestored = lhsShifted >> kAtomTypeSize; # restore |
2052 | // if (lhsRestored != lhsExtended) goto fallback; # high-order bits were lost, value will not fit in 29-bit field allotted |
2053 | // intptr_t sum = lhsShifted + rhs; # add left-justified lhs to tagged rhs, resulting in tagged result |
2054 | // if (OVERFLOW) goto fallback; # punt on overflow, as sum will not fit in 29-bit field allotted |
2055 | // result = sum; # result is intptr atom, properly tagged |
2056 | // |
2057 | // Note: If lhs will not fit in 29 bits, the fastpath will fail even if the result may fit. We expect this to be an |
2058 | // uncommon case. Generating code as we do avoids extra tag manipulations that would be required if we did a full |
2059 | // 32-bit addition followed by a range check. |
2060 | |
2061 | void CodegenLIR::emitIntPlusAtomFastpath(int i, Traits* type, LIns* lhs, LIns* rhs, CodegenLabel &fallback) |
2062 | { |
2063 | LIns* tag = andp(rhs, AtomConstants::kAtomTypeMask); |
2064 | branchToLabel(LIR_jf, eqp(tag, AtomConstants::kIntptrType), fallback); |
2065 | LIns* lhsExtended = i2p(lhs); |
2066 | #ifdef AVMPLUS_64BIT |
2067 | // int argument is guaranteed to fit, but must restrict intptr result to the 53-bit range
2068 | // TODO: Consider maintaining 53-bit intptrs in pre-shifted form. |
2069 | LIns* lhsShifted = lshp(lhsExtended, AtomConstants::kAtomTypeSize + 8); |
2070 | LIns* rhsShifted = lshp(rhs, 8); |
2071 | LIns* sumShifted = branchJovToLabel(LIR_addjovp, lhsShifted, rhsShifted, fallback); |
2072 | LIns* sum = rshp(sumShifted, 8); |
2073 | #else |
2074 | // verify that int value will fit in intptr |
2075 | LIns* lhsShifted = lshp(lhsExtended, AtomConstants::kAtomTypeSize); |
2076 | LIns* lhsRestored = rshp(lhsShifted, AtomConstants::kAtomTypeSize); |
2077 | branchToLabel(LIR_jf, binaryIns(LIR_eqp, lhsRestored, lhsExtended), fallback); |
2078 | LIns* sum = branchJovToLabel(LIR_addjovp, lhsShifted, rhs, fallback); |
2079 | #endif |
2080 | localSet(i, sum, type); |
2081 | } |
2082 | #endif /* VMCFG_FASTPATH_ADD_INLINE */ |
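
// [Editor's sketch, hypothetical.] The 32-bit fastpath above in plain C++.
// The tag constants are assumptions (3 low tag bits, intptr tag value 6);
// __builtin_add_overflow (GCC/Clang) stands in for LIR_addjovp, and the left
// shift goes through uintptr_t to sidestep signed-shift UB that the emitted
// machine code does not care about.
static bool fastAddIntToAtomSketch(int32_t lhs, intptr_t rhs, intptr_t* result)
{
    const intptr_t kTagBits = 3, kTagMask = 7, kIntptrTag = 6;  // assumed values
    if ((rhs & kTagMask) != kIntptrTag)
        return false;                                // rhs is not an intptr atom
    intptr_t lhsExtended = (intptr_t)lhs;
    intptr_t lhsShifted = (intptr_t)((uintptr_t)lhsExtended << kTagBits);
    if ((lhsShifted >> kTagBits) != lhsExtended)
        return false;                                // lhs won't fit in the payload
    intptr_t sum;
    if (__builtin_add_overflow(lhsShifted, rhs, &sum))
        return false;                                // payload overflow
    *result = sum;                                   // tag bits carried in from rhs
    return true;
}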
2083 | |
2084 | // Emit code for int + atom => atom addition. |
2085 | // The usual helper function call may be bypassed with a fastpath for the int + intptr => intptr case: |
2086 | // |
2087 | // intptr_t rhsa = CONVERT_TO_ATOM(rhs); # box atom if needed |
2088 | // FAST_ADD_INT_TO_ATOM(lhs, rhsa, result, fallback); # handle the fastpath, branching to fallback label on failure |
2089 | // goto done; # fastpath succeeded |
2090 | // fallback: |
2091 | // result = op_add_a_ia(coreAddr, lhs, rhsa); # fastpath failed, fall back to helper function |
2092 | // done: |
2093 | |
2094 | void CodegenLIR::emitAddIntToAtom(int i, int j, Traits* type) |
2095 | { |
2096 | LIns* rhs = loadAtomRep(j); |
2097 | LIns* lhs = localGet(i); |
2098 | |
2099 | #ifdef VMCFG_FASTPATH_ADD_INLINE |
2100 | if (inlineFastpath) { |
2101 | CodegenLabel fallback("fallback"); |
2102 | CodegenLabel done("done"); |
2103 | suspendCSE(); |
2104 | emitIntPlusAtomFastpath(i, type, lhs, rhs, fallback); |
2105 | JIT_EVENT(jit_add_a_ia_fast_intptr);
2106 | branchToLabel(LIR_j, NULL, done);
2107 | emitLabel(fallback);
2108 | LIns* out = callIns(FUNCTIONID(op_add_a_ia), 3, coreAddr, lhs, rhs);
2109 | localSet(i, out, type);
2110 | JIT_EVENT(jit_add_a_ia_slow);
2111 | emitLabel(done); |
2112 | resumeCSE(); |
2113 | return; |
2114 | } |
2115 | #endif |
2116 | |
2117 | LIns* out = callIns(FUNCTIONID(op_add_a_ia), 3, coreAddr, lhs, rhs);
2118 | localSet(i, out, type);
2119 | JIT_EVENT(jit_add_a_ia);
2120 | } |
2121 | |
2122 | // Emit code for double + atom => atom addition. |
2123 | |
2124 | void CodegenLIR::emitAddDoubleToAtom(int i, int j, Traits* type) |
2125 | { |
2126 | LIns* rhs = loadAtomRep(j); |
2127 | LIns* lhs = localGetf(i); |
2128 | LIns* out = callIns(FUNCTIONID(op_add_a_da), 3, coreAddr, lhs, rhs);
2129 | localSet(i, out, type);
2130 | JIT_EVENT(jit_add_a_da);
2131 | } |
2132 | |
2133 | // Emit code for atom + int => atom addition. |
2134 | // The usual helper function call may be bypassed with a fastpath for the intptr + int => intptr case: |
2135 | // |
2136 | // intptr_t lhsa = CONVERT_TO_ATOM(lhs); # box atom if needed |
2137 | // FAST_ADD_INT_TO_ATOM(rhs, lhsa, result, fallback); # handle the fastpath, note it is OK to commute arguments here |
2138 | // goto done; # fastpath succeeded |
2139 | // fallback: |
2140 | // result = op_add_a_ai(coreAddr, lhsa, rhs); # fastpath failed, fall back to helper function |
2141 | // done: |
2142 | |
2143 | void CodegenLIR::emitAddAtomToInt(int i, int j, Traits* type) |
2144 | { |
2145 | LIns* lhs = loadAtomRep(i); |
2146 | LIns* rhs = localGet(j); |
2147 | |
2148 | #ifdef VMCFG_FASTPATH_ADD_INLINE |
2149 | if (inlineFastpath) { |
2150 | CodegenLabel fallback("fallback"); |
2151 | CodegenLabel done("done"); |
2152 | suspendCSE(); |
2153 | emitIntPlusAtomFastpath(i, type, rhs, lhs, fallback); |
2154 | JIT_EVENT(jit_add_a_ai_fast_intptr);
2155 | branchToLabel(LIR_j, NULL, done);
2156 | emitLabel(fallback);
2157 | LIns* out = callIns(FUNCTIONID(op_add_a_ai), 3, coreAddr, lhs, rhs);
2158 | localSet(i, out, type);
2159 | JIT_EVENT(jit_add_a_ai_slow);
2160 | emitLabel(done); |
2161 | resumeCSE(); |
2162 | return; |
2163 | } |
2164 | #endif |
2165 | |
2166 | LIns* out = callIns(FUNCTIONID(op_add_a_ai), 3, coreAddr, lhs, rhs);
2167 | localSet(i, out, type);
2168 | JIT_EVENT(jit_add_a_ai);
2169 | } |
2170 | |
2171 | // Emit code for atom + double => atom addition. |
2172 | |
2173 | void CodegenLIR::emitAddAtomToDouble(int i, int j, Traits* type) |
2174 | { |
2175 | LIns* lhs = loadAtomRep(i); |
2176 | LIns* rhs = localGetf(j); |
2177 | LIns* out = callIns(FUNCTIONID(op_add_a_ad), 3, coreAddr, lhs, rhs);
2178 | localSet(i, out, type);
2179 | JIT_EVENT(jit_add_a_ad);
2180 | } |
2181 | |
2182 | // Emit code for atom + atom => atom addition. |
2183 | // |
2184 | // We implement a fastpath for the intptr + intptr => intptr case: |
2185 | // |
2186 | // 64-bit: |
2187 | // |
2188 | // intptr_t lhsa = CONVERT_TO_ATOM(lhs); # box atom if needed |
2189 | // intptr_t rhsa = CONVERT_TO_ATOM(rhs); # box atom if needed |
2190 | // if (((lhsa ^ kIntptrType) | (rhsa ^ kIntptrType)) & kAtomTypeMask) goto fallback; |
2191 | // # both arguments are intptr atoms |
2192 | // intptr_t lhsStripped = lhsa - kIntptrtype; # zero out tag bits on lhs |
2193 | // intptr_t lhsShifted = lhsStripped << 8; # left-justify 53-bit payload (followed by 0s in tag position) |
2194 | // intptr_t rhsShifted = rhsa << 8; # align rhs payload and tag with left-justified lhs |
2195 | // intptr_t sumShifted = lhsShifted + rhsShifted; # add aligned values, producing 53-bit left-justified sum followed by tag |
2196 | // if (OVERFLOW) goto fallback; |
2197 | // result = sumShifted >> 8; # right-justify tagged sum (53-bit payload + 3-bit tag) |
2198 | // goto done; |
2199 | // fallback: |
2200 | // result = op_add_a_aa(coreAddr, lhsa, rhsa); # handle the general case out-of-line |
2201 | // done: |
2202 | // |
2203 | // 32-bit: |
2204 | // |
2205 | // intptr_t lhsa = CONVERT_TO_ATOM(lhs); # box atom if needed |
2206 | // intptr_t rhsa = CONVERT_TO_ATOM(rhs); # box atom if needed |
2207 | // if (((lhsa ^ kIntptrType) | (rhsa ^ kIntptrType)) & kAtomTypeMask) goto fallback; |
2208 | // # both arguments are intptr atoms |
2209 | // intptr_t lhsStripped = lhsa - kIntptrtype; # zero out tag bits on lhs (note rhs retains its tag) |
2210 | // intptr_t sum = lhsStripped + rhs; # add, producing 29-bit sum followed by 3-bit tag |
2211 | // if (OVERFLOW) goto fallback; |
2212 | // result = sum; |
2213 | // goto done; |
2214 | // fallback: |
2215 | // result = op_add_a_aa(coreAddr, lhsa, rhsa); # handle the general case out-of-line |
2216 | // done: |
2217 | |
2218 | void CodegenLIR::emitAddAtomToAtom(int i, int j, Traits* type) |
2219 | { |
2220 | LIns* lhs = loadAtomRep(i); |
2221 | LIns* rhs = loadAtomRep(j); |
2222 | |
2223 | #ifdef VMCFG_FASTPATH_ADD_INLINE |
2224 | if (inlineFastpath) { |
2225 | CodegenLabel fallback("fallback"); |
2226 | CodegenLabel done("done"); |
2227 | // intptr + intptr fastpath |
2228 | suspendCSE(); |
2229 | LIns* t0 = xorp(lhs, AtomConstants::kIntptrType); |
2230 | LIns* t1 = xorp(rhs, AtomConstants::kIntptrType); |
2231 | LIns* t2 = binaryIns(LIR_orp, t0, t1); |
2232 | LIns* t3 = andp(t2, AtomConstants::kAtomTypeMask); |
2233 | branchToLabel(LIR_jf, eqp0(t3), fallback); |
2234 | LIns* lhsStripped = subp(lhs, AtomConstants::kIntptrType); |
2235 | #ifdef AVMPLUS_64BIT |
2236 | // restrict range of intptr result to 53 bits |
2237 | // since 64-bit int atoms expect exactly 53 bits of precision, shift bit 53+3 up into the sign bit |
2238 | LIns* lhsShifted = lshp(lhsStripped, 8); |
2239 | LIns* rhsShifted = lshp(rhs, 8); |
2240 | LIns* sumShifted = branchJovToLabel(LIR_addjovp, lhsShifted, rhsShifted, fallback); |
2241 | LIns* sum = rshp(sumShifted, 8); |
2242 | #else |
2243 | LIns* sum = branchJovToLabel(LIR_addjovp, lhsStripped, rhs, fallback); |
2244 | #endif |
2245 | localSet(i, sum, type); |
2246 | JIT_EVENT(jit_add_a_aa_fast_intptr);
2247 | branchToLabel(LIR_j, NULL, done);
2248 | emitLabel(fallback);
2249 | LIns* out = callIns(FUNCTIONID(op_add_a_aa), 3, coreAddr, lhs, rhs);
2250 | localSet(i, out, type);
2251 | JIT_EVENT(jit_add_a_aa_slow);
2252 | emitLabel(done); |
2253 | resumeCSE(); |
2254 | return; |
2255 | } |
2256 | #endif |
2257 | |
2258 | LIns* out = callIns(FUNCTIONID(op_add_a_aa), 3, coreAddr, lhs, rhs);
2259 | localSet(i, atomToNativeRep(type, out), type);
2260 | JIT_EVENT(jit_add_a_aa);
2261 | } |
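
// [Editor's sketch, hypothetical.] The single-branch "are both atoms intptr?"
// test used above: x ^ kIntptrType clears the low tag bits exactly when x
// carries the intptr tag, so OR-ing the two XOR results and masking tests
// both tags with one compare-and-branch. Constants as in the earlier sketch.
static bool bothIntptrSketch(intptr_t a, intptr_t b)
{
    const intptr_t kTagMask = 7, kIntptrTag = 6;     // assumed values
    return (((a ^ kIntptrTag) | (b ^ kIntptrTag)) & kTagMask) == 0;
}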
2262 | |
2263 | #else /* VMCFG_FASTPATH_ADD */ |
2264 | |
2265 | void CodegenLIR::emitAddAtomToAtom(int i, int j, Traits* type) |
2266 | { |
2267 | LIns* lhs = loadAtomRep(i); |
2268 | LIns* rhs = loadAtomRep(j); |
2269 | LIns* out = callIns(FUNCTIONID(op_add), 3, coreAddr, lhs, rhs);
2270 | localSet(i, atomToNativeRep(type, out), type);
2271 | JIT_EVENT(jit_add);
2272 | } |
2273 | |
2274 | #endif /* VMCFG_FASTPATH_ADD */ |
2275 | |
2276 | void CodegenLIR::emitAdd(int i, int j, Traits* type) |
2277 | { |
2278 | const FrameValue& val1 = state->value(i); |
2279 | const FrameValue& val2 = state->value(j); |
2280 | if ((val1.traits == STRING_TYPE && val1.notNull) || (val2.traits == STRING_TYPE && val2.notNull)) {
2281 | // string concatenation
2282 | AvmAssert(type == STRING_TYPE);
2283 | LIns* lhs = convertToString(i, true);
2284 | LIns* rhs = convertToString(j, true);
2285 | LIns* out = callIns(FUNCTIONID(concatStrings), 3, coreAddr, lhs, rhs);
2286 | localSet(i, out, type);
2287 | JIT_EVENT(jit_add_a_ss);
2288 | } else if (val1.traits && val2.traits && val1.traits->isNumeric() && val2.traits->isNumeric()) { |
2289 | // numeric + numeric |
2290 | // TODO: The tests for isNumeric() above could be isNumericOrBool(), |
2291 | // but a corresponding change would be needed in the verifier, resulting |
2292 | // in a slight change to the verifier's type inference algorithm. |
2293 | AvmAssert(type == NUMBER_TYPE);
2294 | LIns* lhs = coerceToNumber(i);
2295 | LIns* rhs = coerceToNumber(j);
2296 | localSet(i, binaryIns(LIR_addd, lhs, rhs), type);
2297 | JIT_EVENT(jit_add_a_nn);
2298 | #ifdef VMCFG_FASTPATH_ADD |
2299 | // If we arrive here, at least one argument is not known to be of a numeric type. |
2300 | // Thus, having determined one argument to be of a known numeric type, we will coerce |
2301 | // the other to an atom. We speculate that the other argument will already be an atom |
2302 | // of type kIntptrType or kDoubleType, checking for these cases first. |
2303 | } else if (val1.traits == INT_TYPE) {
2304 | // integer + atom
2305 | AvmAssert(type == OBJECT_TYPE);
2306 | emitAddIntToAtom(i, j, type);
2307 | } else if (val1.traits == NUMBER_TYPE) {
2308 | // double + atom
2309 | AvmAssert(type == OBJECT_TYPE);
2310 | emitAddDoubleToAtom(i, j, type);
2311 | } else if (val2.traits == INT_TYPE) {
2312 | // atom + integer
2313 | AvmAssert(type == OBJECT_TYPE);
2314 | emitAddAtomToInt(i, j, type);
2315 | } else if (val2.traits == NUMBER_TYPE) {
2316 | // atom + double
2317 | AvmAssert(type == OBJECT_TYPE);
2318 | emitAddAtomToDouble(i, j, type);
2319 | #endif
2320 | } else {
2321 | // Neither argument is known to be of a numeric type, so coerce both to atoms.
2322 | AvmAssert(type == OBJECT_TYPE);
2323 | emitAddAtomToAtom(i, j, type); |
2324 | } |
2325 | } |
2326 | |
2327 | void CodegenLIR::writeBlockStart(const FrameState* state) |
2328 | { |
2329 | this->state = state; |
2330 | // get the saved label for our block start and tie it to this location |
2331 | CodegenLabel& label = getCodegenLabel(state->abc_pc); |
2332 | emitLabel(label); |
2333 | emitSetPc(state->abc_pc); |
2334 | |
2335 | #ifdef DEBUG |
2336 | memset(jit_sst, 0, framesize); |
2337 | #endif |
2338 | |
2339 | // If this is a backwards branch, generate an interrupt check. |
2340 | if (interruptable && core->config.interrupts && state->targetOfBackwardsBranch) { |
2341 | LIns* interrupted = loadIns(LIR_ldi, offsetof(AvmCore,interrupted)__builtin_offsetof(AvmCore, interrupted), |
2342 | coreAddr, ACCSET_OTHER, LOAD_VOLATILE); |
2343 | LIns* cond = binaryIns(LIR_eqi, interrupted, InsConst(AvmCore::NotInterrupted)); |
2344 | branchToLabel(LIR_jf, cond, interrupt_label); |
2345 | } |
2346 | } |
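
// [Editor's sketch, hypothetical -- flag and handler names are assumptions.]
// The backedge poll emitted above, as plain C++: a volatile flag is re-read
// on every backward branch, so a long-running loop notices an interrupt
// request without any cross-thread signaling into the jitted code.
static volatile int interruptedSketch = 0;           // set by another thread
static void hotLoopSketch(void (*body)(), void (*handleInterrupt)())
{
    for (;;) {                                       // loops forever; poll only
        body();                                      // one loop iteration
        if (interruptedSketch != 0)                  // the LIR_ldi + LIR_eqi pair
            handleInterrupt();                       // jump to interrupt_label
    }
}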
2347 | |
2348 | void CodegenLIR::writeOpcodeVerified(const FrameState* state, const uint8_t*, AbcOpcode) |
2349 | { |
2350 | verbose_only( if (vbWriter) { vbWriter->flush();} ) |
2351 | #ifdef DEBUG |
2352 | this->state = NULL; // prevent access to stale state
2353 | int scopeTop = ms->scope_base() + state->scopeDepth; |
2354 | for (int i=0, n=state->sp()+1; i < n; i++) { |
2355 | if (i >= scopeTop && i < ms->stack_base()) |
2356 | continue; |
2357 | const FrameValue& v = state->value(i); |
2358 | AvmAssert(!jit_sst[i] || jit_sst[i] == v.sst_mask);
2359 | } |
2360 | #else |
2361 | (void)state; |
2362 | #endif |
2363 | } |
2364 | |
2365 | // this is a no-op for the JIT because we do all label patching in emitLabel(). |
2366 | void CodegenLIR::writeFixExceptionsAndLabels(const FrameState*, const uint8_t*) |
2367 | {} |
2368 | |
2369 | void CodegenLIR::write(const FrameState* state, const uint8_t* pc, AbcOpcode opcode, Traits *type) |
2370 | { |
2371 | //AvmLog("CodegenLIR::write %x\n", opcode); |
2372 | this->state = state; |
2373 | emitSetPc(pc); |
2374 | const uint8_t* nextpc = pc; |
2375 | unsigned int imm30=0, imm30b=0; |
2376 | int imm8=0, imm24=0; |
2377 | AvmCore::readOperands(nextpc, imm30, imm24, imm30b, imm8); |
2378 | int sp = state->sp(); |
2379 | |
2380 | switch (opcode) { |
2381 | case OP_nop: |
2382 | case OP_pop: |
2383 | case OP_label: |
2384 | // do nothing |
2385 | break; |
2386 | case OP_getlocal0: |
2387 | case OP_getlocal1: |
2388 | case OP_getlocal2: |
2389 | case OP_getlocal3: |
2390 | imm30 = opcode-OP_getlocal0; |
2391 | // hack imm30 and fall through |
2392 | case OP_getlocal: |
2393 | emitCopy(imm30, sp+1); |
2394 | break; |
2395 | case OP_setlocal0: |
2396 | case OP_setlocal1: |
2397 | case OP_setlocal2: |
2398 | case OP_setlocal3: |
2399 | imm30 = opcode-OP_setlocal0; |
2400 | // hack imm30 and fall through |
2401 | case OP_setlocal: |
2402 | emitCopy(sp, imm30); |
2403 | break; |
2404 | case OP_pushtrue: |
2405 | AvmAssert(type == BOOLEAN_TYPE);
2406 | emitIntConst(sp+1, 1, type); |
2407 | break; |
2408 | case OP_pushfalse: |
2409 | AvmAssert(type == BOOLEAN_TYPE);
2410 | emitIntConst(sp+1, 0, type); |
2411 | break; |
2412 | case OP_pushnull: |
2413 | AvmAssert(type == NULL_TYPE);
2414 | emitPtrConst(sp+1, 0, type); |
2415 | break; |
2416 | case OP_pushundefined: |
2417 | AvmAssert(type == VOID_TYPE);
2418 | emitPtrConst(sp+1, (void*)undefinedAtom, type); |
2419 | break; |
2420 | case OP_pushshort: |
2421 | AvmAssert(type == INT_TYPE);
2422 | emitIntConst(sp+1, (signed short)imm30, type); |
2423 | break; |
2424 | case OP_pushbyte: |
2425 | AvmAssert(type == INT_TYPE);
2426 | emitIntConst(sp+1, (signed char)imm8, type); |
2427 | break; |
2428 | case OP_pushstring: |
2429 | AvmAssert(type == STRING_TYPE);
2430 | emitPtrConst(sp+1, pool->getString(imm30), type); |
2431 | break; |
2432 | case OP_pushnamespace: |
2433 | AvmAssert(type == NAMESPACE_TYPE);
2434 | emitPtrConst(sp+1, pool->cpool_ns[imm30], type); |
2435 | break; |
2436 | case OP_pushint: |
2437 | AvmAssert(type == INT_TYPE);
2438 | emitIntConst(sp+1, pool->cpool_int[imm30], type); |
2439 | break; |
2440 | case OP_pushuint: |
2441 | AvmAssert(type == UINT_TYPE);
2442 | emitIntConst(sp+1, pool->cpool_uint[imm30], type); |
2443 | break; |
2444 | case OP_pushdouble: |
2445 | AvmAssert(type == NUMBER_TYPE);
2446 | emitDoubleConst(sp+1, &pool->cpool_double[imm30]->value); |
2447 | break; |
2448 | case OP_pushnan: |
2449 | AvmAssert(type == NUMBER_TYPE);
2450 | emitDoubleConst(sp+1, (double*)atomPtr(core->kNaN));
2451 | break; |
2452 | case OP_lookupswitch: |
2453 | emit(opcode, uintptr_t(pc + imm24), imm30b /*count*/); |
2454 | break; |
2455 | case OP_throw: |
2456 | case OP_returnvalue: |
2457 | case OP_returnvoid: |
2458 | emit(opcode, sp); |
2459 | break; |
2460 | case OP_debugfile: |
2461 | { |
2462 | #if defined VMCFG_VTUNE |
2463 | emit(opcode, (uintptr_t)pool->getString(imm30)); |
2464 | #elif defined DEBUGGER |
2465 | if (haveDebugger) |
2466 | emit(opcode, (uintptr_t)pool->getString(imm30)); |
2467 | #endif |
2468 | break; |
2469 | } |
2470 | case OP_dxns: |
2471 | { |
2472 | Stringp str = pool->getString(imm30); // assumed to have been checked already
2473 | emit(opcode, (uintptr_t)str); |
2474 | break; |
2475 | } |
2476 | case OP_dxnslate: |
2477 | // codegen will call intern on the input atom.
2478 | emit(opcode, sp); |
2479 | break; |
2480 | case OP_kill: |
2481 | emitKill(imm30); |
2482 | break; |
2483 | case OP_inclocal: |
2484 | case OP_declocal: |
2485 | emit(opcode, imm30, opcode==OP_inclocal ? 1 : -1, NUMBER_TYPE);
2486 | break; |
2487 | case OP_inclocal_i: |
2488 | case OP_declocal_i: |
2489 | emit(opcode, imm30, opcode==OP_inclocal_i ? 1 : -1, INT_TYPE);
2490 | break; |
2491 | case OP_lessthan: |
2492 | case OP_greaterthan: |
2493 | case OP_lessequals: |
2494 | case OP_greaterequals: |
2495 | emit(opcode, 0, 0, BOOLEAN_TYPE);
2496 | break; |
2497 | |
2498 | case OP_getdescendants: |
2499 | { |
2500 | const Multiname *name = pool->precomputedMultiname(imm30); |
2501 | emit(opcode, (uintptr_t)name, 0, NULL);
2502 | break; |
2503 | } |
2504 | |
2505 | case OP_checkfilter: |
2506 | emit(opcode, sp, 0, NULL);
2507 | break; |
2508 | |
2509 | case OP_deleteproperty: |
2510 | { |
2511 | const Multiname *name = pool->precomputedMultiname(imm30); |
2512 | emit(opcode, (uintptr_t)name, 0, BOOLEAN_TYPE);
2513 | break; |
2514 | } |
2515 | |
2516 | case OP_astype: |
2517 | { |
2518 | emit(OP_astype, (uintptr_t)type, sp, type && type->isMachineType() ? OBJECT_TYPE : type);
2519 | break; |
2520 | } |
2521 | case OP_astypelate: |
2522 | { |
2523 | emit(OP_astypelate, 0, 0, type); |
2524 | break; |
2525 | } |
2526 | |
2527 | case OP_coerce: |
2528 | case OP_coerce_b: |
2529 | case OP_convert_b: |
2530 | case OP_coerce_o: |
2531 | case OP_coerce_a: |
2532 | case OP_convert_i: |
2533 | case OP_coerce_i: |
2534 | case OP_convert_u: |
2535 | case OP_coerce_u: |
2536 | case OP_convert_d: |
2537 | case OP_coerce_d: |
2538 | case OP_coerce_s: |
2539 | AvmAssert(
2540 | (opcode == OP_coerce && type != NULL) ||
2541 | (opcode == OP_coerce_b && type == BOOLEAN_TYPE) ||
2542 | (opcode == OP_convert_b && type == BOOLEAN_TYPE) ||
2543 | (opcode == OP_coerce_o && type == OBJECT_TYPE) ||
2544 | (opcode == OP_coerce_a && type == NULL) ||
2545 | (opcode == OP_convert_i && type == INT_TYPE) ||
2546 | (opcode == OP_coerce_i && type == INT_TYPE) ||
2547 | (opcode == OP_convert_u && type == UINT_TYPE) ||
2548 | (opcode == OP_coerce_u && type == UINT_TYPE) ||
2549 | (opcode == OP_convert_d && type == NUMBER_TYPE) ||
2550 | (opcode == OP_coerce_d && type == NUMBER_TYPE) ||
2551 | (opcode == OP_coerce_s && type == STRING_TYPE));
2552 | emitCoerce(sp, type); |
2553 | break; |
2554 | |
2555 | case OP_istype: |
2556 | { |
2557 | // used when operator "is" RHS is a compile-time type constant |
2558 | //sp[0] = istype(sp[0], itraits); |
2559 | LIns* obj = loadAtomRep(sp); |
2560 | LIns* out = callIns(FUNCTIONID(istype), 2, obj, InsConstPtr(type));
2561 | localSet(sp, out, BOOLEAN_TYPE);
2562 | break; |
2563 | } |
2564 | |
2565 | case OP_istypelate: |
2566 | { |
2567 | // null check for the type value T in (x is T). This also preserves |
2568 | // any side effects from loading T, even if we end up inlining T.itraits() as a const. |
2569 | Traits* class_type = state->value(sp).traits; |
2570 | emitCheckNull(localCopy(sp), class_type); |
2571 | LIns* obj = loadAtomRep(sp-1); |
2572 | LIns* istype_result; |
2573 | if (class_type && class_type->base == CLASS_TYPE) {
2574 | // (x is T) where T is a class object: get T.itraits as constant.
2575 | istype_result = callIns(FUNCTIONID(istype), 2, obj, InsConstPtr(class_type->itraits));
2576 | } else {
2577 | // RHS is unknown, call general istype
2578 | istype_result = callIns(FUNCTIONID(istypelate), 3, env_param, obj, loadAtomRep(sp));
2579 | }
2580 | localSet(sp-1, istype_result, BOOLEAN_TYPE);
2581 | break; |
2582 | } |
2583 | |
2584 | case OP_convert_o: |
2585 | // NOTE check null has already been done |
2586 | break; |
2587 | |
2588 | case OP_applytype: |
2589 | // * is ok for the type, as Vector classes have no statics |
2590 | // when we implement type parameters fully, we should do something here. |
2591 | emit(opcode, imm30/*argc*/, 0, NULL);
2592 | break; |
2593 | |
2594 | case OP_newobject: |
2595 | emit(opcode, imm30, 0, OBJECT_TYPE);
2596 | break; |
2597 | |
2598 | case OP_newarray: |
2599 | emit(opcode, imm30, 0, ARRAY_TYPE);
2600 | break; |
2601 | |
2602 | case OP_newactivation: |
2603 | emit(opcode, 0, 0, info->activationTraits()); |
2604 | break; |
2605 | |
2606 | case OP_newcatch: |
2607 | { |
2608 | ExceptionHandler* handler = &info->abc_exceptions()->exceptions[imm30]; |
2609 | emit(opcode, 0, 0, handler->scopeTraits); |
2610 | break; |
2611 | } |
2612 | |
2613 | case OP_popscope: |
2614 | if (haveDebugger) |
2615 | emitKill(ms->local_count()/*scopeBase*/ + state->scopeDepth); |
2616 | break; |
2617 | |
2618 | case OP_getslot: |
2619 | { |
2620 | const FrameValue& obj = state->peek(1); |
2621 | int index = imm30-1; |
2622 | Traits* slotTraits = obj.traits ? obj.traits->getTraitsBindings()->getSlotTraits(index) : NULL;
2623 | emitGetslot(index, sp, slotTraits); |
2624 | break; |
2625 | } |
2626 | |
2627 | case OP_setslot: |
2628 | emitSetslot(OP_setslot, imm30-1, sp-1); |
2629 | break; |
2630 | |
2631 | case OP_dup: |
2632 | emitCopy(sp, sp+1); |
2633 | break; |
2634 | |
2635 | case OP_swap: |
2636 | emitSwap(sp, sp-1); |
2637 | break; |
2638 | |
2639 | case OP_add: |
2640 | emitAdd(sp-1, sp, type); |
2641 | break; |
2642 | |
2643 | case OP_equals: |
2644 | case OP_strictequals: |
2645 | case OP_instanceof: |
2646 | case OP_in: |
2647 | emit(opcode, 0, 0, BOOLEAN_TYPE);
2648 | break; |
2649 | |
2650 | case OP_not: |
2651 | AvmAssert(type == BOOLEAN_TYPE);
2652 | emit(opcode, sp, 0, type); |
2653 | break; |
2654 | |
2655 | case OP_modulo: |
2656 | case OP_subtract: |
2657 | case OP_divide: |
2658 | case OP_multiply: |
2659 | emit(opcode, 0, 0, NUMBER_TYPE);
2660 | break; |
2661 | |
2662 | case OP_increment: |
2663 | case OP_decrement: |
2664 | emit(opcode, sp, opcode == OP_increment ? 1 : -1, NUMBER_TYPE);
2665 | break; |
2666 | |
2667 | case OP_increment_i: |
2668 | case OP_decrement_i: |
2669 | emit(opcode, sp, opcode == OP_increment_i ? 1 : -1, INT_TYPE);
2670 | break; |
2671 | |
2672 | case OP_add_i: |
2673 | case OP_subtract_i: |
2674 | case OP_multiply_i: |
2675 | emit(opcode, 0, 0, INT_TYPE);
2676 | break; |
2677 | |
2678 | case OP_negate: |
2679 | emit(opcode, sp, 0, NUMBER_TYPE);
2680 | break; |
2681 | |
2682 | case OP_negate_i: |
2683 | emit(opcode, sp, 0, INT_TYPE);
2684 | break; |
2685 | |
2686 | case OP_bitand: |
2687 | case OP_bitor: |
2688 | case OP_bitxor: |
2689 | emit(opcode, 0, 0, INT_TYPE);
2690 | break; |
2691 | |
2692 | case OP_lshift: |
2693 | case OP_rshift: |
2694 | emit(opcode, 0, 0, INT_TYPE);
2695 | break; |
2696 | |
2697 | case OP_urshift: |
2698 | emit(opcode, 0, 0, UINT_TYPE);
2699 | break; |
2700 | |
2701 | case OP_bitnot: |
2702 | emit(opcode, sp, 0, INT_TYPE);
2703 | break; |
2704 | |
2705 | case OP_typeof: |
2706 | emit(opcode, sp, 0, STRING_TYPE);
2707 | break; |
2708 | |
2709 | case OP_debugline: |
2710 | { |
2711 | #if defined VMCFG_VTUNE |
2712 | emit(opcode, imm30); |
2713 | #elif defined DEBUGGER |
2714 | if (haveDebugger) { |
2715 | // we actually do generate code for these, in debugger mode |
2716 | emit(opcode, imm30); |
2717 | } |
2718 | #endif |
2719 | break; |
2720 | } |
2721 | case OP_nextvalue: |
2722 | case OP_nextname: |
2723 | emit(opcode, 0, 0, NULL);
2724 | break; |
2725 | |
2726 | case OP_hasnext: |
2727 | emit(opcode, 0, 0, INT_TYPE);
2728 | break; |
2729 | |
2730 | case OP_hasnext2: |
2731 | emit(opcode, imm30, imm30b, BOOLEAN_TYPE);
2732 | break; |
2733 | |
2734 | // sign extends |
2735 | case OP_sxi1: |
2736 | case OP_sxi8: |
2737 | case OP_sxi16: |
2738 | emit(opcode, sp, 0, INT_TYPE);
2739 | break; |
2740 | |
2741 | // loads |
2742 | case OP_lix8: |
2743 | case OP_lix16: |
2744 | case OP_li8: |
2745 | case OP_li16: |
2746 | case OP_li32: |
2747 | case OP_lf32: |
2748 | case OP_lf64: |
2749 | { |
2750 | Traits* result = (opcode == OP_lf32 || opcode == OP_lf64) ? NUMBER_TYPE : INT_TYPE;
2751 | emit(opcode, sp, 0, result); |
2752 | break; |
2753 | } |
2754 | |
2755 | // stores |
2756 | case OP_si8: |
2757 | case OP_si16: |
2758 | case OP_si32: |
2759 | case OP_sf32: |
2760 | case OP_sf64: |
2761 | { |
2762 | emit(opcode, 0, 0, VOID_TYPE);
2763 | break; |
2764 | } |
2765 | |
2766 | case OP_getglobalscope: |
2767 | emitGetGlobalScope(sp+1); |
2768 | break; |
2769 | |
2770 | case OP_convert_s: |
2771 | localSet(sp, convertToString(sp, false), STRING_TYPE);
2772 | break; |
2773 | |
2774 | case OP_esc_xelem: |
2775 | case OP_esc_xattr: |
2776 | emit(opcode, sp, 0, STRING_TYPE);
2777 | break; |
2778 | |
2779 | case OP_debug: |
2780 | // ignored |
2781 | break; |
2782 | |
2783 | case OP_restargc: |
2784 | { |
2785 | // See documentation in writePrologue regarding rest arguments |
2786 | AvmAssert(info->needRestOrArguments() && info->lazyRest());
2787 | LIns* out = callIns(FUNCTIONID(restargcHelper),
2788 | 2,
2789 | localGetp(restLocal),
2790 | restArgc);
2791 | localSet(sp, out, UINT_TYPE);
2792 | break; |
2793 | } |
2794 | |
2795 | default: |
2796 | AvmAssertMsg(false, "unhandled opcode in CodegenLIR::write()");
2797 | break; |
2798 | } |
2799 | } |
2800 | |
2801 | // coerce parameter types, starting at firstArg. |
2802 | void CodegenLIR::coerceArgs(MethodSignaturep mms, int argc, int firstArg) |
2803 | { |
2804 | int sp = state->sp(); |
2805 | for (int arg = argc, n = 1; arg >= firstArg; arg--, n++) { |
2806 | Traits* target = (arg <= mms->param_count()) ? mms->paramTraits(arg) : NULL;
2807 | int index = sp - (n - 1); |
2808 | emitCoerce(index, target); |
2809 | } |
2810 | } |
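     | // Illustration: with argc == 2 and firstArg == 0, the loop walks the stack
     | // top-down: arg 2 at sp, arg 1 at sp-1, and the receiver (arg 0) at sp-2,
     | // each coerced to its declared paramTraits; any args beyond param_count()
     | // get target == NULL, i.e. are coerced to *.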
2811 | |
2812 | // Coerce parameter types, but not the receiver. The object in the receiver
2813 | // position is the class or function object and we haven't called newInstance yet. |
2814 | // In this case, emitCall() will generate a call to newInstance, producing the |
2815 | // new object, then call its init function with the coerced arguments. |
2816 | void CodegenLIR::emitConstructCall(intptr_t method_id, int argc, LIns* ctor, Traits* ctraits) |
2817 | { |
2818 | Traits* itraits = ctraits->itraits; |
2819 | MethodInfo* m = itraits->init; |
2820 | MethodSignaturep mms = m->getMethodSignature(); |
2821 | AvmAssert(mms->argcOk(argc)); // caller must check this before early binding to ctor
2822 | |
2823 | coerceArgs(mms, argc, 1); |
2824 | emitCall(OP_construct, method_id, argc, ctor, ctraits, itraits, mms); |
2825 | } |
2826 | |
2827 | /** |
2828 | * emitCoerceCall is used when the jit finds an opportunity to early bind that the |
2829 | * driver did not. It does the coercions using the signature of the callee, and
2830 | * does not mutate FrameState. |
2831 | */ |
2832 | void CodegenLIR::emitCoerceCall(AbcOpcode opcode, intptr_t method_id, int argc, MethodSignaturep mms) |
2833 | { |
2834 | AvmAssert(state->value(state->sp() - argc).notNull); // make sure null check happened
2835 | AvmAssert(opcode != OP_construct);
2836 | AvmAssert(mms->argcOk(argc));
2837 | coerceArgs(mms, argc, 0); |
2838 | emitCall(opcode, method_id, argc, mms->returnTraits(), mms); |
2839 | } |
2840 | |
2841 | void CodegenLIR::emitGetGlobalScope(int dest) |
2842 | { |
2843 | const ScopeTypeChain* scope = info->declaringScope(); |
2844 | int captured_depth = scope->size; |
2845 | if (captured_depth > 0) |
2846 | { |
2847 | // enclosing scope |
2848 | emitGetscope(0, dest); |
2849 | } |
2850 | else |
2851 | { |
2852 | // local scope |
2853 | AvmAssert(state->scopeDepth > 0); // verifier checked.
2854 | emitCopy(ms->scope_base(), dest); |
2855 | } |
2856 | } |
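     | // Illustration: a method whose declaring scope captured outer scopes (e.g.
     | // a class method) reads the global object as captured scope 0; a top-level
     | // script has captured_depth == 0 and copies the global from the base of
     | // its own scope stack instead.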
2857 | |
2858 | void CodegenLIR::writeOp1(const FrameState* state, const uint8_t *pc, AbcOpcode opcode, uint32_t opd1, Traits *type) |
2859 | { |
2860 | this->state = state; |
2861 | emitSetPc(pc); |
2862 | switch (opcode) { |
2863 | case OP_iflt: |
2864 | case OP_ifle: |
2865 | case OP_ifnlt: |
2866 | case OP_ifnle: |
2867 | case OP_ifgt: |
2868 | case OP_ifge: |
2869 | case OP_ifngt: |
2870 | case OP_ifnge: |
2871 | case OP_ifeq: |
2872 | case OP_ifstricteq: |
2873 | case OP_ifne: |
2874 | case OP_ifstrictne: |
2875 | { |
2876 | int32_t offset = (int32_t) opd1; |
2877 | int lhs = state->sp()-1; |
2878 | emitIf(opcode, pc+4/*size*/+offset, lhs, lhs+1); |
2879 | break; |
2880 | } |
2881 | case OP_iftrue: |
2882 | case OP_iffalse: |
2883 | { |
2884 | int32_t offset = (int32_t) opd1; |
2885 | int sp = state->sp(); |
2886 | emitIf(opcode, pc+4/*size*/+offset, sp, 0); |
2887 | break; |
2888 | } |
2889 | case OP_jump: |
2890 | { |
2891 | int32_t offset = (int32_t) opd1; |
2892 | emit(opcode, uintptr_t(pc+4/*size*/+offset)); |
2893 | break; |
2894 | } |
2895 | case OP_getslot: |
2896 | emitGetslot(opd1, state->sp(), type); |
2897 | break; |
2898 | case OP_getglobalslot: { |
2899 | int32_t dest_index = state->sp(); // driver already incremented it |
2900 | uint32_t slot = opd1; |
2901 | emitGetGlobalScope(dest_index); |
2902 | emitGetslot(slot, dest_index, type /* slot type */); |
2903 | break; |
2904 | } |
2905 | case OP_setglobalslot: |
2906 | emitSetslot(OP_setglobalslot, opd1, 0 /* computed or ignored */); |
2907 | break; |
2908 | case OP_call: |
2909 | emit(opcode, opd1 /*argc*/, 0, NULL);
2910 | break; |
2911 | |
2912 | case OP_construct: |
2913 | { |
2914 | const uint32_t argc = opd1; |
2915 | int ctor_index = state->sp() - argc; |
2916 | Traits* ctraits = state->value(ctor_index).traits; |
2917 | LIns* ctor = localCopy(ctor_index); |
2918 | emitConstruct(argc, ctor, ctraits); |
2919 | break; |
2920 | } |
2921 | case OP_getouterscope: |
2922 | emitGetscope(opd1, state->sp()+1); |
2923 | break; |
2924 | case OP_getscopeobject: |
2925 | emitCopy(opd1 + ms->scope_base(), state->sp()+1); |
2926 | break; |
2927 | case OP_newfunction: |
2928 | AvmAssert(pool->getMethodInfo(opd1)->declaringTraits() == type);
2929 | emit(opcode, opd1, state->sp()+1, type); |
2930 | break; |
2931 | case OP_pushscope: |
2932 | case OP_pushwith: |
2933 | emitCopy(state->sp(), opd1); |
2934 | break; |
2935 | case OP_findpropstrict: |
2936 | case OP_findproperty: |
2937 | { |
2938 | const Multiname *name = pool->precomputedMultiname(opd1); |
2939 | emit(opcode, (uintptr_t)name, 0, OBJECT_TYPE);
2940 | break; |
2941 | } |
2942 | case OP_findpropglobalstrict: |
2943 | { |
2944 | // NOTE opcode not supported, deoptimizing |
2945 | const Multiname *name = pool->precomputedMultiname(opd1); |
2946 | emit(OP_findpropstrict, (uintptr_t)name, 0, OBJECT_TYPE);
2947 | break; |
2948 | } |
2949 | case OP_findpropglobal: |
2950 | { |
2951 | // NOTE opcode not supported, deoptimizing |
2952 | const Multiname *name = pool->precomputedMultiname(opd1); |
2953 | emit(OP_findproperty, (uintptr_t)name, 0, OBJECT_TYPE);
2954 | break; |
2955 | } |
2956 | |
2957 | case OP_newclass: |
2958 | { |
2959 | Traits* ctraits = pool->getClassTraits(opd1); |
2960 | AvmAssert(ctraits == type);
2961 | emit(opcode, (uintptr_t)(void*)ctraits, state->sp(), type); |
2962 | break; |
2963 | } |
2964 | |
2965 | case OP_finddef: |
2966 | { |
2967 | // opd1=name index |
2968 | // type=script->declaringTraits |
2969 | const Multiname *multiname = pool->precomputedMultiname(opd1); |
2970 | AvmAssert(multiname->isBinding());
2971 | int32_t dest_index = state->sp() + 1; |
2972 | // This allocates a cache slot even if the finddef ultimately becomes dead. |
2973 | // As long as caches tend to be small compared to size of pool data and code, |
2974 | // filtering out dead cache lines isn't worth the complexity. |
2975 | LIns* slot = InsConst(finddef_cache_builder.allocateCacheSlot(opd1)); |
2976 | LIns* out = callIns(FUNCTIONID(finddef_cache), 3, env_param, InsConstPtr(multiname), slot);
2977 | localSet(dest_index, ptrToNativeRep(type, out), type); |
2978 | break; |
2979 | } |
2980 | |
2981 | case OP_restarg: |
2982 | { |
2983 | // See documentation in writePrologue regarding rest arguments |
2984 | AvmAssert(info->needRestOrArguments() && info->lazyRest());
2985 | const Multiname *multiname = pool->precomputedMultiname(opd1);
2986 | // The by-reference parameter &restLocal is handled specially for this
2987 | // helper function in VarTracker::insCall and in CodegenLIR::analyze_call.
2988 | LIns* out = callIns(FUNCTIONID(restargHelper),
2989 | 6, |
2990 | loadEnvToplevel(), |
2991 | InsConstPtr(multiname), |
2992 | loadAtomRep(state->sp()), |
2993 | leaIns(restLocal * VARSIZE, vars), |
2994 | restArgc, |
2995 | (info->needRest() ? |
2996 | binaryIns(LIR_addp, ap_param, InsConstPtr((void*)(ms->rest_offset()))) : |
2997 | binaryIns(LIR_addp, ap_param, InsConstPtr((void*)sizeof(Atom))))); |
2998 | localSet(state->sp()-1, out, type); |
2999 | break; |
3000 | } |
3001 | |
3002 | default: |
3003 | // writeOp1() called with an improper opcode. |
3004 | AvmAssert(false);
3005 | break; |
3006 | } |
3007 | } |
3008 | |
3009 | LIns* CodegenLIR::coerceToString(int index) |
3010 | { |
3011 | const FrameValue& value = state->value(index); |
3012 | Traits* in = value.traits; |
3013 | |
3014 | switch (bt(in)) { |
3015 | case BUILTIN_null: |
3016 | case BUILTIN_string: |
3017 | // fine to just load the pointer |
3018 | return localGetp(index); |
3019 | case BUILTIN_int: |
3020 | return callIns(FUNCTIONID(intToString), 2, coreAddr, localGet(index));
3021 | case BUILTIN_uint:
3022 | return callIns(FUNCTIONID(uintToString), 2, coreAddr, localGet(index));
3023 | case BUILTIN_number:
3024 | return callIns(FUNCTIONID(doubleToString), 2, coreAddr, localGetf(index));
3025 | case BUILTIN_boolean: { |
3026 | // load "true" or "false" string constant from AvmCore.booleanStrings[] |
3027 | LIns *offset = binaryIns(LIR_lshp, i2p(localGet(index)), InsConst(PTR_SCALE));
3028 | LIns *arr = InsConstPtr(&core->booleanStrings); |
3029 | return loadIns(LIR_ldp, 0, binaryIns(LIR_addp, arr, offset), ACCSET_OTHER, LOAD_CONST); |
3030 | } |
3031 | default: |
3032 | if (value.notNull) { |
3033 | // not eligible for CSE, and we know it's not null/undefined |
3034 | return emitStringCall(index, FUNCTIONID(string), true); // call string
3035 | }
3036 | return emitStringCall(index, FUNCTIONID(coerce_s), true); // call coerce_s
3037 | } |
3038 | } |
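     | // Illustration of the boolean case above: the boolean value (0 or 1) is
     | // scaled by the pointer size (via PTR_SCALE) and used to index
     | // core->booleanStrings, so "false"/"true" are fetched with a single load
     | // and no helper call.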
3039 | |
3040 | /** emit code for * -> Number conversion */ |
3041 | LIns* CodegenLIR::coerceToNumber(int index) |
3042 | { |
3043 | const FrameValue& value = state->value(index); |
3044 | Traits* in = value.traits; |
3045 | |
3046 | if (in && (in->isNumeric() || in == BOOLEAN_TYPE)) {
3047 | return promoteNumberIns(in, index); |
3048 | } else { |
3049 | // * -> Number |
3050 | #ifdef VMCFG_FASTPATH_FROMATOM |
3051 | if (inlineFastpath) { |
3052 | // double rslt; |
3053 | // intptr_t val = CONVERT_TO_ATOM(arg); |
3054 | // if ((val & kAtomTypeMask) != kIntptrType) goto not_intptr; # test for kIntptrType tag |
3055 | // # kIntptrType |
3056 | // rslt = double(val >> kAtomTypeSize); # extract integer value and convert to double |
3057 | // goto done; |
3058 | // not_intptr: |
3059 | // if ((val & kAtomTypeMask) != kDoubleType) goto not_double; # test for kDoubleType tag |
3060 | // # kDoubleType |
3061 | // rslt = *(val - kDoubleType); # remove tag and dereference |
3062 | // goto done; |
3063 | // not_double: |
3064 | // rslt = number(val); # slow path -- call helper |
3065 | // done: |
3066 | // result = rslt; |
3067 | CodegenLabel not_intptr; |
3068 | CodegenLabel not_double; |
3069 | CodegenLabel done; |
3070 | suspendCSE(); |
3071 | LIns* val = loadAtomRep(index); |
3072 | LIns* rslt = insAlloc(sizeof(double)); |
3073 | LIns* tag = andp(val, AtomConstants::kAtomTypeMask); |
3074 | // kIntptrType |
3075 | branchToLabel(LIR_jf, eqp(tag, AtomConstants::kIntptrType), not_intptr); |
3076 | // Note that this works on 64bit platforms only if we are careful |
3077 | // to restrict the range of intptr values to those that fit within |
3078 | // the integer range of the double type. |
3079 | std(p2dIns(rshp(val, AtomConstants::kAtomTypeSize)), rslt, 0, ACCSET_OTHER); |
3080 | JIT_EVENT(jit_atom2double_fast_intptr);
3081 | branchToLabel(LIR_j, NULL, done);
3082 | emitLabel(not_intptr);
3083 | // kDoubleType
3084 | branchToLabel(LIR_jf, eqp(tag, AtomConstants::kDoubleType), not_double);
3085 | std(ldd(subp(val, AtomConstants::kDoubleType), 0, ACCSET_OTHER), rslt, 0, ACCSET_OTHER);
3086 | JIT_EVENT(jit_atom2double_fast_double);
3087 | branchToLabel(LIR_j, NULL, done);
3088 | emitLabel(not_double);
3089 | std(callIns(FUNCTIONID(number), 1, val), rslt, 0, ACCSET_OTHER);
3090 | JIT_EVENT(jit_atom2double_slow);
3091 | emitLabel(done); |
3092 | resumeCSE(); |
3093 | return ldd(rslt, 0, ACCSET_OTHER); |
3094 | } |
3095 | #endif |
3096 | |
3097 | return callIns(FUNCTIONID(number), 1, loadAtomRep(index));
3098 | } |
3099 | } |
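     | // Illustration of the fast path: an intptr atom carries its integer
     | // payload shifted left by kAtomTypeSize with the kIntptrType tag in the
     | // low bits, so rshp(val, kAtomTypeSize) recovers the integer before the
     | // p2dIns widening; a double atom is a tagged pointer, and subtracting
     | // kDoubleType untags it so the payload can be loaded directly.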
3100 | |
3101 | LIns *CodegenLIR::emitStringCall(int index, const CallInfo *stringCall, bool preserveNull)
3102 | { |
3103 | LIns* val = loadAtomRep(index); |
3104 | |
3105 | if (inlineFastpath) { |
3106 | // Inline fast path for string conversion. |
3107 | // if preserveNull == false: |
3108 | // if (((input & kAtomTypeMask) != kStringType) || (input == kStringType))
3109 | // output = stringCall (input);
3110 | // else
3111 | // output = input ^ kStringType;
3112 | //
3113 | // if preserveNull == true:
3114 | // if ((input & kAtomTypeMask) != kStringType)
3115 | // output = stringCall (input); |
3116 | // else |
3117 | // output = input ^ kStringType; |
3118 | CodegenLabel not_stringptr; |
3119 | CodegenLabel done; |
3120 | suspendCSE(); |
3121 | LIns* result = insAlloc(sizeof(intptr_t)); |
3122 | LIns* tag = andp(val, AtomConstants::kAtomTypeMask); |
3123 | // kStringType |
3124 | branchToLabel(LIR_jf, eqp(tag, AtomConstants::kStringType), not_stringptr); |
3125 | if (!preserveNull) { |
3126 | // If our value is equal to kStringType, we have a null String ptr |
3127 | branchToLabel(LIR_jt, eqp(val, AtomConstants::kStringType), not_stringptr); |
3128 | } |
3129 | stp(xorp(val, AtomConstants::kStringType), result, 0, ACCSET_OTHER); |
3130 | branchToLabel(LIR_j, NULL, done);
3131 | |
3132 | emitLabel(not_stringptr); |
3133 | stp(callIns(stringCall, 2, coreAddr, val), result, 0, ACCSET_OTHER); |
3134 | emitLabel(done); |
3135 | resumeCSE(); |
3136 | return ldp(result, 0, ACCSET_OTHER); |
3137 | } |
3138 | |
3139 | return callIns(stringCall, 2, coreAddr, val); |
3140 | } |
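     | // Illustration: a string atom is the String* with kStringType in its low
     | // tag bits, so xorp(val, kStringType) strips the tag to recover the
     | // pointer; val == kStringType itself encodes a null String*, which is why
     | // the !preserveNull path sends that case to the helper as well.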
3141 | |
3142 | // OP_convert_s needs a null String ptr to be converted to "null" |
3143 | // while our other usage prior to concatStrings handles null ptrs |
3144 | // correctly in AvmCore::concatStrings. This function differs from
3145 | // coerceToString in how undefinedAtom is handled:
3146 | // convert: undefinedAtom -> "undefined"
3147 | // coerce: undefinedAtom -> "null" (see coerce_s)
3148 | LIns* CodegenLIR::convertToString(int index, bool preserveNull)
3149 | { |
3150 | const FrameValue& value = state->value(index); |
3151 | Traits* in = value.traits; |
3152 | Traits* stringType = STRING_TYPE;
3153 |
3154 | if (in != stringType || (!preserveNull && !value.notNull)) {
3155 | if (in && (value.notNull || in->isNumeric() || in == BOOLEAN_TYPE)) {
3156 | // convert is the same as coerce |
3157 | return coerceToString(index); |
3158 | } else { |
3159 | // explicitly convert to string |
3160 | return emitStringCall(index, FUNCTIONID(string), preserveNull);
3161 | } |
3162 | } |
3163 | |
3164 | // already String* |
3165 | return localGetp(index); |
3166 | } |
3167 | |
3168 | void CodegenLIR::writeNip(const FrameState* state, const uint8_t *pc) |
3169 | { |
3170 | this->state = state; |
3171 | emitSetPc(pc); |
3172 | emitCopy(state->sp(), state->sp()-1); |
3173 | } |
3174 | |
3175 | void CodegenLIR::writeMethodCall(const FrameState* state, const uint8_t *pc, AbcOpcode opcode, MethodInfo* m, uintptr_t disp_id, uint32_t argc, Traits *type) |
3176 | { |
3177 | this->state = state; |
3178 | emitSetPc(pc); |
3179 | switch (opcode) { |
3180 | case OP_callproperty: |
3181 | case OP_callproplex: |
3182 | case OP_callpropvoid: |
3183 | AvmAssert(m->declaringTraits()->isInterface());
3184 | emitTypedCall(OP_callinterface, ImtHolder::getIID(m), argc, type, m); |
3185 | break; |
3186 | case OP_callmethod: |
3187 | emitTypedCall(OP_callmethod, disp_id, argc, type, m); |
3188 | break; |
3189 | default: |
3190 | AvmAssert(false);
3191 | break; |
3192 | } |
3193 | } |
3194 | |
3195 | void CodegenLIR::writeOp2(const FrameState* state, const uint8_t *pc, AbcOpcode opcode, uint32_t opd1, uint32_t opd2, Traits *type) |
3196 | { |
3197 | this->state = state; |
3198 | emitSetPc(pc); |
3199 | int sp = state->sp(); |
3200 | switch (opcode) { |
3201 | |
3202 | case OP_constructsuper: |
3203 | { |
3204 | Traits* base = info->declaringTraits()->base; |
3205 | // opd1=unused, opd2=argc |
3206 | if (base == OBJECT_TYPE && base->init->isTrivial()) {
3207 | AvmAssert(opd2 == 0); // The verifier should have caught a non-zero argc
3208 | break;
3209 | }
3210 | emitTypedCall(OP_constructsuper, 0, opd2, VOID_TYPE, base->init);
3211 | break; |
3212 | } |
3213 | |
3214 | case OP_setsuper: |
3215 | { |
3216 | const uint32_t index = opd1; |
3217 | const uint32_t n = opd2; |
3218 | Traits* base = type; |
3219 | int32_t ptrIndex = sp-(n-1); |
3220 | |
3221 | const Multiname* name = pool->precomputedMultiname(index); |
3222 | |
3223 | Binding b = toplevel->getBinding(base, name); |
3224 | Traits* propType = Traits::readBinding(base, b); |
3225 | const TraitsBindingsp basetd = base->getTraitsBindings(); |
3226 | |
3227 | if (AvmCore::isSlotBinding(b)) { |
3228 | if (!AvmCore::isVarBinding(b)) { |
3229 | // else, ignore write to readonly accessor |
3230 | break; |
3231 | } |
3232 | int slot_id = AvmCore::bindingToSlotId(b); |
3233 | LIns* value = coerceToType(sp, propType); |
3234 | emitSetslot(OP_setslot, slot_id, ptrIndex, value); |
3235 | break; |
3236 | } |
3237 | if (AvmCore::isAccessorBinding(b)) { |
3238 | if (!AvmCore::hasSetterBinding(b)) { |
3239 | // ignore write to readonly accessor |
3240 | break; |
3241 | } |
3242 | // Invoke the setter |
3243 | int disp_id = AvmCore::bindingToSetterId(b); |
3244 | MethodSignaturep mms = basetd->getMethod(disp_id)->getMethodSignature(); |
3245 | if (mms->argcOk(1)) { |
3246 | emitCoerceCall(OP_callsuperid, disp_id, 1, mms); |
3247 | break; |
3248 | } |
3249 | } |
3250 | // generic late bound case |
3251 | emit(opcode, (uintptr_t)name); |
3252 | break; |
3253 | } |
3254 | case OP_getsuper: |
3255 | { |
3256 | const uint32_t index = opd1; |
3257 | const uint32_t n = opd2; |
3258 | Traits* base = type; |
3259 | |
3260 | const Multiname* name = pool->precomputedMultiname(index); |
3261 | |
3262 | Binding b = toplevel->getBinding(base, name); |
3263 | Traits* propType = Traits::readBinding(base, b); |
3264 | |
3265 | if (AvmCore::isSlotBinding(b)) { |
3266 | int slot_id = AvmCore::bindingToSlotId(b); |
3267 | emitGetslot(slot_id, state->sp()-(n-1), propType); |
3268 | break; |
3269 | } |
3270 | if (AvmCore::hasGetterBinding(b)) { |
3271 | // Invoke the getter |
3272 | int disp_id = AvmCore::bindingToGetterId(b); |
3273 | const TraitsBindingsp basetd = base->getTraitsBindings(); |
3274 | MethodSignaturep mms = basetd->getMethod(disp_id)->getMethodSignature(); |
3275 | if (mms->argcOk(0)) { |
3276 | emitCoerceCall(OP_callsuperid, disp_id, 0, mms); |
3277 | break; |
3278 | } |
3279 | } |
3280 | |
3281 | // generic late-bound case |
3282 | emit(opcode, (uintptr_t)name, 0, propType); |
3283 | break; |
3284 | } |
3285 | case OP_callsuper: |
3286 | case OP_callsupervoid: |
3287 | { |
3288 | const uint32_t index = opd1; |
3289 | const uint32_t argc = opd2; |
3290 | Traits* base = type; |
3291 | const TraitsBindingsp basetd = base->getTraitsBindings(); |
3292 | |
3293 | const Multiname *name = pool->precomputedMultiname(index); |
3294 | |
3295 | Binding b = toplevel->getBinding(base, name); |
3296 | |
3297 | if (AvmCore::isMethodBinding(b)) { |
3298 | int disp_id = AvmCore::bindingToMethodId(b); |
3299 | MethodSignaturep mms = basetd->getMethod(disp_id)->getMethodSignature(); |
3300 | if (mms->argcOk(argc)) { |
3301 | emitCoerceCall(OP_callsuperid, disp_id, argc, mms); |
3302 | break; |
3303 | } |
3304 | } |
3305 | // generic late bound case |
3306 | emit(opcode, (uintptr_t)name, argc, NULL__null); |
3307 | break; |
3308 | } |
3309 | |
3310 | case OP_constructprop: |
3311 | { |
3312 | const uint32_t argc = opd2; |
3313 | const Multiname* name = pool->precomputedMultiname(opd1); |
3314 | |
3315 | const FrameValue& obj = state->peek(argc+1); // object |
3316 | Binding b = toplevel->getBinding(obj.traits, name); |
3317 | |
3318 | if (AvmCore::isSlotBinding(b)) |
3319 | { |
3320 | int slot_id = AvmCore::bindingToSlotId(b); |
3321 | int ctor_index = state->sp() - argc; |
3322 | LIns* ctor = loadFromSlot(ctor_index, slot_id, type); |
3323 | emitConstruct(argc, ctor, type); |
3324 | } |
3325 | else |
3326 | { |
3327 | emit(opcode, (uintptr_t)name, argc, NULL__null); |
3328 | } |
3329 | break; |
3330 | } |
3331 | |
3332 | case OP_getproperty: |
3333 | { |
3334 | // NOTE opd2 is the stack offset to the receiver
3335 | const Multiname* name = pool->precomputedMultiname(opd1); |
3336 | const FrameValue& obj = state->peek(opd2); // object |
3337 | Binding b = toplevel->getBinding(obj.traits, name); |
3338 | |
3339 | // early bind accessor |
3340 | if (AvmCore::hasGetterBinding(b)) |
3341 | { |
3342 | // Invoke the getter |
3343 | int disp_id = AvmCore::bindingToGetterId(b); |
3344 | const TraitsBindingsp objtd = obj.traits->getTraitsBindings(); |
3345 | MethodInfo *f = objtd->getMethod(disp_id); |
3346 | AvmAssert(f != NULL);
3347 | |
3348 | if (!obj.traits->isInterface()) { |
3349 | emitTypedCall(OP_callmethod, disp_id, 0, type, f); |
3350 | } |
3351 | else { |
3352 | emitTypedCall(OP_callinterface, ImtHolder::getIID(f), 0, type, f); |
3353 | } |
3354 | AvmAssert(type == f->getMethodSignature()->returnTraits());
3355 | } |
3356 | else { |
3357 | emit(OP_getproperty, opd1, 0, type); |
3358 | } |
3359 | break; |
3360 | } |
3361 | |
3362 | case OP_setproperty: |
3363 | case OP_initproperty: |
3364 | { |
3365 | // opd2 = n, the stack offset to the receiver
3366 | const Multiname *name = pool->precomputedMultiname(opd1); |
3367 | const FrameValue& obj = state->peek(opd2); // object |
3368 | Binding b = toplevel->getBinding(obj.traits, name); |
3369 | |
3370 | // early bind accessor |
3371 | if (AvmCore::hasSetterBinding(b)) |
3372 | { |
3373 | // invoke the setter |
3374 | int disp_id = AvmCore::bindingToSetterId(b); |
3375 | const TraitsBindingsp objtd = obj.traits->getTraitsBindings(); |
3376 | MethodInfo *f = objtd->getMethod(disp_id); |
3377 | AvmAssert(f != NULL);
3378 | |
3379 | if (!obj.traits->isInterface()) { |
3380 | emitTypedCall(OP_callmethod, disp_id, 1, type, f); |
3381 | } |
3382 | else { |
3383 | emitTypedCall(OP_callinterface, ImtHolder::getIID(f), 1, type, f); |
3384 | } |
3385 | } |
3386 | else { |
3387 | emit(opcode, (uintptr_t)name); |
3388 | } |
3389 | break; |
3390 | } |
3391 | |
3392 | case OP_setslot: |
3393 | emitSetslot(OP_setslot, opd1, opd2); |
3394 | break; |
3395 | |
3396 | case OP_callproperty: |
3397 | case OP_callproplex: |
3398 | case OP_callpropvoid: |
3399 | { |
3400 | emit(opcode, opd1, opd2, NULL);
3401 | break; |
3402 | } |
3403 | |
3404 | case OP_callstatic: { |
3405 | uint32_t method_id = opd1; |
3406 | uint32_t argc = opd2; |
3407 | emitTypedCall(OP_callstatic, method_id, argc, type, pool->getMethodInfo(method_id)); |
3408 | break; |
3409 | } |
3410 | |
3411 | default: |
3412 | AvmAssert(false);
3413 | break; |
3414 | } |
3415 | } |
3416 | |
3417 | void CodegenLIR::emitIntConst(int index, int32_t c, Traits* type) |
3418 | { |
3419 | localSet(index, lirout->insImmI(c), type); |
3420 | } |
3421 | |
3422 | void CodegenLIR::emitPtrConst(int index, void* c, Traits* type) |
3423 | { |
3424 | localSet(index, lirout->insImmP(c), type); |
3425 | } |
3426 | |
3427 | void CodegenLIR::emitDoubleConst(int index, const double* pd) |
3428 | { |
3429 | localSet(index, lirout->insImmD(*pd), NUMBER_TYPE);
3430 | } |
3431 | |
3432 | void CodegenLIR::writeCoerce(const FrameState* state, uint32_t loc, Traits* result) |
3433 | { |
3434 | this->state = state; |
3435 | emitSetPc(state->abc_pc); |
3436 | emitCoerce(loc, result); |
3437 | } |
3438 | |
3439 | void CodegenLIR::emitCoerce(uint32_t loc, Traits* result) |
3440 | { |
3441 | localSet(loc, coerceToType(loc, result), result); |
3442 | } |
3443 | |
3444 | // If we have already generated this specialized function, return our |
3445 | // prior entry instead of re-specializing the same function. |
3446 | LIns* CodegenLIR::getSpecializedCall(LIns* origCall) |
3447 | { |
3448 | if (!specializedCallHashMap) |
3449 | return NULL;
3450 | |
3451 | return specializedCallHashMap->get(origCall); |
3452 | } |
3453 | |
3454 | // Track any specialized function so if we try to specialize the same function |
3455 | // again we can re-use the original one. |
3456 | LIns * CodegenLIR::addSpecializedCall(LIns* origCall, LIns* specializedCall) |
3457 | { |
3458 | if (!specializedCallHashMap) |
3459 | specializedCallHashMap = new (*alloc1) HashMap<LIns *, LIns *>(*alloc1); |
3460 | |
3461 | specializedCallHashMap->put(origCall, specializedCall); |
3462 | |
3463 | return specializedCall; |
3464 | } |
3465 | |
3466 | // Attempt to replace a call with a call to another function, using the |
3467 | // mapping given by 'specs'. The intent is that the replacement function |
3468 | // yield equivalent semantics in the context in which it will appear, but |
3469 | // more efficiently. A single replacement is generated for each call LIns*, |
3470 | // and subsequent requests to perform the same specialization will return
3471 | // the previously-constructed replacement LIns*. If an attempt is made to |
3472 | // specialize a function call for which no replacement is given in 'specs', we |
3473 | // return NULL. It is assumed that the replacement function will return |
3474 | // an int32 value. |
3475 | |
3476 | LIns* CodegenLIR::specializeIntCall(LIns* call, Specialization* specs) |
3477 | { |
3478 | LIns *priorCall = getSpecializedCall(call); |
3479 | if (priorCall) |
3480 | return priorCall; |
3481 | |
3482 | const CallInfo *ci = call->callInfo(); |
3483 | int i = 0; |
3484 | while (specs[i].oldFunc != NULL) {
3485 | if (specs[i].oldFunc == ci) {
3486 | const CallInfo* nci = specs[i].newFunc;
3487 | AvmAssert(nci->returnType() == ARGTYPE_I);
3488 | LIns* specialization = callIns(nci, 2, call->arg(1), call->arg(0), INT_TYPE);
3489 | addSpecializedCall(call, specialization); |
3490 | return specialization; |
3491 | } |
3492 | i++; |
3493 | } |
3494 | return NULL;
3495 | } |
3496 | |
3497 | // Return true if we are promoting an int or uint to a double |
3498 | bool CodegenLIR::isPromote(LOpcode op)
3499 | { |
3500 | return op == LIR_ui2d || op == LIR_i2d; |
3501 | } |
3502 | |
3503 | // Return non-null LIns* if input is a constant that fits into a int32_t |
3504 | LIns* CodegenLIR::imm2Int(LIns* imm) |
3505 | { |
3506 | if (imm->isImmI()) |
3507 | ; // just use imm |
3508 | else if (imm->isImmD()) { |
3509 | double val = imm->immD(); |
3510 | double cvt = (int32_t)val; |
3511 | if (val == 0 || val == cvt) |
3512 | imm = InsConst((int32_t)cvt); |
3513 | else |
3514 | imm = 0; // can't convert |
3515 | } else { |
3516 | imm = 0; // non-imm |
3517 | } |
3518 | return imm; |
3519 | } |
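     | // Examples: an immD of 3.0 folds to InsConst(3); -0.0 passes the val == 0
     | // test and folds to InsConst(0); 3.5 fails the round-trip comparison and
     | // yields 0 (no conversion), as does any non-immediate operand.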
3520 | |
3521 | static Specialization coerceDoubleToInt[] = { |
3522 | { FUNCTIONID(String_charCodeAtFI), FUNCTIONID(String_charCodeAtIU) },
3523 | { FUNCTIONID(String_charCodeAtFU), FUNCTIONID(String_charCodeAtIU) },
3524 | { FUNCTIONID(String_charCodeAtFF), FUNCTIONID(String_charCodeAtIF) },
3525 | { 0, 0 } |
3526 | }; |
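     | // Example: str.charCodeAt(i) with an int index first binds to the
     | // double-returning String_charCodeAtFI; if the result is immediately
     | // coerced to int, the LIR_calld case of coerceNumberToInt below consults
     | // this table and substitutes the integer-returning variant instead.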
3527 | |
3528 | // Perform coercion from a double to integer. |
3529 | // Try various optimizations to avoid double math if possible
3530 | // and specialize charCodeAt to a faster integer version.
3531 | LIns* CodegenLIR::coerceNumberToInt(int loc) |
3532 | { |
3533 | LIns *arg = localGetf(loc); |
3534 | LOpcode op = arg->opcode(); |
3535 | switch (op) { |
3536 | case LIR_ui2d: |
3537 | case LIR_i2d: |
3538 | return arg->oprnd1(); |
3539 | case LIR_addd: |
3540 | case LIR_subd: |
3541 | case LIR_muld: { |
3542 | LIns *a = arg->oprnd1(); |
3543 | a = isPromote(a->opcode()) ? a->oprnd1() : imm2Int(a); |
3544 | if (a) { |
3545 | LIns *b = arg->oprnd2(); |
3546 | b = isPromote(b->opcode()) ? b->oprnd1() : imm2Int(b); |
3547 | if (b) |
3548 | return lirout->ins2(arithOpcodeD2I(op), a, b); |
3549 | } |
3550 | break; |
3551 | } |
3552 | // Optimize integer division when divisor is non-zero constant integer. |
3553 | // We cannot use LIR_divi if divisor is non-constant, as we need to generate a NaN on division by zero. |
3554 | case LIR_divd: { |
3555 | LIns *a = arg->oprnd1(); |
3556 | LOpcode aOpcode = a->opcode(); |
3557 | // We should never arrive here at all if both operands are constants, as the LIR_divd should |
3558 | // have been folded previously. Furthermore, Nanojit does not permit both arguments of LIR_divi |
3559 | // to be constant, and will assert if this occurs, so let's be absolutely sure it will not by |
3560 | // specifically not attempting to optimize that case here. |
3561 | AvmAssert(!imm2Int(a) || !imm2Int(arg->oprnd2()));
3562 | if (isPromote(aOpcode)) { |
3563 | a = a->oprnd1(); |
3564 | LIns *b = imm2Int(arg->oprnd2()); |
3565 | if (b) { |
3566 | int32_t intConst = b->immI(); |
3567 | if (intConst) { |
3568 | // use faster unsigned right shift if our arg is unsigned and |
3569 | // we have just one bit set in the divisor. |
3570 | if (aOpcode == LIR_ui2d && intConst >= 0 && exactlyOneBit(intConst)) { |
3571 | return lirout->ins2(LIR_rshui, a, lirout->insImmI(msbSet32(intConst))); |
3572 | } |
3573 | #if NJ_DIVI_SUPPORTED
3574 | else if (aOpcode == LIR_i2d) { |
3575 | return lirout->ins2(LIR_divi, a, b); |
3576 | } |
3577 | #endif // NJ_DIVI_SUPPORTED |
3578 | } |
3579 | } |
3580 | } |
3581 | break; |
3582 | } |
3583 | #ifdef AVMPLUS_64BIT |
3584 | case LIR_immq: |
3585 | #endif // AVMPLUS_64BIT |
3586 | case LIR_immd: |
3587 | // const fold |
3588 | return InsConst(AvmCore::integer_d(arg->immD())); |
3589 | // Try to replace a call returning a double, which will then be |
3590 | // coerced to an integer, with a call that will produce an equivalent |
3591 | // integer value directly. |
3592 | case LIR_calld: { |
3593 | LIns* specialized = specializeIntCall(arg, coerceDoubleToInt); |
3594 | if (specialized) |
3595 | return specialized; |
3596 | } |
3597 | } |
3598 | |
3599 | // For SSE capable machines, inline our double to integer conversion |
3600 | // using the CVTTSD2SI instruction. If we get a 0x80000000 return |
3601 | // value, our double is outside the valid integer range, so we fall back
3602 | // to calling doubleToInt32. |
3603 | #if defined AVMPLUS_IA32 || defined AVMPLUS_AMD64 |
3604 | #ifndef AVMPLUS_AMD64 |
3605 | SSE2_ONLY(if(core->config.njconfig.i386_sse2))
3606 | #endif // AVMPLUS_AMD64 |
3607 | { |
3608 | suspendCSE(); |
3609 | CodegenLabel skip_label("goodint"); |
3610 | LIns* intResult = insAlloc(sizeof(int32_t)); |
3611 | LIns* fastd2i = lirout->ins1(LIR_d2i, arg); |
3612 | sti(fastd2i, intResult, 0, ACCSET_STORE_ANY); // int32_t index |
3613 | LIns *c = binaryIns(LIR_eqi, fastd2i, InsConst(1L << 31)); |
3614 | branchToLabel(LIR_jf, c, skip_label); |
3615 | LIns *funcCall = callIns(FUNCTIONID(doubleToInt32), 1, arg);
3616 | sti(funcCall, intResult, 0, ACCSET_STORE_ANY); // int32_t index |
3617 | emitLabel(skip_label); |
3618 | LIns *result = loadIns(LIR_ldi, 0, intResult, ACCSET_LOAD_ANY); |
3619 | resumeCSE(); |
3620 | return result; |
3621 | } |
3622 | #endif // AVMPLUS_IA32 || AVMPLUS_AMD64 |
3623 | |
3624 | #ifndef AVMPLUS_AMD64 |
3625 | return callIns(FUNCTIONID(integer_d), 1, arg);
3626 | #endif // AVMPLUS_AMD64 |
3627 | } |
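     | // Illustration: for a uint x, int(x / 8) arrives as ui2d(x) / immD 8.0;
     | // exactlyOneBit(8) holds, so it compiles to x >> 3 via LIR_rshui
     | // (msbSet32(8) == 3). For an int y, int(y / 10) becomes LIR_divi(y, 10)
     | // on targets where NJ_DIVI_SUPPORTED is set.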
3628 | |
3629 | |
3630 | LIns* CodegenLIR::coerceToType(int loc, Traits* result) |
3631 | { |
3632 | const FrameValue& value = state->value(loc); |
3633 | Traits* in = value.traits; |
3634 | LIns* expr; |
3635 | |
3636 | if (result == NULL)
3637 | {
3638 | // coerce to * is simple, we just save the atom rep.
3639 | expr = loadAtomRep(loc);
3640 | }
3641 | else if (result == OBJECT_TYPE)
3642 | {
3643 | if (in == NULL || in == VOID_TYPE)
3644 | { |
3645 | // value already boxed but we need to coerce undefined->null |
3646 | if (!value.notNull) { |
3647 | // v == undefinedAtom ? nullObjectAtom : v; |
3648 | LIns *v = localGetp(loc); |
3649 | expr = choose(eqp(v, undefConst), nullObjectAtom, v); |
3650 | } else { |
3651 | expr = loadAtomRep(loc); |
3652 | } |
3653 | } |
3654 | else |
3655 | { |
3656 | // value cannot be undefined so just box it |
3657 | expr = loadAtomRep(loc); |
3658 | } |
3659 | } |
3660 | else if (!result->isMachineType() && in == NULL_TYPE)
3661 | { |
3662 | // it's fine to coerce null to a pointer type, just load the value |
3663 | expr = localGetp(loc); |
3664 | } |
3665 | else if (result == NUMBER_TYPE)
3666 | { |
3667 | expr = coerceToNumber(loc); |
3668 | } |
3669 | else if (result == INT_TYPE)
3670 | {
3671 | if (in == UINT_TYPE || in == BOOLEAN_TYPE || in == INT_TYPE)
3672 | { |
3673 | // just load the value |
3674 | expr = localGet(loc); |
3675 | } |
3676 | else if (in == NUMBER_TYPE)
3677 | { |
3678 | // narrowing conversion number->int |
3679 | expr = coerceNumberToInt(loc); |
3680 | } |
3681 | else |
3682 | { |
3683 | // * -> int |
3684 | expr = callIns(FUNCTIONID(integer), 1, loadAtomRep(loc));
3685 | } |
3686 | } |
3687 | else if (result == UINT_TYPE)
3688 | {
3689 | if (in == INT_TYPE || in == BOOLEAN_TYPE || in == UINT_TYPE)
3690 | { |
3691 | // just load the value |
3692 | expr = localGet(loc); |
3693 | } |
3694 | else if (in == NUMBER_TYPE)
3695 | { |
3696 | expr = coerceNumberToInt(loc); |
3697 | } |
3698 | else |
3699 | { |
3700 | // * -> uint |
3701 | expr = callIns(FUNCTIONID(toUInt32), 1, loadAtomRep(loc));
3702 | } |
3703 | } |
3704 | else if (result == BOOLEAN_TYPE)
3705 | {
3706 | if (in == BOOLEAN_TYPE)
3707 | { |
3708 | expr = localGet(loc); |
3709 | } |
3710 | else if (in == NUMBER_TYPE)
3711 | {
3712 | expr = callIns(FUNCTIONID(doubleToBool), 1, localGetf(loc));
3713 | }
3714 | else if (in == INT_TYPE || in == UINT_TYPE)
3715 | { |
3716 | // int to bool: b = (i==0) == 0 |
3717 | expr = eqi0(eqi0(localGet(loc))); |
3718 | } |
3719 | else if (in && !in->hasComplexEqualityRules()) |
3720 | { |
3721 | // ptr to bool: b = (p==0) == 0 |
3722 | expr = eqi0(eqp0(localGetp(loc))); |
3723 | } |
3724 | else |
3725 | { |
3726 | // * -> Boolean |
3727 | expr = callIns(FUNCTIONID(boolean), 1, loadAtomRep(loc));
3728 | } |
3729 | } |
3730 | else if (result == STRING_TYPE)
3731 | { |
3732 | expr = coerceToString(loc); |
3733 | } |
3734 | else if (in && !in->isMachineType() && !result->isMachineType()
3735 | && in != STRING_TYPE && in != NAMESPACE_TYPE)
3736 | { |
3737 | if (!Traits::canAssign(result, in)) { |
3738 | // coerceobj_obj() is void, but we mustn't optimize it out; we only call it when required |
3739 | callIns(FUNCTIONID(coerceobj_obj), 3,
3740 | env_param, localGetp(loc), InsConstPtr(result)); |
3741 | } |
3742 | // the input pointer has now been checked but it's still the same value. |
3743 | expr = localGetp(loc); |
3744 | } |
3745 | else if (!result->isMachineType() && result != NAMESPACE_TYPE)
3746 | { |
3747 | // result is a ScriptObject based type. |
3748 | expr = downcast_obj(loadAtomRep(loc), env_param, result); |
3749 | } |
3750 | else if (result == NAMESPACE_TYPE && in == NAMESPACE_TYPE)
3751 | { |
3752 | expr = localGetp(loc); |
3753 | } |
3754 | else |
3755 | { |
3756 | LIns* value = loadAtomRep(loc); |
3757 | // resultValue = coerce(caller_env, inputValue, traits) |
3758 | LIns* out = callIns(FUNCTIONID(coerce), 3,
3759 | env_param, value, InsConstPtr(result)); |
3760 | |
3761 | // store the result |
3762 | expr = atomToNativeRep(result, out); |
3763 | } |
3764 | return expr; |
3765 | } |
3766 | |
3767 | void CodegenLIR::writeCheckNull(const FrameState* state, uint32_t index) |
3768 | { |
3769 | this->state = state; |
3770 | emitSetPc(state->abc_pc); |
3771 | emitCheckNull(localCopy(index), state->value(index).traits); |
3772 | } |
3773 | |
3774 | void CodegenLIR::emitCheckNull(LIns* ptr, Traits* t) |
3775 | { |
3776 | // The result is either unchanged or an exception is thrown, so |
3777 | // we don't save the result. This is the null pointer check. |
3778 | if (!isNullable(t) || varTracker->isNotNull(ptr)) |
3779 | return; |
3780 | _nvprof("nullcheck",1); |
3781 | BuiltinType ty = bt(t); |
3782 | if (valueStorageType(ty) == SST_atom) { |
3783 | _nvprof("nullcheck atom", 1); |
3784 | if (ty != BUILTIN_object) { |
3785 | // If we know atom value is an object, it cannot be undefined. |
3786 | branchToLabel(LIR_jt, eqp(ptr, undefConst), upe_label); // if (p == undefined) throw undefined pointer error |
3787 | } |
3788 | branchToLabel(LIR_jt, ltup(ptr, undefConst), npe_label); // if (p < undefined) throw null pointer error |
3789 | } else { |
3790 | _nvprof("nullcheck ptr", 1); |
3791 | branchToLabel(LIR_jt, eqp0(ptr), npe_label); |
3792 | } |
3793 | varTracker->setNotNull(ptr, t); |
3794 | } |
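     | // Illustration: the null atoms (null object, string, and namespace) are
     | // encoded as tag-only values that compare unsigned-below undefinedAtom,
     | // so the single ltup test above rejects every null representation, while
     | // the eqp test catches undefined itself when the type allows it.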
3795 | |
3796 | // Save our current PC location for the catch finder later. |
3797 | void CodegenLIR::emitSetPc(const uint8_t* pc) |
3798 | { |
3799 | AvmAssert(state->abc_pc == pc);
3800 | // update bytecode ip if necessary |
3801 | if (_save_eip && lastPcSave != pc) { |
3802 | stp(InsConstPtr((void*)(pc - code_pos)), _save_eip, 0, ACCSET_OTHER); |
3803 | lastPcSave = pc; |
3804 | } |
3805 | } |
3806 | |
3807 | // This is for VTable->createInstanceProc which is called by OP_construct |
3808 | FASTFUNCTION(CALL_INDIRECT, SIG2(P,P,P), createInstanceProc)
3809 | |
3810 | void CodegenLIR::emitIsNaN(Traits* result) |
3811 | { |
3812 | int op1 = state->sp(); |
3813 | LIns *f = localGetf(op1); |
3814 | if (isPromote(f->opcode())) { |
3815 | // Promoting an integer to a double cannot result in a NaN. |
3816 | localSet(op1-1, InsConst(0), result); |
3817 | } |
3818 | else { |
3819 | // LIR is required to follow IEEE floating point semantics, thus x is a NaN iff x != x. |
3820 | localSet(op1-1, binaryIns(LIR_eqi, binaryIns(LIR_eqd, f, f), InsConst(0)), result); |
3821 | } |
3822 | } |
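     | // Illustration: by IEEE-754, NaN != NaN and every other double equals
     | // itself, so eqi(eqd(f, f), 0) is exactly the isNaN predicate; the
     | // isPromote shortcut applies because i2d/ui2d can never produce a NaN.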
3823 | |
3824 | void CodegenLIR::emitIntMathMin(Traits* result) |
3825 | { |
3826 | // if (x < y) |
3827 | // return x |
3828 | // else |
3829 | // return y |
3830 | int op1 = state->sp(); |
3831 | int op2 = state->sp() - 1; |
3832 | LIns *x = getSpecializedArg(op1, BUILTIN_int); |
3833 | LIns *y = getSpecializedArg(op2, BUILTIN_int); |
3834 | LIns *s1 = binaryIns(LIR_lti, x, y); |
3835 | LIns *s2 = lirout->insChoose(s1, x, y, use_cmov); |
3836 | // coerceNumberToInt will remove a d2i/i2d roundtrip |
3837 | localSet(op1-2, i2dIns(s2), result); |
3838 | } |
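     | // Illustration: insChoose selects x or y with a conditional move where
     | // use_cmov allows, avoiding a branch; the i2dIns widening preserves the
     | // Number-typed result contract, and coerceNumberToInt strips the round
     | // trip again when the result feeds an int context.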
3839 | |
3840 | void CodegenLIR::emitIntMathMax(Traits* result) |
3841 | { |
3842 | // if (x > y) |
3843 | // return x |
3844 | // else |
3845 | // return y |
3846 | int op1 = state->sp(); |
3847 | int op2 = state->sp() - 1; |
3848 | LIns *x = getSpecializedArg(op1, BUILTIN_int); |
3849 | LIns *y = getSpecializedArg(op2, BUILTIN_int); |
3850 | LIns *s1 = binaryIns(LIR_gti, x, y); |
3851 | LIns *s2 = lirout->insChoose(s1, x, y, use_cmov); |
3852 | // coerceNumberToInt will remove a d2i/i2d roundtrip |
3853 | localSet(op1-2, i2dIns(s2), result); |
3854 | } |
3855 | |
3856 | void CodegenLIR::emitMathAbs(Traits* result) |
3857 | { |
3858 | int op1 = state->sp(); |
3859 | LIns *arg = localGetf(op1); |
3860 | // inline asm for Math.abs |
3861 | // result = arg |
3862 | // if (!(arg > 0.0)) |
3863 | // result = -arg // NaN and -0.0 get here too. |
3864 | // |
3865 | // We do not have an optimized integer path because Math.abs(-2147483648)
3866 | // generates a floating point result. (Math.abs(0x80000000) > MAX_INT) |
3867 | CodegenLabel done("done"); |
3868 | suspendCSE(); |
3869 | localSet(op1-1, arg, result); |
3870 | |
3871 | LIns *s2 = binaryIns(LIR_gtd, arg, lirout->insImmD(0.0)); |
3872 | branchToLabel(LIR_jt, s2, done); |
3873 | |
3874 | localSet(op1-1, Ins(LIR_negd, arg), result); |
3875 | |
3876 | emitLabel(done); |
3877 | resumeCSE(); |
3878 | } |
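     | // Note the branch sense: !(arg > 0.0) is also true for NaN and -0.0, so
     | // -0.0 is negated to +0.0 and negd(NaN) remains NaN, matching Math.abs.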
3879 | |
3880 | void CodegenLIR::emitStringLength(Traits* result) |
3881 | { |
3882 | int op1 = state->sp(); |
3883 | LIns* ptr = loadIns(LIR_ldi, offsetof(String, m_length), localGetp(op1), ACCSET_OTHER, LOAD_CONST);
3884 | localSet(op1, ptr, result); |
3885 | } |
3886 | |
3887 | // Determine a mask for our argument that indicates to which types it may be trivially converted |
3888 | // without insertion of additional instructions and without loss. This information may allow us |
3889 | // to substitute a numeric operation on a more specific type than that inferred by the verifier. |
3890 | // For example: |
3891 | // A number that has been promoted from an integer will be marked as a number|int. |
3892 | // A constant number that is an unsigned integer will be marked as a number|uint|int |
3893 | int32_t CodegenLIR::determineBuiltinMaskForArg (int argOffset) |
3894 | { |
3895 | BuiltinType bt = this->bt(state->value(argOffset).traits); |
3896 | int32_t btMask = 1 << bt; |
3897 | if (bt == BUILTIN_number) { |
3898 | LIns *arg = localGetf(argOffset); |
3899 | if (arg->isImmD()) { |
3900 | int32_t intVal = (int32_t) arg->immD(); |
3901 | if ((double) intVal == arg->immD() && !MathUtils::isNegZero(arg->immD())) { |
3902 | if (intVal >= 0) |
3903 | btMask |= 1 << BUILTIN_uint; |
3904 | btMask |= 1 << BUILTIN_int; |
3905 | } |
3906 | } |
3907 | else if (arg->opcode() == LIR_i2d) |
3908 | btMask |= 1 << BUILTIN_int; |
3909 | else if (arg->opcode() == LIR_ui2d) |
3910 | btMask |= 1 << BUILTIN_uint; |
3911 | } |
3912 | else if (bt == BUILTIN_int) { |
3913 | LIns *arg = localGet(argOffset); |
3914 | if (arg->isImmI() && arg->immI() >= 0) |
3915 | btMask |= 1 << BUILTIN_uint; |
3916 | } |
3917 | |
3918 | return btMask; |
3919 | } |
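     | // Examples: an immD of 3.0 is marked number|uint|int; an immD of -1.0 is
     | // number|int; a value built by LIR_ui2d is number|uint; and an int-typed
     | // immI of -5 stays just int, since a negative constant cannot stand in
     | // for a uint.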
3920 | |
3921 | // Given an argument and a builtin type to which it may be trivially converted, |
3922 | // return a LIns that represents the argument as a value of the correct machine type. |
3923 | LIns* CodegenLIR::getSpecializedArg (int argOffset, BuiltinType newBt) |
3924 | { |
3925 | BuiltinType oldBt = this->bt(state->value(argOffset).traits); |
3926 | |
3927 | if (oldBt == BUILTIN_number) { |
3928 | LIns *arg = localGetf(argOffset); |
3929 | if (newBt == BUILTIN_int) { |
3930 | if (arg->isImmD()) |
3931 | return InsConst((int32_t)arg->immD()); |
3932 | else if (arg->opcode() == LIR_i2d) |
3933 | return arg->oprnd1(); |
3934 | else |
3935 | AvmAssert(0);
3936 | } |
3937 | else if (newBt == BUILTIN_uint) { |
3938 | if (arg->isImmD()) |
3939 | return InsConst((int32_t)arg->immD()); |
3940 | else if (arg->opcode() == LIR_ui2d) |
3941 | return arg->oprnd1(); |
3942 | else |
3943 | AvmAssert(0);
3944 | } |
3945 | } |
3946 | |
3947 | switch (newBt) { |
3948 | case BUILTIN_number: |
3949 | return localGetf(argOffset); |
3950 | case BUILTIN_boolean: |
3951 | case BUILTIN_int: |
3952 | case BUILTIN_uint: |
3953 | return localGet(argOffset); |
3954 | default: |
3955 | return localGetp(argOffset); |
3956 | } |
3957 | } |
3958 | |
3959 | // For a given builtin function id and argument count, matching of argument lists occurs in the order in which the |
3960 | // entries appear in the table below. More specialized variants that are expected to execute faster should be listed |
3961 | // prior to the more general cases. If no argument list matches, specialization is not performed. |
3962 | |
3963 | const CodegenLIR::FunctionMatch CodegenLIR::specializedFunctions[] = |
3964 | { |
3965 | { 0, /* dummy entry so 0 can be treated as HashMap miss*/ 1, {}, 0, 0}, |
3966 | { avmplus::NativeID::native_script_function_isNaN, 1, {BUILTIN_number, BUILTIN_none}, 0, &CodegenLIR::emitIsNaN }, |
3967 | |
3968 | { avmplus::NativeID::String_AS3_charCodeAt, 1, {BUILTIN_uint, BUILTIN_none}, FUNCTIONID(String_charCodeAtFU), 0},
3969 | { avmplus::NativeID::String_AS3_charCodeAt, 1, {BUILTIN_int, BUILTIN_none}, FUNCTIONID(String_charCodeAtFI), 0},
3970 | { avmplus::NativeID::String_AS3_charCodeAt, 1, {BUILTIN_number, BUILTIN_none}, FUNCTIONID(String_charCodeAtFF), 0},
3971 | { avmplus::NativeID::String_AS3_charAt, 1, {BUILTIN_uint, BUILTIN_none}, FUNCTIONID(String_charAtU), 0},
3972 | { avmplus::NativeID::String_AS3_charAt, 1, {BUILTIN_int, BUILTIN_none}, FUNCTIONID(String_charAtI), 0},
3973 | { avmplus::NativeID::String_AS3_charAt, 1, {BUILTIN_number, BUILTIN_none}, FUNCTIONID(String_charAtF), 0},
3974 |
3975 | { avmplus::NativeID::String_length_get, 0, {BUILTIN_none, BUILTIN_none}, 0, &CodegenLIR::emitStringLength},
3976 |
3977 | { avmplus::NativeID::Math_acos, 1, {BUILTIN_number, BUILTIN_none}, FUNCTIONID(Math_acos), 0},
3978 | { avmplus::NativeID::Math_asin, 1, {BUILTIN_number, BUILTIN_none}, FUNCTIONID(Math_asin), 0},
3979 | { avmplus::NativeID::Math_atan, 1, {BUILTIN_number, BUILTIN_none}, FUNCTIONID(Math_atan), 0},
3980 | { avmplus::NativeID::Math_ceil, 1, {BUILTIN_number, BUILTIN_none}, FUNCTIONID(Math_ceil), 0},
3981 | { avmplus::NativeID::Math_cos, 1, {BUILTIN_number, BUILTIN_none}, FUNCTIONID(Math_cos), 0},
3982 | { avmplus::NativeID::Math_exp, 1, {BUILTIN_number, BUILTIN_none}, FUNCTIONID(Math_exp), 0},
3983 | { avmplus::NativeID::Math_floor, 1, {BUILTIN_number, BUILTIN_none}, FUNCTIONID(Math_floor), 0},
3984 | { avmplus::NativeID::Math_log, 1, {BUILTIN_number, BUILTIN_none}, FUNCTIONID(Math_log), 0},
3985 | { avmplus::NativeID::Math_round, 1, {BUILTIN_number, BUILTIN_none}, FUNCTIONID(Math_round), 0},
3986 | { avmplus::NativeID::Math_sin, 1, {BUILTIN_number, BUILTIN_none}, FUNCTIONID(Math_sin), 0},
3987 | { avmplus::NativeID::Math_sqrt, 1, {BUILTIN_number, BUILTIN_none}, FUNCTIONID(Math_sqrt), 0},
3988 | { avmplus::NativeID::Math_tan, 1, {BUILTIN_number, BUILTIN_none}, FUNCTIONID(Math_tan), 0},
3989 |
3990 | { avmplus::NativeID::Math_atan2, 2, {BUILTIN_number, BUILTIN_number}, FUNCTIONID(Math_atan2), 0},
3991 | { avmplus::NativeID::Math_pow, 2, {BUILTIN_number, BUILTIN_number}, FUNCTIONID(Math_pow), 0},
3992 | |
3993 | { avmplus::NativeID::Math_abs, 1, {BUILTIN_number, BUILTIN_none}, 0, &CodegenLIR::emitMathAbs}, |
3994 | |
3995 | { avmplus::NativeID::Math_min, 2, {BUILTIN_int, BUILTIN_int}, 0, &CodegenLIR::emitIntMathMin}, |
3996 | { avmplus::NativeID::Math_max, 2, {BUILTIN_int, BUILTIN_int}, 0, &CodegenLIR::emitIntMathMax}, |
3997 | { avmplus::NativeID::Math_private__min, 2, {BUILTIN_int, BUILTIN_int}, 0, &CodegenLIR::emitIntMathMin}, |
3998 | { avmplus::NativeID::Math_private__max, 2, {BUILTIN_int, BUILTIN_int}, 0, &CodegenLIR::emitIntMathMax}, |
3999 | |
4000 | // We do not inline the non-integral cases of min and max due to the complexity of handling -0 correctly. |
4001 | // See MathClass::_min() and MathClass::_max(). |
4002 | |
4003 | // Unsupported because it uses MathClass::seed |
4004 | //{ avmplus::NativeID::Math_random, 0, {BUILTIN_none, BUILTIN_none}, FUNCTIONID(Math_random), 0}, |
4005 | }; |
4006 | |
4007 | static uint32_t genFunctionKey (int32_t methodId, int32_t argCount) |
4008 | { |
4009 | return uint32_t(methodId | (argCount << 16)); |
4010 | } |
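     | // Example: methodId 0x12 with argCount 2 packs to 0x00020012; method ids
     | // of builtins are assumed to fit in 16 bits, so distinct (id, argc) pairs
     | // map to distinct keys.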
4011 | |
4012 | // Our builtinFunctionOptimizerHashMap maps from a [nativeId, argCount] key into the |
4013 | // starting index in our specializedFunctions table. The inlineBuiltinFunction function |
4014 | // will then iterate over our specializedFunctions table for all matching [nativeId, argCount] |
4015 | // pairs looking for a match to argument types. |
4016 | void CodegenLIR::genBuiltinFunctionOptimizerHashMap() |
4017 | { |
4018 | builtinFunctionOptimizerHashMap = new (*alloc1) HashMap<uint32_t, uint32_t>(*alloc1, 100); |
4019 | |
4020 | uint32_t funcKey = 0; |
4021 | for (uint32_t i = 0; i < sizeof(specializedFunctions) / sizeof(FunctionMatch); i++) { |
4022 | uint32_t newFuncKey = genFunctionKey (specializedFunctions[i].methodId, specializedFunctions[i].argCount); |
4023 | if (newFuncKey != funcKey) { |
4024 | funcKey = newFuncKey; |
4025 | builtinFunctionOptimizerHashMap->put(funcKey, i); |
4026 | } |
4027 | } |
4028 | } |
4029 | |
4030 | bool CodegenLIR::inlineBuiltinFunction(AbcOpcode, intptr_t, int argc, Traits* result, MethodInfo* mi)
4031 | {
4032 | if (haveDebugger)
4033 | return false;
4034 |
4035 | if (mi->pool() != core->builtinPool || !mi->isFinal())
4036 | return false;
4037 |
4038 | if (!builtinFunctionOptimizerHashMap)
4039 | genBuiltinFunctionOptimizerHashMap();
4040 |
4041 | uint32_t funcKey = genFunctionKey(mi->method_id(), argc);
4042 | uint32_t startingIndex = builtinFunctionOptimizerHashMap->get(funcKey);
4043 | if (!startingIndex)
4044 | return false;
4045 | |
4046 | int count = sizeof(specializedFunctions) / sizeof(FunctionMatch); |
4047 | for (int i = startingIndex; i < count; i++) { |
4048 | if (mi->method_id() != specializedFunctions[i].methodId) |
4049 | return false;
4050 |
4051 | if (argc != (int) specializedFunctions[i].argCount)
4052 | return false;
4053 |
4054 | // matching identifier, matching arg count, try to match argument types
4055 | bool bMatch = true;
4056 | for (int32_t argindex = 0; argindex < argc; argindex++) {
4057 | int32_t btMask = determineBuiltinMaskForArg(state->sp() - argindex);
4058 | if (!(btMask & (1 << specializedFunctions[i].argType[argindex]))) {
4059 | bMatch = false;
4060 | break; |
4061 | } |
4062 | } |
4063 | |
4064 | if (!bMatch) |
4065 | continue; |
4066 | |
4067 | if (specializedFunctions[i].newFunction) { |
4068 | int32_t sp = state->sp(); |
4069 | // No 'this' param |
4070 | if (specializedFunctions[i].newFunction->count_args() == (uint32_t) argc) { |
4071 | if (argc == 1) |
4072 | localSet(sp-1, callIns(specializedFunctions[i].newFunction, 1, |
4073 | getSpecializedArg(sp, specializedFunctions[i].argType[0])), result); |
4074 | else if (argc == 2) |
4075 | localSet(sp-2, callIns(specializedFunctions[i].newFunction, 2, |
4076 | getSpecializedArg(sp-1, specializedFunctions[i].argType[1]), |
4077 | getSpecializedArg(sp, specializedFunctions[i].argType[0])), result); |
4078 | } |
4079 | else { |
4080 | if (argc == 1) |
4081 | localSet(sp-1, callIns(specializedFunctions[i].newFunction, 2, localGetp(sp-1), |
4082 | getSpecializedArg(sp, specializedFunctions[i].argType[0])), result); |
4083 | else if (argc == 2) |
4084 | localSet(sp-2, callIns(specializedFunctions[i].newFunction, 3, localGetp(sp-2), |
4085 | getSpecializedArg(sp-1, specializedFunctions[i].argType[1]), |
4086 | getSpecializedArg(sp, specializedFunctions[i].argType[0])), result); |
4087 | } |
4088 | } |
4089 | else { |
4090 | // Invoke emitFunction as member function pointer. |
4091 | EmitMethod emitter = specializedFunctions[i].emitFunction; |
4092 | (this->*emitter)(result); |
4093 | } |
4094 | |
4095 | return true;
4096 | } |
4097 | |
4098 | return false;
4099 | } |
4100 | |
4101 | #ifdef DEBUG |
4102 | /** |
4103 | * emitTypedCall is used when the Verifier has found an opportunity to early bind, |
4104 | * and has already coerced arguments from whatever native type is discovered, to |
4105 | * the required types. emitTypedCall() then just double-checks (via assert) that |
4106 | * the arg types are already correct. |
4107 | */ |
4108 | void CodegenLIR::emitTypedCall(AbcOpcode opcode, intptr_t method_id, int argc, Traits* result, MethodInfo* mi) |
4109 | { |
4110 | AvmAssert(opcode != OP_construct);
4111 | AvmAssert(state->value(state->sp() - argc).notNull); // make sure null check happened
4112 |
4113 | MethodSignaturep ms = mi->getMethodSignature();
4114 | AvmAssert(ms->argcOk(argc));
4115 | int objDisp = state->sp() - argc;
4116 | for (int arg = 0; arg <= argc && arg <= ms->param_count(); arg++)
4117 | AvmAssert(Traits::canAssign(state->value(objDisp+arg).traits, ms->paramTraits(arg)));
4118 | for (int arg = ms->param_count()+1; arg <= argc; arg++) {
4119 | BuiltinType t = bt(state->value(objDisp+arg).traits);
4120 | AvmAssert(valueStorageType(t) == SST_atom);
4121 | } |
4122 | |
4123 | if (inlineBuiltinFunction(opcode, method_id, argc, result, mi)) |
4124 | return; |
4125 | |
4126 | emitCall(opcode, method_id, argc, result, ms); |
4127 | } |
4128 | #else |
4129 | REALLY_INLINE void CodegenLIR::emitTypedCall(AbcOpcode opcode, intptr_t method_id, int argc, Traits* result, MethodInfo* mi)
4130 | { |
4131 | if (inlineBuiltinFunction(opcode, method_id, argc, result, mi)) |
4132 | return; |
4133 | |
4134 | emitCall(opcode, method_id, argc, result, mi->getMethodSignature()); |
4135 | } |
4136 | #endif |
4137 | |
4138 | void CodegenLIR::emitCall(AbcOpcode opcode, intptr_t method_id, int argc, Traits* result, MethodSignaturep ms) |
4139 | { |
4140 | int objDisp = state->sp() - argc; |
4141 | LIns* obj = localCopy(objDisp); |
4142 | Traits* objType = state->value(objDisp).traits; |
4143 | emitCall(opcode, method_id, argc, obj, objType, result, ms); |
4144 | } |
4145 | |
4146 | void CodegenLIR::emitCall(AbcOpcode opcode, intptr_t method_id, int argc, LIns* obj, Traits* objType, Traits* result, MethodSignaturep ms) |
4147 | { |
4148 | int sp = state->sp(); |
4149 | int dest = sp-argc; |
4150 | int objDisp = dest; |
4151 | |
4152 | LIns *method = NULL;
4153 | LIns *iid = NULL;
4154 | switch (opcode) |
4155 | { |
4156 | case OP_constructsuper: |
4157 | { |
4158 | // env->vtable->base->init->enter32v(argc, ...); |
4159 | LIns* vtable = loadEnvVTable(); |
4160 | LIns* base = loadIns(LIR_ldp, offsetof(VTable,base), vtable, ACCSET_OTHER, LOAD_CONST);
4161 | method = loadIns(LIR_ldp, offsetof(VTable,init), base, ACCSET_OTHER, LOAD_CONST);
4162 | break; |
4163 | } |
4164 | case OP_callmethod: |
4165 | { |
4166 | // stack in: obj arg1..N |
4167 | // stack out: result |
4168 | // sp[-argc] = callmethod(disp_id, argc, ...); |
4169 | // method_id is disp_id of virtual method |
4170 | LIns* vtable = loadVTable(obj, objType); |
4171 | method = loadIns(LIR_ldp, int32_t(offsetof(VTable,methods)+sizeof(MethodEnv*)*method_id), vtable, ACCSET_OTHER, LOAD_CONST);
4172 | break; |
4173 | } |
4174 | case OP_callsuperid: |
4175 | { |
4176 | // stack in: obj arg1..N |
4177 | // stack out: result |
4178 | // method_id is disp_id of super method |
4179 | LIns* declvtable = loadEnvVTable(); |
4180 | LIns* basevtable = loadIns(LIR_ldp, offsetof(VTable, base), declvtable, ACCSET_OTHER, LOAD_CONST);
4181 | method = loadIns(LIR_ldp, int32_t(offsetof(VTable,methods)+sizeof(MethodEnv*)*method_id), basevtable, ACCSET_OTHER, LOAD_CONST);
4182 | break; |
4183 | } |
4184 | case OP_callstatic: |
4185 | { |
4186 | // stack in: obj arg1..N |
4187 | // stack out: result |
4188 | LIns* abcenv = loadEnvAbcEnv(); |
4189 | method = loadIns(LIR_ldp, int32_t(offsetof(AbcEnv,m_methods)+sizeof(MethodEnv*)*method_id), abcenv, ACCSET_OTHER, LOAD_CONST);
4190 | break; |
4191 | } |
4192 | case OP_callinterface: |
4193 | { |
4194 | // method_id is pointer to interface method name (multiname) |
4195 | uint32_t index = ImtHolder::hashIID(method_id); |
4196 | LIns* vtable = loadVTable(obj, objType); |
4197 | // note, could be MethodEnv* or ImtThunkEnv* |
4198 | method = loadIns(LIR_ldp, offsetof(VTable, imt.entries) + index * sizeof(ImtThunkEnv*), vtable, ACCSET_OTHER, LOAD_CONST);
4199 | iid = InsConstPtr((void*)method_id); |
4200 | break; |
4201 | } |
4202 | case OP_construct: |
4203 | { |
4204 | // stack in: ctor arg1..N |
4205 | // stack out: newinstance |
4206 | LIns* vtable = loadVTable(obj, objType); |
4207 | LIns* ivtable = loadIns(LIR_ldp, offsetof(VTable, ivtable), vtable, ACCSET_OTHER, LOAD_CONST);
4208 | method = loadIns(LIR_ldp, offsetof(VTable, init), ivtable, ACCSET_OTHER, LOAD_CONST);
4209 | LIns* createInstanceProc = loadIns(LIR_ldp, offsetof(VTable, createInstanceProc), ivtable, ACCSET_OTHER);
4210 | obj = callIns(FUNCTIONID(createInstanceProc), 2, createInstanceProc, obj);
4211 | objType = result; |
Value stored to 'objType' is never read | |
4212 | // the call below to the init function is void; the expression result we want |
4213 | // is the new object, not the result from the init function. save it now. |
4214 | localSet(dest, obj, result); |
4215 | break; |
4216 | } |
4217 | default: |
4218 | AvmAssert(false);
4219 | } |
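| // Each early-bound call form above reduces to the same shape: a MethodEnv*
| // fetched from a statically known location (vtable slot, base vtable, AbcEnv
| // method table, or IMT entry). Only OP_callinterface also carries an iid,
| // presumably so the IMT thunk can disambiguate hashed interface entries.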
4220 | |
4221 | // store args for the call |
4222 | LIns* ap = insAlloc(sizeof(Atom)); // we will update this size, below |
4223 | int disp = 0; |
4224 | int pad = 0; |
4225 | |
4226 | int param_count = ms->param_count(); |
4227 | // LIR_allocp of any size >= 8 is always 8-aligned. |
4228 | // if the first double arg would be unaligned, add padding to align it. |
4229 | #if !defined AVMPLUS_64BIT |
4230 | for (int i=0; i <= argc && i <= param_count; i++) { |
4231 | if (ms->paramTraits(i) == NUMBER_TYPE) {
4232 | if ((disp&7) != 0) { |
4233 | // this double would be unaligned, so add some padding |
4234 | pad = 8-(disp&7); // should be 4 |
4235 | } |
4236 | break; |
4237 | } |
4238 | else { |
4239 | disp += sizeof(Atom); |
4240 | } |
4241 | } |
4242 | #endif |
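| // Worked example of the padding rule above (32-bit, sizeof(Atom) == 4):
| // for a signature like (this, Number), disp is 4 when the Number parameter
| // is reached, so pad = 8 - (disp&7) = 4 and the double lands 8-aligned at
| // offset 8 in the LIR_allocp area.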
4243 | |
4244 | disp = pad; |
4245 | for (int i=0, index=objDisp; i <= argc; i++, index++) { |
4246 | Traits* paramType = i <= param_count ? ms->paramTraits(i) : NULL;
4247 | LIns* v; |
4248 | switch (bt(paramType)) { |
4249 | case BUILTIN_number: |
4250 | v = (i == 0) ? obj : lirout->insLoad(LIR_ldd, vars, index * VARSIZE, ACCSET_VARS); |
4251 | std(v, ap, disp, ACCSET_OTHER); |
4252 | disp += sizeof(double); |
4253 | break; |
4254 | case BUILTIN_int: |
4255 | v = (i == 0) ? obj : lirout->insLoad(LIR_ldi, vars, index * VARSIZE, ACCSET_VARS); |
4256 | stp(i2p(v), ap, disp, ACCSET_OTHER); |
4257 | disp += sizeof(intptr_t); |
4258 | break; |
4259 | case BUILTIN_uint: |
4260 | case BUILTIN_boolean: |
4261 | v = (i == 0) ? obj : lirout->insLoad(LIR_ldi, vars, index * VARSIZE, ACCSET_VARS); |
4262 | stp(ui2p(v), ap, disp, ACCSET_OTHER); |
4263 | disp += sizeof(uintptr_t); |
4264 | break; |
4265 | default: |
4266 | v = (i == 0) ? obj : lirout->insLoad(LIR_ldp, vars, index * VARSIZE, ACCSET_VARS); |
4267 | stp(v, ap, disp, ACCSET_OTHER); |
4268 | disp += sizeof(void*); |
4269 | break; |
4270 | } |
4271 | } |
4272 | |
4273 | // patch the size to what we actually need |
4274 | ap->setSize(disp); |
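| // ap is created with a placeholder size of sizeof(Atom) because the true
| // outgoing-args size (pad plus all stored args) is only known after the
| // loop above; setSize() retroactively grows the alloc to 'disp' bytes.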
4275 | |
4276 | LIns* target = loadIns(LIR_ldp, offsetof(MethodEnvProcHolder,_implGPR), method, ACCSET_OTHER);
4277 | LIns* apAddr = leaIns(pad, ap); |
4278 | |
4279 | LIns *out; |
4280 | BuiltinType rbt = bt(result); |
4281 | if (!iid) { |
4282 | const CallInfo *fid; |
4283 | switch (rbt) { |
4284 | case BUILTIN_number: |
4285 | fid = FUNCTIONID(fcalli);
4286 | break;
4287 | case BUILTIN_int: case BUILTIN_uint: case BUILTIN_boolean:
4288 | fid = FUNCTIONID(icalli);
4289 | break;
4290 | default:
4291 | fid = FUNCTIONID(acalli);
4292 | break;
4293 | }
4294 | out = callIns(fid, 4, target, method, InsConst(argc), apAddr);
4295 | } else {
4296 | const CallInfo *fid;
4297 | switch (rbt) {
4298 | case BUILTIN_number:
4299 | fid = FUNCTIONID(fcallimt);
4300 | break;
4301 | case BUILTIN_int: case BUILTIN_uint: case BUILTIN_boolean:
4302 | fid = FUNCTIONID(icallimt);
4303 | break;
4304 | default:
4305 | fid = FUNCTIONID(acallimt);
4306 | break; |
4307 | } |
4308 | out = callIns(fid, 5, target, method, InsConst(argc), apAddr, iid); |
4309 | } |
4310 | |
4311 | // ensure the stack-allocated args are live until after the call |
4312 | liveAlloc(ap); |
4313 | |
4314 | if (opcode != OP_constructsuper && opcode != OP_construct) |
4315 | localSet(dest, out, result); |
4316 | } |
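| // The call is indirect: 'target' is the MethodEnv's _implGPR entry point and
| // 'method' (the MethodEnv* itself) is passed back as the first argument,
| // presumably giving an ABI of (MethodEnv*, argc, args). The fcalli/icalli/
| // acalli variants differ only in return kind (double, int-sized, atom or
| // pointer); the *imt variants append the interface iid as a fifth argument.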
4317 | |
4318 | LIns* CodegenLIR::loadFromSlot(int ptr_index, int slot, Traits* slotType) |
4319 | { |
4320 | Traits *t = state->value(ptr_index).traits; |
4321 | LIns *ptr = localGetp(ptr_index); |
4322 | AvmAssert(state->value(ptr_index).notNull);
4323 | AvmAssert(isPointer((int)ptr_index)); // obj
4324 | 
4325 | AvmAssert(t->isResolved());
4326 | const TraitsBindingsp tb = t->getTraitsBindings(); |
4327 | int offset = tb->getSlotOffset(slot); |
4328 | |
4329 | // get |
4330 | LOpcode op; |
4331 | switch (bt(slotType)) { |
4332 | case BUILTIN_number: op = LIR_ldd; break; |
4333 | case BUILTIN_int: |
4334 | case BUILTIN_uint: |
4335 | case BUILTIN_boolean: op = LIR_ldi; break; |
4336 | default: op = LIR_ldp; break; |
4337 | } |
4338 | return loadIns(op, offset, ptr, ACCSET_OTHER); |
4339 | } |
4340 | |
4341 | void CodegenLIR::emitGetslot(int slot, int ptr_index, Traits *slotType) |
4342 | { |
4343 | localSet(ptr_index, loadFromSlot(ptr_index, slot, slotType), slotType); |
4344 | } |
4345 | |
4346 | void CodegenLIR::emitSetslot(AbcOpcode opcode, int slot, int ptr_index) |
4347 | { |
4348 | emitSetslot(opcode, slot, ptr_index, localCopy(state->sp())); |
4349 | } |
4350 | |
4351 | void CodegenLIR::emitSetslot(AbcOpcode opcode, int slot, int ptr_index, LIns* value) |
4352 | { |
4353 | Traits* t; |
4354 | LIns* ptr; |
4355 | |
4356 | if (opcode == OP_setslot) |
4357 | { |
4358 | t = state->value(ptr_index).traits; |
4359 | ptr = localGetp(ptr_index); |
4360 | AvmAssert(state->value(ptr_index).notNull);
4361 | AvmAssert(isPointer((int)ptr_index)); // obj
4362 | } |
4363 | else |
4364 | { |
4365 | // setglobalslot |
4366 | const ScopeTypeChain* scopeTypes = info->declaringScope(); |
4367 | if (scopeTypes->size == 0) |
4368 | { |
4369 | // no captured scopes, so global is local scope 0 |
4370 | ptr_index = ms->scope_base(); |
4371 | t = state->value(ptr_index).traits; |
4372 | ptr = localGetp(ptr_index); |
4373 | AvmAssert(state->value(ptr_index).notNull);
4374 | AvmAssert(isPointer((int)ptr_index)); // obj
4375 | } |
4376 | else |
4377 | { |
4378 | // global is outer scope 0 |
4379 | t = scopeTypes->getScopeTraitsAt(0); |
4380 | LIns* scope = loadEnvScope(); |
4381 | LIns* scopeobj = loadIns(LIR_ldp, offsetof(ScopeChain,_scopes) + 0*sizeof(Atom), scope, ACCSET_OTHER);
4382 | ptr = atomToNativeRep(t, scopeobj); |
4383 | } |
4384 | } |
4385 | |
4386 | AvmAssert(t->isResolved());
4387 | const TraitsBindingsp tb = t->getTraitsBindings(); |
4388 | int offset = tb->getSlotOffset(slot); |
4389 | |
4390 | LIns *unoffsetPtr = ptr; |
4391 | |
4392 | // if storing to a pointer-typed slot, inline a WB |
4393 | Traits* slotType = tb->getSlotTraits(slot); |
4394 | |
4395 | if (!slotType || !slotType->isMachineType() || slotType == OBJECT_TYPE)
4396 | {
4397 | // slot type is Atom (for *, Object) or RCObject* (String, Namespace, or other user types)
4398 | const CallInfo *wbAddr = FUNCTIONID(privateWriteBarrierRC);
4399 | if (slotType == NULL || slotType == OBJECT_TYPE) {
4400 | // use fast atom wb
4401 | wbAddr = FUNCTIONID(atomWriteBarrier);
4402 | } |
4403 | callIns(wbAddr, 4, |
4404 | InsConstPtr(core->GetGC()), |
4405 | unoffsetPtr, |
4406 | leaIns(offset, ptr), |
4407 | value); |
4408 | } |
4409 | else if (slotType == NUMBER_TYPE) {
4410 | // slot type is double
4411 | std(value, ptr, offset, ACCSET_OTHER);
4412 | } else {
4413 | AvmAssert(slotType == INT_TYPE || slotType == UINT_TYPE || slotType == BOOLEAN_TYPE);
4414 | sti(value, ptr, offset, ACCSET_OTHER); |
4415 | } |
4416 | } |
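| // Store policy: machine-typed slots (Number, int, uint, Boolean) are written
| // raw with std/sti; anything held as an Atom or RCObject* goes through a GC
| // write barrier, with atomWriteBarrier as the fast path for * and Object.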
4417 | |
4418 | /** |
4419 | * Emit a constructor call, or a late bound constructor call. |
4420 | * Early binding is possible when we know the constructor (class) being |
4421 | * used, and we know it doesn't override ClassClosure::construct(), |
4422 | * as indicated by the ctraits->hasCustomConstruct flag.
4423 | */ |
4424 | void CodegenLIR::emitConstruct(int argc, LIns* ctor, Traits* ctraits) |
4425 | { |
4426 | // Attempt to early bind to constructor method. |
4427 | Traits* itraits = NULL;
4428 | if (ctraits && (itraits = ctraits->itraits) != NULL &&
4429 | !ctraits->hasCustomConstruct) { |
4430 | // Inline the body of ClassClosure::construct() and early bind the call |
4431 | // to the constructor method, if it's resolved and argc is legal. |
4432 | // Cannot resolve signatures now because that could cause a premature verification failure, |
4433 | // one that should occur in the class's script-init. |
4434 | // If it's already resolved then we're good to go. |
4435 | if (itraits->init && itraits->init->isResolved() && itraits->init->getMethodSignature()->argcOk(argc)) { |
4436 | // The explicit null check will throw a different exception than |
4437 | // the generic call to op_construct below, or to similar paths through |
4438 | // interpreted code! |
4439 | emitCheckNull(ctor, ctraits); |
4440 | emitConstructCall(0, argc, ctor, ctraits); |
4441 | return; |
4442 | } |
4443 | } |
4444 | |
4445 | // Generic path: could not early bind to a constructor method. |
4446 | // stack in: ctor-object arg1..N |
4447 | // sp[-argc] = construct(env, sp[-argc], argc, null, arg1..N) |
4448 | int ctor_index = state->sp() - argc; |
4449 | LIns* func = nativeToAtom(ctor, ctraits); |
4450 | LIns* args = storeAtomArgs(InsConstAtom(nullObjectAtom), argc, ctor_index+1); |
4451 | LIns* newobj = callIns(FUNCTIONID(op_construct), 4, env_param, func, InsConst(argc), args);
4452 | liveAlloc(args); |
4453 | localSet(ctor_index, atomToNativeRep(itraits, newobj), itraits); |
4454 | } |
4455 | |
4456 | static const CallInfo* getArrayHelpers[VI_SIZE] =
4457 | { FUNCTIONID(ArrayObject_getUintProperty), FUNCTIONID(ArrayObject_getIntProperty), FUNCTIONID(ArrayObject_getDoubleProperty) };
4458 | 
4459 | static const CallInfo* getObjectVectorHelpers[VI_SIZE] =
4460 | { FUNCTIONID(ObjectVectorObject_getUintProperty), FUNCTIONID(ObjectVectorObject_getIntProperty), FUNCTIONID(ObjectVectorObject_getDoubleProperty) };
4461 | 
4462 | static const CallInfo* getIntVectorNativeHelpers[VI_SIZE] =
4463 | { FUNCTIONID(IntVectorObject_getNativeUintProperty), FUNCTIONID(IntVectorObject_getNativeIntProperty), FUNCTIONID(IntVectorObject_getNativeDoubleProperty) };
4464 | 
4465 | static const CallInfo* getIntVectorHelpers[VI_SIZE] =
4466 | { FUNCTIONID(IntVectorObject_getUintProperty), FUNCTIONID(IntVectorObject_getIntProperty), FUNCTIONID(IntVectorObject_getDoubleProperty) };
4467 | 
4468 | static const CallInfo* getUIntVectorNativeHelpers[VI_SIZE] =
4469 | { FUNCTIONID(UIntVectorObject_getNativeUintProperty), FUNCTIONID(UIntVectorObject_getNativeIntProperty), FUNCTIONID(UIntVectorObject_getNativeDoubleProperty) };
4470 | 
4471 | static const CallInfo* getUIntVectorHelpers[VI_SIZE] =
4472 | { FUNCTIONID(UIntVectorObject_getUintProperty), FUNCTIONID(UIntVectorObject_getIntProperty), FUNCTIONID(UIntVectorObject_getDoubleProperty) };
4473 | 
4474 | static const CallInfo* getDoubleVectorNativeHelpers[VI_SIZE] =
4475 | { FUNCTIONID(DoubleVectorObject_getNativeUintProperty), FUNCTIONID(DoubleVectorObject_getNativeIntProperty), FUNCTIONID(DoubleVectorObject_getNativeDoubleProperty) };
4476 | 
4477 | static const CallInfo* getDoubleVectorHelpers[VI_SIZE] =
4478 | { FUNCTIONID(DoubleVectorObject_getUintProperty), FUNCTIONID(DoubleVectorObject_getIntProperty), FUNCTIONID(DoubleVectorObject_getDoubleProperty) };
4479 | 
4480 | static const CallInfo* getGenericHelpers[VI_SIZE] =
4481 | { FUNCTIONID(getpropertylate_u), FUNCTIONID(getpropertylate_i), FUNCTIONID(getpropertylate_d) };
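| // Each helper table is indexed by IndexKind; the column order above suggests
| // VI_UINT = 0, VI_INT = 1, VI_DOUBLE = 2 (assuming the IndexKind enumerators
| // are declared in that order), with VI_SIZE == 3.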
4482 | |
4483 | // Generate code for get obj[index] where index is a signed or unsigned integer type, or double type. |
4484 | LIns* CodegenLIR::emitGetIndexedProperty(int objIndexOnStack, LIns* index, Traits* result, IndexKind idxKind) |
4485 | { |
4486 | Traits* objType = state->value(objIndexOnStack).traits; |
4487 | const CallInfo* getter = NULL;
4488 | bool valIsAtom = true;
4489 | 
4490 | if (objType == ARRAY_TYPE) {
4491 | getter = getArrayHelpers[idxKind];
4492 | }
4493 | else if (objType != NULL && objType->subtypeof(VECTOROBJ_TYPE)) {
4494 | getter = getObjectVectorHelpers[idxKind];
4495 | }
4496 | else if (objType == VECTORINT_TYPE) {
4497 | if (result == INT_TYPE) {
4498 | getter = getIntVectorNativeHelpers[idxKind];
4499 | valIsAtom = false;
4500 | }
4501 | else {
4502 | getter = getIntVectorHelpers[idxKind];
4503 | }
4504 | }
4505 | else if (objType == VECTORUINT_TYPE) {
4506 | if (result == UINT_TYPE) {
4507 | getter = getUIntVectorNativeHelpers[idxKind];
4508 | valIsAtom = false;
4509 | }
4510 | else {
4511 | getter = getUIntVectorHelpers[idxKind];
4512 | }
4513 | }
4514 | else if (objType == VECTORDOUBLE_TYPE) {
4515 | if (result == NUMBER_TYPE) {
4516 | getter = getDoubleVectorNativeHelpers[idxKind];
4517 | valIsAtom = false;
4518 | } |
4519 | else { |
4520 | getter = getDoubleVectorHelpers[idxKind]; |
4521 | } |
4522 | } |
4523 | if (getter) { |
4524 | LIns* value = callIns(getter, 2, localGetp(objIndexOnStack), index); |
4525 | return valIsAtom ? atomToNativeRep(result, value) : value; |
4526 | } |
4527 | else { |
4528 | LIns* value = callIns(getGenericHelpers[idxKind], 3, env_param, loadAtomRep(objIndexOnStack), index); |
4529 | return atomToNativeRep(result, value); |
4530 | } |
4531 | } |
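| // The *Native* helpers return the vector element unboxed, so when the
| // expected result type matches the element type (valIsAtom == false) the
| // atomToNativeRep conversion is skipped entirely; all other paths return an
| // Atom that must be unboxed to the caller's representation.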
4532 | |
4533 | static const CallInfo* setArrayHelpers[VI_SIZE] =
4534 | { FUNCTIONID(ArrayObject_setUintProperty), FUNCTIONID(ArrayObject_setIntProperty), FUNCTIONID(ArrayObject_setDoubleProperty) };
4535 | 
4536 | static const CallInfo* setObjectVectorHelpers[VI_SIZE] =
4537 | { FUNCTIONID(ObjectVectorObject_setUintProperty), FUNCTIONID(ObjectVectorObject_setIntProperty), FUNCTIONID(ObjectVectorObject_setDoubleProperty) };
4538 | 
4539 | static const CallInfo* setIntVectorNativeHelpers[VI_SIZE] =
4540 | { FUNCTIONID(IntVectorObject_setNativeUintProperty), FUNCTIONID(IntVectorObject_setNativeIntProperty), FUNCTIONID(IntVectorObject_setNativeDoubleProperty) };
4541 | 
4542 | static const CallInfo* setIntVectorHelpers[VI_SIZE] =
4543 | { FUNCTIONID(IntVectorObject_setUintProperty), FUNCTIONID(IntVectorObject_setIntProperty), FUNCTIONID(IntVectorObject_setDoubleProperty) };
4544 | 
4545 | static const CallInfo* setUIntVectorNativeHelpers[VI_SIZE] =
4546 | { FUNCTIONID(UIntVectorObject_setNativeUintProperty), FUNCTIONID(UIntVectorObject_setNativeIntProperty), FUNCTIONID(UIntVectorObject_setNativeDoubleProperty) };
4547 | 
4548 | static const CallInfo* setUIntVectorHelpers[VI_SIZE] =
4549 | { FUNCTIONID(UIntVectorObject_setUintProperty), FUNCTIONID(UIntVectorObject_setIntProperty), FUNCTIONID(UIntVectorObject_setDoubleProperty) };
4550 | 
4551 | static const CallInfo* setDoubleVectorNativeHelpers[VI_SIZE] =
4552 | { FUNCTIONID(DoubleVectorObject_setNativeUintProperty), FUNCTIONID(DoubleVectorObject_setNativeIntProperty), FUNCTIONID(DoubleVectorObject_setNativeDoubleProperty) };
4553 | 
4554 | static const CallInfo* setDoubleVectorHelpers[VI_SIZE] =
4555 | { FUNCTIONID(DoubleVectorObject_setUintProperty), FUNCTIONID(DoubleVectorObject_setIntProperty), FUNCTIONID(DoubleVectorObject_setDoubleProperty) };
4556 | 
4557 | static const CallInfo* setGenericHelpers[VI_SIZE] =
4558 | { FUNCTIONID(setpropertylate_u), FUNCTIONID(setpropertylate_i), FUNCTIONID(setpropertylate_d) };
4559 | |
4560 | // Generate code for 'obj[index] = value' where index is a signed or unsigned integer type, or a double. |
4561 | void CodegenLIR::emitSetIndexedProperty(int objIndexOnStack, int valIndexOnStack, LIns* index, IndexKind idxKind) |
4562 | { |
4563 | Traits* valueType = state->value(valIndexOnStack).traits; |
4564 | Traits* objType = state->value(objIndexOnStack).traits; |
4565 | const CallInfo* setter = NULL;
4566 | LIns* value = NULL;
4567 | 
4568 | if (objType == ARRAY_TYPE) {
4569 | value = loadAtomRep(valIndexOnStack);
4570 | setter = setArrayHelpers[idxKind];
4571 | }
4572 | else if (objType != NULL && objType->subtypeof(VECTOROBJ_TYPE)) {
4573 | value = loadAtomRep(valIndexOnStack);
4574 | setter = setObjectVectorHelpers[idxKind];
4575 | }
4576 | else if (objType == VECTORINT_TYPE) {
4577 | if (valueType == INT_TYPE) {
4578 | value = localGet(valIndexOnStack);
4579 | setter = setIntVectorNativeHelpers[idxKind];
4580 | }
4581 | else {
4582 | value = loadAtomRep(valIndexOnStack);
4583 | setter = setIntVectorHelpers[idxKind];
4584 | }
4585 | }
4586 | else if (objType == VECTORUINT_TYPE) {
4587 | if (valueType == UINT_TYPE) {
4588 | value = localGet(valIndexOnStack);
4589 | setter = setUIntVectorNativeHelpers[idxKind];
4590 | }
4591 | else {
4592 | value = loadAtomRep(valIndexOnStack);
4593 | setter = setUIntVectorHelpers[idxKind];
4594 | }
4595 | }
4596 | else if (objType == VECTORDOUBLE_TYPE) {
4597 | if (valueType == NUMBER_TYPE) {
4598 | value = localGetf(valIndexOnStack); |
4599 | setter = setDoubleVectorNativeHelpers[idxKind]; |
4600 | } |
4601 | else { |
4602 | value = loadAtomRep(valIndexOnStack); |
4603 | setter = setDoubleVectorHelpers[idxKind]; |
4604 | } |
4605 | } |
4606 | if (setter) { |
4607 | callIns(setter, 3, localGetp(objIndexOnStack), index, value); |
4608 | } else { |
4609 | value = loadAtomRep(valIndexOnStack); |
4610 | callIns(setGenericHelpers[idxKind], 4, env_param, loadAtomRep(objIndexOnStack), index, value); |
4611 | } |
4612 | } |
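| // Mirror image of the getter above: the value operand is loaded in whatever
| // representation the chosen helper expects -- localGet for int32, localGetf
| // for double, loadAtomRep otherwise -- and only the generic fallback boxes
| // both the object and the value as atoms.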
4613 | |
4614 | // Try to optimize our input argument to the fastest possible type. |
4615 | // Non-negative integer constants can be considered unsigned. |
4616 | // Promotions from int/uint to number can be used as integers. |
4617 | // Double constants that are actually integers can be used as integers. |
4618 | LIns *CodegenLIR::optimizeIndexArgumentType(int32_t sp, Traits** indexType) |
4619 | { |
4620 | LIns *index = NULL;
4621 | if (*indexType == INT_TYPE) {
4622 | index = localGet(sp);
4623 | if (index->isImmI() && index->immI() > 0) {
4624 | *indexType = UINT_TYPE;
4625 | }
4626 | 
4627 | return index;
4628 | }
4629 | else if (*indexType == UINT_TYPE) {
4630 | return localGet(sp);
4631 | }
4632 | // Convert Number expression to int or uint if it is a promotion
4633 | // from int or uint, or if it is a constant in range for int or uint.
4634 | else if (*indexType == NUMBER_TYPE) {
4635 | index = localGetf(sp);
4636 | if (index->opcode() == LIR_i2d) {
4637 | *indexType = INT_TYPE;
4638 | return index->oprnd1();
4639 | }
4640 | else if (index->opcode() == LIR_ui2d) {
4641 | *indexType = UINT_TYPE;
4642 | return index->oprnd1();
4643 | }
4644 | else if (index->isImmD())
4645 | {
4646 | double d = index->immD();
4647 | // Convert to uint if possible.
4648 | uint32_t u = uint32_t(d);
4649 | if (double(u) == d) {
4650 | *indexType = UINT_TYPE;
4651 | return InsConst(u);
4652 | }
4653 | // Failing that, it may still be possible to
4654 | // convert some negative values to int.
4655 | int32_t i = int32_t(d);
4656 | if (double(i) == d) {
4657 | *indexType = INT_TYPE;
4658 | return InsConst(i); |
4659 | } |
4660 | } |
4661 | } |
4662 | |
4663 | return index; |
4664 | } |
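| // Examples of the rewrites above: an immD index of 5.0 becomes InsConst(5)
| // typed uint; an index computed as LIR_i2d(x) is unwrapped back to x typed
| // int. Either way the caller can then select an integer-indexed helper
| // instead of the double-indexed one.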
4665 | |
4666 | void CodegenLIR::emit(AbcOpcode opcode, uintptr_t op1, uintptr_t op2, Traits* result) |
4667 | { |
4668 | int sp = state->sp(); |
4669 | |
4670 | switch (opcode) |
4671 | { |
4672 | // sign extends |
4673 | case OP_sxi1: |
4674 | case OP_sxi8: |
4675 | case OP_sxi16: |
4676 | { |
4677 | // straightforward shift based sign extension |
4678 | static const uint8_t kShiftAmt[3] = { 31, 24, 16 }; |
4679 | int32_t index = (int32_t) op1; |
4680 | LIns* val = localGet(index); |
4681 | if ((opcode == OP_sxi8 && val->opcode() == LIR_ldc2i) || |
4682 | (opcode == OP_sxi16 && val->opcode() == LIR_lds2i)) |
4683 | { |
4684 | // if we are sign-extending the result of a load-and-sign-extend |
4685 | // instruction, no need to do anything. |
4686 | break; |
4687 | } |
4688 | LIns* sh = InsConst(kShiftAmt[opcode - OP_sxi1]); |
4689 | LIns* shl = binaryIns(LIR_lshi, val, sh); |
4690 | LIns* res = binaryIns(LIR_rshi, shl, sh); |
4691 | localSet(index, res, result); |
4692 | break; |
4693 | } |
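| // e.g. OP_sxi8 uses kShiftAmt[1] == 24, producing (val << 24) >> 24 with an
| // arithmetic right shift -- the classic shift-pair sign extension of the
| // low 8 bits.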
4694 | |
4695 | // loads |
4696 | case OP_lix8: |
4697 | case OP_lix16: |
4698 | case OP_li8: |
4699 | case OP_li16: |
4700 | case OP_li32: |
4701 | { |
4702 | int32_t index = (int32_t) op1; |
4703 | LIns* mopAddr = localGet(index); |
4704 | const MopsInfo& mi = kMopsLoadInfo[opcode-OP_lix8]; |
4705 | #ifdef VMCFG_MOPS_USE_EXPANDED_LOADSTORE_INT |
4706 | int32_t disp = 0; |
4707 | LIns* realAddr = mopAddrToRangeCheckedRealAddrAndDisp(mopAddr, mi.size, &disp); |
4708 | LIns* i2 = loadIns(mi.op, disp, realAddr, ACCSET_OTHER); |
4709 | #else |
4710 | LIns* realAddr = mopAddrToRangeCheckedRealAddrAndDisp(mopAddr, mi.size, NULL);
4711 | LIns* i2 = callIns(mi.call, 1, realAddr); |
4712 | #endif |
4713 | localSet(index, i2, result); |
4714 | break; |
4715 | } |
4716 | |
4717 | case OP_lf32: |
4718 | case OP_lf64: |
4719 | { |
4720 | int32_t index = (int32_t) op1; |
4721 | LIns* mopAddr = localGet(index); |
4722 | const MopsInfo& mi = kMopsLoadInfo[opcode-OP_lix8]; |
4723 | #ifdef VMCFG_MOPS_USE_EXPANDED_LOADSTORE_FP |
4724 | int32_t disp = 0; |
4725 | LIns* realAddr = mopAddrToRangeCheckedRealAddrAndDisp(mopAddr, mi.size, &disp); |
4726 | LIns* i2 = loadIns(mi.op, disp, realAddr, ACCSET_OTHER); |
4727 | #else |
4728 | LIns* realAddr = mopAddrToRangeCheckedRealAddrAndDisp(mopAddr, mi.size, NULL);
4729 | LIns* i2 = callIns(mi.call, 1, realAddr); |
4730 | #endif |
4731 | localSet(index, i2, result); |
4732 | break; |
4733 | } |
4734 | |
4735 | // stores |
4736 | case OP_si8: |
4737 | case OP_si16: |
4738 | case OP_si32: |
4739 | { |
4740 | LIns* svalue = localGet(sp-1); |
4741 | LIns* mopAddr = localGet(sp); |
4742 | const MopsInfo& mi = kMopsStoreInfo[opcode-OP_si8]; |
4743 | #ifdef VMCFG_MOPS_USE_EXPANDED_LOADSTORE_INT |
4744 | int32_t disp = 0; |
4745 | LIns* realAddr = mopAddrToRangeCheckedRealAddrAndDisp(mopAddr, mi.size, &disp); |
4746 | lirout->insStore(mi.op, svalue, realAddr, disp, ACCSET_OTHER); |
4747 | #else |
4748 | LIns* realAddr = mopAddrToRangeCheckedRealAddrAndDisp(mopAddr, mi.size, NULL);
4749 | callIns(mi.call, 2, realAddr, svalue); |
4750 | #endif |
4751 | break; |
4752 | } |
4753 | |
4754 | case OP_sf32: |
4755 | case OP_sf64: |
4756 | { |
4757 | LIns* svalue = localGetf(sp-1); |
4758 | LIns* mopAddr = localGet(sp); |
4759 | const MopsInfo& mi = kMopsStoreInfo[opcode-OP_si8]; |
4760 | #ifdef VMCFG_MOPS_USE_EXPANDED_LOADSTORE_FP |
4761 | int32_t disp = 0; |
4762 | LIns* realAddr = mopAddrToRangeCheckedRealAddrAndDisp(mopAddr, mi.size, &disp); |
4763 | lirout->insStore(mi.op, svalue, realAddr, disp, ACCSET_OTHER); |
4764 | #else |
4765 | LIns* realAddr = mopAddrToRangeCheckedRealAddrAndDisp(mopAddr, mi.size, NULL);
4766 | callIns(mi.call, 2, realAddr, svalue); |
4767 | #endif |
4768 | break; |
4769 | } |
4770 | |
4771 | case OP_jump: |
4772 | { |
4773 | // spill everything first |
4774 | const uint8_t* target = (const uint8_t*) op1; |
4775 | |
4776 | #ifdef DEBUGGER |
4777 | Sampler* s = core->get_sampler(); |
4778 | if (s && s->sampling() && target < state->abc_pc) |
4779 | { |
4780 | emitSampleCheck(); |
4781 | } |
4782 | #endif |
4783 | |
4784 | branchToAbcPos(LIR_j, 0, target); |
4785 | break; |
4786 | } |
4787 | |
4788 | case OP_lookupswitch: |
4789 | { |
4790 | //int index = integer(*(sp--)); |
4791 | //pc += readS24(index < readU16(pc+4) ? |
4792 | // (pc+6+3*index) : // matched case |
4793 | // (pc+1)); // default |
4794 | int count = int(1 + op2); |
4795 | const uint8_t* targetpc = (const uint8_t*) op1; |
4796 | |
4797 | AvmAssert(state->value(sp).traits == INT_TYPE);
4798 | AvmAssert(count >= 0);
4799 | |
4800 | // Compute address of jump table |
4801 | const uint8_t* pc = 4 + state->abc_pc; |
4802 | AvmCore::readU32(pc); // skip count |
4803 | |
4804 | // Delete any trailing table entries that == default case (effective for asc output) |
4805 | while (count > 0 && targetpc == (state->abc_pc + AvmCore::readS24(pc+3*(count-1)))) |
4806 | count--; |
4807 | |
4808 | if (count > 0) { |
4809 | LIns* index = localGet(sp); |
4810 | LIns* cmp = binaryIns(LIR_ltui, index, InsConst(count)); |
4811 | branchToAbcPos(LIR_jf, cmp, targetpc); |
4812 | |
4813 | // count == 1 is equivalent to if (case) else (default), so don't bother with jtbl |
4814 | if (NJ_JTBL_SUPPORTED && count > 1) {
4815 | // Backend supports LIR_jtbl for jump tables |
4816 | LIns* jtbl = lirout->insJtbl(index, count); |
4817 | for (int i=0; i < count; i++) { |
4818 | const uint8_t* target = state->abc_pc + AvmCore::readS24(pc+3*i); |
4819 | patchLater(jtbl, target, i); |
4820 | } |
4821 | } else { |
4822 | // Backend doesn't support jump tables, use cascading if's |
4823 | for (int i=0; i < count; i++) { |
4824 | const uint8_t* target = state->abc_pc + AvmCore::readS24(pc+3*i); |
4825 | branchToAbcPos(LIR_jt, binaryIns(LIR_eqi, index, InsConst(i)), target); |
4826 | } |
4827 | } |
4828 | } |
4829 | else { |
4830 | // switch collapses into a single target |
4831 | branchToAbcPos(LIR_j, 0, targetpc); |
4832 | } |
4833 | break; |
4834 | } |
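| // Dispatch strategy: trailing cases equal to the default are trimmed first,
| // then an unsigned range check (LIR_ltui) routes out-of-range indices to the
| // default target; in-range dispatch is a single LIR_jtbl when the backend
| // supports it, or a chain of eqi/jt compares otherwise.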
4835 | |
4836 | case OP_returnvoid: |
4837 | case OP_returnvalue: |
4838 | { |
4839 | // ISSUE if a method has multiple returns this causes some bloat |
4840 | |
4841 | #ifdef DEBUGGER |
4842 | if (haveDebugger) { |
4843 | callIns(FUNCTIONID(debugExit), 2,
4844 | env_param, csn); |
4845 | // now we toast the cse and restore contents in order to |
4846 | // ensure that any variable modifications made by the debugger |
4847 | // will be pulled in. |
4848 | //firstCse = ip; |
4849 | } |
4850 | #endif // DEBUGGER |
4851 | |
4852 | if (driver->hasReachableExceptions()) |
4853 | { |
4854 | // _ef.endTry(); |
4855 | callIns(FUNCTIONID(endTry), 1, _ef);
4856 | } |
4857 | |
4858 | // replicate MethodFrame dtor inline -- must come after endTry call (if any) |
4859 | LIns* nextMethodFrame = loadIns(LIR_ldp, offsetof(MethodFrame,next), methodFrame, ACCSET_OTHER);
4860 | stp(nextMethodFrame, coreAddr, offsetof(AvmCore,currentMethodFrame), ACCSET_OTHER);
4861 | |
4862 | Traits* t = ms->returnTraits(); |
4863 | LIns* retvalue; |
4864 | if (opcode == OP_returnvalue) |
4865 | { |
4866 | // already coerced to required native type |
4867 | // use localCopy() to sniff type and use appropriate load instruction |
4868 | int32_t index = (int32_t) op1; |
4869 | retvalue = localCopy(index); |
4870 | } |
4871 | else |
4872 | { |
4873 | retvalue = undefConst; |
4874 | if (t && t != VOID_TYPE)
4875 | {
4876 | // implicitly coerce undefined to the return type
4877 | retvalue = callIns(FUNCTIONID(coerce), 3,
4878 | env_param, retvalue, InsConstPtr(t)); |
4879 | retvalue = atomToNativeRep(t, retvalue); |
4880 | } |
4881 | } |
4882 | switch (bt(t)) { |
4883 | case BUILTIN_number: |
4884 | Ins(LIR_retd, retvalue); |
4885 | break; |
4886 | case BUILTIN_int: |
4887 | retp(i2p(retvalue)); |
4888 | break; |
4889 | case BUILTIN_uint: |
4890 | case BUILTIN_boolean: |
4891 | retp(ui2p(retvalue)); |
4892 | break; |
4893 | default: |
4894 | retp(retvalue); |
4895 | break; |
4896 | } |
4897 | break; |
4898 | } |
4899 | |
4900 | case OP_typeof: |
4901 | { |
4902 | //sp[0] = typeof(sp[0]); |
4903 | int32_t index = (int32_t) op1; |
4904 | LIns* value = loadAtomRep(index); |
4905 | LIns* i3 = callIns(FUNCTIONID(typeof), 2,
4906 | coreAddr, value);
4907 | AvmAssert(result == STRING_TYPE);
4908 | localSet(index, i3, result); |
4909 | break; |
4910 | } |
4911 | |
4912 | case OP_not: |
4913 | { |
4914 | int32_t index = (int32_t) op1; |
4915 | AvmAssert(state->value(index).traits == BOOLEAN_TYPE && result == BOOLEAN_TYPE);
4916 | LIns* value = localGet(index); // 0 or 1 |
4917 | LIns* i3 = eqi0(value); // 1 or 0 |
4918 | localSet(index, i3, result); |
4919 | break; |
4920 | } |
4921 | |
4922 | case OP_negate: { |
4923 | int32_t index = (int32_t) op1; |
4924 | localSet(index, Ins(LIR_negd, localGetf(index)),result); |
4925 | break; |
4926 | } |
4927 | |
4928 | case OP_negate_i: { |
4929 | //framep[op1] = -framep[op1] |
4930 | int32_t index = (int32_t) op1; |
4931 | AvmAssert(state->value(index).traits == INT_TYPE);
4932 | localSet(index, Ins(LIR_negi, localGet(index)), result); |
4933 | break; |
4934 | } |
4935 | |
4936 | case OP_increment: |
4937 | case OP_decrement: |
4938 | case OP_inclocal: |
4939 | case OP_declocal: { |
4940 | int32_t index = (int32_t) op1; |
4941 | int32_t incr = (int32_t) op2; // 1 or -1 |
4942 | localSet(index, binaryIns(LIR_addd, localGetf(index), i2dIns(InsConst(incr))), result); |
4943 | break; |
4944 | } |
4945 | |
4946 | case OP_inclocal_i: |
4947 | case OP_declocal_i: |
4948 | case OP_increment_i: |
4949 | case OP_decrement_i: { |
4950 | int32_t index = (int32_t) op1; |
4951 | int32_t incr = (int32_t) op2; |
4952 | AvmAssert(state->value(index).traits == INT_TYPE);
4953 | localSet(index, binaryIns(LIR_addi, localGet(index), InsConst(incr)), result); |
4954 | break; |
4955 | } |
4956 | |
4957 | case OP_bitnot: { |
4958 | // *sp = core->intToAtom(~integer(*sp)); |
4959 | int32_t index = (int32_t) op1; |
4960 | AvmAssert(state->value(index).traits == INT_TYPE);
4961 | localSet(index, lirout->ins1(LIR_noti, localGet(index)), result); |
4962 | break; |
4963 | } |
4964 | |
4965 | case OP_modulo: { |
4966 | LIns* out = callIns(FUNCTIONID(mod), 2,
4967 | localGetf(sp-1), localGetf(sp)); |
4968 | localSet(sp-1, out, result); |
4969 | break; |
4970 | } |
4971 | |
4972 | case OP_divide: |
4973 | case OP_multiply: |
4974 | case OP_subtract: { |
4975 | LOpcode op; |
4976 | switch (opcode) { |
4977 | default: |
4978 | case OP_divide: op = LIR_divd; break; |
4979 | case OP_multiply: op = LIR_muld; break; |
4980 | case OP_subtract: op = LIR_subd; break; |
4981 | } |
4982 | localSet(sp-1, binaryIns(op, localGetf(sp-1), localGetf(sp)), result); |
4983 | break; |
4984 | } |
4985 | |
4986 | case OP_subtract_i: |
4987 | case OP_add_i: |
4988 | case OP_multiply_i: |
4989 | case OP_lshift: |
4990 | case OP_rshift: |
4991 | case OP_urshift: |
4992 | case OP_bitand: |
4993 | case OP_bitor: |
4994 | case OP_bitxor: |
4995 | { |
4996 | LOpcode op; |
4997 | switch (opcode) { |
4998 | default: |
4999 | case OP_bitxor: op = LIR_xori; break; |
5000 | case OP_bitor: op = LIR_ori; break; |
5001 | case OP_bitand: op = LIR_andi; break; |
5002 | case OP_urshift: op = LIR_rshui; break; |
5003 | case OP_rshift: op = LIR_rshi; break; |
5004 | case OP_lshift: op = LIR_lshi; break; |
5005 | case OP_multiply_i: op = LIR_muli; break; |
5006 | case OP_add_i: op = LIR_addi; break; |
5007 | case OP_subtract_i: op = LIR_subi; break; |
5008 | } |
5009 | LIns* lhs = localGet(sp-1); |
5010 | LIns* rhs = localGet(sp); |
5011 | LIns* out = binaryIns(op, lhs, rhs); |
5012 | localSet(sp-1, out, result); |
5013 | break; |
5014 | } |
5015 | |
5016 | case OP_throw: |
5017 | { |
5018 | //throwAtom(*sp--); |
5019 | int32_t index = (int32_t) op1; |
5020 | callIns(FUNCTIONID(throwAtom), 2, coreAddr, loadAtomRep(index));
5021 | break; |
5022 | } |
5023 | |
5024 | case OP_getsuper: |
5025 | { |
5026 | // stack in: obj [ns [name]] |
5027 | // stack out: value |
5028 | // sp[0] = env->getsuper(sp[0], multiname) |
5029 | int objDisp = sp; |
5030 | LIns* multi = initMultiname((Multiname*)op1, objDisp); |
5031 | AvmAssert(state->value(objDisp).notNull);
5032 | 
5033 | LIns* obj = loadAtomRep(objDisp);
5034 | 
5035 | LIns* i3 = callIns(FUNCTIONID(getsuper), 3,
5036 | env_param, obj, multi); |
5037 | liveAlloc(multi); |
5038 | |
5039 | i3 = atomToNativeRep(result, i3); |
5040 | localSet(objDisp, i3, result); |
5041 | break; |
5042 | } |
5043 | |
5044 | case OP_setsuper: |
5045 | { |
5046 | // stack in: obj [ns [name]] value |
5047 | // stack out: nothing |
5048 | // core->setsuper(sp[-1], multiname, sp[0], env->vtable->base) |
5049 | int objDisp = sp-1; |
5050 | LIns* multi = initMultiname((Multiname*)op1, objDisp); |
5051 | AvmAssert(state->value(objDisp).notNull);
5052 | 
5053 | LIns* obj = loadAtomRep(objDisp);
5054 | LIns* value = loadAtomRep(sp);
5055 | 
5056 | callIns(FUNCTIONID(setsuper), 4,
5057 | env_param, obj, multi, value); |
5058 | liveAlloc(multi); |
5059 | break; |
5060 | } |
5061 | |
5062 | case OP_nextname: |
5063 | case OP_nextvalue: |
5064 | { |
5065 | // sp[-1] = next[name|value](sp[-1], sp[0]); |
5066 | LIns* obj = loadAtomRep(sp-1); |
5067 | AvmAssert(state->value(sp).traits == INT_TYPE);
5068 | LIns* index = localGet(sp);
5069 | LIns* i1 = callIns((opcode == OP_nextname) ? FUNCTIONID(nextname) : FUNCTIONID(nextvalue), 3,
5070 | env_param, obj, index); |
5071 | localSet(sp-1, atomToNativeRep(result, i1), result); |
5072 | break; |
5073 | } |
5074 | |
5075 | case OP_hasnext: |
5076 | { |
5077 | // sp[-1] = hasnext(sp[-1], sp[0]); |
5078 | LIns* obj = loadAtomRep(sp-1); |
5079 | AvmAssert(state->value(sp).traits == INT_TYPE);
5080 | LIns* index = localGet(sp);
5081 | LIns* i1 = callIns(FUNCTIONID(hasnext), 3,
5082 | env_param, obj, index);
5083 | AvmAssert(result == INT_TYPE);
5084 | localSet(sp-1, i1, result); |
5085 | break; |
5086 | } |
5087 | |
5088 | case OP_hasnext2: |
5089 | { |
5090 | // fixme - if obj is already Atom, or index is already int, |
5091 | // easier to directly reference space in vars. |
5092 | int32_t obj_index = (int32_t) op1; |
5093 | int32_t index_index = (int32_t) op2; |
5094 | LIns* obj = insAlloc(sizeof(Atom)); |
5095 | LIns* index = insAlloc(sizeof(int32_t)); |
5096 | stp(loadAtomRep(obj_index), obj, 0, ACCSET_STORE_ANY); // Atom obj |
5097 | sti(localGet(index_index), index, 0, ACCSET_STORE_ANY); // int32_t index |
5098 | LIns* i1 = callIns(FUNCTIONID(hasnextproto), 3,
5099 | env_param, obj, index);
5100 | localSet(obj_index, loadIns(LIR_ldp, 0, obj, ACCSET_LOAD_ANY), OBJECT_TYPE); // Atom obj
5101 | localSet(index_index, loadIns(LIR_ldi, 0, index, ACCSET_LOAD_ANY), INT_TYPE); // int32_t index
5102 | AvmAssert(result == BOOLEAN_TYPE);
5103 | localSet(sp+1, i1, result); |
5104 | break; |
5105 | } |
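| // hasnext2 has in/out parameters: obj and index live in stack-allocated
| // slots so the helper can update them by address; the updated values are
| // reloaded after the call and written back to the corresponding locals.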
5106 | |
5107 | case OP_newfunction: |
5108 | { |
5109 | uint32_t function_id = (uint32_t) op1; |
5110 | int32_t index = (int32_t) op2; |
5111 | //sp[0] = core->newfunction(env, body, _scopeBase, scopeDepth); |
5112 | MethodInfo* func = pool->getMethodInfo(function_id); |
5113 | int extraScopes = state->scopeDepth; |
5114 | |
5115 | // prepare scopechain args for call |
5116 | LIns* ap = storeAtomArgs(extraScopes, ms->scope_base()); |
5117 | LIns* i3 = callIns(FUNCTIONID(newfunction), 3,
5118 | env_param, InsConstPtr(func), ap); |
5119 | liveAlloc(ap); |
5120 | |
5121 | AvmAssert(!result->isMachineType());
5122 | localSet(index, i3, result); |
5123 | break; |
5124 | } |
5125 | |
5126 | case OP_call: |
5127 | { |
5128 | // stack in: method obj arg1..N |
5129 | // sp[-argc-1] = op_call(env, sp[-argc], argc, ...) |
5130 | int argc = int(op1); |
5131 | int funcDisp = sp - argc - 1; |
5132 | int dest = funcDisp; |
5133 | |
5134 | // convert args to Atom[] for the call |
5135 | LIns* func = loadAtomRep(funcDisp); |
5136 | LIns* ap = storeAtomArgs(loadAtomRep(funcDisp+1), argc, funcDisp+2); |
5137 | LIns* i3 = callIns(FUNCTIONID(op_call), 4, env_param, func, InsConst(argc), ap);
5138 | liveAlloc(ap); |
5139 | localSet(dest, atomToNativeRep(result, i3), result); |
5140 | break; |
5141 | } |
5142 | |
5143 | case OP_callproperty: |
5144 | case OP_callproplex: |
5145 | case OP_callpropvoid: |
5146 | { |
5147 | // stack in: obj [ns [name]] arg1..N |
5148 | // stack out: result |
5149 | |
5150 | // obj = sp[-argc] |
5151 | //tempAtom = callproperty(env, name, toVTable(obj), argc, ...); |
5152 | // *(sp -= argc) = tempAtom; |
5153 | int argc = int(op2); |
5154 | int argv = sp-argc+1; |
5155 | int baseDisp = sp-argc; |
5156 | const Multiname* name = pool->precomputedMultiname((int)op1); |
5157 | LIns* multi = initMultiname(name, baseDisp); |
5158 | AvmAssert(state->value(baseDisp).notNull);
5159 | |
5160 | // convert args to Atom[] for the call |
5161 | LIns* base = loadAtomRep(baseDisp); |
5162 | LIns* receiver = opcode == OP_callproplex ? InsConstAtom(nullObjectAtom) : base; |
5163 | LIns* ap = storeAtomArgs(receiver, argc, argv); |
5164 | |
5165 | Traits* baseTraits = state->value(baseDisp).traits; |
5166 | Binding b = toplevel->getBinding(baseTraits, name); |
5167 | |
5168 | LIns* out; |
5169 | if (AvmCore::isSlotBinding(b)) { |
5170 | // can early bind call to closure in slot |
5171 | Traits* slotType = Traits::readBinding(baseTraits, b); |
5172 | // todo if funcValue is already a ScriptObject then don't box it, use a different helper. |
5173 | LIns* funcValue = loadFromSlot(baseDisp, AvmCore::bindingToSlotId(b), slotType); |
5174 | LIns* funcAtom = nativeToAtom(funcValue, slotType); |
5175 | out = callIns(FUNCTIONID(op_call), 4, env_param, funcAtom, InsConst(argc), ap);
5176 | } |
5177 | else if (!name->isRuntime()) { |
5178 | // use inline cache for late bound call |
5179 | // cache contains: [handler, vtable, [data], Multiname*] |
5180 | // and we call (*cache->handler)(cache, obj, argc, args*, MethodEnv*) |
5181 | CallCache* cache = call_cache_builder.allocateCacheSlot(name); |
5182 | LIns* cacheAddr = InsConstPtr(cache); |
5183 | LIns* handler = loadIns(LIR_ldp, offsetof(CallCache, call_handler), cacheAddr, ACCSET_OTHER);
5184 | out = callIns(FUNCTIONID(call_cache_handler), 6,
5185 | handler, cacheAddr, base, InsConst(argc), ap, env_param); |
5186 | } |
5187 | else { |
5188 | // generic late bound call to anything |
5189 | out = callIns(FUNCTIONID(callprop_late), 5, env_param, base, multi, InsConst(argc), ap);
5190 | liveAlloc(multi); |
5191 | } |
5192 | liveAlloc(ap); |
5193 | localSet(baseDisp, atomToNativeRep(result, out), result); |
5194 | break; |
5195 | } |
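| // Three dispatch tiers for property calls: (1) a slot-bound closure is
| // loaded directly and invoked via op_call; (2) a static name goes through a
| // per-site CallCache, an inline cache whose handler pointer is presumably
| // rebound as receiver types are observed; (3) runtime names fall back to the
| // fully generic callprop_late.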
5196 | |
5197 | case OP_constructprop: |
5198 | { |
5199 | // stack in: obj [ns [name]] arg1..N |
5200 | // stack out: result |
5201 | |
5202 | int argc = int(op2); |
5203 | // obj = sp[-argc] |
5204 | //tempAtom = callproperty(env, name, toVTable(obj), argc, ...); |
5205 | // *(sp -= argc) = tempAtom; |
5206 | int argv = sp-argc+1; |
5207 | |
5208 | int objDisp = sp-argc; |
5209 | LIns* multi = initMultiname((Multiname*)op1, objDisp); |
5210 | AvmAssert(state->value(objDisp).notNull);
5211 | 
5212 | // convert args to Atom[] for the call
5213 | LIns* ap = storeAtomArgs(loadAtomRep(objDisp), argc, argv);
5214 | LIns* i3 = callIns(FUNCTIONID(construct_late), 4,
5215 | env_param, multi, InsConst(argc), ap); |
5216 | liveAlloc(multi); |
5217 | liveAlloc(ap); |
5218 | |
5219 | localSet(objDisp, atomToNativeRep(result, i3), result); |
5220 | break; |
5221 | } |
5222 | |
5223 | case OP_callsuper: |
5224 | case OP_callsupervoid: |
5225 | { |
5226 | // stack in: obj [ns [name]] arg1..N |
5227 | // stack out: result |
5228 | // null check must have already happened. |
5229 | // tempAtom = callsuper(multiname, obj, sp-argc+1, argc, vtable->base); |
5230 | int argc = int(op2); |
5231 | int argv = sp - argc + 1; |
5232 | int objDisp = sp - argc; |
5233 | LIns* multi = initMultiname((Multiname*)op1, objDisp); |
5234 | AvmAssert(state->value(objDisp).notNull);
5235 | 
5236 | // convert args to Atom[] for the call
5237 | LIns* obj = loadAtomRep(objDisp);
5238 | 
5239 | LIns* ap = storeAtomArgs(obj, argc, argv);
5240 | 
5241 | LIns* i3 = callIns(FUNCTIONID(callsuper), 4,
5242 | env_param, multi, InsConst(argc), ap); |
5243 | liveAlloc(multi); |
5244 | liveAlloc(ap); |
5245 | |
5246 | localSet(objDisp, atomToNativeRep(result, i3), result); |
5247 | break; |
5248 | } |
5249 | |
5250 | case OP_applytype: |
5251 | { |
5252 | // stack in: method arg1..N |
5253 | // sp[-argc] = applytype(env, sp[-argc], argc, null, arg1..N) |
5254 | int argc = int(op1); |
5255 | int funcDisp = sp - argc; |
5256 | int dest = funcDisp; |
5257 | int arg0 = sp - argc + 1; |
5258 | |
5259 | LIns* func = loadAtomRep(funcDisp); |
5260 | |
5261 | // convert args to Atom[] for the call |
5262 | LIns* ap = storeAtomArgs(argc, arg0); |
5263 | |
5264 | LIns* i3 = callIns(FUNCTIONID(op_applytype), 4,
5265 | env_param, func, InsConst(argc), ap); |
5266 | liveAlloc(ap); |
5267 | |
5268 | localSet(dest, atomToNativeRep(result, i3), result); |
5269 | break; |
5270 | } |
5271 | |
5272 | case OP_newobject: |
5273 | { |
5274 | // result = env->op_newobject(sp, argc) |
5275 | int argc = int(op1); |
5276 | int dest = sp - (2*argc-1); |
5277 | int arg0 = dest; |
5278 | |
5279 | // convert args to Atom[] for the call
5280 | LIns* ap = storeAtomArgs(2*argc, arg0);
5281 | 
5282 | LIns* i3 = callIns(FUNCTIONID(op_newobject), 3,
5283 | env_param, leaIns(sizeof(Atom)*(2*argc-1), ap), InsConst(argc)); |
5284 | liveAlloc(ap); |
5285 | |
5286 | localSet(dest, ptrToNativeRep(result, i3), result); |
5287 | break; |
5288 | } |
5289 | |
5290 | case OP_newactivation: |
5291 | { |
5292 | // result = env->newActivation() |
5293 | LIns* activation = callIns(FUNCTIONID(newActivation), 1, env_param);
5294 | localSet(sp+1, ptrToNativeRep(result, activation), result); |
5295 | break; |
5296 | } |
5297 | |
5298 | case OP_newcatch: |
5299 | { |
5300 | // result = core->newObject(env->activation, NULL); |
5301 | int dest = sp+1; |
5302 | |
5303 | LIns* activation = callIns(FUNCTIONID(newcatch), 2,
5304 | env_param, InsConstPtr(result)); |
5305 | |
5306 | localSet(dest, ptrToNativeRep(result, activation), result); |
5307 | break; |
5308 | } |
5309 | |
5310 | case OP_newarray: |
5311 | { |
5312 | // sp[-argc+1] = env->toplevel()->arrayClass->newarray(sp-argc+1, argc) |
5313 | int argc = int(op1); |
5314 | int arg0 = sp - 1*argc+1; |
5315 | |
5316 | // convert array elements to Atom[] |
5317 | LIns* ap = storeAtomArgs(argc, arg0); |
5318 | LIns* i3 = callIns(FUNCTIONID(newarray), 3, env_param, InsConst(argc), ap);
5319 | liveAlloc(ap);
5320 | 
5321 | AvmAssert(!result->isMachineType());
5322 | localSet(arg0, i3, result); |
5323 | break; |
5324 | } |
5325 | |
5326 | case OP_newclass: |
5327 | { |
5328 | // sp[0] = core->newclass(env, ctraits, scopeBase, scopeDepth, base) |
5329 | Traits* ctraits = (Traits*) op1; |
5330 | int localindex = int(op2); |
5331 | int extraScopes = state->scopeDepth; |
5332 | |
5333 | LIns* outer = loadEnvScope(); |
5334 | LIns* base = localGetp(localindex); |
5335 | |
5336 | // prepare scopechain args for call |
5337 | LIns* ap = storeAtomArgs(extraScopes, ms->scope_base()); |
5338 | |
5339 | LIns* i3 = callIns(FUNCTIONID(newclass), 5,
5340 | env_param, InsConstPtr(ctraits), base, outer, ap);
5341 | liveAlloc(ap);
5342 | 
5343 | AvmAssert(!result->isMachineType());
5344 | localSet(localindex, i3, result); |
5345 | break; |
5346 | } |
5347 | |
5348 | case OP_getdescendants: |
5349 | { |
5350 | // stack in: obj [ns [name]] |
5351 | // stack out: value |
5352 | //sp[0] = core->getdescendants(sp[0], name); |
5353 | int objDisp = sp; |
5354 | Multiname* multiname = (Multiname*) op1; |
5355 | |
5356 | LIns* multi = initMultiname(multiname, objDisp); |
5357 | LIns* obj = loadAtomRep(objDisp); |
5358 | AvmAssert(state->value(objDisp).notNull);
5359 | 
5360 | LIns* out = callIns(FUNCTIONID(getdescendants), 3,
5361 | env_param, obj, multi); |
5362 | liveAlloc(multi); |
5363 | |
5364 | localSet(objDisp, atomToNativeRep(result, out), result); |
5365 | break; |
5366 | } |
5367 | |
5368 | case OP_checkfilter: { |
5369 | int32_t index = (int32_t) op1; |
5370 | callIns(FUNCTIONID(checkfilter), 2, env_param, loadAtomRep(index));
5371 | break; |
5372 | } |
5373 | |
5374 | case OP_findpropstrict: |
5375 | case OP_findproperty: |
5376 | { |
5377 | // stack in: [ns [name]] |
5378 | // stack out: obj |
5379 | // sp[1] = env->findproperty(scopeBase, scopedepth, name, strict) |
5380 | int dest = sp; |
5381 | LIns* multi = initMultiname((Multiname*)op1, dest); |
5382 | dest++; |
5383 | int extraScopes = state->scopeDepth; |
5384 | |
5385 | // prepare scopechain args for call |
5386 | LIns* ap = storeAtomArgs(extraScopes, ms->scope_base()); |
5387 | |
5388 | LIns* outer = loadEnvScope(); |
5389 | |
5390 | LIns* withBase; |
5391 | if (state->withBase == -1) |
5392 | { |
5393 | withBase = InsConstPtr(0); |
5394 | } |
5395 | else |
5396 | { |
5397 | withBase = leaIns(state->withBase*sizeof(Atom), ap); |
5398 | } |
5399 | |
5400 | // return env->findproperty(outer, argv, extraScopes, name, strict); |
5401 | |
5402 | LIns* i3 = callIns(FUNCTIONID(findproperty), 7,
5403 | env_param, outer, ap, InsConst(extraScopes), multi, |
5404 | InsConst((int32_t)(opcode == OP_findpropstrict)), |
5405 | withBase); |
5406 | liveAlloc(multi); |
5407 | liveAlloc(ap); |
5408 | |
5409 | localSet(dest, atomToNativeRep(result, i3), result); |
5410 | break; |
5411 | } |
5412 | |
5413 | case OP_getproperty: |
5414 | { |
5415 | // stack in: obj [ns] [name] |
5416 | // stack out: value |
5417 | // obj=sp[0] |
5418 | //sp[0] = env->getproperty(obj, multiname); |
5419 | |
5420 | const Multiname* multiname = pool->precomputedMultiname((int)op1); |
5421 | bool attr = multiname->isAttr();
5422 | Traits* indexType = state->value(sp).traits;
5423 | LIns* index = NULL;
5424 | bool maybeIntegerIndex = !attr && multiname->isRtname() && multiname->containsAnyPublicNamespace();
5425 | |
5426 | if (maybeIntegerIndex) |
5427 | index = optimizeIndexArgumentType(sp, &indexType); |
5428 | |
5429 | if (maybeIntegerIndex && indexType == INT_TYPE) {
5430 | LIns *value = emitGetIndexedProperty(sp-1, index, result, VI_INT);
5431 | localSet(sp-1, value, result);
5432 | }
5433 | else if (maybeIntegerIndex && indexType == UINT_TYPE) {
5434 | LIns *value = emitGetIndexedProperty(sp-1, index, result, VI_UINT);
5435 | localSet(sp-1, value, result);
5436 | }
5437 | else if (maybeIntegerIndex && indexType == NUMBER_TYPE) {
5438 | bool bGeneratedFastPath = false;
5439 | #ifdef VMCFG_FASTPATH_ADD_INLINE |
5440 | if (inlineFastpath) { |
5441 | // TODO: Since a helper must be called anyhow, I conjecture that fused add-and-index helpers |
5442 | // may be more efficient than the inlining done here. On the other hand, it is plausible that |
5443 | // we would go the other way and completely inline the getNativeXXXProperty cases, in which |
5444 | // case we'd want to retain this for the sake of more thorough inlining. |
5445 | LOpcode op = index->opcode(); |
5446 | if (op == LIR_addd || op == LIR_subd) { |
5447 | CodegenLabel slow_path("slow"); |
5448 | CodegenLabel done_path("done"); |
5449 | LIns* tempResult = insAllocForTraits(result); |
5450 | suspendCSE(); |
5451 | LIns* a = index->oprnd1(); |
5452 | LIns* b = index->oprnd2(); |
5453 | // Our addjovi only works with signed integers so don't use isPromote. |
5454 | a = (a->opcode() == LIR_i2d) ? a->oprnd1() : imm2Int(a); |
5455 | b = (b->opcode() == LIR_i2d) ? b->oprnd1() : imm2Int(b); |
5456 | if (a && b) { |
5457 | // Inline fast path for index generated from integer add |
5458 | // if (a+b) does not overflow && ((a+b) >= 0) |
5459 | // call getUintProperty(obj, a + b) |
5460 | // else |
5461 | // call getprop_index_add (env, obj, multiname, a, b) |
5462 | // (for subtraction, we emit (a-b) and call getprop_index_subtract) |
5463 | LIns* addOp = branchJovToLabel((op == LIR_addd) ? LIR_addjovi : LIR_subjovi, a, b, slow_path); |
5464 | branchToLabel(LIR_jt, binaryIns(LIR_lti, addOp, InsConst(0)), slow_path); |
5465 | LIns* value0 = emitGetIndexedProperty(sp-1, addOp, result, VI_UINT); |
5466 | stForTraits(result, value0, tempResult, 0, ACCSET_STORE_ANY); |
5467 | branchToLabel(LIR_j, NULL, done_path);
5468 | |
5469 | emitLabel(slow_path); |
5470 | LIns* value1 = emitGetIndexedProperty(sp-1, index, result, VI_DOUBLE); |
5471 | stForTraits(result, value1, tempResult, 0, ACCSET_STORE_ANY); |
5472 | |
5473 | emitLabel(done_path); |
5474 | localSet(sp-1, ldForTraits(result, tempResult, 0, ACCSET_LOAD_ANY), result); |
5475 | bGeneratedFastPath = true;
5476 | } |
5477 | resumeCSE(); |
5478 | } |
5479 | } |
5480 | #endif // VMCFG_FASTPATH_ADD_INLINE |
5481 | if (!bGeneratedFastPath) { |
5482 | LIns *value = emitGetIndexedProperty(sp-1, index, result, VI_DOUBLE); |
5483 | localSet(sp-1, value, result); |
5484 | } |
5485 | } |
5486 | else if (maybeIntegerIndex && indexType != STRING_TYPE) {
5487 | LIns* multi = InsConstPtr(multiname); // inline ptr to precomputed name
5488 | LIns* index = loadAtomRep(sp);
5489 | AvmAssert(state->value(sp-1).notNull);
5490 | LIns* obj = loadAtomRep(sp-1);
5491 | LIns* value = callIns(FUNCTIONID(getprop_index), 4,
5492 | env_param, obj, multi, index); |
5493 | |
5494 | localSet(sp-1, atomToNativeRep(result, value), result); |
5495 | } |
5496 | else { |
5497 | int objDisp = sp; |
5498 | LIns* multi = initMultiname(multiname, objDisp); |
5499 | AvmAssert(state->value(objDisp).notNull);
5500 | 
5501 | LIns* value;
5502 | LIns* obj = loadAtomRep(objDisp);
5503 | if (multiname->isRuntime()) {
5504 | //return getprop_late(obj, name);
5505 | value = callIns(FUNCTIONID(getprop_late), 3, env_param, obj, multi);
5506 | liveAlloc(multi); |
5507 | } else { |
5508 | // static name, use property cache |
5509 | GetCache* cache = get_cache_builder.allocateCacheSlot(multiname); |
5510 | LIns* cacheAddr = InsConstPtr(cache); |
5511 | LIns* handler = loadIns(LIR_ldp, offsetof(GetCache, get_handler), cacheAddr, ACCSET_OTHER);
5512 | value = callIns(FUNCTIONID(get_cache_handler), 4, handler, cacheAddr, env_param, obj);
5513 | } |
5514 | |
5515 | localSet(objDisp, atomToNativeRep(result, value), result); |
5516 | } |
5517 | break; |
5518 | } |
5519 | case OP_initproperty: |
5520 | case OP_setproperty: |
5521 | { |
5522 | // stack in: obj [ns] [name] value |
5523 | // stack out: |
5524 | // obj = sp[-1] |
5525 | //env->setproperty(obj, multiname, sp[0], toVTable(obj)); |
5526 | |
5527 | const Multiname* multiname = (const Multiname*)op1; |
5528 | bool attr = multiname->isAttr();
5529 | Traits* indexType = state->value(sp-1).traits;
5530 | LIns* index = NULL;
5531 | bool maybeIntegerIndex = !attr && multiname->isRtname() && multiname->containsAnyPublicNamespace();
5532 | |
5533 | if (maybeIntegerIndex) |
5534 | index = optimizeIndexArgumentType(sp-1, &indexType); |
5535 | |
5536 | if (maybeIntegerIndex && indexType == INT_TYPE) {
5537 | emitSetIndexedProperty(sp-2, sp, index, VI_INT);
5538 | }
5539 | else if (maybeIntegerIndex && indexType == UINT_TYPE) {
5540 | emitSetIndexedProperty(sp-2, sp, index, VI_UINT);
5541 | }
5542 | else if (maybeIntegerIndex && indexType == NUMBER_TYPE) {
5543 | bool bGeneratedFastPath = false;
5544 | #ifdef VMCFG_FASTPATH_ADD_INLINE |
5545 | if (inlineFastpath) { |
5546 | LOpcode op = index->opcode(); |
5547 | if (op == LIR_addd || op == LIR_subd) { |
5548 | CodegenLabel slow_path("slow"); |
5549 | CodegenLabel done_path("done"); |
5550 | suspendCSE(); |
5551 | LIns *a = index->oprnd1(); |
5552 | LIns *b = index->oprnd2(); |
5553 | // Our addjovi only works with signed integers. |
5554 | a = (a->opcode() == LIR_i2d) ? a->oprnd1() : imm2Int(a); |
5555 | b = (b->opcode() == LIR_i2d) ? b->oprnd1() : imm2Int(b); |
5556 | if (a && b) { |
5557 | // Inline fast path for index generated from integer add |
5558 | // if (a + b) does not overflow && ((a+b) >= 0) |
5559 | // call setProperty(obj, value, a + b) |
5560 | // else |
5561 | // call set/initprop_index_add(env, obj, name, value, a, b) |
5562 | // (for subtraction, we emit (a-b) and call set/initprop_index_subtract) |
5563 | LIns* addOp = branchJovToLabel((op == LIR_addd) ? LIR_addjovi : LIR_subjovi, a, b, slow_path); |
5564 | branchToLabel(LIR_jt, binaryIns(LIR_lti, addOp, InsConst(0)), slow_path); |
5565 | emitSetIndexedProperty(sp-2, sp, addOp, VI_UINT); |
5566 | branchToLabel(LIR_j, NULL, done_path);
5567 | |
5568 | emitLabel(slow_path); |
5569 | emitSetIndexedProperty(sp-2, sp, index, VI_DOUBLE); |
5570 | |
5571 | emitLabel(done_path); |
5572 | bGeneratedFastPath = true;
5573 | } |
5574 | resumeCSE(); |
5575 | } |
5576 | } |
5577 | #endif // VMCFG_FASTPATH_ADD_INLINE |
5578 | if (!bGeneratedFastPath) { |
5579 | emitSetIndexedProperty(sp-2, sp, index, VI_DOUBLE); |
5580 | } |
5581 | } |
5582 | else if (maybeIntegerIndex) { |
5583 | LIns* name = InsConstPtr(multiname); // precomputed multiname |
5584 | LIns* value = loadAtomRep(sp); |
5585 | LIns* index = loadAtomRep(sp-1); |
5586 | AvmAssert(state->value(sp-2).notNull);
5587 | LIns* obj = loadAtomRep(sp-2);
5588 | const CallInfo* func = (opcode == OP_setproperty) ? FUNCTIONID(setprop_index) : FUNCTIONID(initprop_index);
5589 | callIns(func, 5, env_param, obj, name, value, index); |
5590 | } |
5591 | else { |
5592 | int objDisp = sp-1; |
5593 | LIns* value = loadAtomRep(sp); |
5594 | LIns* multi = initMultiname(multiname, objDisp); |
5595 | AvmAssert(state->value(objDisp).notNull);
5596 | |
5597 | LIns* obj = loadAtomRep(objDisp); |
5598 | if (opcode == OP_setproperty) { |
5599 | if (!multiname->isRuntime()) { |
5600 | // use inline cache for dynamic setproperty access |
5601 | SetCache* cache = set_cache_builder.allocateCacheSlot(multiname); |
5602 | LIns* cacheAddr = InsConstPtr(cache); |
5603 | LIns* handler = loadIns(LIR_ldp, offsetof(SetCache, set_handler), cacheAddr, ACCSET_OTHER);
5604 | callIns(FUNCTIONID(set_cache_handler), 5, handler, cacheAddr, obj, value, env_param);
5605 | } else {
5606 | // last resort slow path for OP_setproperty
5607 | callIns(FUNCTIONID(setprop_late), 4, env_param, obj, multi, value);
5608 | liveAlloc(multi);
5609 | }
5610 | }
5611 | else {
5612 | // initprop_late is rare in jit code because we typically interpret static
5613 | // initializers, and constructor initializers tend to early-bind successfully.
5614 | callIns(FUNCTIONID(initprop_late), 4, env_param, obj, multi, value);
5615 | liveAlloc(multi); |
5616 | } |
5617 | } |
5618 | break; |
5619 | } |
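// Editor's note (added): a rough sketch of the guarded fast path emitted above
// for "obj[a + b] = value" when the index is a Number produced by an integer
// add or subtract. Label and operand names are illustrative only:
//
//   addOp = addjovi a, b        ; branch to slow_path on signed overflow
//   jt (addOp < 0) -> slow_path ; a negative index must take the atom path
//   emitSetIndexedProperty(obj, value, addOp)   ; in-range uint fast path
//   j -> done
// slow_path:
//   emitSetIndexedProperty(obj, value, index)   ; original double index
// done: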
5620 | |
5621 | case OP_deleteproperty: |
5622 | { |
5623 | // stack in: obj [ns] [name] |
5624 | // stack out: Boolean |
5625 | //sp[0] = delproperty(sp[0], multiname); |
5626 | int objDisp = sp; |
5627 | Multiname *multiname = (Multiname*)op1; |
5628 | if(!multiname->isRtname()) { |
5629 | LIns* multi = initMultiname(multiname, objDisp, true);
5630 | 
5631 | LIns* obj = loadAtomRep(objDisp);
5632 | 
5633 | LIns* i3 = callIns(FUNCTIONID(delproperty), 3,
5634 | env_param, obj, multi); |
5635 | liveAlloc(multi); |
5636 | |
5637 | localSet(objDisp, atomToNativeRep(result, i3), result); |
5638 | } else { |
5639 | LIns* _tempname = copyMultiname(multiname); |
5640 | LIns* index = loadAtomRep(objDisp--); |
5641 | |
5642 | if( !multiname->isRtns() ) |
5643 | { |
5644 | // copy the compile-time namespace to the temp multiname |
5645 | LIns* mSpace = InsConstPtr(multiname->ns); |
5646 | stp(mSpace, _tempname, offsetof(Multiname, ns), ACCSET_OTHER);
5647 | } |
5648 | else |
5649 | { |
5650 | // intern the runtime namespace and copy to the temp multiname |
5651 | LIns* nsAtom = loadAtomRep(objDisp--); |
5652 | LIns* internNs = callIns(FUNCTIONID(internRtns), 2,
5653 | env_param, nsAtom);
5654 | 
5655 | stp(internNs, _tempname, offsetof(Multiname, ns), ACCSET_OTHER);
5656 | } |
5657 | liveAlloc(_tempname); |
5658 | |
5659 | AvmAssert(state->value(objDisp).notNull);
5660 | LIns* obj = loadAtomRep(objDisp);
5661 | 
5662 | LIns* value = callIns(FUNCTIONID(delpropertyHelper), 4,
5663 | env_param, obj, _tempname, index); |
5664 | |
5665 | localSet(objDisp, atomToNativeRep(result, value), result); |
5666 | } |
5667 | break; |
5668 | } |
5669 | |
5670 | case OP_esc_xelem: // ToXMLString will call EscapeElementValue |
5671 | { |
5672 | //sp[0] = core->ToXMLString(sp[0]); |
5673 | int32_t index = (int32_t) op1; |
5674 | LIns* value = loadAtomRep(index); |
5675 | LIns* i3 = callIns(FUNCTIONID(ToXMLString), 2,
5676 | coreAddr, value);
5677 | AvmAssert(result == STRING_TYPE);
5678 | localSet(index, i3, result); |
5679 | break; |
5680 | } |
5681 | |
5682 | case OP_esc_xattr: |
5683 | { |
5684 | //sp[0] = core->EscapeAttributeValue(sp[0]); |
5685 | int32_t index = (int32_t) op1; |
5686 | LIns* value = loadAtomRep(index); |
5687 | LIns* i3 = callIns(FUNCTIONID(EscapeAttributeValue), 2,
5688 | coreAddr, value);
5689 | AvmAssert(result == STRING_TYPE);
5690 | localSet(index, i3, result); |
5691 | break; |
5692 | } |
5693 | |
5694 | case OP_astype: |
5695 | { |
5696 | // sp[0] = AvmCore::astype(sp[0], traits) |
5697 | Traits *type = (Traits*) op1; |
5698 | int32_t index = (int32_t) op2; |
5699 | LIns* obj = loadAtomRep(index); |
5700 | LIns* i1 = callIns(FUNCTIONID(astype), 2, obj, InsConstPtr(type));
5701 | i1 = atomToNativeRep(result, i1); |
5702 | localSet(index, i1, result); |
5703 | break; |
5704 | } |
5705 | |
5706 | case OP_astypelate: |
5707 | { |
5708 | //sp[-1] = astype_late(env, sp[-1], sp[0]); |
5709 | LIns* type = loadAtomRep(sp); |
5710 | LIns* obj = loadAtomRep(sp-1); |
5711 | LIns* i3 = callIns(FUNCTIONID(astype_late), 3, env_param, obj, type);
5712 | i3 = atomToNativeRep(result, i3); |
5713 | localSet(sp-1, i3, result); |
5714 | break; |
5715 | } |
5716 | |
5717 | case OP_strictequals: |
5718 | { |
5719 | AvmAssert(result == BOOLEAN_TYPE);
5720 | localSet(sp-1, cmpEq(FUNCTIONID(stricteq), sp-1, sp), result);
5721 | break; |
5722 | } |
5723 | |
5724 | case OP_equals: |
5725 | { |
5726 | AvmAssert(result == BOOLEAN_TYPE);
5727 | localSet(sp-1, cmpEq(FUNCTIONID(equals), sp-1, sp), result);
5728 | break; |
5729 | } |
5730 | |
5731 | case OP_lessthan: |
5732 | { |
5733 | AvmAssert(result == BOOLEAN_TYPE);
5734 | localSet(sp-1, cmpLt(sp-1, sp), result); |
5735 | break; |
5736 | } |
5737 | |
5738 | case OP_lessequals: |
5739 | { |
5740 | AvmAssert(result == BOOLEAN_TYPE);
5741 | localSet(sp-1, cmpLe(sp-1, sp), result); |
5742 | break; |
5743 | } |
5744 | |
5745 | case OP_greaterthan: |
5746 | { |
5747 | AvmAssert(result == BOOLEAN_TYPE);
5748 | localSet(sp-1, cmpLt(sp, sp-1), result); |
5749 | break; |
5750 | } |
5751 | |
5752 | case OP_greaterequals: |
5753 | { |
5754 | AvmAssert(result == BOOLEAN_TYPE);
5755 | localSet(sp-1, cmpLe(sp, sp-1), result); |
5756 | break; |
5757 | } |
5758 | |
5759 | case OP_instanceof: |
5760 | { |
5761 | LIns* lhs = loadAtomRep(sp-1); |
5762 | LIns* rhs = loadAtomRep(sp); |
5763 | LIns* out = callIns(FUNCTIONID(instanceof), 3, env_param, lhs, rhs);
5764 | out = atomToNativeRep(result, out); |
5765 | localSet(sp-1, out, result); |
5766 | break; |
5767 | } |
5768 | |
5769 | case OP_in: |
5770 | { |
5771 | // sp[-1] = env->in_operator(sp[-1], sp[0]) |
5772 | int lhsDisp = sp-1; |
5773 | LIns *lhs; |
5774 | LIns *rhs = loadAtomRep(sp); |
5775 | LIns *out; |
5776 | Traits *lhsType = state->value(lhsDisp).traits; |
5777 | switch (bt(lhsType)) { |
5778 | case BUILTIN_uint: |
5779 | lhs = localGet(lhsDisp); |
5780 | out = callIns(FUNCTIONID(haspropertylate_u), 3, env_param, rhs, lhs);
5781 | break;
5782 | case BUILTIN_int:
5783 | lhs = localGet(lhsDisp);
5784 | out = callIns(FUNCTIONID(haspropertylate_i), 3, env_param, rhs, lhs);
5785 | break;
5786 | default:
5787 | lhs = loadAtomRep(lhsDisp);
5788 | out = callIns(FUNCTIONID(op_in), 3, env_param, lhs, rhs);
5789 | out = atomToNativeRep(result, out); |
5790 | break; |
5791 | } |
5792 | localSet(sp-1, out, result); |
5793 | break; |
5794 | } |
5795 | |
5796 | case OP_dxns: |
5797 | { |
5798 | LIns* uri = InsConstPtr((String*)op1); // namespace uri from string pool |
5799 | callIns(FUNCTIONID(setDxns), 3, coreAddr, methodFrame, uri);
5800 | break; |
5801 | } |
5802 | |
5803 | case OP_dxnslate: |
5804 | { |
5805 | int32_t index = (int32_t) op1; |
5806 | LIns* atom = loadAtomRep(index); |
5807 | callIns(FUNCTIONID(setDxnsLate), 3, coreAddr, methodFrame, atom);
5808 | break; |
5809 | } |
5810 | |
5811 | /* |
5812 | * debugger instructions |
5813 | */ |
5814 | case OP_debugfile: |
5815 | { |
5816 | #ifdef DEBUGGER |
5817 | if (haveDebugger) { |
5818 | // TODO: refactor APIs so we don't have to pass argv/argc
5819 | LIns* debugger = loadIns(LIR_ldp, offsetof(AvmCore, _debugger), coreAddr, ACCSET_OTHER, LOAD_CONST);
5820 | callIns(FUNCTIONID(debugFile), 2,
5821 | debugger, |
5822 | InsConstPtr((String*)op1)); |
5823 | } |
5824 | #endif // DEBUGGER |
5825 | #ifdef VMCFG_VTUNE |
5826 | Ins(LIR_file, InsConstPtr((String*)op1)); |
5827 | #endif /* VMCFG_VTUNE */ |
5828 | break; |
5829 | } |
5830 | |
5831 | case OP_debugline: |
5832 | { |
5833 | #ifdef DEBUGGER |
5834 | if (haveDebugger) { |
5835 | // TODO: refactor APIs so we don't have to pass argv/argc
5836 | LIns* debugger = loadIns(LIR_ldp, offsetof(AvmCore, _debugger), coreAddr, ACCSET_OTHER, LOAD_CONST);
5837 | callIns(FUNCTIONID(debugLine), 2,
5838 | debugger, |
5839 | InsConst((int32_t)op1)); |
5840 | } |
5841 | #endif // DEBUGGER |
5842 | #ifdef VMCFG_VTUNE |
5843 | Ins(LIR_line, InsConst((int32_t)op1)); |
5844 | #endif /* VMCFG_VTUNE */ |
5845 | break; |
5846 | } |
5847 | |
5848 | default: |
5849 | { |
5850 | AvmAssert(false); // unsupported
5851 | } |
5852 | } |
5853 | |
5854 | } // emit() |
5855 | |
5856 | void CodegenLIR::emitIf(AbcOpcode opcode, const uint8_t* target, int a, int b) |
5857 | { |
5858 | #ifdef DEBUGGER |
5859 | Sampler* s = core->get_sampler(); |
5860 | if (s && s->sampling() && target < state->abc_pc) |
5861 | { |
5862 | emitSampleCheck(); |
5863 | } |
5864 | #endif |
5865 | |
5866 | // |
5867 | // compile instructions that cannot throw exceptions before we add exception handling logic |
5868 | // |
5869 | |
5870 | // op1 = abc opcode target |
5871 | // op2 = what local var contains condition |
5872 | |
5873 | LIns* cond; |
5874 | LOpcode br; |
5875 | |
5876 | switch (opcode) |
5877 | { |
5878 | case OP_iftrue: |
5879 | NanoAssert(state->value(a).traits == BOOLEAN_TYPE);
5880 | br = LIR_jf; |
5881 | cond = eqi0(localGet(a)); |
5882 | break; |
5883 | case OP_iffalse: |
5884 | NanoAssert(state->value(a).traits == BOOLEAN_TYPE);
5885 | br = LIR_jt; |
5886 | cond = eqi0(localGet(a)); |
5887 | break; |
5888 | case OP_iflt: |
5889 | br = LIR_jt; |
5890 | cond = cmpLt(a, b); |
5891 | break; |
5892 | case OP_ifnlt: |
5893 | br = LIR_jf; |
5894 | cond = cmpLt(a, b); |
5895 | break; |
5896 | case OP_ifle: |
5897 | br = LIR_jt; |
5898 | cond = cmpLe(a, b); |
5899 | break; |
5900 | case OP_ifnle: |
5901 | br = LIR_jf; |
5902 | cond = cmpLe(a, b); |
5903 | break; |
5904 | case OP_ifgt: // a>b === b<a |
5905 | br = LIR_jt; |
5906 | cond = cmpLt(b, a); |
5907 | break; |
5908 | case OP_ifngt: // !(a>b) === !(b<a) |
5909 | br = LIR_jf; |
5910 | cond = cmpLt(b, a); |
5911 | break; |
5912 | case OP_ifge: // a>=b === b<=a |
5913 | br = LIR_jt; |
5914 | cond = cmpLe(b, a); |
5915 | break; |
5916 | case OP_ifnge: // !(a>=b) === !(a<=b) |
5917 | br = LIR_jf; |
5918 | cond = cmpLe(b, a); |
5919 | break; |
5920 | case OP_ifeq: |
5921 | br = LIR_jt; |
5922 | cond = cmpEq(FUNCTIONID(equals), a, b);
5923 | break;
5924 | case OP_ifne:
5925 | br = LIR_jf;
5926 | cond = cmpEq(FUNCTIONID(equals), a, b);
5927 | break;
5928 | case OP_ifstricteq:
5929 | br = LIR_jt;
5930 | cond = cmpEq(FUNCTIONID(stricteq), a, b);
5931 | break;
5932 | case OP_ifstrictne:
5933 | br = LIR_jf;
5934 | cond = cmpEq(FUNCTIONID(stricteq), a, b);
5935 | break; |
5936 | default: |
5937 | AvmAssert(false);
5938 | return; |
5939 | } |
5940 | |
5941 | if (cond->isImmI()) { |
5942 | if ((br == LIR_jt && cond->immI()) || (br == LIR_jf && !cond->immI())) { |
5943 | // taken |
5944 | br = LIR_j; |
5945 | cond = 0; |
5946 | } |
5947 | else { |
5948 | // not taken = no-op |
5949 | return; |
5950 | } |
5951 | } |
5952 | |
5953 | branchToAbcPos(br, cond, target); |
5954 | } // emitIf() |
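// Editor's note (added): the negated opcodes (OP_ifngt etc.) reuse the same
// condition as their positive forms and only flip the branch sense (LIR_jt vs
// LIR_jf). The distinction matters for NaN: cmpLt yields false for unordered
// operands, so "jf cmpLt(b, a)" correctly takes the OP_ifngt branch while
// "jt cmpLt(b, a)" correctly falls through for OP_ifgt.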
5955 | |
5956 | // Try to optimize comparison of a function call yielding NUMBER_TYPE with another expression |
5957 | // of INT_TYPE. We may be able to simplify the function and/or the comparison based on the |
5958 | // type or value, if constant, of the other argument. The function call is normally presumed |
5959 | // to be the left-hand argument. The swap parameter, if true, reverses this convention. |
5960 | |
5961 | static Specialization intCmpWithNumber[] = { |
5962 | { FUNCTIONID(String_charCodeAtFI), FUNCTIONID(String_charCodeAtIU) },
5963 | { FUNCTIONID(String_charCodeAtFU), FUNCTIONID(String_charCodeAtIU) },
5964 | { FUNCTIONID(String_charCodeAtFF), FUNCTIONID(String_charCodeAtIF) },
5965 | { 0, 0 }
5966 | };
5967 | 
5968 | LIns *CodegenLIR::optimizeIntCmpWithNumberCall(int callIndex, int otherIndex, LOpcode icmp, bool swap)
5969 | { |
5970 | LIns* numSide = localGetf(callIndex); |
5971 | const CallInfo *ci = numSide->callInfo(); |
5972 | |
5973 | // Try to optimize charCodeAt to return an integer if possible. Because it can return NaN for |
5974 | // out of bounds access, we need to limit our support to constant integer values that generate |
5975 | // the same results for both NaN (floating point result) and zero (NaN cast to integer result). |
5976 | // These are the six possibilities: |
5977 | // String.CharCodeAt == int - any constant integer but zero |
5978 | // String.CharCodeAt < int - zero or any negative integer constant |
5979 | // String.CharCodeAt <= int - any negative integer constant |
5980 | // int == String.CharCodeAt - any constant integer but zero |
5981 | // int < String.CharCodeAt - zero or any positive integer constant |
5982 | // int <= String.CharCodeAt - any positive integer constant |
5983 | |
5984 | if (ci == FUNCTIONID(String_charCodeAtFI) || ci == FUNCTIONID(String_charCodeAtFU) || ci == FUNCTIONID(String_charCodeAtFF)) {
5985 | 
5986 | AvmAssert(numSide->opcode() == LIR_calld);
5987 | 
5988 | LIns* intSide = localGet(otherIndex);
5989 | if (!intSide->isImmI())
5990 | return NULL;
5991 | int32_t intVal = intSide->immI(); |
5992 | |
5993 | if ((icmp == LIR_eqi && intVal != 0) || |
5994 | (icmp == LIR_lti && (swap ? intVal >= 0 : intVal <= 0)) || |
5995 | (icmp == LIR_lei && (swap ? intVal > 0 : intVal < 0))) { |
5996 | |
5997 | numSide = specializeIntCall(numSide, intCmpWithNumber); |
5998 | AvmAssert(numSide != NULL);
5999 | if (swap) { |
6000 | return binaryIns(icmp, intSide, numSide); |
6001 | } else { |
6002 | return binaryIns(icmp, numSide, intSide); |
6003 | } |
6004 | } |
6005 | } |
6006 | |
6007 | return NULL;
6008 | } |
6009 | |
6010 | static Specialization stringCmpWithString[] = { |
6011 | { FUNCTIONID(String_charAtI), FUNCTIONID(String_charCodeAtII) },
6012 | { FUNCTIONID(String_charAtU), FUNCTIONID(String_charCodeAtIU) },
6013 | { FUNCTIONID(String_charAtF), FUNCTIONID(String_charCodeAtIF) },
6014 | { 0, 0 } |
6015 | }; |
6016 | |
6017 | LIns *CodegenLIR::optimizeStringCmpWithStringCall(int callIndex, int otherIndex, LOpcode icmp, bool swap)
6018 | { |
6019 | LIns* callSide = localGetp(callIndex); |
6020 | const CallInfo *ci = callSide->callInfo(); |
6021 | if (ci == FUNCTIONID(String_charAtI) || ci == FUNCTIONID(String_charAtU) || ci == FUNCTIONID(String_charAtF)) {
6022 | LIns* strSide = localGetp(otherIndex);
6023 | if (!strSide->isImmP())
6024 | return NULL;
6025 | |
6026 | String *s = (String *) strSide->immP(); |
6027 | // If we have a single character constant string that is not the null |
6028 | // character, we can use charCodeAt which is about 2x faster. |
6029 | if (s->length() == 1 && s->charAt(0) != 0) { |
6030 | int32_t firstChar = s->charAt(0); |
6031 | strSide = InsConst(firstChar); |
6032 | callSide = specializeIntCall(callSide, stringCmpWithString); |
6033 | AvmAssert(callSide != NULL);
6034 | if (swap) { |
6035 | return binaryIns(icmp, strSide, callSide); |
6036 | } else { |
6037 | return binaryIns(icmp, callSide, strSide); |
6038 | } |
6039 | } |
6040 | } |
6041 | |
6042 | return NULL;
6043 | } |
6044 | |
6045 | // Faster compares for int, uint, double, boolean |
6046 | LIns* CodegenLIR::cmpOptimization(int lhsi, int rhsi, LOpcode icmp, LOpcode ucmp, LOpcode fcmp) |
6047 | { |
6048 | Traits* lht = state->value(lhsi).traits; |
6049 | Traits* rht = state->value(rhsi).traits; |
6050 | |
6051 | if (lht == rht && (lht == INT_TYPE || lht == BOOLEAN_TYPE))
6052 | {
6053 | LIns* lhs = localGet(lhsi);
6054 | LIns* rhs = localGet(rhsi);
6055 | return binaryIns(icmp, lhs, rhs);
6056 | }
6057 | else if (lht == rht && lht == UINT_TYPE)
6058 | { |
6059 | LIns* lhs = localGet(lhsi); |
6060 | LIns* rhs = localGet(rhsi); |
6061 | return binaryIns(ucmp, lhs, rhs); |
6062 | } |
6063 | else if (lht && lht->isNumeric() && rht && rht->isNumeric()) |
6064 | { |
6065 | // Comparing the result of a call returning a Number to another int value. |
6066 | if (lht == NUMBER_TYPE && rht == INT_TYPE && localGetf(lhsi)->opcode() == LIR_calld) {
6067 | LIns* result = optimizeIntCmpWithNumberCall(lhsi, rhsi, icmp, false);
6068 | if (result)
6069 | return result;
6070 | }
6071 | if (rht == NUMBER_TYPE && lht == INT_TYPE && localGetf(rhsi)->opcode() == LIR_calld) {
6072 | LIns* result = optimizeIntCmpWithNumberCall(rhsi, lhsi, icmp, true);
6073 | if (result) |
6074 | return result; |
6075 | } |
6076 | |
6077 | // If we're comparing a uint to an int and the int is a non-negative |
6078 | // integer constant, don't promote to doubles for the compare |
6079 | if ((lht == UINT_TYPE) && (rht == INT_TYPE))
6080 | { |
6081 | LIns* lhs = localGet(lhsi); |
6082 | LIns* rhs = localGet(rhsi); |
6083 | #ifdef AVMPLUS_64BIT |
6084 | // 32-bit signed and unsigned values fit in 64-bit registers |
6085 | // so we can promote and simply do a signed 64bit compare |
6086 | LOpcode qcmp = cmpOpcodeI2Q(icmp); |
6087 | NanoAssert((icmp == LIR_eqi && qcmp == LIR_eqq) ||
6088 | (icmp == LIR_lti && qcmp == LIR_ltq) ||
6089 | (icmp == LIR_lei && qcmp == LIR_leq));
6090 | return binaryIns(qcmp, ui2p(lhs), i2p(rhs)); |
6091 | #else |
6092 | if (rhs->isImmI() && rhs->immI() >= 0) |
6093 | return binaryIns(ucmp, lhs, rhs); |
6094 | #endif |
6095 | } |
6096 | else if ((lht == INT_TYPE) && (rht == UINT_TYPE))
6097 | { |
6098 | LIns* lhs = localGet(lhsi); |
6099 | LIns* rhs = localGet(rhsi); |
6100 | #ifdef AVMPLUS_64BIT |
6101 | // 32-bit signed and unsigned values fit in 64-bit registers |
6102 | // so we can promote and simply do a signed 64bit compare |
6103 | LOpcode qcmp = cmpOpcodeI2Q(icmp); |
6104 | NanoAssert((icmp == LIR_eqi && qcmp == LIR_eqq) ||
6105 | (icmp == LIR_lti && qcmp == LIR_ltq) ||
6106 | (icmp == LIR_lei && qcmp == LIR_leq));
6107 | return binaryIns(qcmp, i2p(lhs), ui2p(rhs)); |
6108 | #else |
6109 | if (lhs->isImmI() && lhs->immI() >= 0) |
6110 | return binaryIns(ucmp, lhs, rhs); |
6111 | #endif |
6112 | } |
6113 | |
6114 | LIns* lhs = promoteNumberIns(lht, lhsi); |
6115 | LIns* rhs = promoteNumberIns(rht, rhsi); |
6116 | return binaryIns(fcmp, lhs, rhs); |
6117 | } |
6118 | |
6119 | if (lht == STRING_TYPE && rht == STRING_TYPE) {
6120 | if (localGetp(lhsi)->opcode() == LIR_calli) {
6121 | LIns* result = optimizeStringCmpWithStringCall(lhsi, rhsi, icmp, false);
6122 | if (result)
6123 | return result;
6124 | }
6125 | else if (localGetp(rhsi)->opcode() == LIR_calli) {
6126 | LIns* result = optimizeStringCmpWithStringCall(rhsi, lhsi, icmp, true);
6127 | if (result) |
6128 | return result; |
6129 | } |
6130 | } |
6131 | |
6132 | return NULL;
6133 | } |
6134 | |
6135 | // set cc's for < operator |
6136 | LIns* CodegenLIR::cmpLt(int lhsi, int rhsi) |
6137 | { |
6138 | LIns *result = cmpOptimization(lhsi, rhsi, LIR_lti, LIR_ltui, LIR_ltd); |
6139 | if (result) |
6140 | return result; |
6141 | |
6142 | AvmAssert(trueAtom == 13);
6143 | AvmAssert(falseAtom == 5);
6144 | AvmAssert(undefinedAtom == 4);
6145 | LIns* lhs = loadAtomRep(lhsi);
6146 | LIns* rhs = loadAtomRep(rhsi);
6147 | LIns* atom = callIns(FUNCTIONID(compare), 3,
6148 | coreAddr, lhs, rhs); |
6149 | |
6150 | // caller will use jt for (a<b) and jf for !(a<b) |
6151 | // compare ^8 <8 |
6152 | // true 1101 0101 y |
6153 | // false 0101 1101 n |
6154 | // undefined 0100 1100 n |
6155 | |
6156 | LIns* c = InsConst(8); |
6157 | return binaryIns(LIR_lti, binaryIns(LIR_xori, p2i(atom), c), c); |
6158 | } |
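// Editor's note (added): a worked example of the xor trick above, under the
// asserted atom encodings (trueAtom == 13, falseAtom == 5, undefinedAtom == 4):
//   true:      13 ^ 8 ==  5,  5 < 8 -> 1
//   false:      5 ^ 8 == 13, 13 < 8 -> 0
//   undefined:  4 ^ 8 == 12, 12 < 8 -> 0
// Flipping bit 3 moves only the "true" atom below 8, collapsing the three-way
// compare() result into a single signed less-than.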
6159 | |
6160 | LIns* CodegenLIR::cmpLe(int lhsi, int rhsi) |
6161 | { |
6162 | LIns *result = cmpOptimization(lhsi, rhsi, LIR_lei, LIR_leui, LIR_led); |
6163 | if (result) |
6164 | return result; |
6165 | |
6166 | LIns* lhs = loadAtomRep(lhsi); |
6167 | LIns* rhs = loadAtomRep(rhsi); |
6168 | LIns* atom = callIns(FUNCTIONID(compare), 3,
6169 | coreAddr, rhs, lhs); |
6170 | |
6171 | // assume caller will use jt for (a<=b) and jf for !(a<=b) |
6172 | // compare ^1 <=4 |
6173 | // true 1101 1100 n |
6174 | // false 0101 0100 y |
6175 | // undefined 0100 0101 n |
6176 | |
6177 | LIns* c2 = InsConst(1); |
6178 | LIns* c4 = InsConst(4); |
6179 | return binaryIns(LIR_lei, binaryIns(LIR_xori, p2i(atom), c2), c4); |
6180 | } |
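// Editor's note (added): the same idea for <=. Operands are swapped so
// compare() computes (b < a), and !(b < a) == (a <= b). Under the encodings
// above:
//   true (b < a):  13 ^ 1 == 12, 12 <= 4 -> 0
//   false:          5 ^ 1 ==  4,  4 <= 4 -> 1
//   undefined:      4 ^ 1 ==  5,  5 <= 4 -> 0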
6181 | |
6182 | LIns* CodegenLIR::cmpEq(const CallInfo *fid, int lhsi, int rhsi) |
6183 | { |
6184 | LIns *result = cmpOptimization(lhsi, rhsi, LIR_eqi, LIR_eqi, LIR_eqd); |
6185 | if (result) { |
6186 | return result; |
6187 | } |
6188 | |
6189 | Traits* lht = state->value(lhsi).traits; |
6190 | Traits* rht = state->value(rhsi).traits; |
6191 | |
6192 | // There are various conditions we can check for that simplify our equality check down |
6193 | // to a ptr comparison: |
6194 | // - null and a type that does not require complex equality checks |
6195 | // - null and a string - no string comparison is performed, just ptrs |
6196 | // - both types do not have complex equality checks (non builtin derived Object types) |
6197 | // This does not work for various other types (not a complete list) such as: |
6198 | // string vs string - performs string comparison |
6199 | // number vs string - type conversion is performed |
6200 | // XML types - complex equality checks |
6201 | // OBJECT_TYPE - this can mean even Number and String |
6202 | if (((lht == NULL_TYPE) && (rht && (!rht->hasComplexEqualityRules() || rht == STRING_TYPE))) ||
6203 | ((rht == NULL_TYPE) && (lht && (!lht->hasComplexEqualityRules() || lht == STRING_TYPE))) ||
6204 | ((rht && !rht->hasComplexEqualityRules()) && (lht && !lht->hasComplexEqualityRules()))) {
6205 | LIns* lhs = localGetp(lhsi);
6206 | LIns* rhs = localGetp(rhsi);
6207 | return binaryIns(LIR_eqp, lhs, rhs);
6208 | }
6209 | 
6210 | if ((lht == rht) && (lht == STRING_TYPE)) {
6211 | LIns* lhs = localGetp(lhsi);
6212 | LIns* rhs = localGetp(rhsi);
6213 | return callIns(FUNCTIONID(String_equals), 2, lhs, rhs);
6214 | } |
6215 | |
6216 | LIns* lhs = loadAtomRep(lhsi); |
6217 | LIns* rhs = loadAtomRep(rhsi); |
6218 | LIns* out = callIns(fid, 3, coreAddr, lhs, rhs); |
6219 | result = binaryIns(LIR_eqp, out, InsConstAtom(trueAtom)); |
6220 | return result; |
6221 | } |
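// Editor's note (added): hypothetical AS3 shapes hitting each path above:
//   s == null   (s:String)            -> single LIR_eqp pointer compare
//   s == t      (both String)         -> direct call to String_equals
//   anything else (Object, XML, ...)  -> generic equals()/stricteq() on atoms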
6222 | |
6223 | void CodegenLIR::writeEpilogue(const FrameState *state) |
6224 | { |
6225 | this->state = state; |
6226 | this->labelCount = driver->getBlockCount(); |
6227 | |
6228 | if (mop_rangeCheckFailed_label.unpatchedEdges) { |
6229 | emitLabel(mop_rangeCheckFailed_label); |
6230 | Ins(LIR_regfence); |
6231 | callIns(FUNCTIONID(mop_rangeCheckFailed), 1, env_param);
6232 | } |
6233 | |
6234 | if (npe_label.unpatchedEdges) { |
6235 | emitLabel(npe_label); |
6236 | Ins(LIR_regfence); |
6237 | callIns(FUNCTIONID(npe), 1, env_param);
6238 | } |
6239 | |
6240 | if (upe_label.unpatchedEdges) { |
6241 | emitLabel(upe_label); |
6242 | Ins(LIR_regfence); |
6243 | callIns(FUNCTIONID(upe), 1, env_param);
6244 | } |
6245 | |
6246 | if (interrupt_label.unpatchedEdges) { |
6247 | emitLabel(interrupt_label); |
6248 | Ins(LIR_regfence); |
6249 | callIns(FUNCTIONID(handleInterruptMethodEnv), 1, env_param);
6250 | } |
6251 | |
6252 | if (driver->hasReachableExceptions()) { |
6253 | // exception case |
6254 | emitLabel(catch_label); |
6255 | |
6256 | // This regfence is necessary for correctness, |
6257 | // as register contents after a longjmp are unpredictable. |
6258 | Ins(LIR_regfence); |
6259 | |
6260 | // _ef.beginCatch() |
6261 | int stackBase = ms->stack_base(); |
6262 | LIns* pc = loadIns(LIR_ldp, 0, _save_eip, ACCSET_OTHER); |
6263 | LIns* slotAddr = leaIns(stackBase * VARSIZE, vars); |
6264 | LIns* tagAddr = leaIns(stackBase, tags); |
6265 | LIns* handler_ordinal = callIns(FUNCTIONID(beginCatch), 6, coreAddr, _ef, InsConstPtr(info), pc, slotAddr, tagAddr);
6266 | |
6267 | int handler_count = info->abc_exceptions()->exception_count; |
6268 | // Jump to catch handler |
6269 | // Find last handler, to optimize branches generated below. |
6270 | int i; |
6271 | for (i = handler_count-1; i >= 0; i--) { |
6272 | ExceptionHandler* h = &info->abc_exceptions()->exceptions[i]; |
6273 | const uint8_t* handler_pc = code_pos + h->target; |
6274 | if (driver->hasFrameState(handler_pc)) break; |
6275 | } |
6276 | int last_ordinal = i; |
6277 | // There should be at least one reachable handler. |
6278 | AvmAssert(last_ordinal >= 0);
6279 | // Do a compare & branch to each possible target. |
6280 | for (int j = 0; j <= last_ordinal; j++) { |
6281 | ExceptionHandler* h = &info->abc_exceptions()->exceptions[j]; |
6282 | const uint8_t* handler_pc = code_pos + h->target; |
6283 | if (driver->hasFrameState(handler_pc)) { |
6284 | CodegenLabel& label = getCodegenLabel(handler_pc); |
6285 | AvmAssert(label.labelIns != NULL);
6286 | if (j == last_ordinal) {
6287 | lirout->insBranch(LIR_j, NULL, label.labelIns);
6288 | } else { |
6289 | LIns* cond = binaryIns(LIR_eqi, handler_ordinal, InsConst(j)); |
6290 | // Don't use branchToLabel() here because we don't want to check null bits; |
6291 | // this backedge is internal to exception handling and doesn't affect user |
6292 | // variable dataflow. |
6293 | lirout->insBranch(LIR_jt, cond, label.labelIns); |
6294 | } |
6295 | } |
6296 | } |
6297 | livep(_ef); |
6298 | livep(_save_eip); |
6299 | } |
6300 | |
6301 | if (prolog->env_scope) livep(prolog->env_scope); |
6302 | if (prolog->env_vtable) livep(prolog->env_vtable); |
6303 | if (prolog->env_abcenv) livep(prolog->env_abcenv); |
6304 | if (prolog->env_domainenv) livep(prolog->env_domainenv); |
6305 | if (prolog->env_toplevel) livep(prolog->env_toplevel); |
6306 | if (restArgc) { |
6307 | lirout->ins1(LIR_livei, restArgc); |
6308 | livep(ap_param); |
6309 | } |
6310 | |
6311 | #ifdef DEBUGGER |
6312 | if (haveDebugger) |
6313 | livep(csn); |
6314 | #endif |
6315 | |
6316 | // extend live range of critical stuff |
6317 | // fixme -- this should be automatic based on live analysis |
6318 | livep(methodFrame); |
6319 | livep(env_param); |
6320 | frag->lastIns = livep(coreAddr); |
6321 | skip_ins->overwriteWithSkip(prolog->lastIns); |
6322 | |
6323 | info->set_lookup_cache_size(finddef_cache_builder.next_cache); |
6324 | |
6325 | // After CodeWriter::writeEpilogue() is called, driver is invalid |
6326 | // and could be destructed. Null out our pointer as a precaution. |
6327 | this->driver = NULL;
6328 | } |
6329 | |
6330 | // emit code to create a stack-allocated copy of the given multiname. |
6331 | // this helper only initializes Multiname.flags and Multiname.next_index |
6332 | LIns* CodegenLIR::copyMultiname(const Multiname* multiname) |
6333 | { |
6334 | LIns* name = insAlloc(sizeof(Multiname)); |
6335 | sti(InsConst(multiname->ctFlags()), name, offsetof(Multiname, flags), ACCSET_OTHER);
6336 | sti(InsConst(multiname->next_index), name, offsetof(Multiname, next_index), ACCSET_OTHER);
6337 | return name; |
6338 | } |
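// Editor's note (added): a sketch of the usage pattern, using names from this
// file (callers fill in name/ns and must keep the alloc live past the call):
//   LIns* tmp = copyMultiname(mn);   // copies flags and next_index only
//   stp(InsConstPtr(mn->ns), tmp, offsetof(Multiname, ns), ACCSET_OTHER);
//   callIns(..., tmp, ...);          // helper that consumes the Multiname
//   liveAlloc(tmp);                  // extend the stack alloc's live range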
6339 | |
6340 | LIns* CodegenLIR::initMultiname(const Multiname* multiname, int& csp, bool isDelete /*=false*/)
6341 | { |
6342 | if (!multiname->isRuntime()) { |
6343 | // use the precomputed multiname |
6344 | return InsConstPtr(multiname); |
6345 | } |
6346 | |
6347 | // create and initialize a copy of the given multiname
6348 | LIns* _tempname = copyMultiname(multiname); |
6349 | |
6350 | // then initialize its name and ns|nsset fields. |
6351 | LIns* nameAtom = NULL;
6352 | if (multiname->isRtname()) |
6353 | { |
6354 | nameAtom = loadAtomRep(csp--); |
6355 | } |
6356 | else |
6357 | { |
6358 | // copy the compile-time name to the temp name |
6359 | LIns* mName = InsConstPtr(multiname->name); |
6360 | stp(mName, _tempname, offsetof(Multiname, name), ACCSET_OTHER);
6361 | } |
6362 | |
6363 | if (multiname->isRtns()) |
6364 | { |
6365 | // intern the runtime namespace and copy to the temp multiname |
6366 | LIns* nsAtom = loadAtomRep(csp--); |
6367 | LIns* internNs = callIns(FUNCTIONID(internRtns), 2,
6368 | env_param, nsAtom);
6369 | 
6370 | stp(internNs, _tempname, offsetof(Multiname, ns), ACCSET_OTHER);
6371 | } |
6372 | else |
6373 | { |
6374 | // copy the compile-time namespace to the temp multiname |
6375 | LIns* mSpace = InsConstPtr(multiname->ns); |
6376 | stp(mSpace, _tempname, offsetof(Multiname, ns), ACCSET_OTHER);
6377 | } |
6378 | |
6379 | // Call initMultinameLate as the last step, since if a runtime |
6380 | // namespace is present, initMultinameLate may clobber it if a |
6381 | // QName is provided as index. |
6382 | if (nameAtom) |
6383 | { |
6384 | if (isDelete) |
6385 | { |
6386 | callIns(FUNCTIONID(initMultinameLateForDelete), 3,
6387 | env_param, _tempname, nameAtom); |
6388 | } |
6389 | else |
6390 | { |
6391 | callIns(FUNCTIONID(initMultinameLate), 3,
6392 | coreAddr, _tempname, nameAtom); |
6393 | } |
6394 | } |
6395 | |
6396 | return _tempname; |
6397 | } |
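// Editor's note (added): csp is decremented once per runtime component
// consumed, mirroring the verifier's stack effects; e.g. a multiname that is
// both RTNS and RTNAME pops two atoms here (the name first, then the
// namespace).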
6398 | |
6399 | LIns* CodegenLIR::mopAddrToRangeCheckedRealAddrAndDisp(LIns* mopAddr, int32_t const size, int32_t* disp) |
6400 | { |
6401 | AvmAssert(size > 0); // it's signed to help make the int promotion correct
6402 | |
6403 | if (!mopsRangeCheckFilter) { |
6404 | // add a MopsRangeCheckFilter to the back end of the lirout pipeline, just after CseFilter. |
6405 | // FIXME: Bug 554030: We must put this after CseFilter and ExprFilter so that
6406 | // the range-check expressions using LIR_addi/LIR_subi are not modified (by ExprFilter)
6407 | // and do not become referenced by other unrelated code (by CseFilter).
6408 | AvmAssert(lirout == varTracker);
6409 | mopsRangeCheckFilter = new (*alloc1) MopsRangeCheckFilter(redirectWriter->out, prolog, loadEnvDomainEnv()); |
6410 | redirectWriter->out = mopsRangeCheckFilter; |
6411 | } |
6412 | |
6413 | // note, mopAddr and disp are both in/out parameters |
6414 | LIns* br = NULL;
6415 | LIns* mopsMemoryBase = mopsRangeCheckFilter->emitRangeCheck(mopAddr, size, disp, br); |
6416 | if (br) |
6417 | patchLater(br, mop_rangeCheckFailed_label); |
6418 | |
6419 | |
6420 | // if mopAddr is a compiletime constant, we still have to do the range-check above |
6421 | // (since globalMemorySize can vary at runtime), but we might be able to encode |
6422 | // the entire address into the displacement (if any)... |
6423 | if (mopAddr->isImmI() && disp != NULL && sumFitsInInt32(*disp, mopAddr->immI()))
6424 | { |
6425 | *disp += mopAddr->immI(); |
6426 | return mopsMemoryBase; |
6427 | } |
6428 | |
6429 | // (yes, i2p, not u2p... it might legitimately be negative due to the |
6430 | // displacement optimization in emitCheck().) |
6431 | return binaryIns(LIR_addp, mopsMemoryBase, i2p(mopAddr)); |
6432 | } |
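// Editor's note (added): illustrative effect of the constant-folding branch
// above. Assuming mopAddr is the constant 100 and *disp is 4 on entry, *disp
// becomes 104 and mopsMemoryBase is returned directly, so the caller can emit
// a load/store at displacement 104 off the memory base instead of
// materializing addp(base, 100).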
6433 | |
6434 | LIns* CodegenLIR::loadEnvScope() |
6435 | { |
6436 | LIns* scope = prolog->env_scope; |
6437 | if (!scope) |
6438 | { |
6439 | prolog->env_scope = scope = prolog->insLoad(LIR_ldp, env_param, offsetof(MethodEnv, _scope), ACCSET_OTHER, LOAD_CONST);
6440 | verbose_only( if (vbNames) { |
6441 | vbNames->lirNameMap->addName(scope, "env_scope"); |
6442 | }) |
6443 | verbose_only( if (vbWriter) { vbWriter->flush(); } ) |
6444 | } |
6445 | return scope; |
6446 | } |
6447 | |
6448 | LIns* CodegenLIR::loadEnvVTable() |
6449 | { |
6450 | LIns* vtable = prolog->env_vtable; |
6451 | if (!vtable) |
6452 | { |
6453 | LIns* scope = loadEnvScope(); |
6454 | prolog->env_vtable = vtable = prolog->insLoad(LIR_ldp, scope, offsetof(ScopeChain, _vtable), ACCSET_OTHER, LOAD_CONST);
6455 | verbose_only( if (vbNames) { |
6456 | vbNames->lirNameMap->addName(vtable, "env_vtable"); |
6457 | }) |
6458 | verbose_only( if (vbWriter) { vbWriter->flush(); } ) |
6459 | } |
6460 | return vtable; |
6461 | } |
6462 | |
6463 | LIns* CodegenLIR::loadEnvAbcEnv() |
6464 | { |
6465 | LIns* abcenv = prolog->env_abcenv; |
6466 | if (!abcenv) |
6467 | { |
6468 | LIns* scope = loadEnvScope(); |
6469 | prolog->env_abcenv = abcenv = prolog->insLoad(LIR_ldp, scope, offsetof(ScopeChain, _abcEnv), ACCSET_OTHER, LOAD_CONST);
6470 | verbose_only( if (vbNames) { |
6471 | vbNames->lirNameMap->addName(abcenv, "env_abcenv"); |
6472 | }) |
6473 | verbose_only( if (vbWriter) { vbWriter->flush(); } ) |
6474 | } |
6475 | return abcenv; |
6476 | } |
6477 | |
6478 | LIns* CodegenLIR::loadEnvDomainEnv() |
6479 | { |
6480 | LIns* domainenv = prolog->env_domainenv; |
6481 | if (!domainenv) |
6482 | { |
6483 | LIns* abcenv = loadEnvAbcEnv(); |
6484 | prolog->env_domainenv = domainenv = prolog->insLoad(LIR_ldp, abcenv, offsetof(AbcEnv, m_domainEnv), ACCSET_OTHER, LOAD_CONST);
6485 | verbose_only( if (vbNames) { |
6486 | vbNames->lirNameMap->addName(domainenv, "env_domainenv"); |
6487 | }) |
6488 | verbose_only( if (vbWriter) { vbWriter->flush(); } ) |
6489 | } |
6490 | return domainenv; |
6491 | } |
6492 | |
6493 | LIns* CodegenLIR::loadEnvToplevel() |
6494 | { |
6495 | LIns* toplevel = prolog->env_toplevel; |
6496 | if (!toplevel) |
6497 | { |
6498 | LIns* vtable = loadEnvVTable(); |
6499 | prolog->env_toplevel = toplevel = prolog->insLoad(LIR_ldp, vtable, offsetof(VTable, _toplevel), ACCSET_OTHER, LOAD_CONST);
6500 | verbose_only( if (vbNames) { |
6501 | vbNames->lirNameMap->addName(toplevel, "env_toplevel"); |
6502 | }) |
6503 | verbose_only( if (vbWriter) { vbWriter->flush(); } ) |
6504 | } |
6505 | return toplevel; |
6506 | } |
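// Editor's note (added): the loadEnvXXX() helpers above form a lazily
// materialized chain, each load cached in the prolog so it is emitted at most
// once per method:
//   env_param -> _scope -> _vtable -> _toplevel
//                       \-> _abcEnv -> m_domainEnv
// All are LOAD_CONST loads, so CSE can safely reuse them anywhere.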
6507 | |
6508 | /** |
6509 | * given an object and type, produce code that loads the VTable for |
6510 | * the object. Handles all types: primitive vtables get loaded from
6511 | * Toplevel, Object and * vtables get loaded by calling the toVTable() helper. |
6512 | * ScriptObject* vtables are loaded from the ScriptObject. |
6513 | */ |
6514 | LIns* CodegenLIR::loadVTable(LIns* obj, Traits* t) |
6515 | { |
6516 | if (t && !t->isMachineType() && t != STRING_TYPE && t != NAMESPACE_TYPE && t != NULL_TYPE)
6517 | {
6518 | // must be a pointer to a ScriptObject, and we've done the null check,
6519 | // so it's safe to load the vtable directly from the object
6520 | return loadIns(LIR_ldp, offsetof(ScriptObject, vtable), obj, ACCSET_OTHER, LOAD_CONST);
6521 | } |
6522 | |
6523 | LIns* toplevel = loadEnvToplevel(); |
6524 | |
6525 | int offset; |
6526 | if (t == NAMESPACE_TYPE) offset = offsetof(Toplevel, _namespaceClass);
6527 | else if (t == STRING_TYPE) offset = offsetof(Toplevel, _stringClass);
6528 | else if (t == BOOLEAN_TYPE) offset = offsetof(Toplevel, _booleanClass);
6529 | else if (t == NUMBER_TYPE) offset = offsetof(Toplevel, _numberClass);
6530 | else if (t == INT_TYPE) offset = offsetof(Toplevel, _intClass);
6531 | else if (t == UINT_TYPE) offset = offsetof(Toplevel, _uintClass);
6532 | else |
6533 | { |
6534 | // *, Object or Void |
6535 | LIns* atom = nativeToAtom(obj, t); |
6536 | return callIns(FUNCTIONID(toVTable), 2, toplevel, atom);
6537 | } |
6538 | |
6539 | // now offset != -1 and we are returning a primitive vtable |
6540 | |
6541 | LIns* cc = loadIns(LIR_ldp, offset, toplevel, ACCSET_OTHER, LOAD_CONST); |
6542 | LIns* cvtable = loadIns(LIR_ldp, offsetof(ClassClosure, vtable), cc, ACCSET_OTHER, LOAD_CONST);
6543 | return loadIns(LIR_ldp, offsetof(VTable, ivtable), cvtable, ACCSET_OTHER, LOAD_CONST);
6544 | } |
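// Editor's note (added): e.g. for t == INT_TYPE the primitive path emits three
// dependent constant loads, roughly toplevel->_intClass, then
// ClassClosure::vtable, then VTable::ivtable -- the ivtable shared by all int
// receivers.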
6545 | |
6546 | LIns* CodegenLIR::promoteNumberIns(Traits* t, int i) |
6547 | { |
6548 | if (t == NUMBER_TYPE)
6549 | {
6550 | return localGetf(i);
6551 | }
6552 | if (t == INT_TYPE || t == BOOLEAN_TYPE)
6553 | {
6554 | return i2dIns(localGet(i));
6555 | }
6556 | AvmAssert(t == UINT_TYPE);
6557 | return ui2dIns(localGet(i)); |
6558 | } |
6559 | |
6560 | /// set position of a label and patch all pending jumps to point here. |
6561 | void CodegenLIR::emitLabel(CodegenLabel& label) { |
6562 | varTracker->trackLabel(label, state); |
6563 | |
6564 | // patch all unpatched branches to this label |
6565 | LIns* labelIns = label.labelIns; |
6566 | bool jtbl_forward_target = false;
6567 | for (Seq<InEdge>* p = label.unpatchedEdges; p != NULL; p = p->tail) {
6568 | InEdge& patch = p->head;
6569 | LIns* branchIns = patch.branchIns;
6570 | if (branchIns->isop(LIR_jtbl)) {
6571 | jtbl_forward_target = true;
6572 | branchIns->setTarget(patch.index, labelIns);
6573 | } else {
6574 | AvmAssert(branchIns->isBranch() && patch.index == 0);
6575 | branchIns->setTarget(labelIns); |
6576 | } |
6577 | } |
6578 | if (jtbl_forward_target) { |
6579 | // A jtbl (switch) jumps forward to here, creating a situation our |
6580 | // register allocator cannot handle; force regs to be loaded at the |
6581 | // start of this block. |
6582 | Ins(LIR_regfence); |
6583 | } |
6584 | |
6585 | #ifdef NJ_VERBOSE |
6586 | if (vbNames && label.name) |
6587 | vbNames->lirNameMap->addName(label.labelIns, label.name); |
6588 | #endif |
6589 | } |
6590 | |
6591 | #ifdef DEBUGGER |
6592 | void CodegenLIR::emitSampleCheck() |
6593 | { |
6594 | /* @todo inline the sample check code, help! */ |
6595 | callIns(FUNCTIONID(sampleCheck), 1, coreAddr);
6596 | } |
6597 | #endif |
6598 | |
6599 | #ifdef NJ_VERBOSE |
6600 | bool CodegenLIR::verbose()
6601 | { |
6602 | return pool->isVerbose(VB_jit, info); |
6603 | } |
6604 | #endif |
6605 | |
6606 | // emit a conditional branch to the given label. If we have already emitted |
6607 | // code for that label then the branch is complete. If not then add a patch |
6608 | // record to the label, which will patch the branch when the label position |
6609 | // is reached. cond == NULL for unconditional branches (LIR_j). |
6610 | void CodegenLIR::branchToLabel(LOpcode op, LIns *cond, CodegenLabel& label) { |
6611 | if (cond && !cond->isCmp()) { |
6612 | // branching on a non-condition expression, so test (v==0) |
6613 | // and invert the sense of the branch. |
6614 | cond = eqi0(cond); |
6615 | op = invertCondJmpOpcode(op); |
6616 | } |
6617 | LIns* labelIns = label.labelIns; |
6618 | LIns* br = lirout->insBranch(op, cond, labelIns); |
6619 | if (br != NULL) {
6620 | if (labelIns != NULL) {
6621 | varTracker->checkBackEdge(label, state);
6622 | } else {
6623 | label.unpatchedEdges = new (*alloc1) Seq<InEdge>(InEdge(br), label.unpatchedEdges);
6624 | varTracker->trackForwardEdge(label, false);
6625 | } |
6626 | } else { |
6627 | // branch was optimized away. do nothing. |
6628 | } |
6629 | } |
6630 | |
6631 | LIns* CodegenLIR::branchJovToLabel(LOpcode op, LIns *a, LIns *b, CodegenLabel& label) { |
6632 | LIns* labelIns = label.labelIns; |
6633 | LIns* result = lirout->insBranchJov(op, a, b, labelIns); |
6634 | NanoAssert(result);
6635 | if (result->isop(op)) {
6636 | if (labelIns != NULL) {
6637 | varTracker->checkBackEdge(label, state);
6638 | } else {
6639 | label.unpatchedEdges = new (*alloc1) Seq<InEdge>(InEdge(result), label.unpatchedEdges);
6640 | varTracker->trackForwardEdge(label, false);
6641 | } |
6642 | } else { |
6643 | // The root operator of the expression has been eliminated via |
6644 | // constant folding or other simplification. This is only valid |
6645 | // if no overflow is possible, in which case the branch is not needed. |
6646 | // Do nothing. |
6647 | } |
6648 | return result; |
6649 | } |
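// Editor's note (added): a plausible folding case for the comment above:
// insBranchJov() may simplify e.g. addjovi(x, 0) to just x, which cannot
// overflow, so no branch is emitted; the isop(op) check is what keeps the
// patch bookkeeping from recording a nonexistent branch.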
6650 | |
6651 | // emit a relative branch to the given ABC pc-offset by mapping pc |
6652 | // to a corresponding CodegenLabel, and creating a new one if necessary |
6653 | void CodegenLIR::branchToAbcPos(LOpcode op, LIns *cond, const uint8_t* pc) { |
6654 | CodegenLabel& label = getCodegenLabel(pc); |
6655 | branchToLabel(op, cond, label); |
6656 | } |
6657 | |
6658 | CodegenLabel& CodegenLIR::createLabel(const char* name) { |
6659 | return *(new (*alloc1) CodegenLabel(name)); |
6660 | } |
6661 | |
6662 | CodegenLabel& CodegenLIR::createLabel(const char* prefix, int id) { |
6663 | CodegenLabel* label = new (*alloc1) CodegenLabel(); |
6664 | #ifdef NJ_VERBOSE |
6665 | if (vbNames) { |
6666 | char *name = new (*lir_alloc) char[VMPI_strlen(prefix)+16];
6667 | VMPI_sprintf(name, "%s%d", prefix, id);
6668 | label->name = name; |
6669 | } |
6670 | #else |
6671 | (void) prefix; |
6672 | (void) id; |
6673 | #endif |
6674 | return *label; |
6675 | } |
6676 | |
6677 | CodegenLabel& CodegenLIR::getCodegenLabel(const uint8_t* pc) { |
6678 | AvmAssert(driver->hasFrameState(pc));
6679 | if (!blockLabels) |
6680 | blockLabels = new (*alloc1) HashMap<const uint8_t*,CodegenLabel*>(*alloc1, driver->getBlockCount()); |
6681 | CodegenLabel* label = blockLabels->get(pc); |
6682 | if (!label) { |
6683 | label = new (*alloc1) CodegenLabel(); |
6684 | blockLabels->put(pc, label); |
6685 | } |
6686 | #ifdef NJ_VERBOSE |
6687 | if (!label->name && vbNames) { |
6688 | char *name = new (*lir_alloc) char[16]; |
6689 | VMPI_sprintf(name, "B%d", int(pc - code_pos));
6690 | label->name = name; |
6691 | } |
6692 | #endif |
6693 | return *label; |
6694 | } |
6695 | |
6696 | /// connect to a label for one entry of a switch |
6697 | void CodegenLIR::patchLater(LIns* jtbl, const uint8_t* pc, uint32_t index) { |
6698 | CodegenLabel& target = getCodegenLabel(pc); |
6699 | if (target.labelIns != 0) { |
6700 | jtbl->setTarget(index, target.labelIns); // backward edge |
6701 | varTracker->checkBackEdge(target, state); |
6702 | } else { |
6703 | target.unpatchedEdges = new (*alloc1) Seq<InEdge>(InEdge(jtbl, index), target.unpatchedEdges); |
6704 | varTracker->trackForwardEdge(target, false);
6705 | } |
6706 | } |
6707 | |
6708 | void CodegenLIR::patchLater(LIns *br, CodegenLabel &target) { |
6709 | if (!br) return; // occurs if branch was unconditional and thus never emitted. |
6710 | if (target.labelIns != 0) { |
6711 | br->setTarget(target.labelIns); // backwards edge |
6712 | varTracker->checkBackEdge(target, state); |
6713 | } else { |
6714 | target.unpatchedEdges = new (*alloc1) Seq<InEdge>(InEdge(br), target.unpatchedEdges); |
6715 | varTracker->trackForwardEdge(target, false);
6716 | } |
6717 | } |
6718 | |
6719 | LIns* CodegenLIR::insAlloc(int32_t size) { |
6720 | return lirout->insAlloc(size >= 4 ? size : 4); |
6721 | } |
6722 | |
6723 | LIns* CodegenLIR::insAllocForTraits(Traits *t) |
6724 | { |
6725 | switch (bt(t)) { |
6726 | case BUILTIN_number: |
6727 | return insAlloc(sizeof(double)); |
6728 | case BUILTIN_int: |
6729 | case BUILTIN_uint: |
6730 | case BUILTIN_boolean: |
6731 | return insAlloc(sizeof(int32_t)); |
6732 | default: |
6733 | return insAlloc(sizeof(intptr_t)); |
6734 | } |
6735 | } |
6736 | |
6737 | CodeMgr::CodeMgr() : codeAlloc(), bindingCaches(NULL)
6738 | { |
6739 | verbose_only( log.lcbits = 0; ) |
6740 | } |
6741 | |
6742 | void CodeMgr::flushBindingCaches() |
6743 | { |
6744 | // this clears vtable so all kObjectType receivers are invalidated. |
6745 | // of course, this field is also "tag" for primitive receivers, |
6746 | // but 0 is never a legal value there (and this is asserted when the tag is set) |
6747 | // so this should safely invalidate those as well (though we don't really need to invalidate them) |
6748 | for (BindingCache* b = bindingCaches; b != NULL; b = b->next)
6749 | b->vtable = NULL;
6750 | } |
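// Editor's note (added): a sketch of why nulling vtable suffices, assuming the
// cache handlers' fast path is shaped like:
//   if (receiver->vtable == cache->vtable) { ...hit... } else { ...rebind... }
// NULL can never equal a live vtable pointer (or a nonzero primitive tag), so
// every cached binding misses once and repopulates itself.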
6751 | |
6752 | void analyze_edge(LIns* label, nanojit::BitSet &livein, |
6753 | LabelBitSet& labels, InsList* looplabels) |
6754 | { |
6755 | nanojit::BitSet *lset = labels.get(label); |
6756 | if (lset) { |
6757 | livein.setFrom(*lset); |
6758 | } else { |
6759 | AvmAssertMsg(looplabels != NULL, "Unexpected back-edge");
6760 | looplabels->add(label); |
6761 | } |
6762 | } |
6763 | |
6764 | // Treat addp(vars, const) as a load from vars[const] |
6765 | // for the sake of dead store analysis. |
6766 | void analyze_addp(LIns* ins, LIns* vars, nanojit::BitSet& varlivein) |
6767 | { |
6768 | AvmAssert(ins->isop(LIR_addp));
6769 | if (ins->oprnd1() == vars && ins->oprnd2()->isImmP()) {
6770 | AvmAssert(IS_ALIGNED(ins->oprnd2()->immP(), VARSIZE));
6771 | int d = int(uintptr_t(ins->oprnd2()->immP()) / VARSIZE); |
6772 | varlivein.set(d); |
6773 | } |
6774 | } |
6775 | |
6776 | // Treat the calculated address of addp(vars, const) as the target |
6777 | // of a store to the variable pointed to, as well as its associated tag. |
6778 | void analyze_addp_store(LIns* ins, LIns* vars, nanojit::BitSet& varlivein, nanojit::BitSet& taglivein) |
6779 | { |
6780 | AvmAssert(ins->isop(LIR_addp));
6781 | if (ins->oprnd1() == vars && ins->oprnd2()->isImmP()) {
6782 | AvmAssert(IS_ALIGNED(ins->oprnd2()->immP(), VARSIZE));
6783 | int d = int(uintptr_t(ins->oprnd2()->immP()) / VARSIZE); |
6784 | varlivein.clear(d); |
6785 | taglivein.clear(d); |
6786 | } |
6787 | } |
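// Editor's note (added): the addressing convention assumed by both helpers:
// vars[] has one VARSIZE-byte slot per local and tags[] one byte per local, so
// addp(vars, 2*VARSIZE) denotes local #2 and maps to bit 2 of the var and tag
// bitsets.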
6788 | |
6789 | void analyze_call(LIns* ins, LIns* catcher, LIns* vars, DEBUGGER_ONLY(bool haveDebugger, int dbg_framesize,) |
6790 | nanojit::BitSet& varlivein, LabelBitSet& varlabels, |
6791 | nanojit::BitSet& taglivein, LabelBitSet& taglabels) |
6792 | { |
6793 | if (ins->callInfo() == FUNCTIONID(beginCatch)) {
6794 | // beginCatch(core, ef, info, pc, &vars[i], &tags[i]) => store to &vars[i] and &tag[i] |
6795 | LIns* varPtrArg = ins->arg(4); // varPtrArg == vars, OR addp(vars, index) |
6796 | if (varPtrArg == vars) { |
6797 | varlivein.clear(0); |
6798 | taglivein.clear(0); |
6799 | } else if (varPtrArg->isop(LIR_addp)) { |
6800 | analyze_addp_store(varPtrArg, vars, varlivein, taglivein); |
6801 | } |
6802 | } else if (!ins->callInfo()->_isPure) { |
6803 | if (catcher) { |
6804 | // non-cse call is like a conditional forward branch to the catcher label. |
6805 | // this could be made more precise by checking whether this call |
6806 | // can really throw, and only processing edges to the subset of |
6807 | // reachable catch blocks. If we haven't seen the catch label yet then |
6808 | // the call is to an exception handling helper (eg beginCatch()) |
6809 | // that won't throw. |
6810 | nanojit::BitSet *varlset = varlabels.get(catcher); |
6811 | if (varlset) |
6812 | varlivein.setFrom(*varlset); |
6813 | nanojit::BitSet *taglset = taglabels.get(catcher); |
6814 | if (taglset) |
6815 | taglivein.setFrom(*taglset); |
6816 | } |
6817 | #ifdef DEBUGGER |
6818 | if (haveDebugger) { |
6819 | // all vars and scopes must be considered "read" by any call |
6820 | // the debugger can stop in. The debugger also will access tags[]. |
6821 | for (int i = 0, n = dbg_framesize; i < n; i++) { |
6822 | varlivein.set(i); |
6823 | taglivein.set(i); |
6824 | } |
6825 | } |
6826 | #endif |
6827 | } |
6828 | else if (ins->callInfo() == FUNCTIONID(makeatom)) {
6829 | // makeatom(core, &vars[index], tag[index]) => treat as load from &vars[index] |
6830 | LIns* varPtrArg = ins->arg(1); // varPtrArg == vars, OR addp(vars, index) |
6831 | if (varPtrArg == vars) |
6832 | varlivein.set(0); |
6833 | else if (varPtrArg->isop(LIR_addp)) |
6834 | analyze_addp(varPtrArg, vars, varlivein); |
6835 | } |
6836 | else if (ins->callInfo() == FUNCTIONID(restargHelper)) {
6837 | // restargHelper(Toplevel*, Multiname*, Atom, ArrayObject**, uint32_t, Atom*) |
6838 | // The ArrayObject** is a reference to a var |
6839 | LIns* varPtrArg = ins->arg(3); // varPtrArg == vars, OR addp(vars, index) |
6840 | if (varPtrArg == vars) |
6841 | varlivein.set(0); |
6842 | else if (varPtrArg->isop(LIR_addp)) |
6843 | analyze_addp(varPtrArg, vars, varlivein); |
6844 | } |
6845 | } |
6846 | |
6847 | // Analyze a LIR_label. We're at the top of a block, save livein for |
6848 | // this block so it can be propagated to predecessors. |
6849 | bool analyze_label(LIns* i, Allocator& alloc, bool again,
6850 | int framesize, InsList* looplabels, |
6851 | nanojit::BitSet& varlivein, LabelBitSet& varlabels, |
6852 | nanojit::BitSet& taglivein, LabelBitSet& taglabels) |
6853 | { |
6854 | nanojit::BitSet *var_lset = varlabels.get(i); |
6855 | if (!var_lset) { |
6856 | var_lset = new (alloc) nanojit::BitSet(alloc, framesize); |
6857 | varlabels.put(i, var_lset); |
6858 | } |
6859 | if (var_lset->setFrom(varlivein) && !again) { |
6860 | for (Seq<LIns*>* p = looplabels->get(); p != NULL; p = p->tail) {
6861 | if (p->head == i) {
6862 | again = true;
6863 | break; |
6864 | } |
6865 | } |
6866 | } |
6867 | nanojit::BitSet *tag_lset = taglabels.get(i); |
6868 | if (!tag_lset) { |
6869 | tag_lset = new (alloc) nanojit::BitSet(alloc, framesize); |
6870 | taglabels.put(i, tag_lset); |
6871 | } |
6872 | if (tag_lset->setFrom(taglivein) && !again) { |
6873 | for (Seq<LIns*>* p = looplabels->get(); p != NULL; p = p->tail) {
6874 | if (p->head == i) {
6875 | again = true;
6876 | break; |
6877 | } |
6878 | } |
6879 | } |
6880 | return again; |
6881 | } |
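// Editor's note (added): liveness is solved iteratively. "again" is set only
// when a label that some loop jumps back to grows its live-in set, forcing
// deadvars_analyze() below to run another full backward pass; the bitsets grow
// monotonically, so the iteration terminates at a fixed point.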
6882 | |
6883 | void CodegenLIR::deadvars_analyze(Allocator& alloc, |
6884 | nanojit::BitSet& varlivein, LabelBitSet& varlabels, |
6885 | nanojit::BitSet& taglivein, LabelBitSet& taglabels) |
6886 | { |
6887 | LIns *catcher = this->catch_label.labelIns; |
6888 | InsList looplabels(alloc); |
6889 | |
6890 | verbose_only(int iter = 0;) |
6891 | bool again;
6892 | do {
6893 | again = false;
6894 | varlivein.reset(); |
6895 | taglivein.reset(); |
6896 | LirReader in(frag->lastIns); |
6897 | for (LIns *i = in.read(); !i->isop(LIR_start); i = in.read()) { |
6898 | LOpcode op = i->opcode(); |
6899 | switch (op) { |
6900 | case LIR_reti: |
6901 | CASE64(LIR_retq:) |
6902 | case LIR_retd: |
6903 | varlivein.reset(); |
6904 | taglivein.reset(); |
6905 | break; |
6906 | CASE64(LIR_stq:) |
6907 | case LIR_sti: |
6908 | case LIR_std: |
6909 | case LIR_sti2c: |
6910 | if (i->oprnd2() == vars) { |
6911 | int d = i->disp() / VARSIZE; |
6912 | varlivein.clear(d); |
6913 | } else if (i->oprnd2() == tags) { |
6914 | int d = i->disp(); // 1 byte per tag |
6915 | taglivein.clear(d); |
6916 | } |
6917 | break; |
6918 | case LIR_addp: |
6919 | // treat pointer calculations into vars as a read from vars |
6920 | // FIXME: Bug 569677 |
6921 | // This is extremely fragile. There is no reason to suppose |
6922 | // that the address will be used for a read rather than a store, |
6923 | // other than that to do so would break the deadvars analysis |
6924 | // because of the dodgy assumption made here. |
6925 | analyze_addp(i, vars, varlivein); |
6926 | break; |
6927 | CASE64(LIR_ldq:) |
6928 | case LIR_ldi: |
6929 | case LIR_ldd: |
6930 | case LIR_lduc2ui: case LIR_ldc2i: |
6931 | if (i->oprnd1() == vars) { |
6932 | int d = i->disp() / VARSIZE; |
6933 | varlivein.set(d); |
6934 | } |
6935 | else if (i->oprnd1() == tags) { |
6936 | int d = i->disp(); // 1 byte per tag |
6937 | taglivein.set(d); |
6938 | } |
6939 | break; |
6940 | case LIR_label: |
6941 | again |= analyze_label(i, alloc, again, framesize, &looplabels, |
6942 | varlivein, varlabels, taglivein, taglabels); |
6943 | break; |
6944 | case LIR_j: |
6945 | // the fallthrough path is unreachable, clear it. |
6946 | varlivein.reset(); |
6947 | taglivein.reset(); |
6948 | // fall through to other branch cases |
6949 | case LIR_jt: |
6950 | case LIR_jf: |
6951 | // merge the LiveIn sets from each successor: the fall |
6952 | // through case (livein) and the branch case (lset). |
6953 | analyze_edge(i->getTarget(), varlivein, varlabels, &looplabels); |
6954 | analyze_edge(i->getTarget(), taglivein, taglabels, &looplabels); |
6955 | break; |
6956 | case LIR_jtbl: |
6957 | varlivein.reset(); // fallthrough path is unreachable, clear it. |
6958 | taglivein.reset(); // fallthrough path is unreachable, clear it. |
6959 | for (uint32_t j=0, n=i->getTableSize(); j < n; j++) { |
6960 | analyze_edge(i->getTarget(j), varlivein, varlabels, &looplabels); |
6961 | analyze_edge(i->getTarget(j), taglivein, taglabels, &looplabels); |
6962 | } |
6963 | break; |
6964 | CASE64(LIR_callq:) |
6965 | case LIR_calli: |
6966 | case LIR_calld: |
6967 | case LIR_callv: |
6968 | analyze_call(i, catcher, vars, DEBUGGER_ONLY(haveDebugger, dbg_framesize,) |
6969 | varlivein, varlabels, taglivein, taglabels); |
6970 | break; |
6971 | } |
6972 | } |
6973 | verbose_only(iter++;) |
6974 | } |
6975 | while (again); |
6976 | |
6977 | // now make a final pass, modifying LIR to delete dead stores (make them LIR_neartramps) |
6978 | verbose_only( if (pool->isVerbose(LC_Liveness, info)) |
6979 | AvmLog("killing dead stores after %d LA iterations.\n",iter); |
6980 | ) |
6981 | } |
6982 | |
6983 | // Erase the instruction by rewriting it as a skip. |
6984 | // TODO this can go away if we turn this kill pass into a LirReader |
6985 | // and do the work inline with the assembly pass. |
6986 | void CodegenLIR::eraseIns(LIns* ins, LIns* prevIns) |
6987 | { |
6988 | #ifdef NJ_VERBOSE |
6989 | LInsPrinter *printer = frag->lirbuf->printer; |
6990 |     bool verbose = printer && pool->isVerbose(LC_AfterDCE, info);
6991 | if (verbose) { |
6992 | InsBuf b; |
6993 | AvmLog("- %s\n", printer->formatIns(&b, ins)); |
6994 | } |
6995 | #endif |
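     |     // Editor's note: overwriteWithSkip rewrites the instruction in place
     |     // as a skip linking back to prevIns, so later readers walk straight
     |     // over the dead store.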
6996 | ins->overwriteWithSkip(prevIns); |
6997 | } |
6998 | |
6999 | void CodegenLIR::deadvars_kill(Allocator& alloc, |
7000 | nanojit::BitSet& varlivein, LabelBitSet& varlabels, |
7001 | nanojit::BitSet& taglivein, LabelBitSet& taglabels) |
7002 | { |
7003 | #ifdef NJ_VERBOSE |
7004 | LInsPrinter *printer = frag->lirbuf->printer; |
7005 |     bool verbose = printer && pool->isVerbose(LC_AfterDCE, info);
7006 | InsBuf b; |
7007 | #endif |
7008 | LIns *catcher = this->catch_label.labelIns; |
7009 | varlivein.reset(); |
7010 | taglivein.reset(); |
7011 |     bool tags_touched = false;
7012 |     bool vars_touched = false;
7013 | LirReader in(frag->lastIns); |
7014 | for (LIns *i = in.read(); !i->isop(LIR_start); i = in.read()) { |
7015 | LOpcode op = i->opcode(); |
7016 | switch (op) { |
7017 | case LIR_reti: |
7018 | CASE64(LIR_retq:) |
7019 | case LIR_retd: |
7020 | varlivein.reset(); |
7021 | taglivein.reset(); |
7022 | break; |
7023 | CASE64(LIR_stq:) |
7024 | case LIR_sti: |
7025 | case LIR_std: |
7026 | case LIR_sti2c: |
7027 | if (i->oprnd2() == vars) { |
7028 | int d = i->disp() / VARSIZE; |
7029 | if (!varlivein.get(d)) { |
7030 | eraseIns(i, in.peek()); |
7031 | continue; |
7032 | } else { |
7033 | varlivein.clear(d); |
7034 |                     vars_touched = true;
7035 | } |
7036 | } |
7037 | else if (i->oprnd2() == tags) { |
7038 | int d = i->disp(); // 1 byte per tag |
7039 | if (!taglivein.get(d)) { |
7040 | eraseIns(i, in.peek()); |
7041 | continue; |
7042 | } else { |
7043 | // keep the store |
7044 | taglivein.clear(d); |
7045 |                     tags_touched = true;
7046 | } |
7047 | } |
7048 | break; |
7049 | case LIR_addp: |
7050 | // treat pointer calculations into vars as a read from vars |
7051 | analyze_addp(i, vars, varlivein); |
7052 | break; |
7053 | CASE64(LIR_ldq:) |
7054 | case LIR_ldi: |
7055 | case LIR_ldd: |
7056 | case LIR_lduc2ui: case LIR_ldc2i: |
7057 | if (i->oprnd1() == vars) { |
7058 | int d = i->disp() / VARSIZE; |
7059 | varlivein.set(d); |
7060 | } |
7061 | else if (i->oprnd1() == tags) { |
7062 | int d = i->disp(); // 1 byte per tag |
7063 | taglivein.set(d); |
7064 | } |
7065 | break; |
7066 | case LIR_label: |
7067 |             analyze_label(i, alloc, true, framesize, 0,
7068 | varlivein, varlabels, taglivein, taglabels); |
7069 | break; |
7070 | case LIR_j: |
7071 | // the fallthrough path is unreachable, clear it. |
7072 | varlivein.reset(); |
7073 | taglivein.reset(); |
7074 | // fall through to other branch cases |
7075 | case LIR_jt: |
7076 | case LIR_jf: |
7077 |             // merge the liveIn sets from each successor: the current
7078 |             // set (fall-through) and the set saved at the target label.
7079 | analyze_edge(i->getTarget(), varlivein, varlabels, 0); |
7080 | analyze_edge(i->getTarget(), taglivein, taglabels, 0); |
7081 | break; |
7082 | case LIR_jtbl: |
7083 | varlivein.reset(); |
7084 | taglivein.reset(); |
7085 | for (uint32_t j = 0, n = i->getTableSize(); j < n; j++) { |
7086 | analyze_edge(i->getTarget(j), varlivein, varlabels, 0); |
7087 | analyze_edge(i->getTarget(j), taglivein, taglabels, 0); |
7088 | } |
7089 | break; |
7090 | CASE64(LIR_callq:) |
7091 | case LIR_calli: |
7092 | case LIR_calld: |
7093 | case LIR_callv: |
7094 | analyze_call(i, catcher, vars, DEBUGGER_ONLY(haveDebugger, dbg_framesize,) |
7095 | varlivein, varlabels, taglivein, taglabels); |
7096 | break; |
7097 | } |
7098 | verbose_only(if (verbose) { |
7099 | AvmLog(" %s\n", printer->formatIns(&b, i)); |
7100 | }) |
7101 | } |
7102 | // if we have not removed all stores to the tags array, mark it live |
7103 | // so its live range will span loops. |
7104 | if (tags_touched) |
7105 | livep(tags); |
7106 | if (vars_touched) |
7107 | livep(vars); |
7108 | } |
7109 | |
7110 | /*
7111 |  * This is iterative live-variable analysis. We walk backwards through
7112 |  * the code: when we see a load, we mark the variable live, and when
7113 |  * we see a store to a variable that is not live, the store is dead
7114 |  * and gets erased.
7115 |  *
7116 |  * At labels, we save the liveIn set associated with that label.
7117 |  *
7118 |  * At branches, we merge the liveIn sets from the fall-through case (which
7119 |  * is the current set) and the branch case (which was saved with the label).
7120 |  * The analysis pass (deadvars_analyze) can run multiple times, which is
7121 |  * required to pick up loop-carried live variables.
7122 |  *
7123 |  * Once the live sets are stable, deadvars_kill makes a final pass that
7124 |  * not only drops dead stores but overwrites them as skips, so they'll
7125 |  * be ignored by any later passes as well.
7126 |  */
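     | // Editor's illustrative sketch (not VM code): the backward walk above in
     | // miniature, for a hypothetical toy IR with one variable slot per
     | // load/store:
     | //
     | //     std::bitset<64> livein;                // one bit per variable slot
     | //     for (auto it = code.rbegin(); it != code.rend(); ++it) {
     | //         if (it->isLoad())                  // load: slot becomes live
     | //             livein.set(it->slot());
     | //         else if (it->isStore()) {
     | //             if (!livein.test(it->slot()))
     | //                 it->markDead();            // no later read: dead store
     | //             livein.reset(it->slot());      // a store kills liveness above it
     | //         }
     | //     }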
7127 | |
7128 | void CodegenLIR::deadvars() |
7129 | { |
7130 | // allocator used only for duration of this phase. no exceptions are |
7131 | // thrown while this phase runs, hence no try/catch is necessary. |
7132 | Allocator dv_alloc; |
7133 | |
7134 | // map of label -> bitset, tracking what is livein at each label. |
7135 | // populated by deadvars_analyze, then used by deadvars_kill. |
7136 | // Estimated number of required buckets is based on the driver's block count, |
7137 | // which is slightly below the actual # of labels in LIR. Being slightly low |
7138 | // is okay for a bucket hashtable. note: labelCount is 0 for simple 1-block |
7139 | // methods, so use labelCount+1 as the estimate to ensure we have >0 buckets. |
7140 | LabelBitSet varlabels(dv_alloc, labelCount + 1); |
7141 | LabelBitSet taglabels(dv_alloc, labelCount + 1); |
7142 | |
7143 | // scratch bitset used by both dv_analyze and dv_kill. Each resets |
7144 | // the bitset before using it. creating it here saves one allocation. |
7145 | nanojit::BitSet varlivein(dv_alloc, framesize); |
7146 | nanojit::BitSet taglivein(dv_alloc, framesize); |
7147 | |
7148 | // If catch_label.labelIns is non-null, we emitted an exception |
7149 | // handler dispatcher, which generates back-edges into the method. |
7150 | if (varTracker->hasBackedges() || catch_label.labelIns) |
7151 | deadvars_analyze(dv_alloc, varlivein, varlabels, taglivein, taglabels); |
7152 | deadvars_kill(dv_alloc, varlivein, varlabels, taglivein, taglabels); |
7153 | } |
7154 | |
7155 | #ifdef NJ_VERBOSE |
7156 | void listing(const char* title, AvmLogControl &log, Fragment* frag) |
7157 | { |
7158 | LirReader reader(frag->lastIns); |
7159 | Allocator lister_alloc; |
7160 | ReverseLister lister(&reader, lister_alloc, frag->lirbuf->printer, &log, title); |
7161 | for (LIns* ins = lister.read(); !ins->isop(LIR_start); ins = lister.read()) |
7162 | {} |
7163 | lister.finish(); |
7164 | } |
7165 | |
7166 | // build a filename from path/name/ext, mapping non-alphanumerics in name to dots
7167 | static const char* filenameFor(const char* path, const char* name, const char* ext, Allocator& alloc)
7168 | {
     |     // +2 below: room for the '/' separator and the terminating NUL
7169 |     char* filename = new (alloc) char[2+VMPI_strlen(name)+VMPI_strlen(path)+VMPI_strlen(ext)];
7170 |     VMPI_strcpy(filename, path);
7171 |     VMPI_strcat(filename, "/"); // sorry windows
7172 |     char* dst = &filename[VMPI_strlen(filename)];
7173 |     for (const char* s = name; *s; s++) {
7174 |         char c = *s;
7175 |         c = VMPI_isalnum(*s) ? c : '.'; // convert non-alpha to dots
7176 |         c = ( c == '.' && dst[-1] == '.' ) ? 0 : c; // don't print multiple dots
7177 |         if (c)
7178 |             *dst++ = c;
7179 |     }
7180 |     dst = dst[-1] == '.' ? dst-1 : dst; // rm trailing dot
7181 |     VMPI_strcpy(dst, ext);
7182 | return filename; |
7183 | } |
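     | // For example (editor's note; 'a' is some Allocator):
     | //     filenameFor(".", "Math.max()", ".gml", a)  =>  "./Math.max.gml"
     | // '(' maps to '.', the ')' would form a double dot and is dropped, and
     | // the trailing dot is trimmed before the extension is appended.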
7184 | |
7185 | // write out a control flow graph of LIR instructions; closes f when done
7186 | static void lircfg(FILE* f, const Fragment* frag, InsSet* ignore, Allocator& alloc, CfgLister::CfgMode mode) |
7187 | { |
7188 | LirReader reader(frag->lastIns); |
7189 | CfgLister cfg(&reader, alloc, mode); |
7190 | |
7191 | for (LIns* ins = cfg.read(); !ins->isop(LIR_start); ins = cfg.read()) |
7192 | {} |
7193 | |
7194 | cfg.printGmlCfg(f, frag->lirbuf->printer, ignore); |
7195 | fclose(f); |
7196 | } |
7197 | |
7198 | #endif |
7199 | |
7200 | // return pointer to generated code on success, NULL on failure (e.g. frame size too large or other assembler error)
7201 | GprMethodProc CodegenLIR::emitMD() |
7202 | { |
7203 | deadvars(); // deadvars_kill() will add livep(vars) or livep(tags) if necessary |
7204 | |
7205 | // do this very last so it's after livep(vars) |
7206 | frag->lastIns = livep(undefConst); |
7207 | |
7208 | PERFM_NTPROF_BEGIN("compile"); |
7209 |     mmfx_delete( alloc1 );
7210 |     alloc1 = NULL;
7211 | |
7212 | CodeMgr *mgr = pool->codeMgr; |
7213 | #ifdef NJ_VERBOSE |
7214 | if (pool->isVerbose(LC_ReadLIR, info)) { |
7215 | StringBuffer sb(core); |
7216 | sb << info; |
7217 | core->console << "Final LIR " << info; |
7218 | listing(sb.c_str(), mgr->log, frag); |
7219 | } |
7220 | if (pool->isVerbose(LC_Liveness, info)) { |
7221 | Allocator live_alloc; |
7222 | LirReader in(frag->lastIns); |
7223 | nanojit::live(&in, live_alloc, frag, &mgr->log); |
7224 | } |
7225 | if (pool->isVerbose(LC_AfterDCE | LC_Native, info)) { |
7226 | StringBuffer sb(core); |
7227 | sb << info; |
7228 | mgr->log.printf("jit-assembler %s\n", sb.c_str()); |
7229 | } |
7230 | if (pool->isVerbose(VB_lircfg, info)) { |
7231 |         // For the control-flow graph: instructions that we don't want
7232 |         // to show explicit edges for (the common failure/dispatch
7233 |         // labels) are added to the 'ignore' set.
7234 |         Allocator alloc;
7235 |         bool hideCommonEdges = true;
7236 |         InsSet ignore(alloc);
7237 |         if (hideCommonEdges)
7238 |         {
7239 |             ignore.put(npe_label.labelIns, true);
7240 |             ignore.put(upe_label.labelIns, true);
7241 |             ignore.put(interrupt_label.labelIns, true);
7242 |             ignore.put(mop_rangeCheckFailed_label.labelIns, true);
7243 |             ignore.put(catch_label.labelIns, true);
7244 | } |
7245 | |
7246 | // type of cfg graph to produce |
7247 | CfgLister::CfgMode mode = pool->isVerbose(VB_lircfg_ins, info) ? CfgLister::CFG_INS |
7248 | : pool->isVerbose(VB_lircfg_bb, info) ? CfgLister::CFG_BB |
7249 | : CfgLister::CFG_EBB; |
7250 | |
7251 | StringBuffer sb(core); |
7252 | sb << info; // method name |
7253 | const char* filename = filenameFor(".", sb.c_str(), ".gml", alloc); |
7254 | FILE* f = fopen(filename, "w"); |
7255 |         if (f != NULL) // fopen can fail; lircfg closes f when done
7256 |             lircfg(f, frag, &ignore, alloc, mode);
7257 | } |
7258 | #endif |
7259 | |
7260 |     // Use the 'active' log if we are in verbose output mode; otherwise sink the output
7261 | LogControl* log = &(mgr->log); |
7262 | verbose_only( |
7263 | SinkLogControl sink; |
7264 | log = pool->isVerbose(VB_jit,info) ? log : &sink; |
7265 | ) |
7266 | |
7267 | Assembler *assm = new (*lir_alloc) Assembler(mgr->codeAlloc, mgr->allocator, *lir_alloc, log, core->config.njconfig); |
7268 | #ifdef VMCFG_VTUNE |
7269 | assm->vtuneHandle = vtuneInit(info->getMethodName()); |
7270 | #endif /* VMCFG_VTUNE */ |
7271 | |
7272 | assm->setNoiseGenerator( &noise ); |
7273 | |
7274 | verbose_only( |
7275 | StringList asmOutput(*lir_alloc); |
7276 | if (!pool->isVerbose(VB_raw, info)) |
7277 | assm->_outputCache = &asmOutput; |
7278 | ); |
7279 | |
7280 | assm->beginAssembly(frag); |
7281 | LirReader reader(frag->lastIns); |
7282 | assm->assemble(frag, &reader); |
7283 | assm->endAssembly(frag); |
7284 | PERFM_NTPROF_END("compile"); |
7285 | |
7286 | verbose_only( |
7287 | assm->_outputCache = 0; |
7288 | for (Seq<char*>* p = asmOutput.get(); p != NULL; p = p->tail) { |
7289 | assm->outputf("%s", p->head); |
7290 | } |
7291 | ); |
7292 | |
7293 | PERFM_NVPROF("IR-bytes", frag->lirbuf->byteCount()); |
7294 | PERFM_NVPROF("IR", frag->lirbuf->insCount()); |
7295 | |
7296 | GprMethodProc code; |
7297 |     bool keep = !assm->error();
7298 | if (keep) { |
7299 | // save pointer to generated code |
7300 | code = (GprMethodProc) frag->code(); |
7301 | PERFM_NVPROF("JIT method bytes", CodeAlloc::size(assm->codeList)); |
7302 | } else { |
7303 | verbose_only (if (pool->isVerbose(VB_execpolicy)) |
7304 | AvmLog("execpolicy revert to interp (%d) compiler error %d \n", info->unique_method_id(), assm->error()); |
7305 | ) |
7306 | // assm puked, or we did something untested, so interpret. |
7307 |         code = NULL;
7308 | PERFM_NVPROF("lir-error",1); |
7309 | } |
7310 | |
7311 | #ifdef VMCFG_VTUNE |
7312 | if (assm->vtuneHandle) { |
7313 | vtuneCleanup(assm->vtuneHandle); |
7314 | } |
7315 | #endif /* VMCFG_VTUNE */ |
7316 | return code; |
7317 | } |
7318 | |
7319 | REALLY_INLINE BindingCache::BindingCache(const Multiname* name, BindingCache* next)
7320 |     : name(name), next(next)
7321 | {}
7322 | 
7323 | REALLY_INLINE CallCache::CallCache(const Multiname* name, BindingCache* next)
7324 |     : BindingCache(name, next), call_handler(callprop_miss)
7325 | {}
7326 | 
7327 | REALLY_INLINE GetCache::GetCache(const Multiname* name, BindingCache* next)
7328 |     : BindingCache(name, next), get_handler(getprop_miss)
7329 | {}
7330 | 
7331 | REALLY_INLINE SetCache::SetCache(const Multiname* name, BindingCache* next)
7332 |     : BindingCache(name, next), set_handler(setprop_miss)
7333 | {}
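     | // Editor's note: the *prop_miss handlers live elsewhere; the usual
     | // inline-cache pattern (a sketch only; the real signature may differ)
     | // is that the miss stub resolves the binding, patches the slot's
     | // handler, and retries, so later executions skip the lookup:
     | //
     | //     Atom callprop_miss(CallCache& c, Atom obj /*, ...*/)
     | //     {
     | //         c.call_handler = resolveHandlerFor(obj, c.name); // hypothetical
     | //         return c.call_handler(c, obj /*, ...*/);
     | //     }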
7334 | |
7335 | template <class C> |
7336 | C* CacheBuilder<C>::findCacheSlot(const Multiname* name) |
7337 | { |
7338 |     for (Seq<C*> *p = caches.get(); p != NULL; p = p->tail)
7339 |         if (p->head->name == name)
7340 |             return p->head;
7341 |     return NULL;
7342 | } |
7343 | |
7344 | // The cache structure is expected to be small in the normal case, so we use a
7345 | // linear list. For some programs, notably classical JS programs, it may be
7346 | // larger, and a more sophisticated structure may eventually be needed.
7347 | template <class C> |
7348 | C* CacheBuilder<C>::allocateCacheSlot(const Multiname* name) |
7349 | { |
7350 | C* c = findCacheSlot(name); |
7351 | if (!c) { |
7352 | _nvprof("binding cache bytes", sizeof(C)); |
7353 | c = new (codeMgr.allocator) C(name, codeMgr.bindingCaches); |
7354 | codeMgr.bindingCaches = c; |
7355 | caches.add(c); |
7356 | } |
7357 | return c; |
7358 | } |
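     | // Editor's usage sketch (the call site and member name shown are
     | // illustrative): the JIT requests one slot per late-bound name and bakes
     | // the slot's address into the generated code, so every execution of that
     | // site shares the same cache entry:
     | //
     | //     CallCache* slot = call_cache_builder.allocateCacheSlot(name);
     | //     // emitted code passes 'slot' to its handler; the first run goes
     | //     // through slot->call_handler == callprop_miss, which can rebind it.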
7359 | } |
7360 | |
7361 | #endif // VMCFG_NANOJIT |
7362 |