Bug Summary

File: platform/mac/avmshell/../../../eval/eval-cogen.cpp
Location: line 351, column 29
Description: Value stored to 'ct' is never read. The assignment `ct = CONSTANT_Double` at line 351 is a dead store because the `TAG_literalDouble` case falls through into the `TAG_literalBoolean` handling (lines 353-356), which unconditionally overwrites `ct` with `CONSTANT_True` or `CONSTANT_False`. This is not merely dead code: a double-literal default value is consequently emitted with a boolean constant kind; the double case appears to be missing its own `break` (or a separate case body) before the boolean branch.

Annotated Source Code

1/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
2/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
3/* ***** BEGIN LICENSE BLOCK *****
4 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
5 *
6 * The contents of this file are subject to the Mozilla Public License Version
7 * 1.1 (the "License"); you may not use this file except in compliance with
8 * the License. You may obtain a copy of the License at
9 * http://www.mozilla.org/MPL/
10 *
11 * Software distributed under the License is distributed on an "AS IS" basis,
12 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
13 * for the specific language governing rights and limitations under the
14 * License.
15 *
16 * The Original Code is [Open Source Virtual Machine.].
17 *
18 * The Initial Developer of the Original Code is
19 * Adobe System Incorporated.
20 * Portions created by the Initial Developer are Copyright (C) 2008
21 * the Initial Developer. All Rights Reserved.
22 *
23 * Contributor(s):
24 * Adobe AS3 Team
25 *
26 * Alternatively, the contents of this file may be used under the terms of
27 * either the GNU General Public License Version 2 or later (the "GPL"), or
28 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
29 * in which case the provisions of the GPL or the LGPL are applicable instead
30 * of those above. If you wish to allow use of your version of this file only
31 * under the terms of either the GPL or the LGPL, and not to allow others to
32 * use your version of this file under the terms of the MPL, indicate your
33 * decision by deleting the provisions above and replace them with the notice
34 * and other provisions required by the GPL or the LGPL. If you do not delete
35 * the provisions above, a recipient may use your version of this file under
36 * the terms of any one of the MPL, the GPL or the LGPL.
37 *
38 * ***** END LICENSE BLOCK ***** */
39
40#include "avmplus.h"
41
42#ifdef VMCFG_EVAL
43
44#include "eval.h"
45
46namespace avmplus
47{
48 namespace RTC
49 {
50 using namespace ActionBlockConstants;
51
52 VarScopeCtx * Ctx::findVarScope()
53 {
54 Ctx* ctx = this;
55 while (ctx->tag != CTX_Function && ctx->tag != CTX_ClassMethod && ctx->tag != CTX_Program)
56 ctx = ctx->next;
57 return (VarScopeCtx*)ctx;
58 }
59
60 const Cogen::BinopMapping Cogen::binopMapping[] = {
61 {0, 0}, // unused
62 {0, OP_astypelate}, // OPR_as
63 {0, OP_add}, // OPR_plus,
64 {0, 0}, // OPR_assign,
65 {0, 0}, // OPR_init,
66 {0, 0}, // OPR_comma,
67 {0, OP_subtract}, // OPR_minus,
68 {0, OP_multiply}, // OPR_multiply,
69 {0, OP_divide}, // OPR_divide,
70 {0, OP_modulo}, // OPR_remainder,
71 {0, OP_lshift}, // OPR_leftShift,
72 {0, OP_rshift}, // OPR_rightShift,
73 {0, OP_urshift}, // OPR_rightShiftUnsigned,
74 {0, OP_instanceof}, // OPR_instanceof
75 {0, OP_in}, // OPR_in
76 {0, OP_istypelate}, // OPR_is
77 {0, OP_bitand}, // OPR_bitwiseAnd,
78 {0, OP_bitor}, // OPR_bitwiseOr,
79 {0, OP_bitxor}, // OPR_bitwiseXor,
80 {0, 0}, // OPR_logicalAnd,
81 {0, 0}, // OPR_logicalOr,
82 {0, OP_lessthan}, // OPR_less,
83 {0, OP_lessequals}, // OPR_lessOrEqual,
84 {0, OP_greaterthan}, // OPR_greater,
85 {0, OP_greaterequals}, // OPR_greaterOrEqual,
86 {0, OP_equals}, // OPR_equal,
87 {1, OP_equals}, // OPR_notEqual,
88 {0, OP_strictequals}, // OPR_strictEqual,
89 {1, OP_strictequals}, // OPR_strictNotEqual
90 };
91
92 Cogen::Cogen(Compiler *compiler, ABCFile* abc, ABCTraitsTable* traits, ABCMethodBodyInfo* body, uint32_t first_temp)
93 : compiler(compiler)
94 , abc(abc)
95 , allocator(compiler->allocator)
96 , code(compiler->allocator)
97 , labels(compiler->allocator)
98 , traits(traits)
99 , body(body)
100 , last_linenum(0)
101 , label_counter(0)
102 , temp_counter(first_temp)
103 , sets_dxns(falsefalse)
104 , need_activation(falsefalse)
105 , stack_depth(0)
106 , max_stack_depth(0)
107 , scope_depth(0)
108 {
109 }
110
111 uint8_t* Cogen::serializeCodeBytes(uint8_t* b) const
112 {
113 code.serialize(b);
114 fixupBackpatches(b);
115 return b + code.size();
116 }
117
118 /* Stack height tracking is "simplest possible": we assume that even for
119 * unconditional branches the stack height in the taken branch and in the
120 * untaken (impossible) branch are the same; this means that anyone jumping
121 * to a label following a jump must be careful to match the stack height.
122 * This does not seem to be an issue in practice; the verifier keeps us
123 * honest; and we don't need to track the expected stack height at every
124 * label. Also, we're not interested in the actual stack height everywhere,
125 * just a conservative approximation to it.
126 */
127 void Cogen::stackMovement(AbcOpcode opcode)
128 {
129 stack_depth = stack_depth + opcodeInfo[opcode].stack;
130 AvmAssert((int32_t)stack_depth >= 0)do { } while (0);
131 if (stack_depth > max_stack_depth)
132 max_stack_depth = stack_depth;
133 }
134
135 void Cogen::stackMovement(AbcOpcode opcode, boolbool hasRTNS, boolbool hasRTName, uint32_t pops)
136 {
137 stack_depth = stack_depth + opcodeInfo[opcode].stack - hasRTNS - hasRTName - pops;
138 AvmAssert((int32_t)stack_depth >= 0)do { } while (0);
139 if (stack_depth > max_stack_depth)
140 max_stack_depth = stack_depth;
141 }
142
143 void Cogen::emitOp(AbcOpcode opcode)
144 {
145 code.emitU8((uint8_t)opcode);
146 stackMovement(opcode);
147 }
148
149 void Cogen::emitOpU30(AbcOpcode opcode, uint32_t u30)
150 {
151 code.emitU8((uint8_t)opcode);
152 code.emitU30(u30);
153 stackMovement(opcode);
154 }
155
156 void Cogen::emitOpU30Special(AbcOpcode opcode, uint32_t u30, uint32_t pops)
157 {
158 code.emitU8((uint8_t)opcode);
159 code.emitU30(u30);
160 stackMovement(opcode, falsefalse, falsefalse, pops);
161 }
162
163 void Cogen::emitOpU30U30(AbcOpcode opcode, uint32_t u30_1, uint32_t u30_2)
164 {
165 code.emitU8((uint8_t)opcode);
166 code.emitU30(u30_1);
167 code.emitU30(u30_2);
168 stackMovement(opcode);
169 }
170
171 void Cogen::emitOpU8(AbcOpcode opcode, uint8_t b)
172 {
173 code.emitU8((uint8_t)opcode);
174 code.emitU8(b);
175 stackMovement(opcode);
176 }
177
178 void Cogen::emitOpS8(AbcOpcode opcode, int8_t b)
179 {
180 code.emitU8((uint8_t)opcode);
181 code.emitS8(b);
182 stackMovement(opcode);
183 }
184
185 void Cogen::I_getlocal(uint32_t index) {
186 if (index < 4)
187 emitOp((AbcOpcode)(OP_getlocal0 + index));
188 else
189 emitOpU30(OP_getlocal, index);
190 }
191
192 void Cogen::I_setlocal(uint32_t index) {
193 if (index < 4)
194 emitOp((AbcOpcode)(OP_setlocal0 + index));
195 else
196 emitOpU30(OP_setlocal, index);
197 }
198
199 void Cogen::I_debugfile(uint32_t index)
200 {
201 if (compiler->debugging)
202 emitOpU30(OP_debugfile, index);
203 }
204
205 void Cogen::I_debugline(uint32_t linenum)
206 {
207 if (compiler->debugging && linenum > last_linenum)
208 {
209 last_linenum = linenum;
210 emitOpU30(OP_debugline, linenum);
211 }
212 }
213
214 void Cogen::callMN(AbcOpcode opcode, uint32_t index, uint32_t nargs) {
215 code.emitU8((uint8_t)opcode);
216 code.emitU30(index);
217 code.emitU30(nargs);
218 stackMovement(opcode, abc->hasRTNS(index), abc->hasRTName(index), nargs);
219 }
220
221 void Cogen::propU30(AbcOpcode opcode, uint32_t index)
222 {
223 code.emitU8((uint8_t)opcode);
224 code.emitU30(index);
225 stackMovement(opcode, abc->hasRTNS(index), abc->hasRTName(index), 0);
226 }
227
228 Label* Cogen::newLabel()
229 {
230 Label* l = ALLOC(Label, ())::new (allocator->alloc(sizeof(Label))) Label ();
231 labels.addAtEnd(l);
232 return l;
233 }
234
235 void Cogen::emitJump(AbcOpcode opcode, Label* label)
236 {
237 // OPTIMIZEME: don't need to register backpatches for branches to known labels.
238 code.emitU8((uint8_t)opcode);
239 code.emitS24(3);
240 stackMovement(opcode);
241 label->backpatches = ALLOC(Seq<uint32_t>, (code.size() - 3, label->backpatches))::new (allocator->alloc(sizeof(Seq<uint32_t>))) Seq<
uint32_t> (code.size() - 3, label->backpatches)
;
242 }
243
244 void Cogen::I_label(Label* label)
245 {
246 AvmAssert(label->address == ~0U)do { } while (0);
247 label->address = code.size();
248 code.emitU8((uint8_t)OP_label);
249 }
250
251 // The location to be patched must contain a signed adjustment that will be
252 // added to the offset value. For regular jump instructions this should be '3',
253 // because the jump is relative to the end of the instruction - 3 bytes after
254 // the address of the offset field. For lookupswitch it is a value that depends
255 // on the location within the lookupswitch instruction of the offset word,
256 // because the jump is relative to the start of the instruction.
257
258 void Cogen::fixupBackpatches(uint8_t* b) const
259 {
260 for ( Seq<Label*>* labels = this->labels.get() ; labels != NULL__null ; labels = labels->tl ) {
261 uint32_t addr = labels->hd->address;
262 boolbool backward = falsefalse;
263 AvmAssert(addr != ~0U)do { } while (0);
264 for ( Seq<uint32_t>* backpatches = labels->hd->backpatches ; backpatches != NULL__null ; backpatches = backpatches->tl ) {
265 uint32_t loc = backpatches->hd;
266 int32_t adjustment = readS24(b + loc);
267 int32_t offset = (int32_t)(addr - (loc + adjustment));
268 backward = backward || offset < 0;
269 emitS24(b + loc, offset);
270 }
271 if (!backward) {
272 // Work around verifier bug: if a branch to this label is never a backward
273 // branch then replace OP_label with OP_nop. The verifier always assumes
274 // that OP_label is the target of a backward branch.
275 b[addr] = OP_nop;
276 }
277 }
278 }
279
280 uint32_t Cogen::emitException(uint32_t from, uint32_t to, uint32_t target, uint32_t type, uint32_t name_index)
281 {
282 return body->exceptions.addAtEnd(ALLOC(ABCExceptionInfo, (from, to, target, type, name_index))::new (allocator->alloc(sizeof(ABCExceptionInfo))) ABCExceptionInfo
(from, to, target, type, name_index)
);
283 }
284
285 uint32_t Cogen::emitTypeName(Compiler* compiler, QualifiedName* t)
286 {
287 ABCFile* abc = &compiler->abc;
288 if (t == NULL__null)
289 return 0;
290 uint32_t ns = compiler->NS_public;
291 if (t->qualifier != NULL__null) {
292 AvmAssert(t->qualifier->tag() == TAG_simpleName)do { } while (0);
293 ns = abc->addNamespace(CONSTANT_Namespace, abc->addString(((SimpleName*)t->qualifier)->name));
294 }
295 AvmAssert(t->name->tag() == TAG_simpleName)do { } while (0);
296 return abc->addQName(ns, abc->addString(((SimpleName*)t->name)->name));
297 }
298
299 void Cogen::I_lookupswitch(Label* default_label, Label** case_labels, uint32_t ncases)
300 {
301 AvmAssert( ncases > 0 )do { } while (0);
302 AvmAssert( default_label != NULL )do { } while (0);
303 // AvmAssert( forall c in case_labels c != NULL );
304
305 uint32_t here = code.size();
306 code.emitU8((uint8_t)OP_lookupswitch);
307 code.emitS24((int32_t)(here - code.size()));
308 default_label->backpatches = ALLOC(Seq<uint32_t>, (code.size() - 3, default_label->backpatches))::new (allocator->alloc(sizeof(Seq<uint32_t>))) Seq<
uint32_t> (code.size() - 3, default_label->backpatches)
;
309 code.emitU30(ncases - 1);
310 for ( uint32_t i=0 ; i < ncases ; i++ ) {
311 Label* label = case_labels[i];
312 code.emitS24((int32_t)(here - code.size()));
313 label->backpatches = ALLOC(Seq<uint32_t>, (code.size() - 3, label->backpatches))::new (allocator->alloc(sizeof(Seq<uint32_t>))) Seq<
uint32_t> (code.size() - 3, label->backpatches)
;
314 }
315 stackMovement(OP_lookupswitch);
316 }
317
318 void FunctionDefn::cogenGuts(Compiler* compiler, Ctx* ctx, ABCMethodInfo** info, ABCMethodBodyInfo** body)
319 {
320 Allocator* allocator = compiler->allocator;
321 ABCFile* abc = &compiler->abc;
322 ABCTraitsTable* traits;
323 Str* name = this->name;
324 if (name == NULL__null)
325 name = compiler->SYM_anonymous;
326
327 SeqBuilder<uint32_t> param_types(allocator);
328 SeqBuilder<DefaultValue*> default_values(allocator);
329 uint32_t numdefaults = 0;
330 for ( Seq<FunctionParam*>* params = this->params ; params != NULL__null ; params = params->tl ) {
331 param_types.addAtEnd(Cogen::emitTypeName(compiler, params->hd->type_name));
332 if (params->hd->default_value != NULL__null) {
333 Expr* dv = params->hd->default_value;
334 uint32_t cv = 0;
335 uint32_t ct = 0;
336 switch (dv->tag()) {
337 case TAG_literalString:
338 ct = CONSTANT_Utf8;
339 cv = abc->addString(((LiteralString*)dv)->value);
340 break;
341 case TAG_literalUInt:
342 ct = CONSTANT_UInt;
343 cv = abc->addUInt(((LiteralUInt*)dv)->value);
344 break;
345 case TAG_literalInt:
346 ct = CONSTANT_Int;
347 cv = abc->addInt(((LiteralInt*)dv)->value);
348 break;
349 case TAG_literalDouble:
350 case TAG_literalBoolean:
351 ct = CONSTANT_Double;
Value stored to 'ct' is never read
352 cv = abc->addDouble(((LiteralDouble*)dv)->value);
353 if (((LiteralBoolean*)dv)->value)
354 ct = CONSTANT_True;
355 else
356 ct = CONSTANT_False;
357 break;
358 case TAG_literalNull:
359 ct = CONSTANT_Null;
360 break;
361 default:
362 // EXTENDME: we can sort-of support arbitrary default values here if we want to.
363 //
364 // AS3 does not support default value other than the six cases above. Doing better
365 // would be nice.
366 //
367 // We can use one of the obscure namespace default values as a placeholder, then
368 // generate code to test for that value and compute the correct default value.
369 // But the signature of the function won't be right; the type of the argument
370 // must be '*'. May be close enough, as long as we assign a provided argument
371 // value to a typed slot and get a type check on entry.
372 compiler->syntaxError(params->hd->default_value->pos, SYNTAXERR_IMPOSSIBLE_DEFAULT);
373 }
374 numdefaults++;
375 default_values.addAtEnd(ALLOC(DefaultValue, (ct, cv))::new (allocator->alloc(sizeof(DefaultValue))) DefaultValue
(ct, cv)
);
376 }
377 }
378 *info = ALLOC(ABCMethodInfo, (compiler, abc->addString(name), numparams, param_types.get(), numdefaults, default_values.get(), Cogen::emitTypeName(compiler, return_type_name)))::new (allocator->alloc(sizeof(ABCMethodInfo))) ABCMethodInfo
(compiler, abc->addString(name), numparams, param_types.get
(), numdefaults, default_values.get(), Cogen::emitTypeName(compiler
, return_type_name))
;
379 traits = ALLOC(ABCTraitsTable, (compiler))::new (allocator->alloc(sizeof(ABCTraitsTable))) ABCTraitsTable
(compiler)
;
380 *body = ALLOC(ABCMethodBodyInfo, (compiler, *info, traits, 1 + numparams + (uses_arguments || (rest_param != NULL))))::new (allocator->alloc(sizeof(ABCMethodBodyInfo))) ABCMethodBodyInfo
(compiler, *info, traits, 1 + numparams + (uses_arguments ||
(rest_param != __null)))
;
381
382 cogen(&(*body)->cogen, ctx);
383
384 uint32_t flags = 0;
385 AvmAssert( !(uses_arguments && (rest_param != NULL)) )do { } while (0);
386 if (uses_arguments)
387 flags |= abcMethod_NEED_ARGUMENTS;
388 if (rest_param != NULL__null)
389 flags |= abcMethod_NEED_REST;
390 if (uses_dxns)
391 flags |= abcMethod_SETS_DXNS;
392 (*info)->setFlags((uint8_t)((*body)->getFlags() | flags));
393 }
394
395 void CodeBlock::cogen(Cogen* cogen, Ctx* ctx)
396 {
397 Compiler* compiler = cogen->compiler;
398 ABCFile* abc = cogen->abc;
399 uint32_t activation = 0; // 0 means "unallocated"
400 FunctionDefn* fn = NULL__null;
401
402 if (tag == CODE_Function)
403 fn = (FunctionDefn*)this;
404
405 cogen->I_debugfile(cogen->emitString(compiler->str_filename));
406
407 if (tag == CODE_Program) {
408 cogen->I_getlocal(0);
409 cogen->I_pushscope();
410 }
411
412 if (fn && (fn->bindings != NULL__null || fn->uses_arguments)) {
413 activation = cogen->getTemp();
414 cogen->I_newactivation();
415 cogen->I_dup();
416 cogen->I_setlocal(activation);
417 cogen->I_pushscope();
418 }
419
420 for ( Seq<Binding*>* bindings = this->bindings ; bindings != NULL__null ; bindings = bindings->tl ) {
421 Binding* b = bindings->hd;
422 uint32_t id = abc->addQName(compiler->NS_public, cogen->emitString(b->name));
423 uint32_t type_id = cogen->emitTypeName(compiler, b->type_name);
424 switch (bindings->hd->kind) {
425 case TAG_namespaceBinding: // FIXME: namespace bindings should be const, but the VM does not allow TAG_constBinding
426 case TAG_varBinding:
427 cogen->emitSlotTrait(id, type_id);
428 break;
429 case TAG_constBinding:
430 cogen->emitConstTrait(id, type_id);
431 break;
432 default:
433 compiler->internalError(0, "Unknown binding tag");
434 }
435 }
436
437 for ( Seq<NamespaceDefn*>* namespaces = this->namespaces ; namespaces != NULL__null ; namespaces = namespaces->tl ) {
438 uint32_t id = abc->addQName(compiler->NS_public, cogen->emitString(namespaces->hd->name));
439 uint32_t ns = 0;
440 Expr* value = namespaces->hd->value;
441 if (value == NULL__null)
442 ns = abc->addNamespace(CONSTANT_Namespace, cogen->emitString(compiler->intern(compiler->namespace_counter++)));
443 else if (value->tag() == TAG_literalString)
444 ns = abc->addNamespace(CONSTANT_ExplicitNamespace, cogen->emitString(((LiteralString*)value)->value));
445 if (tag == CODE_Program)
446 cogen->I_getlocal(0);
447 else {
448 AvmAssert(activation != 0)do { } while (0);
449 cogen->I_getlocal(activation);
450 }
451 if (ns != 0)
452 cogen->I_pushnamespace(ns);
453 else {
454 // FIXME: semantic check for namespaces.
455 // Check that the name on the RHS is actually a ns
456 // Code is the same as for 'use default namespace'
457 // If we can't tell (name may be shadowed?) then
458 // emit code that checks at run-time. If we can tell,
459 // then don't emit code for looking it up at run-time
460 // here, but just reference the definition of the other
461 // binding? (That's an optimization.)
462 //value->cogen(cogen);
463 compiler->internalError(0, "Namespace should have been resolved before code generation");
464 }
465 cogen->I_initproperty(id);
466 }
467
468 if (fn) {
469 uint32_t i=1;
470 Seq<FunctionParam*>* params=fn->params;
471 for ( ; params != NULL__null ; params = params->tl, i++ ) {
472 uint32_t id = abc->addQName(compiler->NS_public, cogen->emitString(params->hd->name));
473 AvmAssert(activation != 0)do { } while (0);
474 cogen->I_getlocal(activation);
475 cogen->I_getlocal(i);
476 cogen->I_setproperty(id);
477 }
478 if (fn->uses_arguments || fn->rest_param) {
479 AvmAssert(activation != 0)do { } while (0);
480 cogen->I_getlocal(activation);
481 cogen->I_getlocal(i);
482 if (fn->uses_arguments)
483 cogen->I_setproperty(abc->addQName(compiler->NS_public, cogen->emitString(compiler->SYM_arguments)));
484 else
485 cogen->I_setproperty(abc->addQName(compiler->NS_public, cogen->emitString(fn->rest_param->name)));
486 }
487 }
488
489 for ( Seq<FunctionDefn*>* functions = this->functions ; functions != NULL__null ; functions = functions->tl ) {
490 FunctionDefn* func = functions->hd;
491 ABCMethodInfo* fn_info;
492 ABCMethodBodyInfo* fn_body;
493 func->cogenGuts(compiler, ctx, &fn_info, &fn_body);
494 uint32_t fname = abc->addQName(compiler->NS_public, cogen->emitString(func->name));
495 cogen->I_getlocal(activation);
496 cogen->I_newfunction(fn_info->index);
497 cogen->I_setproperty(fname);
498 }
499
500 cogenBody(cogen, ctx, activation);
501 }
502
503 uint32_t Cogen::buildNssetWithPublic(Seq<Namespace*>* ns)
504 {
505 SeqBuilder<uint32_t> s(allocator);
506 s.addAtEnd(compiler->NS_public);
507 while (ns != NULL__null) {
508 if (ns->hd->tag() != TAG_commonNamespace)
509 compiler->internalError(0, "Namespace should have been resolved before now.");
510 CommonNamespace* cns = (CommonNamespace*)ns->hd;
511 s.addAtEnd(abc->addNamespace(CONSTANT_Namespace, abc->addString(cns->name)));
512 ns = ns->tl;
513 }
514 return abc->addNsset(s.get());
515 }
516 }
517}
518
519#endif // VMCFG_EVAL