1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
|
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Common subroutines and data.
*/
#if defined(WITH_JIT)
/*
 * JIT-related re-entries into the interpreter. In general, if the
 * exit from a translation can at some point be chained, the entry
 * here requires that control arrived via a call, and that the "rp"
 * on TOS is actually a pointer to a 32-bit cell containing the Dalvik PC
 * of the next insn to handle. If no chaining will happen, the entry
 * should be reached via a direct jump and rPC set beforehand.
 *
 * NOTE: this is an mterp template file; "$$" expands to a literal "$"
 * when the interpreter source is generated.
 */
.global dvmJitToInterpPunt
/*
 * The compiler will generate a jump to this entry point when it is
 * having difficulty translating a Dalvik instruction. We must skip
 * the code cache lookup & prevent chaining to avoid bouncing between
 * the interpreter and code cache. rPC must be set on entry.
 */
dvmJitToInterpPunt:
#if defined(WITH_JIT_TUNING)
movl rPC, OUT_ARG0(%esp)                # arg0<- rPC for the stats call
call dvmBumpPunt                        # count this punt for JIT tuning
#endif
FETCH_INST_R %edx                       # refetch opcode at rPC
GOTO_NEXT_R %edx                        # resume interpreting; no chaining
.global dvmJitToInterpSingleStep
/*
 * Return to the interpreter to handle a single instruction.
 * Should be reached via a call.
 * On entry:
 * 0(%esp) <= native return address within trace
 * rPC <= Dalvik PC of this instruction
 * OUT_ARG0+4(%esp) <= Dalvik PC of next instruction
 */
dvmJitToInterpSingleStep:
pop %eax                                # eax<- native return addr (resume point in trace)
movl rGLUE, %ecx
movl OUT_ARG0(%esp), %edx               # edx<- Dalvik PC of next insn (arg slot, post-pop)
movl %eax,offGlue_jitResumeNPC(%ecx)    # stash native resume point in glue
movl %edx,offGlue_jitResumeDPC(%ecx)    # stash Dalvik resume point in glue
movl $$kInterpEntryInstr,offGlue_entryPoint(%ecx)
movl $$1,rINST # changeInterp <= true   # bail with an interpreter switch request
jmp common_gotoBail
.global dvmJitToInterpNoChainNoProfile
/*
 * Return from the translation cache to the interpreter to do method
 * invocation. Check if the translation exists for the callee, but don't
 * chain to it. rPC must be set on entry.
 */
dvmJitToInterpNoChainNoProfile:
#if defined(WITH_JIT_TUNING)
call dvmBumpNoChain                     # count unchained return for JIT tuning
#endif
movl rPC,OUT_ARG0(%esp)                 # arg0<- Dalvik PC to look up
call dvmJitGetCodeAddr # is there a translation?
movl rGLUE,%ecx
movl offGlue_self(%ecx), %ecx # ecx <- glue->self
movl %eax,offThread_inJitCodeCache(%ecx) # set inJitCodeCache flag
cmpl $$0, %eax                          # NULL => no translation exists
jz 1f
call *%eax # exec translation if we've got one
# won't return
1:
FETCH_INST_R %edx                       # no translation: refetch opcode
GOTO_NEXT_R %edx                        # and continue interpreting
/*
 * Return from the translation cache and immediately request a
 * translation for the exit target, but don't attempt to chain.
 * rPC set on entry.
 */
.global dvmJitToInterpTraceSelectNoChain
dvmJitToInterpTraceSelectNoChain:
#if defined(WITH_JIT_TUNING)
call dvmBumpNoChain                     # count unchained return for JIT tuning
#endif
movl rPC,OUT_ARG0(%esp)                 # arg0<- Dalvik PC to look up
call dvmJitGetCodeAddr # is there a translation?
movl rGLUE,%ecx
movl offGlue_self(%ecx),%ecx            # ecx<- glue->self
cmpl $$0,%eax                           # NULL => no translation exists
movl %eax,offThread_inJitCodeCache(%ecx) # set inJitCodeCache flag (mov preserves flags)
jz 1f
call *%eax # jump to translation
# won't return
/* No Translation - request one */
1:
GET_JIT_PROF_TABLE %ecx %eax            # eax<- profile table (NULL if JIT disabled)
cmpl $$0, %eax # JIT enabled?
jnz 2f # Request one if so
FETCH_INST_R %edx # Continue interpreting if not
GOTO_NEXT_R %edx
2:
movl $$kJitTSelectRequestHot,rINST # ask for trace select
jmp common_selectTrace
/*
 * Return from the translation cache and immediately request a
 * translation for the exit target. Reached via a call, and
 * (TOS)->rPC.
 */
.global dvmJitToInterpTraceSelect
dvmJitToInterpTraceSelect:
pop rINST # save chain cell address in callee save reg
movl (rINST),rPC                        # rPC<- Dalvik PC stored in the chain cell
movl rPC,OUT_ARG0(%esp)                 # arg0<- Dalvik PC to look up
call dvmJitGetCodeAddr # is there a translation?
cmpl $$0,%eax
jz 1b # no - ask for one (backward ref: "1:" in dvmJitToInterpTraceSelectNoChain)
movl %eax,OUT_ARG0(%esp)                # arg0<- translation entry point
# FIXME - need to adjust rINST to beginning of sequence
movl rINST,OUT_ARG1(%esp)               # arg1<- chain cell address
call dvmJitChain # Attempt dvmJitChain(codeAddr,chainAddr)
cmpl $$0,%eax # Success?
jz toInterpreter # didn't chain - interpret
call *%eax                              # execute the (now chained) translation
# won't return
/*
 * Placeholder entries for x86 JIT. These entry points are not yet
 * implemented for x86; reaching any of them aborts the VM.
 */
.global dvmJitToInterpBackwardBranch
dvmJitToInterpBackwardBranch:
.global dvmJitToInterpNormal
dvmJitToInterpNormal:
.global dvmJitToInterpNoChain
dvmJitToInterpNoChain:
toInterpreter:
jmp common_abort                        # deliberate crash (see common_abort)
#endif
/*
 * Common code when a backwards branch is taken
 *
 * On entry:
 * ebx (a.k.a. rINST) -> PC adjustment in 16-bit words
 */
common_backwardBranch:
movl rGLUE,%ecx
call common_periodicChecks # rPC and ecx/rGLUE preserved
#if defined(WITH_JIT)
GET_JIT_PROF_TABLE %ecx %edx            # edx<- profile table (NULL if profiling off)
ADVANCE_PC_INDEXED rINST                # rPC<- rPC + 2*rINST (apply branch offset)
cmpl $$0,%edx
FETCH_INST                              # (flag-preserving) fetch opcode at new rPC
jz 1f # Profiling off - continue
.global updateProfile
updateProfile:
common_updateProfile:
# quick & dirty hash of rPC into the counter table
movl rPC, %eax
shrl $$12, %eax
xorl rPC, %eax
andl $$((1<<JIT_PROF_SIZE_LOG_2)-1),%eax
decb (%edx,%eax)                        # decrement 8-bit counter; ZF when it hits zero
jz 2f                                   # counter expired -> consider trace selection
1:
GOTO_NEXT
2:
/*
 * Here, we switch to the debug interpreter to request
 * trace selection. First, though, check to see if there
 * is already a native translation in place (and, if so,
 * jump to it now).
 */
GET_JIT_THRESHOLD %ecx rINST            # rINST<- hotness threshold
EXPORT_PC
movb rINSTbl,(%edx,%eax) # reset counter
movl offGlue_self(%ecx),rINST           # rINST<- glue->self
movl rPC,OUT_ARG0(%esp)
call dvmJitGetCodeAddr # already have one?
movl %eax,offThread_inJitCodeCache(rINST) # set the inJitCodeCache flag
cmpl $$0,%eax
jz 1f
call *%eax # FIXME: decide call vs/ jmp!. No return either way
1:
movl $$kJitTSelectRequest,%eax
# On entry, eax<- jitState, rPC valid
common_selectTrace:
movl rGLUE,%ecx
movl %eax,offGlue_jitState(%ecx)        # record the trace-select request
movl $$kInterpEntryInstr,offGlue_entryPoint(%ecx)
movl $$1,rINST                          # changeInterp<- true
jmp common_gotoBail
#else
ADVANCE_PC_INDEXED rINST                # rPC<- rPC + 2*rINST (apply branch offset)
FETCH_INST
GOTO_NEXT
#endif
/*
 * Common code for jumbo method invocation.
 *
 * On entry:
 * eax = Method* methodToCall
 * rINSTw trashed, must reload
 */
common_invokeMethodJumbo:
.LinvokeNewJumbo:
/*
 * prepare to copy args to "outs" area of current frame
 */
movzwl 6(rPC),rINST # rINST<- BBBB (arg count)
movzwl 8(rPC), %ecx # %ecx<- CCCC (first arg register)
ADVANCE_PC 2 # adjust pc to make return similar
SAVEAREA_FROM_FP %edx # %edx<- &StackSaveArea
test rINST, rINST                       # any args? (mov below preserves flags)
movl rINST, LOCAL0_OFFSET(%ebp) # LOCAL0_OFFSET(%ebp)<- BBBB
jz .LinvokeArgsDone # no args; jump to args done
jmp .LinvokeRangeArgs # handle args like invoke range
/*
 * Common code for method invocation with range.
 *
 * On entry:
 * eax = Method* methodToCall
 * rINSTw trashed, must reload
 */
common_invokeMethodRange:
.LinvokeNewRange:
/*
 * prepare to copy args to "outs" area of current frame
 */
movzbl 1(rPC),rINST # rINST<- AA (arg count)
movzwl 4(rPC), %ecx # %ecx<- CCCC (first arg register)
SAVEAREA_FROM_FP %edx # %edx<- &StackSaveArea
test rINST, rINST                       # any args? (mov below preserves flags)
movl rINST, LOCAL0_OFFSET(%ebp) # LOCAL0_OFFSET(%ebp)<- AA
jz .LinvokeArgsDone # no args; jump to args done
/*
 * %eax=methodToCall, %ecx=CCCC, LOCAL0_OFFSET(%ebp)=count, %edx=&outs (&stackSaveArea)
 * (very few methods have > 10 args; could unroll for common cases)
 */
.LinvokeRangeArgs:
movl %ebx, LOCAL1_OFFSET(%ebp) # LOCAL1_OFFSET(%ebp)<- save %ebx
lea (rFP, %ecx, 4), %ecx # %ecx<- &vCCCC
shll $$2, LOCAL0_OFFSET(%ebp) # LOCAL0_OFFSET(%ebp)<- offset (count*4 bytes)
subl LOCAL0_OFFSET(%ebp), %edx # %edx<- update &outs
shrl $$2, LOCAL0_OFFSET(%ebp) # LOCAL0_OFFSET(%ebp)<- offset (back to word count)
1:
movl (%ecx), %ebx # %ebx<- vCCCC
lea 4(%ecx), %ecx # %ecx<- &vCCCC++
subl $$1, LOCAL0_OFFSET(%ebp) # LOCAL0_OFFSET<- LOCAL0_OFFSET-- (sets ZF)
movl %ebx, (%edx) # *outs<- vCCCC (flags preserved)
lea 4(%edx), %edx # outs++ (lea preserves flags from subl)
jne 1b # loop if count (LOCAL0_OFFSET(%ebp)) not zero
movl LOCAL1_OFFSET(%ebp), %ebx # %ebx<- restore %ebx
jmp .LinvokeArgsDone # continue
/*
 * %eax is "Method* methodToCall", the method we're trying to call
 * prepare to copy args to "outs" area of current frame
 *
 * Non-range invoke: up to 5 args encoded as nibbles A,G,F,E,D.
 * The switch below falls through labels 5->4->3->2->1, copying the
 * highest-numbered args first (outs grows downward).
 */
common_invokeMethodNoRange:
.LinvokeNewNoRange:
movzbl 1(rPC),rINST # rINST<- BA
movl rINST, LOCAL0_OFFSET(%ebp) # LOCAL0_OFFSET(%ebp)<- BA
shrl $$4, LOCAL0_OFFSET(%ebp) # LOCAL0_OFFSET(%ebp)<- B (arg count; sets ZF)
je .LinvokeArgsDone # no args; jump to args done
movzwl 4(rPC), %ecx # %ecx<- GFED
SAVEAREA_FROM_FP %edx # %edx<- &StackSaveArea
/*
 * %eax=methodToCall, %ecx=GFED, LOCAL0_OFFSET(%ebp)=count, %edx=outs
 */
.LinvokeNonRange:
cmp $$2, LOCAL0_OFFSET(%ebp) # compare LOCAL0_OFFSET(%ebp) to 2
movl %ecx, LOCAL1_OFFSET(%ebp) # LOCAL1_OFFSET(%ebp)<- GFED
jl 1f # handle 1 arg
je 2f # handle 2 args
cmp $$4, LOCAL0_OFFSET(%ebp) # compare LOCAL0_OFFSET(%ebp) to 4
jl 3f # handle 3 args
je 4f # handle 4 args
5:
andl $$15, rINST # rINSTw<- A (5th arg lives in low nibble of BA)
lea -4(%edx), %edx # %edx<- update &outs; &outs--
movl (rFP, rINST, 4), %ecx # %ecx<- vA
movl %ecx, (%edx) # *outs<- vA
movl LOCAL1_OFFSET(%ebp), %ecx # %ecx<- GFED (reload; ecx was clobbered)
4:
shr $$12, %ecx # %ecx<- G
lea -4(%edx), %edx # %edx<- update &outs; &outs--
movl (rFP, %ecx, 4), %ecx # %ecx<- vG
movl %ecx, (%edx) # *outs<- vG
movl LOCAL1_OFFSET(%ebp), %ecx # %ecx<- GFED
3:
and $$0x0f00, %ecx # %ecx<- 0F00
shr $$8, %ecx # %ecx<- F
lea -4(%edx), %edx # %edx<- update &outs; &outs--
movl (rFP, %ecx, 4), %ecx # %ecx<- vF
movl %ecx, (%edx) # *outs<- vF
movl LOCAL1_OFFSET(%ebp), %ecx # %ecx<- GFED
2:
and $$0x00f0, %ecx # %ecx<- 00E0
shr $$4, %ecx # %ecx<- E
lea -4(%edx), %edx # %edx<- update &outs; &outs--
movl (rFP, %ecx, 4), %ecx # %ecx<- vE
movl %ecx, (%edx) # *outs<- vE
movl LOCAL1_OFFSET(%ebp), %ecx # %ecx<- GFED
1:
and $$0x000f, %ecx # %ecx<- 000D
movl (rFP, %ecx, 4), %ecx # %ecx<- vD
movl %ecx, -4(%edx) # *--outs<- vD
0:
/*
 * %eax is "Method* methodToCall", the method we're trying to call
 * find space for the new stack frame, check for overflow
 *
 * Frame layout: the Dalvik stack grows downward; the new frame sits
 * regsSize words below the current save area, with its own StackSaveArea
 * below that, then outsSize words for outgoing args.
 */
.LinvokeArgsDone:
movzwl offMethod_registersSize(%eax), %edx # %edx<- methodToCall->regsSize
movzwl offMethod_outsSize(%eax), %ecx # %ecx<- methodToCall->outsSize
movl %eax, LOCAL0_OFFSET(%ebp) # LOCAL0_OFFSET<- methodToCall
shl $$2, %edx # %edx<- update offset (regsSize in bytes)
SAVEAREA_FROM_FP %eax # %eax<- &StackSaveArea
subl %edx, %eax # %eax<- newFP; (old savearea - regsSize)
movl rGLUE,%edx # %edx<- pMterpGlue
movl %eax, LOCAL1_OFFSET(%ebp) # LOCAL1_OFFSET(%ebp)<- &outs
subl $$sizeofStackSaveArea, %eax # %eax<- newSaveArea (stack save area using newFP)
movl offGlue_interpStackEnd(%edx), %edx # %edx<- glue->interpStackEnd
movl %edx, LOCAL2_OFFSET(%ebp) # LOCAL2_OFFSET<- glue->interpStackEnd
shl $$2, %ecx # %ecx<- update offset for outsSize (bytes)
movl %eax, %edx # %edx<- newSaveArea
sub %ecx, %eax # %eax<- bottom; (newSaveArea - outsSize)
cmp LOCAL2_OFFSET(%ebp), %eax # compare interpStackEnd and bottom
movl LOCAL0_OFFSET(%ebp), %eax # %eax<- restore methodToCall (flags preserved)
jl .LstackOverflow # handle frame overflow
/*
 * set up newSaveArea
 */
#ifdef EASY_GDB
SAVEAREA_FROM_FP %ecx # %ecx<- &StackSaveArea
movl %ecx, offStackSaveArea_prevSave(%edx) # newSaveArea->prevSave<- &outs
#endif
movl rFP, offStackSaveArea_prevFrame(%edx) # newSaveArea->prevFrame<- rFP
movl rPC, offStackSaveArea_savedPc(%edx) # newSaveArea->savedPc<- rPC
testl $$ACC_NATIVE, offMethod_accessFlags(%eax) # check for native call
movl %eax, offStackSaveArea_method(%edx) # newSaveArea->method<- method to call (flags preserved)
jne .LinvokeNative # handle native call
/*
 * Update "glue" values for the new method
 * %eax=methodToCall, LOCAL1_OFFSET(%ebp)=newFp
 */
movl offMethod_clazz(%eax), %edx # %edx<- method->clazz
movl rGLUE,%ecx # %ecx<- pMterpGlue
movl offClassObject_pDvmDex(%edx), %edx # %edx<- method->clazz->pDvmDex
movl %eax, offGlue_method(%ecx) # glue->method<- methodToCall
movl %edx, offGlue_methodClassDex(%ecx) # glue->methodClassDex<- method->clazz->pDvmDex
movl offMethod_insns(%eax), rPC # rPC<- methodToCall->insns
movl offGlue_self(%ecx), %eax # %eax<- glue->self
movl LOCAL1_OFFSET(%ebp), rFP # rFP<- newFP
movl rFP, offThread_curFrame(%eax) # glue->self->curFrame<- newFP
FETCH_INST
GOTO_NEXT # jump to methodToCall->insns
/*
 * Prep for the native call
 * %eax=methodToCall, LOCAL1_OFFSET(%ebp)=newFP, %edx=newSaveArea
 *
 * Calls methodToCall->nativeFunc(newFP, &retval, methodToCall, self).
 * Note the extra "push %edx" shifts the OUT_ARGn slots down by 4 for
 * the duration of the call; the "lea 4(%esp), %esp" undoes it.
 */
.LinvokeNative:
movl rGLUE,%ecx # %ecx<- pMterpGlue
movl %eax, OUT_ARG1(%esp) # push parameter methodToCall
movl offGlue_self(%ecx), %ecx # %ecx<- glue->self
movl offThread_jniLocal_topCookie(%ecx), %eax # %eax<- self->localRef->...
movl %eax, offStackSaveArea_localRefCookie(%edx) # newSaveArea->localRefCookie<- top
movl %edx, OUT_ARG4(%esp) # save newSaveArea
movl LOCAL1_OFFSET(%ebp), %edx # %edx<- newFP
movl %edx, offThread_curFrame(%ecx) # glue->self->curFrame<- newFP
movl %ecx, OUT_ARG3(%esp) # save glue->self
movl %ecx, OUT_ARG2(%esp) # push parameter glue->self
movl rGLUE,%ecx # %ecx<- pMterpGlue
movl OUT_ARG1(%esp), %eax # %eax<- methodToCall
lea offGlue_retval(%ecx), %ecx # %ecx<- &retval
movl %ecx, OUT_ARG0(%esp) # push parameter pMterpGlue
push %edx # push parameter newFP
call *offMethod_nativeFunc(%eax) # call methodToCall->nativeFunc
lea 4(%esp), %esp # pop newFP without touching flags
movl OUT_ARG4(%esp), %ecx # %ecx<- newSaveArea
movl OUT_ARG3(%esp), %eax # %eax<- glue->self
movl offStackSaveArea_localRefCookie(%ecx), %edx # %edx<- old top
cmp $$0, offThread_exception(%eax) # check for exception
movl rFP, offThread_curFrame(%eax) # glue->self->curFrame<- rFP (flags preserved)
movl %edx, offThread_jniLocal_topCookie(%eax) # new top <- old top (pop JNI local refs)
jne common_exceptionThrown # handle exception
FETCH_INST_OPCODE 3 %edx
ADVANCE_PC 3
GOTO_NEXT_R %edx # jump to next instruction
.LstackOverflow: # eax=methodToCall; new frame would pass interpStackEnd
movl %eax, OUT_ARG1(%esp) # push parameter methodToCall
movl rGLUE,%eax # %eax<- pMterpGlue
movl offGlue_self(%eax), %eax # %eax<- glue->self
movl %eax, OUT_ARG0(%esp) # push parameter self
call dvmHandleStackOverflow # call: (Thread* self, Method* meth)
jmp common_exceptionThrown # handle exception
/*
 * Do we need the thread to be suspended or have debugger/profiling activity?
 *
 * On entry:
 * ebx -> PC adjustment in 16-bit words (must be preserved)
 * ecx -> GLUE pointer
 * reentry type, e.g. kInterpEntryInstr stored in rGLUE->entryPoint
 *
 * Note: A call will normally kill %eax and %ecx. To
 * streamline the normal case, this routine will preserve
 * %ecx in addition to the normal caller save regs. The save/restore
 * is a bit ugly, but will happen in the relatively uncommon path.
 * TODO: Basic-block style Jit will need a hook here as well. Fold it into
 * the suspendCount check so we can get both in 1 shot.
 * TUNING: Improve scheduling here & do initial single test for all.
 */
common_periodicChecks:
movl offGlue_pSelfSuspendCount(%ecx),%eax # eax <- &suspendCount
cmpl $$0,(%eax)                         # suspend requested?
jne 1f
6:
movl offGlue_pInterpBreak(%ecx),%eax # eax <- &interpBreak
cmpl $$0,(%eax) # something interesting happening?
jne 3f # yes - switch interpreters
ret                                     # fast path: nothing to do
/* Check for suspend */
1:
/* At this point, the return pointer to the caller of
 * common_periodicChecks is on the top of stack. We need to preserve
 * GLUE(ecx).
 * The outgoing profile is:
 * bool dvmCheckSuspendPending(Thread* self)
 * Because we reached here via a call, go ahead and build a new frame.
 */
EXPORT_PC # need for precise GC
movl offGlue_self(%ecx),%eax # eax<- glue->self
push %ebp                               # build a private frame so we can
movl %esp,%ebp                          # call out without disturbing caller
subl $$24,%esp
movl %eax,OUT_ARG0(%esp)
call dvmCheckSuspendPending
addl $$24,%esp
pop %ebp
movl rGLUE,%ecx                         # restore ecx (clobbered by the call)
/*
 * Need to check to see if debugger or profiler flags got set
 * while we were suspended.
 */
jmp 6b
/* Switch interpreters */
/* Note: %ebx contains the 16-bit word offset to be applied to rPC to
 * "complete" the interpretation of backwards branches. In effect, we
 * are completing the interpretation of the branch instruction here,
 * and the new interpreter will resume interpretation at the branch
 * target. However, a switch request recognized during the handling
 * of a return from method instruction results in an immediate abort,
 * and the new interpreter will resume by re-interpreting the return
 * instruction.
 */
3:
leal (rPC,%ebx,2),rPC # adjust pc to show target
movl rGLUE,%ecx # bail expect GLUE already loaded
movl $$1,rINST # set changeInterp to true
jmp common_gotoBail
/*
 * Common code for handling a return instruction.
 * Pops the current frame and resumes execution in the caller at savedPc+3.
 */
common_returnFromMethod:
movl rGLUE,%ecx
/* Set entry mode in case we bail */
movb $$kInterpEntryReturn,offGlue_entryPoint(%ecx)
xorl rINST,rINST # zero offset in case we switch interps
call common_periodicChecks # Note: expects %ecx to be preserved
SAVEAREA_FROM_FP %eax # eax<- saveArea (old)
movl offStackSaveArea_prevFrame(%eax),rFP # rFP<- prevFrame
movl (offStackSaveArea_method-sizeofStackSaveArea)(rFP),rINST # rINST<- caller's method
cmpl $$0,rINST # break?
je common_gotoBail # break frame, bail out completely
movl offStackSaveArea_savedPc(%eax),rPC # pc<- saveArea->savedPC
movl offGlue_self(%ecx),%eax # eax<- self
movl rINST,offGlue_method(%ecx) # glue->method = newSave->method
movl rFP,offThread_curFrame(%eax) # self->curFrame = fp
movl offMethod_clazz(rINST),%eax # eax<- method->clazz
FETCH_INST_OPCODE 3 %edx
movl offClassObject_pDvmDex(%eax),%eax # eax<- method->clazz->pDvmDex
ADVANCE_PC 3 # skip the invoke instruction in the caller
movl %eax,offGlue_methodClassDex(%ecx)
/* not bailing - restore entry mode to default */
movb $$kInterpEntryInstr,offGlue_entryPoint(%ecx)
GOTO_NEXT_R %edx
/*
 * Prepare to strip the current frame and "longjump" back to caller of
 * dvmMterpStdRun.
 *
 * on entry:
 * rINST holds changeInterp
 * ecx holds glue pointer
 *
 * expected profile: dvmMterpStdBail(MterpGlue *glue, bool changeInterp)
 */
common_gotoBail:
movl rPC,offGlue_pc(%ecx) # export state to glue
movl rFP,offGlue_fp(%ecx)
movl %ecx,OUT_ARG0(%esp) # glue in arg0
movl rINST,OUT_ARG1(%esp) # changeInterp in arg1
call dvmMterpStdBail # bail out.... (does not return here)
/*
 * After returning from a "glued" function, pull out the updated values
 * and start executing at the next instruction.
 */
common_resumeAfterGlueCall:
LOAD_PC_FP_FROM_GLUE                    # rPC/rFP<- values exported to glue
FETCH_INST
GOTO_NEXT
/*
 * Integer divide or mod by zero.
 * Throws ArithmeticException("divide by zero").
 */
common_errDivideByZero:
EXPORT_PC
movl $$.LstrArithmeticException,%eax
movl %eax,OUT_ARG0(%esp) # arg0<- exception class descriptor
movl $$.LstrDivideByZero,%eax
movl %eax,OUT_ARG1(%esp) # arg1<- message
call dvmThrowException
jmp common_exceptionThrown
/*
 * Attempt to allocate an array with a negative size.
 * Throws NegativeArraySizeException with no message.
 */
common_errNegativeArraySize:
EXPORT_PC
movl $$.LstrNegativeArraySizeException,%eax
movl %eax,OUT_ARG0(%esp) # arg0<- exception class descriptor
xorl %eax,%eax
movl %eax,OUT_ARG1(%esp) # arg1<- NULL message
call dvmThrowException
jmp common_exceptionThrown
/*
 * Attempt to invoke a method that does not exist.
 * Throws NoSuchMethodError with no message.
 */
common_errNoSuchMethod:
EXPORT_PC
movl $$.LstrNoSuchMethodError,%eax
movl %eax,OUT_ARG0(%esp) # arg0<- exception class descriptor
xorl %eax,%eax
movl %eax,OUT_ARG1(%esp) # arg1<- NULL message
call dvmThrowException
jmp common_exceptionThrown
/*
 * Hit a null object when we weren't expecting one. Export the PC, throw a
 * NullPointerException and goto the exception processing code.
 */
common_errNullObject:
EXPORT_PC
movl $$.LstrNullPointerException,%eax
movl %eax,OUT_ARG0(%esp) # arg0<- exception class descriptor
xorl %eax,%eax
movl %eax,OUT_ARG1(%esp) # arg1<- NULL message
call dvmThrowException
jmp common_exceptionThrown
/*
 * Array index exceeds max.
 * On entry:
 * eax <- array object
 * ecx <- index
 */
common_errArrayIndex:
EXPORT_PC
movl offArrayObject_length(%eax), %eax  # eax<- array length
movl %ecx,OUT_ARG0(%esp)                # arg0<- index
movl %eax,OUT_ARG1(%esp)                # arg1<- length
call dvmThrowAIOOBE # dvmThrowAIOOBE(index, length)
jmp common_exceptionThrown
/*
 * Somebody has thrown an exception. Handle it.
 *
 * If the exception processing code returns to us (instead of falling
 * out of the interpreter), continue with whatever the next instruction
 * now happens to be.
 *
 * This does not return.
 */
common_exceptionThrown:
movl rGLUE,%ecx
movl rPC,offGlue_pc(%ecx)               # export interpreter state to glue
movl rFP,offGlue_fp(%ecx)
movl %ecx,OUT_ARG0(%esp)                # arg0<- glue
call dvmMterp_exceptionThrown
jmp common_resumeAfterGlueCall
common_abort:
/* Deliberately crash with a recognizable PC so the fault is obvious in a debugger */
movl $$0xdeadf00d,%eax
call *%eax
/*
 * Strings: class descriptors and messages used by the exception throwers above.
 */
.section .rodata
.LstrNullPointerException:
.asciz "Ljava/lang/NullPointerException;"
.LstrArithmeticException:
.asciz "Ljava/lang/ArithmeticException;"
.LstrDivideByZero:
.asciz "divide by zero"
.LstrNegativeArraySizeException:
.asciz "Ljava/lang/NegativeArraySizeException;"
.LstrInstantiationError:
.asciz "Ljava/lang/InstantiationError;"
.LstrNoSuchMethodError:
.asciz "Ljava/lang/NoSuchMethodError;"
.LstrInternalErrorA:
.asciz "Ljava/lang/InternalError;"
.LstrFilledNewArrayNotImplA:
.asciz "filled-new-array only implemented for 'int'"
|