1 /*
2 * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 // no precompiled headers
26 #include "classfile/vmSymbols.hpp"
27 #include "gc_interface/collectedHeap.hpp"
28 #include "interpreter/bytecodeHistogram.hpp"
29 #include "interpreter/bytecodeInterpreter.hpp"
30 #include "interpreter/bytecodeInterpreter.inline.hpp"
31 #include "interpreter/bytecodeInterpreterProfiling.hpp"
32 #include "interpreter/interpreter.hpp"
33 #include "interpreter/interpreterRuntime.hpp"
34 #include "memory/resourceArea.hpp"
35 #include "oops/methodCounters.hpp"
36 #include "oops/objArrayKlass.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "prims/jvmtiExport.hpp"
39 #include "prims/jvmtiThreadState.hpp"
40 #include "runtime/atomic.inline.hpp"
41 #include "runtime/biasedLocking.hpp"
42 #include "runtime/frame.inline.hpp"
43 #include "runtime/handles.inline.hpp"
44 #include "runtime/interfaceSupport.hpp"
45 #include "runtime/orderAccess.inline.hpp"
46 #include "runtime/sharedRuntime.hpp"
47 #include "runtime/threadCritical.hpp"
48 #include "utilities/exceptions.hpp"
49
50 // no precompiled headers
51 #ifdef CC_INTERP
52
53 /*
54 * USELABELS - If using GCC, then use labels for the opcode dispatching
55 * rather -then a switch statement. This improves performance because it
56 * gives us the oportunity to have the instructions that calculate the
57 * next opcode to jump to be intermixed with the rest of the instructions
58 * that implement the opcode (see UPDATE_PC_AND_TOS_AND_CONTINUE macro).
59 */
60 #undef USELABELS
61 #ifdef __GNUC__
62 /*
63 ASSERT signifies debugging. It is much easier to step thru bytecodes if we
64 don't use the computed goto approach.
65 */
66 #ifndef ASSERT
67 #define USELABELS
68 #endif
69 #endif
70
71 #undef CASE
72 #ifdef USELABELS
73 #define CASE(opcode) opc ## opcode
74 #define DEFAULT opc_default
75 #else
76 #define CASE(opcode) case Bytecodes:: opcode
77 #define DEFAULT default
78 #endif
79
80 /*
81 * PREFETCH_OPCCODE - Some compilers do better if you prefetch the next
82 * opcode before going back to the top of the while loop, rather then having
83 * the top of the while loop handle it. This provides a better opportunity
84 * for instruction scheduling. Some compilers just do this prefetch
85 * automatically. Some actually end up with worse performance if you
86 * force the prefetch. Solaris gcc seems to do better, but cc does worse.
87 */
88 #undef PREFETCH_OPCCODE
89 #define PREFETCH_OPCCODE
90
91 /*
92 Interpreter safepoint: it is expected that the interpreter will have no live
93 handles of its own creation live at an interpreter safepoint. Therefore we
94 run a HandleMarkCleaner and trash all handles allocated in the call chain
95 since the JavaCalls::call_helper invocation that initiated the chain.
96 There really shouldn't be any handles remaining to trash but this is cheap
97 in relation to a safepoint.
98 */
99 #define SAFEPOINT \
100 if ( SafepointSynchronize::is_synchronizing()) { \
101 { \
102 /* zap freed handles rather than GC'ing them */ \
103 HandleMarkCleaner __hmc(THREAD); \
104 } \
105 CALL_VM(SafepointSynchronize::block(THREAD), handle_exception); \
106 }
107
108 /*
109 * VM_JAVA_ERROR - Macro for throwing a java exception from
110 * the interpreter loop. Should really be a CALL_VM but there
111 * is no entry point to do the transition to vm so we just
112 * do it by hand here.
113 */
114 #define VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap) \
115 DECACHE_STATE(); \
116 SET_LAST_JAVA_FRAME(); \
117 { \
118 InterpreterRuntime::note_a_trap(THREAD, istate->method(), BCI()); \
119 ThreadInVMfromJava trans(THREAD); \
120 Exceptions::_throw_msg(THREAD, __FILE__, __LINE__, name, msg); \
121 } \
122 RESET_LAST_JAVA_FRAME(); \
123 CACHE_STATE();
124
125 // Normal throw of a java error.
126 #define VM_JAVA_ERROR(name, msg, note_a_trap) \
127 VM_JAVA_ERROR_NO_JUMP(name, msg, note_a_trap) \
128 goto handle_exception;
129
130 #ifdef PRODUCT
131 #define DO_UPDATE_INSTRUCTION_COUNT(opcode)
132 #else
133 #define DO_UPDATE_INSTRUCTION_COUNT(opcode) \
134 { \
135 BytecodeCounter::_counter_value++; \
136 BytecodeHistogram::_counters[(Bytecodes::Code)opcode]++; \
137 if (StopInterpreterAt && StopInterpreterAt == BytecodeCounter::_counter_value) os::breakpoint(); \
138 if (TraceBytecodes) { \
139 CALL_VM((void)SharedRuntime::trace_bytecode(THREAD, 0, \
140 topOfStack[Interpreter::expr_index_at(1)], \
141 topOfStack[Interpreter::expr_index_at(2)]), \
142 handle_exception); \
143 } \
144 }
145 #endif
146
147 #undef DEBUGGER_SINGLE_STEP_NOTIFY
148 #ifdef VM_JVMTI
149 /* NOTE: (kbr) This macro must be called AFTER the PC has been
150 incremented. JvmtiExport::at_single_stepping_point() may cause a
151 breakpoint opcode to get inserted at the current PC to allow the
152 debugger to coalesce single-step events.
153
154 As a result if we call at_single_stepping_point() we refetch opcode
155 to get the current opcode. This will override any other prefetching
156 that might have occurred.
157 */
158 #define DEBUGGER_SINGLE_STEP_NOTIFY() \
159 { \
160 if (_jvmti_interp_events) { \
161 if (JvmtiExport::should_post_single_step()) { \
162 DECACHE_STATE(); \
163 SET_LAST_JAVA_FRAME(); \
164 ThreadInVMfromJava trans(THREAD); \
165 JvmtiExport::at_single_stepping_point(THREAD, \
166 istate->method(), \
167 pc); \
168 RESET_LAST_JAVA_FRAME(); \
169 CACHE_STATE(); \
170 if (THREAD->pop_frame_pending() && \
171 !THREAD->pop_frame_in_process()) { \
172 goto handle_Pop_Frame; \
173 } \
174 if (THREAD->jvmti_thread_state() && \
175 THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
176 goto handle_Early_Return; \
177 } \
178 opcode = *pc; \
179 } \
180 } \
181 }
182 #else
183 #define DEBUGGER_SINGLE_STEP_NOTIFY()
184 #endif
185
186 /*
187 * CONTINUE - Macro for executing the next opcode.
188 */
189 #undef CONTINUE
190 #ifdef USELABELS
191 // Have to do this dispatch this way in C++ because otherwise gcc complains about crossing an
192 // initialization (which is is the initialization of the table pointer...)
193 #define DISPATCH(opcode) goto *(void*)dispatch_table[opcode]
194 #define CONTINUE { \
195 opcode = *pc; \
196 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
197 DEBUGGER_SINGLE_STEP_NOTIFY(); \
198 DISPATCH(opcode); \
199 }
200 #else
201 #ifdef PREFETCH_OPCCODE
202 #define CONTINUE { \
203 opcode = *pc; \
204 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
205 DEBUGGER_SINGLE_STEP_NOTIFY(); \
206 continue; \
207 }
208 #else
209 #define CONTINUE { \
210 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
211 DEBUGGER_SINGLE_STEP_NOTIFY(); \
212 continue; \
213 }
214 #endif
215 #endif
216
217
218 #define UPDATE_PC(opsize) {pc += opsize; }
219 /*
220 * UPDATE_PC_AND_TOS - Macro for updating the pc and topOfStack.
221 */
222 #undef UPDATE_PC_AND_TOS
223 #define UPDATE_PC_AND_TOS(opsize, stack) \
224 {pc += opsize; MORE_STACK(stack); }
225
226 /*
227 * UPDATE_PC_AND_TOS_AND_CONTINUE - Macro for updating the pc and topOfStack,
228 * and executing the next opcode. It's somewhat similar to the combination
229 * of UPDATE_PC_AND_TOS and CONTINUE, but with some minor optimizations.
230 */
231 #undef UPDATE_PC_AND_TOS_AND_CONTINUE
232 #ifdef USELABELS
233 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
234 pc += opsize; opcode = *pc; MORE_STACK(stack); \
235 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
236 DEBUGGER_SINGLE_STEP_NOTIFY(); \
237 DISPATCH(opcode); \
238 }
239
240 #define UPDATE_PC_AND_CONTINUE(opsize) { \
241 pc += opsize; opcode = *pc; \
242 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
243 DEBUGGER_SINGLE_STEP_NOTIFY(); \
244 DISPATCH(opcode); \
245 }
246 #else
247 #ifdef PREFETCH_OPCCODE
248 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
249 pc += opsize; opcode = *pc; MORE_STACK(stack); \
250 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
251 DEBUGGER_SINGLE_STEP_NOTIFY(); \
252 goto do_continue; \
253 }
254
255 #define UPDATE_PC_AND_CONTINUE(opsize) { \
256 pc += opsize; opcode = *pc; \
257 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
258 DEBUGGER_SINGLE_STEP_NOTIFY(); \
259 goto do_continue; \
260 }
261 #else
262 #define UPDATE_PC_AND_TOS_AND_CONTINUE(opsize, stack) { \
263 pc += opsize; MORE_STACK(stack); \
264 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
265 DEBUGGER_SINGLE_STEP_NOTIFY(); \
266 goto do_continue; \
267 }
268
269 #define UPDATE_PC_AND_CONTINUE(opsize) { \
270 pc += opsize; \
271 DO_UPDATE_INSTRUCTION_COUNT(opcode); \
272 DEBUGGER_SINGLE_STEP_NOTIFY(); \
273 goto do_continue; \
274 }
275 #endif /* PREFETCH_OPCCODE */
276 #endif /* USELABELS */
277
278 // About to call a new method, update the save the adjusted pc and return to frame manager
279 #define UPDATE_PC_AND_RETURN(opsize) \
280 DECACHE_TOS(); \
281 istate->set_bcp(pc+opsize); \
282 return;
283
284
285 #define METHOD istate->method()
286 #define GET_METHOD_COUNTERS(res) \
287 res = METHOD->method_counters(); \
288 if (res == NULL) { \
289 CALL_VM(res = InterpreterRuntime::build_method_counters(THREAD, METHOD), handle_exception); \
290 }
291
292 #define OSR_REQUEST(res, branch_pc) \
293 CALL_VM(res=InterpreterRuntime::frequency_counter_overflow(THREAD, branch_pc), handle_exception);
294 /*
295 * For those opcodes that need to have a GC point on a backwards branch
296 */
297
298 // Backedge counting is kind of strange. The asm interpreter will increment
299 // the backedge counter as a separate counter but it does it's comparisons
300 // to the sum (scaled) of invocation counter and backedge count to make
301 // a decision. Seems kind of odd to sum them together like that
302
303 // skip is delta from current bcp/bci for target, branch_pc is pre-branch bcp
304
305
306 #define DO_BACKEDGE_CHECKS(skip, branch_pc) \
307 if ((skip) <= 0) { \
308 MethodCounters* mcs; \
309 GET_METHOD_COUNTERS(mcs); \
310 if (UseLoopCounter) { \
311 bool do_OSR = UseOnStackReplacement; \
312 mcs->backedge_counter()->increment(); \
313 if (ProfileInterpreter) { \
314 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception); \
315 /* Check for overflow against MDO count. */ \
316 do_OSR = do_OSR \
317 && (mdo_last_branch_taken_count >= (uint)InvocationCounter::InterpreterBackwardBranchLimit)\
318 /* When ProfileInterpreter is on, the backedge_count comes */ \
319 /* from the methodDataOop, which value does not get reset on */ \
320 /* the call to frequency_counter_overflow(). To avoid */ \
321 /* excessive calls to the overflow routine while the method is */ \
322 /* being compiled, add a second test to make sure the overflow */ \
323 /* function is called only once every overflow_frequency. */ \
324 && (!(mdo_last_branch_taken_count & 1023)); \
325 } else { \
326 /* check for overflow of backedge counter */ \
327 do_OSR = do_OSR \
328 && mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter()); \
329 } \
330 if (do_OSR) { \
331 nmethod* osr_nmethod; \
332 OSR_REQUEST(osr_nmethod, branch_pc); \
333 if (osr_nmethod != NULL && osr_nmethod->osr_entry_bci() != InvalidOSREntryBci) { \
334 intptr_t* buf; \
335 /* Call OSR migration with last java frame only, no checks. */ \
336 CALL_VM_NAKED_LJF(buf=SharedRuntime::OSR_migration_begin(THREAD)); \
337 istate->set_msg(do_osr); \
338 istate->set_osr_buf((address)buf); \
339 istate->set_osr_entry(osr_nmethod->osr_entry()); \
340 return; \
341 } \
342 } \
343 } /* UseCompiler ... */ \
344 SAFEPOINT; \
345 }
346
347 /*
348 * For those opcodes that need to have a GC point on a backwards branch
349 */
350
351 /*
352 * Macros for caching and flushing the interpreter state. Some local
353 * variables need to be flushed out to the frame before we do certain
354 * things (like pushing frames or becomming gc safe) and some need to
355 * be recached later (like after popping a frame). We could use one
356 * macro to cache or decache everything, but this would be less then
357 * optimal because we don't always need to cache or decache everything
358 * because some things we know are already cached or decached.
359 */
360 #undef DECACHE_TOS
361 #undef CACHE_TOS
362 #undef CACHE_PREV_TOS
363 #define DECACHE_TOS() istate->set_stack(topOfStack);
364
365 #define CACHE_TOS() topOfStack = (intptr_t *)istate->stack();
366
367 #undef DECACHE_PC
368 #undef CACHE_PC
369 #define DECACHE_PC() istate->set_bcp(pc);
370 #define CACHE_PC() pc = istate->bcp();
371 #define CACHE_CP() cp = istate->constants();
372 #define CACHE_LOCALS() locals = istate->locals();
373 #undef CACHE_FRAME
374 #define CACHE_FRAME()
375
376 // BCI() returns the current bytecode-index.
377 #undef BCI
378 #define BCI() ((int)(intptr_t)(pc - (intptr_t)istate->method()->code_base()))
379
380 /*
381 * CHECK_NULL - Macro for throwing a NullPointerException if the object
382 * passed is a null ref.
383 * On some architectures/platforms it should be possible to do this implicitly
384 */
385 #undef CHECK_NULL
386 #define CHECK_NULL(obj_) \
387 if ((obj_) == NULL) { \
388 VM_JAVA_ERROR(vmSymbols::java_lang_NullPointerException(), NULL, note_nullCheck_trap); \
389 } \
390 VERIFY_OOP(obj_)
391
392 #define VMdoubleConstZero() 0.0
393 #define VMdoubleConstOne() 1.0
394 #define VMlongConstZero() (max_jlong-max_jlong)
395 #define VMlongConstOne() ((max_jlong-max_jlong)+1)
396
397 /*
398 * Alignment
399 */
400 #define VMalignWordUp(val) (((uintptr_t)(val) + 3) & ~3)
401
402 // Decache the interpreter state that interpreter modifies directly (i.e. GC is indirect mod)
403 #define DECACHE_STATE() DECACHE_PC(); DECACHE_TOS();
404
405 // Reload interpreter state after calling the VM or a possible GC
406 #define CACHE_STATE() \
407 CACHE_TOS(); \
408 CACHE_PC(); \
409 CACHE_CP(); \
410 CACHE_LOCALS();
411
412 // Call the VM with last java frame only.
413 #define CALL_VM_NAKED_LJF(func) \
414 DECACHE_STATE(); \
415 SET_LAST_JAVA_FRAME(); \
416 func; \
417 RESET_LAST_JAVA_FRAME(); \
418 CACHE_STATE();
419
420 // Call the VM. Don't check for pending exceptions.
421 #define CALL_VM_NOCHECK(func) \
422 CALL_VM_NAKED_LJF(func) \
423 if (THREAD->pop_frame_pending() && \
424 !THREAD->pop_frame_in_process()) { \
425 goto handle_Pop_Frame; \
426 } \
427 if (THREAD->jvmti_thread_state() && \
428 THREAD->jvmti_thread_state()->is_earlyret_pending()) { \
429 goto handle_Early_Return; \
430 }
431
432 // Call the VM and check for pending exceptions
433 #define CALL_VM(func, label) { \
434 CALL_VM_NOCHECK(func); \
435 if (THREAD->has_pending_exception()) goto label; \
436 }
437
438 /*
439 * BytecodeInterpreter::run(interpreterState istate)
440 * BytecodeInterpreter::runWithChecks(interpreterState istate)
441 *
442 * The real deal. This is where byte codes actually get interpreted.
443 * Basically it's a big while loop that iterates until we return from
444 * the method passed in.
445 *
446 * The runWithChecks is used if JVMTI is enabled.
447 *
448 */
449 #if defined(VM_JVMTI)
450 void
451 BytecodeInterpreter::runWithChecks(interpreterState istate) {
452 #else
453 void
454 BytecodeInterpreter::run(interpreterState istate) {
455 #endif
456
457 // In order to simplify some tests based on switches set at runtime
458 // we invoke the interpreter a single time after switches are enabled
459 // and set simpler to to test variables rather than method calls or complex
460 // boolean expressions.
461
462 static int initialized = 0;
463 static int checkit = 0;
464 static intptr_t* c_addr = NULL;
465 static intptr_t c_value;
466
467 if (checkit && *c_addr != c_value) {
468 os::breakpoint();
469 }
470 #ifdef VM_JVMTI
471 static bool _jvmti_interp_events = 0;
472 #endif
473
474 static int _compiling; // (UseCompiler || CountCompiledCalls)
475
476 #ifdef ASSERT
477 if (istate->_msg != initialize) {
478 assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
479 #ifndef SHARK
480 IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
481 #endif // !SHARK
482 }
483 // Verify linkages.
484 interpreterState l = istate;
485 do {
486 assert(l == l->_self_link, "bad link");
487 l = l->_prev_link;
488 } while (l != NULL);
489 // Screwups with stack management usually cause us to overwrite istate
490 // save a copy so we can verify it.
491 interpreterState orig = istate;
492 #endif
493
494 register intptr_t* topOfStack = (intptr_t *)istate->stack(); /* access with STACK macros */
495 register address pc = istate->bcp();
496 register jubyte opcode;
497 register intptr_t* locals = istate->locals();
498 register ConstantPoolCache* cp = istate->constants(); // method()->constants()->cache()
499 #ifdef LOTS_OF_REGS
500 register JavaThread* THREAD = istate->thread();
501 #else
502 #undef THREAD
503 #define THREAD istate->thread()
504 #endif
505
506 #ifdef USELABELS
507 const static void* const opclabels_data[256] = {
508 /* 0x00 */ &&opc_nop, &&opc_aconst_null,&&opc_iconst_m1,&&opc_iconst_0,
509 /* 0x04 */ &&opc_iconst_1,&&opc_iconst_2, &&opc_iconst_3, &&opc_iconst_4,
510 /* 0x08 */ &&opc_iconst_5,&&opc_lconst_0, &&opc_lconst_1, &&opc_fconst_0,
511 /* 0x0C */ &&opc_fconst_1,&&opc_fconst_2, &&opc_dconst_0, &&opc_dconst_1,
512
513 /* 0x10 */ &&opc_bipush, &&opc_sipush, &&opc_ldc, &&opc_ldc_w,
514 /* 0x14 */ &&opc_ldc2_w, &&opc_iload, &&opc_lload, &&opc_fload,
515 /* 0x18 */ &&opc_dload, &&opc_aload, &&opc_iload_0,&&opc_iload_1,
516 /* 0x1C */ &&opc_iload_2,&&opc_iload_3,&&opc_lload_0,&&opc_lload_1,
517
518 /* 0x20 */ &&opc_lload_2,&&opc_lload_3,&&opc_fload_0,&&opc_fload_1,
519 /* 0x24 */ &&opc_fload_2,&&opc_fload_3,&&opc_dload_0,&&opc_dload_1,
520 /* 0x28 */ &&opc_dload_2,&&opc_dload_3,&&opc_aload_0,&&opc_aload_1,
521 /* 0x2C */ &&opc_aload_2,&&opc_aload_3,&&opc_iaload, &&opc_laload,
522
523 /* 0x30 */ &&opc_faload, &&opc_daload, &&opc_aaload, &&opc_baload,
524 /* 0x34 */ &&opc_caload, &&opc_saload, &&opc_istore, &&opc_lstore,
525 /* 0x38 */ &&opc_fstore, &&opc_dstore, &&opc_astore, &&opc_istore_0,
526 /* 0x3C */ &&opc_istore_1,&&opc_istore_2,&&opc_istore_3,&&opc_lstore_0,
527
528 /* 0x40 */ &&opc_lstore_1,&&opc_lstore_2,&&opc_lstore_3,&&opc_fstore_0,
529 /* 0x44 */ &&opc_fstore_1,&&opc_fstore_2,&&opc_fstore_3,&&opc_dstore_0,
530 /* 0x48 */ &&opc_dstore_1,&&opc_dstore_2,&&opc_dstore_3,&&opc_astore_0,
531 /* 0x4C */ &&opc_astore_1,&&opc_astore_2,&&opc_astore_3,&&opc_iastore,
532
533 /* 0x50 */ &&opc_lastore,&&opc_fastore,&&opc_dastore,&&opc_aastore,
534 /* 0x54 */ &&opc_bastore,&&opc_castore,&&opc_sastore,&&opc_pop,
535 /* 0x58 */ &&opc_pop2, &&opc_dup, &&opc_dup_x1, &&opc_dup_x2,
536 /* 0x5C */ &&opc_dup2, &&opc_dup2_x1,&&opc_dup2_x2,&&opc_swap,
537
538 /* 0x60 */ &&opc_iadd,&&opc_ladd,&&opc_fadd,&&opc_dadd,
539 /* 0x64 */ &&opc_isub,&&opc_lsub,&&opc_fsub,&&opc_dsub,
540 /* 0x68 */ &&opc_imul,&&opc_lmul,&&opc_fmul,&&opc_dmul,
541 /* 0x6C */ &&opc_idiv,&&opc_ldiv,&&opc_fdiv,&&opc_ddiv,
542
543 /* 0x70 */ &&opc_irem, &&opc_lrem, &&opc_frem,&&opc_drem,
544 /* 0x74 */ &&opc_ineg, &&opc_lneg, &&opc_fneg,&&opc_dneg,
545 /* 0x78 */ &&opc_ishl, &&opc_lshl, &&opc_ishr,&&opc_lshr,
546 /* 0x7C */ &&opc_iushr,&&opc_lushr,&&opc_iand,&&opc_land,
547
548 /* 0x80 */ &&opc_ior, &&opc_lor,&&opc_ixor,&&opc_lxor,
549 /* 0x84 */ &&opc_iinc,&&opc_i2l,&&opc_i2f, &&opc_i2d,
550 /* 0x88 */ &&opc_l2i, &&opc_l2f,&&opc_l2d, &&opc_f2i,
551 /* 0x8C */ &&opc_f2l, &&opc_f2d,&&opc_d2i, &&opc_d2l,
552
553 /* 0x90 */ &&opc_d2f, &&opc_i2b, &&opc_i2c, &&opc_i2s,
554 /* 0x94 */ &&opc_lcmp, &&opc_fcmpl,&&opc_fcmpg,&&opc_dcmpl,
555 /* 0x98 */ &&opc_dcmpg,&&opc_ifeq, &&opc_ifne, &&opc_iflt,
556 /* 0x9C */ &&opc_ifge, &&opc_ifgt, &&opc_ifle, &&opc_if_icmpeq,
557
558 /* 0xA0 */ &&opc_if_icmpne,&&opc_if_icmplt,&&opc_if_icmpge, &&opc_if_icmpgt,
559 /* 0xA4 */ &&opc_if_icmple,&&opc_if_acmpeq,&&opc_if_acmpne, &&opc_goto,
560 /* 0xA8 */ &&opc_jsr, &&opc_ret, &&opc_tableswitch,&&opc_lookupswitch,
561 /* 0xAC */ &&opc_ireturn, &&opc_lreturn, &&opc_freturn, &&opc_dreturn,
562
563 /* 0xB0 */ &&opc_areturn, &&opc_return, &&opc_getstatic, &&opc_putstatic,
564 /* 0xB4 */ &&opc_getfield, &&opc_putfield, &&opc_invokevirtual,&&opc_invokespecial,
565 /* 0xB8 */ &&opc_invokestatic,&&opc_invokeinterface,&&opc_invokedynamic,&&opc_new,
566 /* 0xBC */ &&opc_newarray, &&opc_anewarray, &&opc_arraylength, &&opc_athrow,
567
568 /* 0xC0 */ &&opc_checkcast, &&opc_instanceof, &&opc_monitorenter, &&opc_monitorexit,
569 /* 0xC4 */ &&opc_wide, &&opc_multianewarray, &&opc_ifnull, &&opc_ifnonnull,
570 /* 0xC8 */ &&opc_goto_w, &&opc_jsr_w, &&opc_breakpoint, &&opc_default,
571 /* 0xCC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
572
573 /* 0xD0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
574 /* 0xD4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
575 /* 0xD8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
576 /* 0xDC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
577
578 /* 0xE0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
579 /* 0xE4 */ &&opc_default, &&opc_fast_aldc, &&opc_fast_aldc_w, &&opc_return_register_finalizer,
580 /* 0xE8 */ &&opc_invokehandle,&&opc_default, &&opc_default, &&opc_default,
581 /* 0xEC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
582
583 /* 0xF0 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
584 /* 0xF4 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
585 /* 0xF8 */ &&opc_default, &&opc_default, &&opc_default, &&opc_default,
586 /* 0xFC */ &&opc_default, &&opc_default, &&opc_default, &&opc_default
587 };
588 register uintptr_t *dispatch_table = (uintptr_t*)&opclabels_data[0];
589 #endif /* USELABELS */
590
591 #ifdef ASSERT
592 // this will trigger a VERIFY_OOP on entry
593 if (istate->msg() != initialize && ! METHOD->is_static()) {
594 oop rcvr = LOCALS_OBJECT(0);
595 VERIFY_OOP(rcvr);
596 }
597 #endif
598 // #define HACK
599 #ifdef HACK
600 bool interesting = false;
601 #endif // HACK
602
603 /* QQQ this should be a stack method so we don't know actual direction */
604 guarantee(istate->msg() == initialize ||
605 topOfStack >= istate->stack_limit() &&
606 topOfStack < istate->stack_base(),
607 "Stack top out of range");
608
609 #ifdef CC_INTERP_PROFILE
610 // MethodData's last branch taken count.
611 uint mdo_last_branch_taken_count = 0;
612 #else
613 const uint mdo_last_branch_taken_count = 0;
614 #endif
615
616 switch (istate->msg()) {
617 case initialize: {
618 if (initialized++) ShouldNotReachHere(); // Only one initialize call.
619 _compiling = (UseCompiler || CountCompiledCalls);
620 #ifdef VM_JVMTI
621 _jvmti_interp_events = JvmtiExport::can_post_interpreter_events();
622 #endif
623 return;
624 }
625 break;
626 case method_entry: {
627 THREAD->set_do_not_unlock();
628 // count invocations
629 assert(initialized, "Interpreter not initialized");
630 if (_compiling) {
631 MethodCounters* mcs;
632 GET_METHOD_COUNTERS(mcs);
633 if (ProfileInterpreter) {
634 METHOD->increment_interpreter_invocation_count(THREAD);
635 }
636 mcs->invocation_counter()->increment();
637 if (mcs->invocation_counter()->reached_InvocationLimit(mcs->backedge_counter())) {
638 CALL_VM((void)InterpreterRuntime::frequency_counter_overflow(THREAD, NULL), handle_exception);
639 // We no longer retry on a counter overflow.
640 }
641 // Get or create profile data. Check for pending (async) exceptions.
642 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
643 SAFEPOINT;
644 }
645
646 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
647 // initialize
648 os::breakpoint();
649 }
650
651 #ifdef HACK
652 {
653 ResourceMark rm;
654 char *method_name = istate->method()->name_and_sig_as_C_string();
655 if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
656 tty->print_cr("entering: depth %d bci: %d",
657 (istate->_stack_base - istate->_stack),
658 istate->_bcp - istate->_method->code_base());
659 interesting = true;
660 }
661 }
662 #endif // HACK
663
664 // Lock method if synchronized.
665 if (METHOD->is_synchronized()) {
666 // oop rcvr = locals[0].j.r;
667 oop rcvr;
668 if (METHOD->is_static()) {
669 rcvr = METHOD->constants()->pool_holder()->java_mirror();
670 } else {
671 rcvr = LOCALS_OBJECT(0);
672 VERIFY_OOP(rcvr);
673 }
674 // The initial monitor is ours for the taking.
675 // Monitor not filled in frame manager any longer as this caused race condition with biased locking.
676 BasicObjectLock* mon = &istate->monitor_base()[-1];
677 mon->set_obj(rcvr);
678 bool success = false;
679 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
680 markOop mark = rcvr->mark();
681 intptr_t hash = (intptr_t) markOopDesc::no_hash;
682 // Implies UseBiasedLocking.
683 if (mark->has_bias_pattern()) {
684 uintptr_t thread_ident;
685 uintptr_t anticipated_bias_locking_value;
686 thread_ident = (uintptr_t)istate->thread();
687 anticipated_bias_locking_value =
688 (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
689 ~((uintptr_t) markOopDesc::age_mask_in_place);
690
691 if (anticipated_bias_locking_value == 0) {
692 // Already biased towards this thread, nothing to do.
693 if (PrintBiasedLockingStatistics) {
694 (* BiasedLocking::biased_lock_entry_count_addr())++;
695 }
696 success = true;
697 } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
698 // Try to revoke bias.
699 markOop header = rcvr->klass()->prototype_header();
700 if (hash != markOopDesc::no_hash) {
701 header = header->copy_set_hash(hash);
702 }
703 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) {
704 if (PrintBiasedLockingStatistics)
705 (*BiasedLocking::revoked_lock_entry_count_addr())++;
706 }
707 } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
708 // Try to rebias.
709 markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
710 if (hash != markOopDesc::no_hash) {
711 new_header = new_header->copy_set_hash(hash);
712 }
713 if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) {
714 if (PrintBiasedLockingStatistics) {
715 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
716 }
717 } else {
718 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
719 }
720 success = true;
721 } else {
722 // Try to bias towards thread in case object is anonymously biased.
723 markOop header = (markOop) ((uintptr_t) mark &
724 ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
725 (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
726 if (hash != markOopDesc::no_hash) {
727 header = header->copy_set_hash(hash);
728 }
729 markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
730 // Debugging hint.
731 DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
732 if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) {
733 if (PrintBiasedLockingStatistics) {
734 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
735 }
736 } else {
737 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
738 }
739 success = true;
740 }
741 }
742
743 // Traditional lightweight locking.
744 if (!success) {
745 markOop displaced = rcvr->mark()->set_unlocked();
746 mon->lock()->set_displaced_header(displaced);
747 bool call_vm = UseHeavyMonitors;
748 if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
749 // Is it simple recursive case?
750 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
751 mon->lock()->set_displaced_header(NULL);
752 } else {
753 CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
754 }
755 }
756 }
757 }
758 THREAD->clr_do_not_unlock();
759
760 // Notify jvmti
761 #ifdef VM_JVMTI
762 if (_jvmti_interp_events) {
763 // Whenever JVMTI puts a thread in interp_only_mode, method
764 // entry/exit events are sent for that thread to track stack depth.
765 if (THREAD->is_interp_only_mode()) {
766 CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
767 handle_exception);
768 }
769 }
770 #endif /* VM_JVMTI */
771
772 goto run;
773 }
774
775 case popping_frame: {
776 // returned from a java call to pop the frame, restart the call
777 // clear the message so we don't confuse ourselves later
778 assert(THREAD->pop_frame_in_process(), "wrong frame pop state");
779 istate->set_msg(no_request);
780 if (_compiling) {
781 // Set MDX back to the ProfileData of the invoke bytecode that will be
782 // restarted.
783 SET_MDX(NULL);
784 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
785 }
786 THREAD->clr_pop_frame_in_process();
787 goto run;
788 }
789
790 case method_resume: {
791 if ((istate->_stack_base - istate->_stack_limit) != istate->method()->max_stack() + 1) {
792 // resume
793 os::breakpoint();
794 }
795 #ifdef HACK
796 {
797 ResourceMark rm;
798 char *method_name = istate->method()->name_and_sig_as_C_string();
799 if (strstr(method_name, "runThese$TestRunner.run()V") != NULL) {
800 tty->print_cr("resume: depth %d bci: %d",
801 (istate->_stack_base - istate->_stack) ,
802 istate->_bcp - istate->_method->code_base());
803 interesting = true;
804 }
805 }
806 #endif // HACK
807 // returned from a java call, continue executing.
808 if (THREAD->pop_frame_pending() && !THREAD->pop_frame_in_process()) {
809 goto handle_Pop_Frame;
810 }
811 if (THREAD->jvmti_thread_state() &&
812 THREAD->jvmti_thread_state()->is_earlyret_pending()) {
813 goto handle_Early_Return;
814 }
815
816 if (THREAD->has_pending_exception()) goto handle_exception;
817 // Update the pc by the saved amount of the invoke bytecode size
818 UPDATE_PC(istate->bcp_advance());
819
820 if (_compiling) {
821 // Get or create profile data. Check for pending (async) exceptions.
822 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
823 }
824 goto run;
825 }
826
827 case deopt_resume2: {
828 // Returned from an opcode that will reexecute. Deopt was
829 // a result of a PopFrame request.
830 //
831
832 if (_compiling) {
833 // Get or create profile data. Check for pending (async) exceptions.
834 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
835 }
836 goto run;
837 }
838
839 case deopt_resume: {
840 // Returned from an opcode that has completed. The stack has
841 // the result all we need to do is skip across the bytecode
842 // and continue (assuming there is no exception pending)
843 //
844 // compute continuation length
845 //
846 // Note: it is possible to deopt at a return_register_finalizer opcode
847 // because this requires entering the vm to do the registering. While the
848 // opcode is complete we can't advance because there are no more opcodes
849 // much like trying to deopt at a poll return. In that has we simply
850 // get out of here
851 //
852 if ( Bytecodes::code_at(METHOD, pc) == Bytecodes::_return_register_finalizer) {
853 // this will do the right thing even if an exception is pending.
854 goto handle_return;
855 }
856 UPDATE_PC(Bytecodes::length_at(METHOD, pc));
857 if (THREAD->has_pending_exception()) goto handle_exception;
858
859 if (_compiling) {
860 // Get or create profile data. Check for pending (async) exceptions.
861 BI_PROFILE_GET_OR_CREATE_METHOD_DATA(handle_exception);
862 }
863 goto run;
864 }
865 case got_monitors: {
866 // continue locking now that we have a monitor to use
867 // we expect to find newly allocated monitor at the "top" of the monitor stack.
868 oop lockee = STACK_OBJECT(-1);
869 VERIFY_OOP(lockee);
870 // derefing's lockee ought to provoke implicit null check
871 // find a free monitor
872 BasicObjectLock* entry = (BasicObjectLock*) istate->stack_base();
873 assert(entry->obj() == NULL, "Frame manager didn't allocate the monitor");
874 entry->set_obj(lockee);
875 bool success = false;
876 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
877
878 markOop mark = lockee->mark();
879 intptr_t hash = (intptr_t) markOopDesc::no_hash;
880 // implies UseBiasedLocking
881 if (mark->has_bias_pattern()) {
882 uintptr_t thread_ident;
883 uintptr_t anticipated_bias_locking_value;
884 thread_ident = (uintptr_t)istate->thread();
885 anticipated_bias_locking_value =
886 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
887 ~((uintptr_t) markOopDesc::age_mask_in_place);
888
889 if (anticipated_bias_locking_value == 0) {
890 // already biased towards this thread, nothing to do
891 if (PrintBiasedLockingStatistics) {
892 (* BiasedLocking::biased_lock_entry_count_addr())++;
893 }
894 success = true;
895 } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
896 // try revoke bias
897 markOop header = lockee->klass()->prototype_header();
898 if (hash != markOopDesc::no_hash) {
899 header = header->copy_set_hash(hash);
900 }
901 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
902 if (PrintBiasedLockingStatistics) {
903 (*BiasedLocking::revoked_lock_entry_count_addr())++;
904 }
905 }
906 } else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
907 // try rebias
908 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
909 if (hash != markOopDesc::no_hash) {
910 new_header = new_header->copy_set_hash(hash);
911 }
912 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
913 if (PrintBiasedLockingStatistics) {
914 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
915 }
916 } else {
917 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
918 }
919 success = true;
920 } else {
921 // try to bias towards thread in case object is anonymously biased
922 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
923 (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
924 if (hash != markOopDesc::no_hash) {
925 header = header->copy_set_hash(hash);
926 }
927 markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
928 // debugging hint
929 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
930 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
931 if (PrintBiasedLockingStatistics) {
932 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
933 }
934 } else {
935 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
936 }
937 success = true;
938 }
939 }
940
941 // traditional lightweight locking
942 if (!success) {
943 markOop displaced = lockee->mark()->set_unlocked();
944 entry->lock()->set_displaced_header(displaced);
945 bool call_vm = UseHeavyMonitors;
946 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
947 // Is it simple recursive case?
948 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
949 entry->lock()->set_displaced_header(NULL);
950 } else {
951 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
952 }
953 }
954 }
955 UPDATE_PC_AND_TOS(1, -1);
956 goto run;
957 }
958 default: {
959 fatal("Unexpected message from frame manager");
960 }
961 }
962
963 run:
964
965 DO_UPDATE_INSTRUCTION_COUNT(*pc)
966 DEBUGGER_SINGLE_STEP_NOTIFY();
967 #ifdef PREFETCH_OPCCODE
968 opcode = *pc; /* prefetch first opcode */
969 #endif
970
971 #ifndef USELABELS
972 while (1)
973 #endif
974 {
975 #ifndef PREFETCH_OPCCODE
976 opcode = *pc;
977 #endif
978 // Seems like this happens twice per opcode. At worst this is only
979 // need at entry to the loop.
980 // DEBUGGER_SINGLE_STEP_NOTIFY();
981 /* Using this labels avoids double breakpoints when quickening and
982 * when returing from transition frames.
983 */
984 opcode_switch:
985 assert(istate == orig, "Corrupted istate");
986 /* QQQ Hmm this has knowledge of direction, ought to be a stack method */
987 assert(topOfStack >= istate->stack_limit(), "Stack overrun");
988 assert(topOfStack < istate->stack_base(), "Stack underrun");
989
990 #ifdef USELABELS
991 DISPATCH(opcode);
992 #else
993 switch (opcode)
994 #endif
995 {
996 CASE(_nop):
997 UPDATE_PC_AND_CONTINUE(1);
998
999 /* Push miscellaneous constants onto the stack. */
1000
1001 CASE(_aconst_null):
1002 SET_STACK_OBJECT(NULL, 0);
1003 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1004
1005 #undef OPC_CONST_n
1006 #define OPC_CONST_n(opcode, const_type, value) \
1007 CASE(opcode): \
1008 SET_STACK_ ## const_type(value, 0); \
1009 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1010
1011 OPC_CONST_n(_iconst_m1, INT, -1);
1012 OPC_CONST_n(_iconst_0, INT, 0);
1013 OPC_CONST_n(_iconst_1, INT, 1);
1014 OPC_CONST_n(_iconst_2, INT, 2);
1015 OPC_CONST_n(_iconst_3, INT, 3);
1016 OPC_CONST_n(_iconst_4, INT, 4);
1017 OPC_CONST_n(_iconst_5, INT, 5);
1018 OPC_CONST_n(_fconst_0, FLOAT, 0.0);
1019 OPC_CONST_n(_fconst_1, FLOAT, 1.0);
1020 OPC_CONST_n(_fconst_2, FLOAT, 2.0);
1021
1022 #undef OPC_CONST2_n
1023 #define OPC_CONST2_n(opcname, value, key, kind) \
1024 CASE(_##opcname): \
1025 { \
1026 SET_STACK_ ## kind(VM##key##Const##value(), 1); \
1027 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \
1028 }
1029 OPC_CONST2_n(dconst_0, Zero, double, DOUBLE);
1030 OPC_CONST2_n(dconst_1, One, double, DOUBLE);
1031 OPC_CONST2_n(lconst_0, Zero, long, LONG);
1032 OPC_CONST2_n(lconst_1, One, long, LONG);
1033
1034 /* Load constant from constant pool: */
1035
1036 /* Push a 1-byte signed integer value onto the stack. */
1037 CASE(_bipush):
1038 SET_STACK_INT((jbyte)(pc[1]), 0);
1039 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
1040
1041 /* Push a 2-byte signed integer constant onto the stack. */
1042 CASE(_sipush):
1043 SET_STACK_INT((int16_t)Bytes::get_Java_u2(pc + 1), 0);
1044 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
1045
1046 /* load from local variable */
1047
1048 CASE(_aload):
1049 VERIFY_OOP(LOCALS_OBJECT(pc[1]));
1050 SET_STACK_OBJECT(LOCALS_OBJECT(pc[1]), 0);
1051 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
1052
1053 CASE(_iload):
1054 CASE(_fload):
1055 SET_STACK_SLOT(LOCALS_SLOT(pc[1]), 0);
1056 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 1);
1057
1058 CASE(_lload):
1059 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(pc[1]), 1);
1060 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);
1061
1062 CASE(_dload):
1063 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(pc[1]), 1);
1064 UPDATE_PC_AND_TOS_AND_CONTINUE(2, 2);
1065
1066 #undef OPC_LOAD_n
1067 #define OPC_LOAD_n(num) \
1068 CASE(_aload_##num): \
1069 VERIFY_OOP(LOCALS_OBJECT(num)); \
1070 SET_STACK_OBJECT(LOCALS_OBJECT(num), 0); \
1071 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
1072 \
1073 CASE(_iload_##num): \
1074 CASE(_fload_##num): \
1075 SET_STACK_SLOT(LOCALS_SLOT(num), 0); \
1076 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1); \
1077 \
1078 CASE(_lload_##num): \
1079 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(num), 1); \
1080 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2); \
1081 CASE(_dload_##num): \
1082 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_DOUBLE_AT(num), 1); \
1083 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1084
1085 OPC_LOAD_n(0);
1086 OPC_LOAD_n(1);
1087 OPC_LOAD_n(2);
1088 OPC_LOAD_n(3);
1089
1090 /* store to a local variable */
1091
1092 CASE(_astore):
1093 astore(topOfStack, -1, locals, pc[1]);
1094 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);
1095
1096 CASE(_istore):
1097 CASE(_fstore):
1098 SET_LOCALS_SLOT(STACK_SLOT(-1), pc[1]);
1099 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -1);
1100
1101 CASE(_lstore):
1102 SET_LOCALS_LONG(STACK_LONG(-1), pc[1]);
1103 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
1104
1105 CASE(_dstore):
1106 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), pc[1]);
1107 UPDATE_PC_AND_TOS_AND_CONTINUE(2, -2);
1108
1109 CASE(_wide): {
1110 uint16_t reg = Bytes::get_Java_u2(pc + 2);
1111
1112 opcode = pc[1];
1113
1114 // Wide and it's sub-bytecode are counted as separate instructions. If we
1115 // don't account for this here, the bytecode trace skips the next bytecode.
1116 DO_UPDATE_INSTRUCTION_COUNT(opcode);
1117
1118 switch(opcode) {
1119 case Bytecodes::_aload:
1120 VERIFY_OOP(LOCALS_OBJECT(reg));
1121 SET_STACK_OBJECT(LOCALS_OBJECT(reg), 0);
1122 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
1123
1124 case Bytecodes::_iload:
1125 case Bytecodes::_fload:
1126 SET_STACK_SLOT(LOCALS_SLOT(reg), 0);
1127 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 1);
1128
1129 case Bytecodes::_lload:
1130 SET_STACK_LONG_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
1131 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
1132
1133 case Bytecodes::_dload:
1134 SET_STACK_DOUBLE_FROM_ADDR(LOCALS_LONG_AT(reg), 1);
1135 UPDATE_PC_AND_TOS_AND_CONTINUE(4, 2);
1136
1137 case Bytecodes::_astore:
1138 astore(topOfStack, -1, locals, reg);
1139 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);
1140
1141 case Bytecodes::_istore:
1142 case Bytecodes::_fstore:
1143 SET_LOCALS_SLOT(STACK_SLOT(-1), reg);
1144 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -1);
1145
1146 case Bytecodes::_lstore:
1147 SET_LOCALS_LONG(STACK_LONG(-1), reg);
1148 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
1149
1150 case Bytecodes::_dstore:
1151 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), reg);
1152 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -2);
1153
1154 case Bytecodes::_iinc: {
1155 int16_t offset = (int16_t)Bytes::get_Java_u2(pc+4);
1156 // Be nice to see what this generates.... QQQ
1157 SET_LOCALS_INT(LOCALS_INT(reg) + offset, reg);
1158 UPDATE_PC_AND_CONTINUE(6);
1159 }
1160 case Bytecodes::_ret:
1161 // Profile ret.
1162 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(reg))));
1163 // Now, update the pc.
1164 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(reg));
1165 UPDATE_PC_AND_CONTINUE(0);
1166 default:
1167 VM_JAVA_ERROR(vmSymbols::java_lang_InternalError(), "undefined opcode", note_no_trap);
1168 }
1169 }
1170
1171
1172 #undef OPC_STORE_n
1173 #define OPC_STORE_n(num) \
1174 CASE(_astore_##num): \
1175 astore(topOfStack, -1, locals, num); \
1176 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
1177 CASE(_istore_##num): \
1178 CASE(_fstore_##num): \
1179 SET_LOCALS_SLOT(STACK_SLOT(-1), num); \
1180 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1181
1182 OPC_STORE_n(0);
1183 OPC_STORE_n(1);
1184 OPC_STORE_n(2);
1185 OPC_STORE_n(3);
1186
1187 #undef OPC_DSTORE_n
1188 #define OPC_DSTORE_n(num) \
1189 CASE(_dstore_##num): \
1190 SET_LOCALS_DOUBLE(STACK_DOUBLE(-1), num); \
1191 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
1192 CASE(_lstore_##num): \
1193 SET_LOCALS_LONG(STACK_LONG(-1), num); \
1194 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);
1195
1196 OPC_DSTORE_n(0);
1197 OPC_DSTORE_n(1);
1198 OPC_DSTORE_n(2);
1199 OPC_DSTORE_n(3);
1200
1201 /* stack pop, dup, and insert opcodes */
1202
1203
1204 CASE(_pop): /* Discard the top item on the stack */
1205 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1206
1207
1208 CASE(_pop2): /* Discard the top 2 items on the stack */
1209 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2);
1210
1211
1212 CASE(_dup): /* Duplicate the top item on the stack */
1213 dup(topOfStack);
1214 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1215
1216 CASE(_dup2): /* Duplicate the top 2 items on the stack */
1217 dup2(topOfStack);
1218 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1219
1220 CASE(_dup_x1): /* insert top word two down */
1221 dup_x1(topOfStack);
1222 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1223
1224 CASE(_dup_x2): /* insert top word three down */
1225 dup_x2(topOfStack);
1226 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1227
1228 CASE(_dup2_x1): /* insert top 2 slots three down */
1229 dup2_x1(topOfStack);
1230 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1231
1232 CASE(_dup2_x2): /* insert top 2 slots four down */
1233 dup2_x2(topOfStack);
1234 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1235
1236 CASE(_swap): { /* swap top two elements on the stack */
1237 swap(topOfStack);
1238 UPDATE_PC_AND_CONTINUE(1);
1239 }
1240
1241 /* Perform various binary integer operations */
1242
1243 #undef OPC_INT_BINARY
1244 #define OPC_INT_BINARY(opcname, opname, test) \
1245 CASE(_i##opcname): \
1246 if (test && (STACK_INT(-1) == 0)) { \
1247 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
1248 "/ by zero", note_div0Check_trap); \
1249 } \
1250 SET_STACK_INT(VMint##opname(STACK_INT(-2), \
1251 STACK_INT(-1)), \
1252 -2); \
1253 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
1254 CASE(_l##opcname): \
1255 { \
1256 if (test) { \
1257 jlong l1 = STACK_LONG(-1); \
1258 if (VMlongEqz(l1)) { \
1259 VM_JAVA_ERROR(vmSymbols::java_lang_ArithmeticException(), \
1260 "/ by long zero", note_div0Check_trap); \
1261 } \
1262 } \
1263 /* First long at (-1,-2) next long at (-3,-4) */ \
1264 SET_STACK_LONG(VMlong##opname(STACK_LONG(-3), \
1265 STACK_LONG(-1)), \
1266 -3); \
1267 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
1268 }
1269
1270 OPC_INT_BINARY(add, Add, 0);
1271 OPC_INT_BINARY(sub, Sub, 0);
1272 OPC_INT_BINARY(mul, Mul, 0);
1273 OPC_INT_BINARY(and, And, 0);
1274 OPC_INT_BINARY(or, Or, 0);
1275 OPC_INT_BINARY(xor, Xor, 0);
1276 OPC_INT_BINARY(div, Div, 1);
1277 OPC_INT_BINARY(rem, Rem, 1);
1278
1279
1280 /* Perform various binary floating number operations */
1281 /* On some machine/platforms/compilers div zero check can be implicit */
1282
1283 #undef OPC_FLOAT_BINARY
1284 #define OPC_FLOAT_BINARY(opcname, opname) \
1285 CASE(_d##opcname): { \
1286 SET_STACK_DOUBLE(VMdouble##opname(STACK_DOUBLE(-3), \
1287 STACK_DOUBLE(-1)), \
1288 -3); \
1289 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -2); \
1290 } \
1291 CASE(_f##opcname): \
1292 SET_STACK_FLOAT(VMfloat##opname(STACK_FLOAT(-2), \
1293 STACK_FLOAT(-1)), \
1294 -2); \
1295 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1296
1297
1298 OPC_FLOAT_BINARY(add, Add);
1299 OPC_FLOAT_BINARY(sub, Sub);
1300 OPC_FLOAT_BINARY(mul, Mul);
1301 OPC_FLOAT_BINARY(div, Div);
1302 OPC_FLOAT_BINARY(rem, Rem);
1303
1304 /* Shift operations
1305 * Shift left int and long: ishl, lshl
1306 * Logical shift right int and long w/zero extension: iushr, lushr
1307 * Arithmetic shift right int and long w/sign extension: ishr, lshr
1308 */
1309
1310 #undef OPC_SHIFT_BINARY
1311 #define OPC_SHIFT_BINARY(opcname, opname) \
1312 CASE(_i##opcname): \
1313 SET_STACK_INT(VMint##opname(STACK_INT(-2), \
1314 STACK_INT(-1)), \
1315 -2); \
1316 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
1317 CASE(_l##opcname): \
1318 { \
1319 SET_STACK_LONG(VMlong##opname(STACK_LONG(-2), \
1320 STACK_INT(-1)), \
1321 -2); \
1322 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
1323 }
1324
1325 OPC_SHIFT_BINARY(shl, Shl);
1326 OPC_SHIFT_BINARY(shr, Shr);
1327 OPC_SHIFT_BINARY(ushr, Ushr);
1328
1329 /* Increment local variable by constant */
1330 CASE(_iinc):
1331 {
1332 // locals[pc[1]].j.i += (jbyte)(pc[2]);
1333 SET_LOCALS_INT(LOCALS_INT(pc[1]) + (jbyte)(pc[2]), pc[1]);
1334 UPDATE_PC_AND_CONTINUE(3);
1335 }
1336
1337 /* negate the value on the top of the stack */
1338
1339 CASE(_ineg):
1340 SET_STACK_INT(VMintNeg(STACK_INT(-1)), -1);
1341 UPDATE_PC_AND_CONTINUE(1);
1342
1343 CASE(_fneg):
1344 SET_STACK_FLOAT(VMfloatNeg(STACK_FLOAT(-1)), -1);
1345 UPDATE_PC_AND_CONTINUE(1);
1346
1347 CASE(_lneg):
1348 {
1349 SET_STACK_LONG(VMlongNeg(STACK_LONG(-1)), -1);
1350 UPDATE_PC_AND_CONTINUE(1);
1351 }
1352
1353 CASE(_dneg):
1354 {
1355 SET_STACK_DOUBLE(VMdoubleNeg(STACK_DOUBLE(-1)), -1);
1356 UPDATE_PC_AND_CONTINUE(1);
1357 }
1358
1359 /* Conversion operations */
1360
1361 CASE(_i2f): /* convert top of stack int to float */
1362 SET_STACK_FLOAT(VMint2Float(STACK_INT(-1)), -1);
1363 UPDATE_PC_AND_CONTINUE(1);
1364
1365 CASE(_i2l): /* convert top of stack int to long */
1366 {
1367 // this is ugly QQQ
1368 jlong r = VMint2Long(STACK_INT(-1));
1369 MORE_STACK(-1); // Pop
1370 SET_STACK_LONG(r, 1);
1371
1372 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1373 }
1374
1375 CASE(_i2d): /* convert top of stack int to double */
1376 {
1377 // this is ugly QQQ (why cast to jlong?? )
1378 jdouble r = (jlong)STACK_INT(-1);
1379 MORE_STACK(-1); // Pop
1380 SET_STACK_DOUBLE(r, 1);
1381
1382 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1383 }
1384
1385 CASE(_l2i): /* convert top of stack long to int */
1386 {
1387 jint r = VMlong2Int(STACK_LONG(-1));
1388 MORE_STACK(-2); // Pop
1389 SET_STACK_INT(r, 0);
1390 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1391 }
1392
1393 CASE(_l2f): /* convert top of stack long to float */
1394 {
1395 jlong r = STACK_LONG(-1);
1396 MORE_STACK(-2); // Pop
1397 SET_STACK_FLOAT(VMlong2Float(r), 0);
1398 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1399 }
1400
1401 CASE(_l2d): /* convert top of stack long to double */
1402 {
1403 jlong r = STACK_LONG(-1);
1404 MORE_STACK(-2); // Pop
1405 SET_STACK_DOUBLE(VMlong2Double(r), 1);
1406 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1407 }
1408
1409 CASE(_f2i): /* Convert top of stack float to int */
1410 SET_STACK_INT(SharedRuntime::f2i(STACK_FLOAT(-1)), -1);
1411 UPDATE_PC_AND_CONTINUE(1);
1412
1413 CASE(_f2l): /* convert top of stack float to long */
1414 {
1415 jlong r = SharedRuntime::f2l(STACK_FLOAT(-1));
1416 MORE_STACK(-1); // POP
1417 SET_STACK_LONG(r, 1);
1418 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1419 }
1420
1421 CASE(_f2d): /* convert top of stack float to double */
1422 {
1423 jfloat f;
1424 jdouble r;
1425 f = STACK_FLOAT(-1);
1426 r = (jdouble) f;
1427 MORE_STACK(-1); // POP
1428 SET_STACK_DOUBLE(r, 1);
1429 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1430 }
1431
1432 CASE(_d2i): /* convert top of stack double to int */
1433 {
1434 jint r1 = SharedRuntime::d2i(STACK_DOUBLE(-1));
1435 MORE_STACK(-2);
1436 SET_STACK_INT(r1, 0);
1437 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1438 }
1439
1440 CASE(_d2f): /* convert top of stack double to float */
1441 {
1442 jfloat r1 = VMdouble2Float(STACK_DOUBLE(-1));
1443 MORE_STACK(-2);
1444 SET_STACK_FLOAT(r1, 0);
1445 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1446 }
1447
1448 CASE(_d2l): /* convert top of stack double to long */
1449 {
1450 jlong r1 = SharedRuntime::d2l(STACK_DOUBLE(-1));
1451 MORE_STACK(-2);
1452 SET_STACK_LONG(r1, 1);
1453 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 2);
1454 }
1455
1456 CASE(_i2b):
1457 SET_STACK_INT(VMint2Byte(STACK_INT(-1)), -1);
1458 UPDATE_PC_AND_CONTINUE(1);
1459
1460 CASE(_i2c):
1461 SET_STACK_INT(VMint2Char(STACK_INT(-1)), -1);
1462 UPDATE_PC_AND_CONTINUE(1);
1463
1464 CASE(_i2s):
1465 SET_STACK_INT(VMint2Short(STACK_INT(-1)), -1);
1466 UPDATE_PC_AND_CONTINUE(1);
1467
1468 /* comparison operators */
1469
1470
1471 #define COMPARISON_OP(name, comparison) \
1472 CASE(_if_icmp##name): { \
1473 const bool cmp = (STACK_INT(-2) comparison STACK_INT(-1)); \
1474 int skip = cmp \
1475 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
1476 address branch_pc = pc; \
1477 /* Profile branch. */ \
1478 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
1479 UPDATE_PC_AND_TOS(skip, -2); \
1480 DO_BACKEDGE_CHECKS(skip, branch_pc); \
1481 CONTINUE; \
1482 } \
1483 CASE(_if##name): { \
1484 const bool cmp = (STACK_INT(-1) comparison 0); \
1485 int skip = cmp \
1486 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
1487 address branch_pc = pc; \
1488 /* Profile branch. */ \
1489 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
1490 UPDATE_PC_AND_TOS(skip, -1); \
1491 DO_BACKEDGE_CHECKS(skip, branch_pc); \
1492 CONTINUE; \
1493 }
1494
1495 #define COMPARISON_OP2(name, comparison) \
1496 COMPARISON_OP(name, comparison) \
1497 CASE(_if_acmp##name): { \
1498 const bool cmp = (STACK_OBJECT(-2) comparison STACK_OBJECT(-1)); \
1499 int skip = cmp \
1500 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
1501 address branch_pc = pc; \
1502 /* Profile branch. */ \
1503 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
1504 UPDATE_PC_AND_TOS(skip, -2); \
1505 DO_BACKEDGE_CHECKS(skip, branch_pc); \
1506 CONTINUE; \
1507 }
1508
1509 #define NULL_COMPARISON_NOT_OP(name) \
1510 CASE(_if##name): { \
1511 const bool cmp = (!(STACK_OBJECT(-1) == NULL)); \
1512 int skip = cmp \
1513 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
1514 address branch_pc = pc; \
1515 /* Profile branch. */ \
1516 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
1517 UPDATE_PC_AND_TOS(skip, -1); \
1518 DO_BACKEDGE_CHECKS(skip, branch_pc); \
1519 CONTINUE; \
1520 }
1521
1522 #define NULL_COMPARISON_OP(name) \
1523 CASE(_if##name): { \
1524 const bool cmp = ((STACK_OBJECT(-1) == NULL)); \
1525 int skip = cmp \
1526 ? (int16_t)Bytes::get_Java_u2(pc + 1) : 3; \
1527 address branch_pc = pc; \
1528 /* Profile branch. */ \
1529 BI_PROFILE_UPDATE_BRANCH(/*is_taken=*/cmp); \
1530 UPDATE_PC_AND_TOS(skip, -1); \
1531 DO_BACKEDGE_CHECKS(skip, branch_pc); \
1532 CONTINUE; \
1533 }
1534 COMPARISON_OP(lt, <);
1535 COMPARISON_OP(gt, >);
1536 COMPARISON_OP(le, <=);
1537 COMPARISON_OP(ge, >=);
1538 COMPARISON_OP2(eq, ==); /* include ref comparison */
1539 COMPARISON_OP2(ne, !=); /* include ref comparison */
1540 NULL_COMPARISON_OP(null);
1541 NULL_COMPARISON_NOT_OP(nonnull);
1542
1543 /* Goto pc at specified offset in switch table. */
1544
1545 CASE(_tableswitch): {
1546 jint* lpc = (jint*)VMalignWordUp(pc+1);
1547 int32_t key = STACK_INT(-1);
1548 int32_t low = Bytes::get_Java_u4((address)&lpc[1]);
1549 int32_t high = Bytes::get_Java_u4((address)&lpc[2]);
1550 int32_t skip;
1551 key -= low;
1552 if (((uint32_t) key > (uint32_t)(high - low))) {
1553 key = -1;
1554 skip = Bytes::get_Java_u4((address)&lpc[0]);
1555 } else {
1556 skip = Bytes::get_Java_u4((address)&lpc[key + 3]);
1557 }
1558 // Profile switch.
1559 BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/key);
1560 // Does this really need a full backedge check (osr)?
1561 address branch_pc = pc;
1562 UPDATE_PC_AND_TOS(skip, -1);
1563 DO_BACKEDGE_CHECKS(skip, branch_pc);
1564 CONTINUE;
1565 }
1566
1567 /* Goto pc whose table entry matches specified key. */
1568
1569 CASE(_lookupswitch): {
1570 jint* lpc = (jint*)VMalignWordUp(pc+1);
1571 int32_t key = STACK_INT(-1);
1572 int32_t skip = Bytes::get_Java_u4((address) lpc); /* default amount */
1573 // Remember index.
1574 int index = -1;
1575 int newindex = 0;
1576 int32_t npairs = Bytes::get_Java_u4((address) &lpc[1]);
1577 while (--npairs >= 0) {
1578 lpc += 2;
1579 if (key == (int32_t)Bytes::get_Java_u4((address)lpc)) {
1580 skip = Bytes::get_Java_u4((address)&lpc[1]);
1581 index = newindex;
1582 break;
1583 }
1584 newindex += 1;
1585 }
1586 // Profile switch.
1587 BI_PROFILE_UPDATE_SWITCH(/*switch_index=*/index);
1588 address branch_pc = pc;
1589 UPDATE_PC_AND_TOS(skip, -1);
1590 DO_BACKEDGE_CHECKS(skip, branch_pc);
1591 CONTINUE;
1592 }
1593
1594 CASE(_fcmpl):
1595 CASE(_fcmpg):
1596 {
1597 SET_STACK_INT(VMfloatCompare(STACK_FLOAT(-2),
1598 STACK_FLOAT(-1),
1599 (opcode == Bytecodes::_fcmpl ? -1 : 1)),
1600 -2);
1601 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1602 }
1603
1604 CASE(_dcmpl):
1605 CASE(_dcmpg):
1606 {
1607 int r = VMdoubleCompare(STACK_DOUBLE(-3),
1608 STACK_DOUBLE(-1),
1609 (opcode == Bytecodes::_dcmpl ? -1 : 1));
1610 MORE_STACK(-4); // Pop
1611 SET_STACK_INT(r, 0);
1612 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1613 }
1614
1615 CASE(_lcmp):
1616 {
1617 int r = VMlongCompare(STACK_LONG(-3), STACK_LONG(-1));
1618 MORE_STACK(-4);
1619 SET_STACK_INT(r, 0);
1620 UPDATE_PC_AND_TOS_AND_CONTINUE(1, 1);
1621 }
1622
1623
1624 /* Return from a method */
1625
1626 CASE(_areturn):
1627 CASE(_ireturn):
1628 CASE(_freturn):
1629 {
1630 // Allow a safepoint before returning to frame manager.
1631 SAFEPOINT;
1632
1633 goto handle_return;
1634 }
1635
1636 CASE(_lreturn):
1637 CASE(_dreturn):
1638 {
1639 // Allow a safepoint before returning to frame manager.
1640 SAFEPOINT;
1641 goto handle_return;
1642 }
1643
1644 CASE(_return_register_finalizer): {
1645
1646 oop rcvr = LOCALS_OBJECT(0);
1647 VERIFY_OOP(rcvr);
1648 if (rcvr->klass()->has_finalizer()) {
1649 CALL_VM(InterpreterRuntime::register_finalizer(THREAD, rcvr), handle_exception);
1650 }
1651 goto handle_return;
1652 }
1653 CASE(_return): {
1654
1655 // Allow a safepoint before returning to frame manager.
1656 SAFEPOINT;
1657 goto handle_return;
1658 }
1659
1660 /* Array access byte-codes */
1661
1662 /* Every array access byte-code starts out like this */
1663 // arrayOopDesc* arrObj = (arrayOopDesc*)STACK_OBJECT(arrayOff);
1664 #define ARRAY_INTRO(arrayOff) \
1665 arrayOop arrObj = (arrayOop)STACK_OBJECT(arrayOff); \
1666 jint index = STACK_INT(arrayOff + 1); \
1667 char message[jintAsStringSize]; \
1668 CHECK_NULL(arrObj); \
1669 if ((uint32_t)index >= (uint32_t)arrObj->length()) { \
1670 sprintf(message, "%d", index); \
1671 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), \
1672 message, note_rangeCheck_trap); \
1673 }
1674
1675 /* 32-bit loads. These handle conversion from < 32-bit types */
1676 #define ARRAY_LOADTO32(T, T2, format, stackRes, extra) \
1677 { \
1678 ARRAY_INTRO(-2); \
1679 (void)extra; \
1680 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \
1681 -2); \
1682 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \
1683 }
1684
1685 /* 64-bit loads */
1686 #define ARRAY_LOADTO64(T,T2, stackRes, extra) \
1687 { \
1688 ARRAY_INTRO(-2); \
1689 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \
1690 (void)extra; \
1691 UPDATE_PC_AND_CONTINUE(1); \
1692 }
1693
1694 CASE(_iaload):
1695 ARRAY_LOADTO32(T_INT, jint, "%d", STACK_INT, 0);
1696 CASE(_faload):
1697 ARRAY_LOADTO32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
1698 CASE(_aaload): {
1699 ARRAY_INTRO(-2);
1700 SET_STACK_OBJECT(((objArrayOop) arrObj)->obj_at(index), -2);
1701 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1702 }
1703 CASE(_baload):
1704 ARRAY_LOADTO32(T_BYTE, jbyte, "%d", STACK_INT, 0);
1705 CASE(_caload):
1706 ARRAY_LOADTO32(T_CHAR, jchar, "%d", STACK_INT, 0);
1707 CASE(_saload):
1708 ARRAY_LOADTO32(T_SHORT, jshort, "%d", STACK_INT, 0);
1709 CASE(_laload):
1710 ARRAY_LOADTO64(T_LONG, jlong, STACK_LONG, 0);
1711 CASE(_daload):
1712 ARRAY_LOADTO64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
1713
1714 /* 32-bit stores. These handle conversion to < 32-bit types */
1715 #define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra) \
1716 { \
1717 ARRAY_INTRO(-3); \
1718 (void)extra; \
1719 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
1720 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); \
1721 }
1722
1723 /* 64-bit stores */
1724 #define ARRAY_STOREFROM64(T, T2, stackSrc, extra) \
1725 { \
1726 ARRAY_INTRO(-4); \
1727 (void)extra; \
1728 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \
1729 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4); \
1730 }
1731
1732 CASE(_iastore):
1733 ARRAY_STOREFROM32(T_INT, jint, "%d", STACK_INT, 0);
1734 CASE(_fastore):
1735 ARRAY_STOREFROM32(T_FLOAT, jfloat, "%f", STACK_FLOAT, 0);
1736 /*
1737 * This one looks different because of the assignability check
1738 */
1739 CASE(_aastore): {
1740 oop rhsObject = STACK_OBJECT(-1);
1741 VERIFY_OOP(rhsObject);
1742 ARRAY_INTRO( -3);
1743 // arrObj, index are set
1744 if (rhsObject != NULL) {
1745 /* Check assignability of rhsObject into arrObj */
1746 Klass* rhsKlass = rhsObject->klass(); // EBX (subclass)
1747 Klass* elemKlass = ObjArrayKlass::cast(arrObj->klass())->element_klass(); // superklass EAX
1748 //
1749 // Check for compatibilty. This check must not GC!!
1750 // Seems way more expensive now that we must dispatch
1751 //
1752 if (rhsKlass != elemKlass && !rhsKlass->is_subtype_of(elemKlass)) { // ebx->is...
1753 // Decrement counter if subtype check failed.
1754 BI_PROFILE_SUBTYPECHECK_FAILED(rhsKlass);
1755 VM_JAVA_ERROR(vmSymbols::java_lang_ArrayStoreException(), "", note_arrayCheck_trap);
1756 }
1757 // Profile checkcast with null_seen and receiver.
1758 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, rhsKlass);
1759 } else {
1760 // Profile checkcast with null_seen and receiver.
1761 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL);
1762 }
1763 ((objArrayOop) arrObj)->obj_at_put(index, rhsObject);
1764 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3);
1765 }
1766 CASE(_bastore):
1767 ARRAY_STOREFROM32(T_BYTE, jbyte, "%d", STACK_INT, 0);
1768 CASE(_castore):
1769 ARRAY_STOREFROM32(T_CHAR, jchar, "%d", STACK_INT, 0);
1770 CASE(_sastore):
1771 ARRAY_STOREFROM32(T_SHORT, jshort, "%d", STACK_INT, 0);
1772 CASE(_lastore):
1773 ARRAY_STOREFROM64(T_LONG, jlong, STACK_LONG, 0);
1774 CASE(_dastore):
1775 ARRAY_STOREFROM64(T_DOUBLE, jdouble, STACK_DOUBLE, 0);
1776
1777 CASE(_arraylength):
1778 {
1779 arrayOop ary = (arrayOop) STACK_OBJECT(-1);
1780 CHECK_NULL(ary);
1781 SET_STACK_INT(ary->length(), -1);
1782 UPDATE_PC_AND_CONTINUE(1);
1783 }
1784
1785 /* monitorenter and monitorexit for locking/unlocking an object */
1786
1787 CASE(_monitorenter): {
1788 oop lockee = STACK_OBJECT(-1);
1789 // derefing's lockee ought to provoke implicit null check
1790 CHECK_NULL(lockee);
1791 // find a free monitor or one already allocated for this object
1792 // if we find a matching object then we need a new monitor
1793 // since this is recursive enter
1794 BasicObjectLock* limit = istate->monitor_base();
1795 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
1796 BasicObjectLock* entry = NULL;
1797 while (most_recent != limit ) {
1798 if (most_recent->obj() == NULL) entry = most_recent;
1799 else if (most_recent->obj() == lockee) break;
1800 most_recent++;
1801 }
1802 if (entry != NULL) {
1803 entry->set_obj(lockee);
1804 int success = false;
1805 uintptr_t epoch_mask_in_place = (uintptr_t)markOopDesc::epoch_mask_in_place;
1806
1807 markOop mark = lockee->mark();
1808 intptr_t hash = (intptr_t) markOopDesc::no_hash;
1809 // implies UseBiasedLocking
1810 if (mark->has_bias_pattern()) {
1811 uintptr_t thread_ident;
1812 uintptr_t anticipated_bias_locking_value;
1813 thread_ident = (uintptr_t)istate->thread();
1814 anticipated_bias_locking_value =
1815 (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
1816 ~((uintptr_t) markOopDesc::age_mask_in_place);
1817
1818 if (anticipated_bias_locking_value == 0) {
1819 // already biased towards this thread, nothing to do
1820 if (PrintBiasedLockingStatistics) {
1821 (* BiasedLocking::biased_lock_entry_count_addr())++;
1822 }
1823 success = true;
1824 }
1825 else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
1826 // try revoke bias
1827 markOop header = lockee->klass()->prototype_header();
1828 if (hash != markOopDesc::no_hash) {
1829 header = header->copy_set_hash(hash);
1830 }
1831 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
1832 if (PrintBiasedLockingStatistics)
1833 (*BiasedLocking::revoked_lock_entry_count_addr())++;
1834 }
1835 }
1836 else if ((anticipated_bias_locking_value & epoch_mask_in_place) !=0) {
1837 // try rebias
1838 markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
1839 if (hash != markOopDesc::no_hash) {
1840 new_header = new_header->copy_set_hash(hash);
1841 }
1842 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
1843 if (PrintBiasedLockingStatistics)
1844 (* BiasedLocking::rebiased_lock_entry_count_addr())++;
1845 }
1846 else {
1847 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
1848 }
1849 success = true;
1850 }
1851 else {
1852 // try to bias towards thread in case object is anonymously biased
1853 markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
1854 (uintptr_t)markOopDesc::age_mask_in_place |
1855 epoch_mask_in_place));
1856 if (hash != markOopDesc::no_hash) {
1857 header = header->copy_set_hash(hash);
1858 }
1859 markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
1860 // debugging hint
1861 DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
1862 if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
1863 if (PrintBiasedLockingStatistics)
1864 (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
1865 }
1866 else {
1867 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
1868 }
1869 success = true;
1870 }
1871 }
1872
1873 // traditional lightweight locking
1874 if (!success) {
1875 markOop displaced = lockee->mark()->set_unlocked();
1876 entry->lock()->set_displaced_header(displaced);
1877 bool call_vm = UseHeavyMonitors;
1878 if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
1879 // Is it simple recursive case?
1880 if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
1881 entry->lock()->set_displaced_header(NULL);
1882 } else {
1883 CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
1884 }
1885 }
1886 }
1887 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1888 } else {
1889 istate->set_msg(more_monitors);
1890 UPDATE_PC_AND_RETURN(0); // Re-execute
1891 }
1892 }
1893
1894 CASE(_monitorexit): {
1895 oop lockee = STACK_OBJECT(-1);
1896 CHECK_NULL(lockee);
1897 // derefing's lockee ought to provoke implicit null check
1898 // find our monitor slot
1899 BasicObjectLock* limit = istate->monitor_base();
1900 BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
1901 while (most_recent != limit ) {
1902 if ((most_recent)->obj() == lockee) {
1903 BasicLock* lock = most_recent->lock();
1904 markOop header = lock->displaced_header();
1905 most_recent->set_obj(NULL);
1906 if (!lockee->mark()->has_bias_pattern()) {
1907 bool call_vm = UseHeavyMonitors;
1908 // If it isn't recursive we either must swap old header or call the runtime
1909 if (header != NULL || call_vm) {
1910 if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
1911 // restore object for the slow case
1912 most_recent->set_obj(lockee);
1913 CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
1914 }
1915 }
1916 }
1917 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
1918 }
1919 most_recent++;
1920 }
1921 // Need to throw illegal monitor state exception
1922 CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
1923 ShouldNotReachHere();
1924 }
1925
1926 /* All of the non-quick opcodes. */
1927
1928 /* -Set clobbersCpIndex true if the quickened opcode clobbers the
1929 * constant pool index in the instruction.
1930 */
1931 CASE(_getfield):
1932 CASE(_getstatic):
1933 {
1934 u2 index;
1935 ConstantPoolCacheEntry* cache;
1936 index = Bytes::get_native_u2(pc+1);
1937
1938 // QQQ Need to make this as inlined as possible. Probably need to
1939 // split all the bytecode cases out so c++ compiler has a chance
1940 // for constant prop to fold everything possible away.
1941
1942 cache = cp->entry_at(index);
1943 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
1944 CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode),
1945 handle_exception);
1946 cache = cp->entry_at(index);
1947 }
1948
1949 #ifdef VM_JVMTI
1950 if (_jvmti_interp_events) {
1951 int *count_addr;
1952 oop obj;
1953 // Check to see if a field modification watch has been set
1954 // before we take the time to call into the VM.
1955 count_addr = (int *)JvmtiExport::get_field_access_count_addr();
1956 if ( *count_addr > 0 ) {
1957 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
1958 obj = (oop)NULL;
1959 } else {
1960 obj = (oop) STACK_OBJECT(-1);
1961 VERIFY_OOP(obj);
1962 }
1963 CALL_VM(InterpreterRuntime::post_field_access(THREAD,
1964 obj,
1965 cache),
1966 handle_exception);
1967 }
1968 }
1969 #endif /* VM_JVMTI */
1970
1971 oop obj;
1972 if ((Bytecodes::Code)opcode == Bytecodes::_getstatic) {
1973 Klass* k = cache->f1_as_klass();
1974 obj = k->java_mirror();
1975 MORE_STACK(1); // Assume single slot push
1976 } else {
1977 obj = (oop) STACK_OBJECT(-1);
1978 CHECK_NULL(obj);
1979 }
1980
1981 //
1982 // Now store the result on the stack
1983 //
1984 TosState tos_type = cache->flag_state();
1985 int field_offset = cache->f2_as_index();
1986 if (cache->is_volatile()) {
1987 if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
1988 OrderAccess::fence();
1989 }
1990 if (tos_type == atos) {
1991 VERIFY_OOP(obj->obj_field_acquire(field_offset));
1992 SET_STACK_OBJECT(obj->obj_field_acquire(field_offset), -1);
1993 } else if (tos_type == itos) {
1994 SET_STACK_INT(obj->int_field_acquire(field_offset), -1);
1995 } else if (tos_type == ltos) {
1996 SET_STACK_LONG(obj->long_field_acquire(field_offset), 0);
1997 MORE_STACK(1);
1998 } else if (tos_type == btos) {
1999 SET_STACK_INT(obj->byte_field_acquire(field_offset), -1);
2000 } else if (tos_type == ctos) {
2001 SET_STACK_INT(obj->char_field_acquire(field_offset), -1);
2002 } else if (tos_type == stos) {
2003 SET_STACK_INT(obj->short_field_acquire(field_offset), -1);
2004 } else if (tos_type == ftos) {
2005 SET_STACK_FLOAT(obj->float_field_acquire(field_offset), -1);
2006 } else {
2007 SET_STACK_DOUBLE(obj->double_field_acquire(field_offset), 0);
2008 MORE_STACK(1);
2009 }
2010 } else {
2011 if (tos_type == atos) {
2012 VERIFY_OOP(obj->obj_field(field_offset));
2013 SET_STACK_OBJECT(obj->obj_field(field_offset), -1);
2014 } else if (tos_type == itos) {
2015 SET_STACK_INT(obj->int_field(field_offset), -1);
2016 } else if (tos_type == ltos) {
2017 SET_STACK_LONG(obj->long_field(field_offset), 0);
2018 MORE_STACK(1);
2019 } else if (tos_type == btos) {
2020 SET_STACK_INT(obj->byte_field(field_offset), -1);
2021 } else if (tos_type == ctos) {
2022 SET_STACK_INT(obj->char_field(field_offset), -1);
2023 } else if (tos_type == stos) {
2024 SET_STACK_INT(obj->short_field(field_offset), -1);
2025 } else if (tos_type == ftos) {
2026 SET_STACK_FLOAT(obj->float_field(field_offset), -1);
2027 } else {
2028 SET_STACK_DOUBLE(obj->double_field(field_offset), 0);
2029 MORE_STACK(1);
2030 }
2031 }
2032
2033 UPDATE_PC_AND_CONTINUE(3);
2034 }
2035
2036 CASE(_putfield):
2037 CASE(_putstatic):
2038 {
2039 u2 index = Bytes::get_native_u2(pc+1);
2040 ConstantPoolCacheEntry* cache = cp->entry_at(index);
2041 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
2042 CALL_VM(InterpreterRuntime::resolve_get_put(THREAD, (Bytecodes::Code)opcode),
2043 handle_exception);
2044 cache = cp->entry_at(index);
2045 }
2046
2047 #ifdef VM_JVMTI
2048 if (_jvmti_interp_events) {
2049 int *count_addr;
2050 oop obj;
2051 // Check to see if a field modification watch has been set
2052 // before we take the time to call into the VM.
2053 count_addr = (int *)JvmtiExport::get_field_modification_count_addr();
2054 if ( *count_addr > 0 ) {
2055 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
2056 obj = (oop)NULL;
2057 }
2058 else {
2059 if (cache->is_long() || cache->is_double()) {
2060 obj = (oop) STACK_OBJECT(-3);
2061 } else {
2062 obj = (oop) STACK_OBJECT(-2);
2063 }
2064 VERIFY_OOP(obj);
2065 }
2066
2067 CALL_VM(InterpreterRuntime::post_field_modification(THREAD,
2068 obj,
2069 cache,
2070 (jvalue *)STACK_SLOT(-1)),
2071 handle_exception);
2072 }
2073 }
2074 #endif /* VM_JVMTI */
2075
2076 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
2077 // out so c++ compiler has a chance for constant prop to fold everything possible away.
2078
2079 oop obj;
2080 int count;
2081 TosState tos_type = cache->flag_state();
2082
2083 count = -1;
2084 if (tos_type == ltos || tos_type == dtos) {
2085 --count;
2086 }
2087 if ((Bytecodes::Code)opcode == Bytecodes::_putstatic) {
2088 Klass* k = cache->f1_as_klass();
2089 obj = k->java_mirror();
2090 } else {
2091 --count;
2092 obj = (oop) STACK_OBJECT(count);
2093 CHECK_NULL(obj);
2094 }
2095
2096 //
2097 // Now store the result
2098 //
2099 int field_offset = cache->f2_as_index();
2100 if (cache->is_volatile()) {
2101 if (tos_type == itos) {
2102 obj->release_int_field_put(field_offset, STACK_INT(-1));
2103 } else if (tos_type == atos) {
2104 VERIFY_OOP(STACK_OBJECT(-1));
2105 obj->release_obj_field_put(field_offset, STACK_OBJECT(-1));
2106 } else if (tos_type == btos) {
2107 obj->release_byte_field_put(field_offset, STACK_INT(-1));
2108 } else if (tos_type == ltos) {
2109 obj->release_long_field_put(field_offset, STACK_LONG(-1));
2110 } else if (tos_type == ctos) {
2111 obj->release_char_field_put(field_offset, STACK_INT(-1));
2112 } else if (tos_type == stos) {
2113 obj->release_short_field_put(field_offset, STACK_INT(-1));
2114 } else if (tos_type == ftos) {
2115 obj->release_float_field_put(field_offset, STACK_FLOAT(-1));
2116 } else {
2117 obj->release_double_field_put(field_offset, STACK_DOUBLE(-1));
2118 }
2119 OrderAccess::storeload();
2120 } else {
2121 if (tos_type == itos) {
2122 obj->int_field_put(field_offset, STACK_INT(-1));
2123 } else if (tos_type == atos) {
2124 VERIFY_OOP(STACK_OBJECT(-1));
2125 obj->obj_field_put(field_offset, STACK_OBJECT(-1));
2126 } else if (tos_type == btos) {
2127 obj->byte_field_put(field_offset, STACK_INT(-1));
2128 } else if (tos_type == ltos) {
2129 obj->long_field_put(field_offset, STACK_LONG(-1));
2130 } else if (tos_type == ctos) {
2131 obj->char_field_put(field_offset, STACK_INT(-1));
2132 } else if (tos_type == stos) {
2133 obj->short_field_put(field_offset, STACK_INT(-1));
2134 } else if (tos_type == ftos) {
2135 obj->float_field_put(field_offset, STACK_FLOAT(-1));
2136 } else {
2137 obj->double_field_put(field_offset, STACK_DOUBLE(-1));
2138 }
2139 }
2140
2141 UPDATE_PC_AND_TOS_AND_CONTINUE(3, count);
2142 }
2143
2144 CASE(_new): {
2145 u2 index = Bytes::get_Java_u2(pc+1);
2146 ConstantPool* constants = istate->method()->constants();
2147 if (!constants->tag_at(index).is_unresolved_klass()) {
2148 // Make sure klass is initialized and doesn't have a finalizer
2149 Klass* entry = constants->slot_at(index).get_klass();
2150 assert(entry->is_klass(), "Should be resolved klass");
2151 Klass* k_entry = (Klass*) entry;
2152 assert(k_entry->oop_is_instance(), "Should be InstanceKlass");
2153 InstanceKlass* ik = (InstanceKlass*) k_entry;
2154 if ( ik->is_initialized() && ik->can_be_fastpath_allocated() ) {
2155 size_t obj_size = ik->size_helper();
2156 oop result = NULL;
2157 // If the TLAB isn't pre-zeroed then we'll have to do it
2158 bool need_zero = !ZeroTLAB;
2159 if (UseTLAB) {
2160 result = (oop) THREAD->tlab().allocate(obj_size);
2161 }
2162 // Disable non-TLAB-based fast-path, because profiling requires that all
2163 // allocations go through InterpreterRuntime::_new() if THREAD->tlab().allocate
2164 // returns NULL.
2165 #ifndef CC_INTERP_PROFILE
2166 if (result == NULL) {
2167 need_zero = true;
2168 // Try allocate in shared eden
2169 retry:
2170 HeapWord* compare_to = *Universe::heap()->top_addr();
2171 HeapWord* new_top = compare_to + obj_size;
2172 if (new_top <= *Universe::heap()->end_addr()) {
2173 if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
2174 goto retry;
2175 }
2176 result = (oop) compare_to;
2177 }
2178 }
2179 #endif
2180 if (result != NULL) {
2181 // Initialize object (if nonzero size and need) and then the header
2182 if (need_zero ) {
2183 HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize;
2184 obj_size -= sizeof(oopDesc) / oopSize;
2185 if (obj_size > 0 ) {
2186 memset(to_zero, 0, obj_size * HeapWordSize);
2187 }
2188 }
2189 if (UseBiasedLocking) {
2190 result->set_mark(ik->prototype_header());
2191 } else {
2192 result->set_mark(markOopDesc::prototype());
2193 }
2194 result->set_klass_gap(0);
2195 result->set_klass(k_entry);
2196 // Must prevent reordering of stores for object initialization
2197 // with stores that publish the new object.
2198 OrderAccess::storestore();
2199 SET_STACK_OBJECT(result, 0);
2200 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
2201 }
2202 }
2203 }
2204 // Slow case allocation
2205 CALL_VM(InterpreterRuntime::_new(THREAD, METHOD->constants(), index),
2206 handle_exception);
2207 // Must prevent reordering of stores for object initialization
2208 // with stores that publish the new object.
2209 OrderAccess::storestore();
2210 SET_STACK_OBJECT(THREAD->vm_result(), 0);
2211 THREAD->set_vm_result(NULL);
2212 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 1);
2213 }
2214 CASE(_anewarray): {
2215 u2 index = Bytes::get_Java_u2(pc+1);
2216 jint size = STACK_INT(-1);
2217 CALL_VM(InterpreterRuntime::anewarray(THREAD, METHOD->constants(), index, size),
2218 handle_exception);
2219 // Must prevent reordering of stores for object initialization
2220 // with stores that publish the new object.
2221 OrderAccess::storestore();
2222 SET_STACK_OBJECT(THREAD->vm_result(), -1);
2223 THREAD->set_vm_result(NULL);
2224 UPDATE_PC_AND_CONTINUE(3);
2225 }
2226 CASE(_multianewarray): {
2227 jint dims = *(pc+3);
2228 jint size = STACK_INT(-1);
2229 // stack grows down, dimensions are up!
2230 jint *dimarray =
2231 (jint*)&topOfStack[dims * Interpreter::stackElementWords+
2232 Interpreter::stackElementWords-1];
2233 //adjust pointer to start of stack element
2234 CALL_VM(InterpreterRuntime::multianewarray(THREAD, dimarray),
2235 handle_exception);
2236 // Must prevent reordering of stores for object initialization
2237 // with stores that publish the new object.
2238 OrderAccess::storestore();
2239 SET_STACK_OBJECT(THREAD->vm_result(), -dims);
2240 THREAD->set_vm_result(NULL);
2241 UPDATE_PC_AND_TOS_AND_CONTINUE(4, -(dims-1));
2242 }
2243 CASE(_checkcast):
2244 if (STACK_OBJECT(-1) != NULL) {
2245 VERIFY_OOP(STACK_OBJECT(-1));
2246 u2 index = Bytes::get_Java_u2(pc+1);
2247 // Constant pool may have actual klass or unresolved klass. If it is
2248 // unresolved we must resolve it.
2249 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
2250 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
2251 }
2252 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
2253 Klass* objKlass = STACK_OBJECT(-1)->klass(); // ebx
2254 //
2255 // Check for compatibilty. This check must not GC!!
2256 // Seems way more expensive now that we must dispatch.
2257 //
2258 if (objKlass != klassOf && !objKlass->is_subtype_of(klassOf)) {
2259 // Decrement counter at checkcast.
2260 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass);
2261 ResourceMark rm(THREAD);
2262 const char* objName = objKlass->external_name();
2263 const char* klassName = klassOf->external_name();
2264 char* message = SharedRuntime::generate_class_cast_message(
2265 objName, klassName);
2266 VM_JAVA_ERROR(vmSymbols::java_lang_ClassCastException(), message, note_classCheck_trap);
2267 }
2268 // Profile checkcast with null_seen and receiver.
2269 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/false, objKlass);
2270 } else {
2271 // Profile checkcast with null_seen and receiver.
2272 BI_PROFILE_UPDATE_CHECKCAST(/*null_seen=*/true, NULL);
2273 }
2274 UPDATE_PC_AND_CONTINUE(3);
2275
2276 CASE(_instanceof):
2277 if (STACK_OBJECT(-1) == NULL) {
2278 SET_STACK_INT(0, -1);
2279 // Profile instanceof with null_seen and receiver.
2280 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/true, NULL);
2281 } else {
2282 VERIFY_OOP(STACK_OBJECT(-1));
2283 u2 index = Bytes::get_Java_u2(pc+1);
2284 // Constant pool may have actual klass or unresolved klass. If it is
2285 // unresolved we must resolve it.
2286 if (METHOD->constants()->tag_at(index).is_unresolved_klass()) {
2287 CALL_VM(InterpreterRuntime::quicken_io_cc(THREAD), handle_exception);
2288 }
2289 Klass* klassOf = (Klass*) METHOD->constants()->slot_at(index).get_klass();
2290 Klass* objKlass = STACK_OBJECT(-1)->klass();
2291 //
2292 // Check for compatibilty. This check must not GC!!
2293 // Seems way more expensive now that we must dispatch.
2294 //
2295 if ( objKlass == klassOf || objKlass->is_subtype_of(klassOf)) {
2296 SET_STACK_INT(1, -1);
2297 } else {
2298 SET_STACK_INT(0, -1);
2299 // Decrement counter at checkcast.
2300 BI_PROFILE_SUBTYPECHECK_FAILED(objKlass);
2301 }
2302 // Profile instanceof with null_seen and receiver.
2303 BI_PROFILE_UPDATE_INSTANCEOF(/*null_seen=*/false, objKlass);
2304 }
2305 UPDATE_PC_AND_CONTINUE(3);
2306
2307 CASE(_ldc_w):
2308 CASE(_ldc):
2309 {
2310 u2 index;
2311 bool wide = false;
2312 int incr = 2; // frequent case
2313 if (opcode == Bytecodes::_ldc) {
2314 index = pc[1];
2315 } else {
2316 index = Bytes::get_Java_u2(pc+1);
2317 incr = 3;
2318 wide = true;
2319 }
2320
2321 ConstantPool* constants = METHOD->constants();
2322 switch (constants->tag_at(index).value()) {
2323 case JVM_CONSTANT_Integer:
2324 SET_STACK_INT(constants->int_at(index), 0);
2325 break;
2326
2327 case JVM_CONSTANT_Float:
2328 SET_STACK_FLOAT(constants->float_at(index), 0);
2329 break;
2330
2331 case JVM_CONSTANT_String:
2332 {
2333 oop result = constants->resolved_references()->obj_at(index);
2334 if (result == NULL) {
2335 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode), handle_exception);
2336 SET_STACK_OBJECT(THREAD->vm_result(), 0);
2337 THREAD->set_vm_result(NULL);
2338 } else {
2339 VERIFY_OOP(result);
2340 SET_STACK_OBJECT(result, 0);
2341 }
2342 break;
2343 }
2344
2345 case JVM_CONSTANT_Class:
2346 VERIFY_OOP(constants->resolved_klass_at(index)->java_mirror());
2347 SET_STACK_OBJECT(constants->resolved_klass_at(index)->java_mirror(), 0);
2348 break;
2349
2350 case JVM_CONSTANT_UnresolvedClass:
2351 case JVM_CONSTANT_UnresolvedClassInError:
2352 CALL_VM(InterpreterRuntime::ldc(THREAD, wide), handle_exception);
2353 SET_STACK_OBJECT(THREAD->vm_result(), 0);
2354 THREAD->set_vm_result(NULL);
2355 break;
2356
2357 default: ShouldNotReachHere();
2358 }
2359 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
2360 }
2361
2362 CASE(_ldc2_w):
2363 {
2364 u2 index = Bytes::get_Java_u2(pc+1);
2365
2366 ConstantPool* constants = METHOD->constants();
2367 switch (constants->tag_at(index).value()) {
2368
2369 case JVM_CONSTANT_Long:
2370 SET_STACK_LONG(constants->long_at(index), 1);
2371 break;
2372
2373 case JVM_CONSTANT_Double:
2374 SET_STACK_DOUBLE(constants->double_at(index), 1);
2375 break;
2376 default: ShouldNotReachHere();
2377 }
2378 UPDATE_PC_AND_TOS_AND_CONTINUE(3, 2);
2379 }
2380
2381 CASE(_fast_aldc_w):
2382 CASE(_fast_aldc): {
2383 u2 index;
2384 int incr;
2385 if (opcode == Bytecodes::_fast_aldc) {
2386 index = pc[1];
2387 incr = 2;
2388 } else {
2389 index = Bytes::get_native_u2(pc+1);
2390 incr = 3;
2391 }
2392
2393 // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
2394 // This kind of CP cache entry does not need to match the flags byte, because
2395 // there is a 1-1 relation between bytecode type and CP entry type.
2396 ConstantPool* constants = METHOD->constants();
2397 oop result = constants->resolved_references()->obj_at(index);
2398 if (result == NULL) {
2399 CALL_VM(InterpreterRuntime::resolve_ldc(THREAD, (Bytecodes::Code) opcode),
2400 handle_exception);
2401 result = THREAD->vm_result();
2402 }
2403
2404 VERIFY_OOP(result);
2405 SET_STACK_OBJECT(result, 0);
2406 UPDATE_PC_AND_TOS_AND_CONTINUE(incr, 1);
2407 }
2408
2409 CASE(_invokedynamic): {
2410
2411 u4 index = Bytes::get_native_u4(pc+1);
2412 ConstantPoolCacheEntry* cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
2413
2414 // We are resolved if the resolved_references field contains a non-null object (CallSite, etc.)
2415 // This kind of CP cache entry does not need to match the flags byte, because
2416 // there is a 1-1 relation between bytecode type and CP entry type.
2417 if (! cache->is_resolved((Bytecodes::Code) opcode)) {
2418 CALL_VM(InterpreterRuntime::resolve_invokedynamic(THREAD),
2419 handle_exception);
2420 cache = cp->constant_pool()->invokedynamic_cp_cache_entry_at(index);
2421 }
2422
2423 Method* method = cache->f1_as_method();
2424 if (VerifyOops) method->verify();
2425
2426 if (cache->has_appendix()) {
2427 ConstantPool* constants = METHOD->constants();
2428 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
2429 MORE_STACK(1);
2430 }
2431
2432 istate->set_msg(call_method);
2433 istate->set_callee(method);
2434 istate->set_callee_entry_point(method->from_interpreted_entry());
2435 istate->set_bcp_advance(5);
2436
2437 // Invokedynamic has got a call counter, just like an invokestatic -> increment!
2438 BI_PROFILE_UPDATE_CALL();
2439
2440 UPDATE_PC_AND_RETURN(0); // I'll be back...
2441 }
2442
2443 CASE(_invokehandle): {
2444
2445 u2 index = Bytes::get_native_u2(pc+1);
2446 ConstantPoolCacheEntry* cache = cp->entry_at(index);
2447
2448 if (! cache->is_resolved((Bytecodes::Code) opcode)) {
2449 CALL_VM(InterpreterRuntime::resolve_invokehandle(THREAD),
2450 handle_exception);
2451 cache = cp->entry_at(index);
2452 }
2453
2454 Method* method = cache->f1_as_method();
2455 if (VerifyOops) method->verify();
2456
2457 if (cache->has_appendix()) {
2458 ConstantPool* constants = METHOD->constants();
2459 SET_STACK_OBJECT(cache->appendix_if_resolved(constants), 0);
2460 MORE_STACK(1);
2461 }
2462
2463 istate->set_msg(call_method);
2464 istate->set_callee(method);
2465 istate->set_callee_entry_point(method->from_interpreted_entry());
2466 istate->set_bcp_advance(3);
2467
2468 // Invokehandle has got a call counter, just like a final call -> increment!
2469 BI_PROFILE_UPDATE_FINALCALL();
2470
2471 UPDATE_PC_AND_RETURN(0); // I'll be back...
2472 }
2473
2474 CASE(_invokeinterface): {
2475 u2 index = Bytes::get_native_u2(pc+1);
2476
2477 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
2478 // out so c++ compiler has a chance for constant prop to fold everything possible away.
2479
2480 ConstantPoolCacheEntry* cache = cp->entry_at(index);
2481 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
2482 CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode),
2483 handle_exception);
2484 cache = cp->entry_at(index);
2485 }
2486
2487 istate->set_msg(call_method);
2488
2489 // Special case of invokeinterface called for virtual method of
2490 // java.lang.Object. See cpCacheOop.cpp for details.
2491 // This code isn't produced by javac, but could be produced by
2492 // another compliant java compiler.
2493 if (cache->is_forced_virtual()) {
2494 Method* callee;
2495 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
2496 if (cache->is_vfinal()) {
2497 callee = cache->f2_as_vfinal_method();
2498 // Profile 'special case of invokeinterface' final call.
2499 BI_PROFILE_UPDATE_FINALCALL();
2500 } else {
2501 // Get receiver.
2502 int parms = cache->parameter_size();
2503 // Same comments as invokevirtual apply here.
2504 oop rcvr = STACK_OBJECT(-parms);
2505 VERIFY_OOP(rcvr);
2506 InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass();
2507 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()];
2508 // Profile 'special case of invokeinterface' virtual call.
2509 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());
2510 }
2511 istate->set_callee(callee);
2512 istate->set_callee_entry_point(callee->from_interpreted_entry());
2513 #ifdef VM_JVMTI
2514 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
2515 istate->set_callee_entry_point(callee->interpreter_entry());
2516 }
2517 #endif /* VM_JVMTI */
2518 istate->set_bcp_advance(5);
2519 UPDATE_PC_AND_RETURN(0); // I'll be back...
2520 }
2521
2522 // this could definitely be cleaned up QQQ
2523 Method* callee;
2524 Klass* iclass = cache->f1_as_klass();
2525 // InstanceKlass* interface = (InstanceKlass*) iclass;
2526 // get receiver
2527 int parms = cache->parameter_size();
2528 oop rcvr = STACK_OBJECT(-parms);
2529 CHECK_NULL(rcvr);
2530 InstanceKlass* int2 = (InstanceKlass*) rcvr->klass();
2531 itableOffsetEntry* ki = (itableOffsetEntry*) int2->start_of_itable();
2532 int i;
2533 for ( i = 0 ; i < int2->itable_length() ; i++, ki++ ) {
2534 if (ki->interface_klass() == iclass) break;
2535 }
2536 // If the interface isn't found, this class doesn't implement this
2537 // interface. The link resolver checks this but only for the first
2538 // time this interface is called.
2539 if (i == int2->itable_length()) {
2540 VM_JAVA_ERROR(vmSymbols::java_lang_IncompatibleClassChangeError(), "", note_no_trap);
2541 }
2542 int mindex = cache->f2_as_index();
2543 itableMethodEntry* im = ki->first_method_entry(rcvr->klass());
2544 callee = im[mindex].method();
2545 if (callee == NULL) {
2546 VM_JAVA_ERROR(vmSymbols::java_lang_AbstractMethodError(), "", note_no_trap);
2547 }
2548
2549 // Profile virtual call.
2550 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());
2551
2552 istate->set_callee(callee);
2553 istate->set_callee_entry_point(callee->from_interpreted_entry());
2554 #ifdef VM_JVMTI
2555 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
2556 istate->set_callee_entry_point(callee->interpreter_entry());
2557 }
2558 #endif /* VM_JVMTI */
2559 istate->set_bcp_advance(5);
2560 UPDATE_PC_AND_RETURN(0); // I'll be back...
2561 }
2562
2563 CASE(_invokevirtual):
2564 CASE(_invokespecial):
2565 CASE(_invokestatic): {
2566 u2 index = Bytes::get_native_u2(pc+1);
2567
2568 ConstantPoolCacheEntry* cache = cp->entry_at(index);
2569 // QQQ Need to make this as inlined as possible. Probably need to split all the bytecode cases
2570 // out so c++ compiler has a chance for constant prop to fold everything possible away.
2571
2572 if (!cache->is_resolved((Bytecodes::Code)opcode)) {
2573 CALL_VM(InterpreterRuntime::resolve_invoke(THREAD, (Bytecodes::Code)opcode),
2574 handle_exception);
2575 cache = cp->entry_at(index);
2576 }
2577
2578 istate->set_msg(call_method);
2579 {
2580 Method* callee;
2581 if ((Bytecodes::Code)opcode == Bytecodes::_invokevirtual) {
2582 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
2583 if (cache->is_vfinal()) {
2584 callee = cache->f2_as_vfinal_method();
2585 // Profile final call.
2586 BI_PROFILE_UPDATE_FINALCALL();
2587 } else {
2588 // get receiver
2589 int parms = cache->parameter_size();
2590 // this works but needs a resourcemark and seems to create a vtable on every call:
2591 // Method* callee = rcvr->klass()->vtable()->method_at(cache->f2_as_index());
2592 //
2593 // this fails with an assert
2594 // InstanceKlass* rcvrKlass = InstanceKlass::cast(STACK_OBJECT(-parms)->klass());
2595 // but this works
2596 oop rcvr = STACK_OBJECT(-parms);
2597 VERIFY_OOP(rcvr);
2598 InstanceKlass* rcvrKlass = (InstanceKlass*)rcvr->klass();
2599 /*
2600 Executing this code in java.lang.String:
2601 public String(char value[]) {
2602 this.count = value.length;
2603 this.value = (char[])value.clone();
2604 }
2605
2606 a find on rcvr->klass() reports:
2607 {type array char}{type array class}
2608 - klass: {other class}
2609
2610 but using InstanceKlass::cast(STACK_OBJECT(-parms)->klass()) causes in assertion failure
2611 because rcvr->klass()->oop_is_instance() == 0
2612 However it seems to have a vtable in the right location. Huh?
2613
2614 */
2615 callee = (Method*) rcvrKlass->start_of_vtable()[ cache->f2_as_index()];
2616 // Profile virtual call.
2617 BI_PROFILE_UPDATE_VIRTUALCALL(rcvr->klass());
2618 }
2619 } else {
2620 if ((Bytecodes::Code)opcode == Bytecodes::_invokespecial) {
2621 CHECK_NULL(STACK_OBJECT(-(cache->parameter_size())));
2622 }
2623 callee = cache->f1_as_method();
2624
2625 // Profile call.
2626 BI_PROFILE_UPDATE_CALL();
2627 }
2628
2629 istate->set_callee(callee);
2630 istate->set_callee_entry_point(callee->from_interpreted_entry());
2631 #ifdef VM_JVMTI
2632 if (JvmtiExport::can_post_interpreter_events() && THREAD->is_interp_only_mode()) {
2633 istate->set_callee_entry_point(callee->interpreter_entry());
2634 }
2635 #endif /* VM_JVMTI */
2636 istate->set_bcp_advance(3);
2637 UPDATE_PC_AND_RETURN(0); // I'll be back...
2638 }
2639 }
2640
2641 /* Allocate memory for a new java object. */
2642
2643 CASE(_newarray): {
2644 BasicType atype = (BasicType) *(pc+1);
2645 jint size = STACK_INT(-1);
2646 CALL_VM(InterpreterRuntime::newarray(THREAD, atype, size),
2647 handle_exception);
2648 // Must prevent reordering of stores for object initialization
2649 // with stores that publish the new object.
2650 OrderAccess::storestore();
2651 SET_STACK_OBJECT(THREAD->vm_result(), -1);
2652 THREAD->set_vm_result(NULL);
2653
2654 UPDATE_PC_AND_CONTINUE(2);
2655 }
2656
2657 /* Throw an exception. */
2658
2659 CASE(_athrow): {
2660 oop except_oop = STACK_OBJECT(-1);
2661 CHECK_NULL(except_oop);
2662 // set pending_exception so we use common code
2663 THREAD->set_pending_exception(except_oop, NULL, 0);
2664 goto handle_exception;
2665 }
2666
2667 /* goto and jsr. They are exactly the same except jsr pushes
2668 * the address of the next instruction first.
2669 */
2670
2671 CASE(_jsr): {
2672 /* push bytecode index on stack */
2673 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 3), 0);
2674 MORE_STACK(1);
2675 /* FALL THROUGH */
2676 }
2677
2678 CASE(_goto):
2679 {
2680 int16_t offset = (int16_t)Bytes::get_Java_u2(pc + 1);
2681 // Profile jump.
2682 BI_PROFILE_UPDATE_JUMP();
2683 address branch_pc = pc;
2684 UPDATE_PC(offset);
2685 DO_BACKEDGE_CHECKS(offset, branch_pc);
2686 CONTINUE;
2687 }
2688
2689 CASE(_jsr_w): {
2690 /* push return address on the stack */
2691 SET_STACK_ADDR(((address)pc - (intptr_t)(istate->method()->code_base()) + 5), 0);
2692 MORE_STACK(1);
2693 /* FALL THROUGH */
2694 }
2695
2696 CASE(_goto_w):
2697 {
2698 int32_t offset = Bytes::get_Java_u4(pc + 1);
2699 // Profile jump.
2700 BI_PROFILE_UPDATE_JUMP();
2701 address branch_pc = pc;
2702 UPDATE_PC(offset);
2703 DO_BACKEDGE_CHECKS(offset, branch_pc);
2704 CONTINUE;
2705 }
2706
2707 /* return from a jsr or jsr_w */
2708
2709 CASE(_ret): {
2710 // Profile ret.
2711 BI_PROFILE_UPDATE_RET(/*bci=*/((int)(intptr_t)(LOCALS_ADDR(pc[1]))));
2712 // Now, update the pc.
2713 pc = istate->method()->code_base() + (intptr_t)(LOCALS_ADDR(pc[1]));
2714 UPDATE_PC_AND_CONTINUE(0);
2715 }
2716
2717 /* debugger breakpoint */
2718
2719 CASE(_breakpoint): {
2720 Bytecodes::Code original_bytecode;
2721 DECACHE_STATE();
2722 SET_LAST_JAVA_FRAME();
2723 original_bytecode = InterpreterRuntime::get_original_bytecode_at(THREAD,
2724 METHOD, pc);
2725 RESET_LAST_JAVA_FRAME();
2726 CACHE_STATE();
2727 if (THREAD->has_pending_exception()) goto handle_exception;
2728 CALL_VM(InterpreterRuntime::_breakpoint(THREAD, METHOD, pc),
2729 handle_exception);
2730
2731 opcode = (jubyte)original_bytecode;
2732 goto opcode_switch;
2733 }
2734
2735 DEFAULT:
2736 fatal(err_msg("Unimplemented opcode %d = %s", opcode,
2737 Bytecodes::name((Bytecodes::Code)opcode)));
2738 goto finish;
2739
2740 } /* switch(opc) */
2741
2742
2743 #ifdef USELABELS
2744 check_for_exception:
2745 #endif
2746 {
2747 if (!THREAD->has_pending_exception()) {
2748 CONTINUE;
2749 }
2750 /* We will be gcsafe soon, so flush our state. */
2751 DECACHE_PC();
2752 goto handle_exception;
2753 }
2754 do_continue: ;
2755
2756 } /* while (1) interpreter loop */
2757
2758
2759 // An exception exists in the thread state see whether this activation can handle it
2760 handle_exception: {
2761
2762 HandleMarkCleaner __hmc(THREAD);
2763 Handle except_oop(THREAD, THREAD->pending_exception());
2764 // Prevent any subsequent HandleMarkCleaner in the VM
2765 // from freeing the except_oop handle.
2766 HandleMark __hm(THREAD);
2767
2768 THREAD->clear_pending_exception();
2769 assert(except_oop(), "No exception to process");
2770 intptr_t continuation_bci;
2771 // expression stack is emptied
2772 topOfStack = istate->stack_base() - Interpreter::stackElementWords;
2773 CALL_VM(continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(THREAD, except_oop()),
2774 handle_exception);
2775
2776 except_oop = THREAD->vm_result();
2777 THREAD->set_vm_result(NULL);
2778 if (continuation_bci >= 0) {
2779 // Place exception on top of stack
2780 SET_STACK_OBJECT(except_oop(), 0);
2781 MORE_STACK(1);
2782 pc = METHOD->code_base() + continuation_bci;
2783 if (TraceExceptions) {
2784 ttyLocker ttyl;
2785 ResourceMark rm;
2786 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), p2i(except_oop()));
2787 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
2788 tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT,
2789 (int)(istate->bcp() - METHOD->code_base()),
2790 (int)continuation_bci, p2i(THREAD));
2791 }
2792 // for AbortVMOnException flag
2793 NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
2794
2795 // Update profiling data.
2796 BI_PROFILE_ALIGN_TO_CURRENT_BCI();
2797 goto run;
2798 }
2799 if (TraceExceptions) {
2800 ttyLocker ttyl;
2801 ResourceMark rm;
2802 tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", except_oop->print_value_string(), p2i(except_oop()));
2803 tty->print_cr(" thrown in interpreter method <%s>", METHOD->print_value_string());
2804 tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT,
2805 (int)(istate->bcp() - METHOD->code_base()),
2806 p2i(THREAD));
2807 }
2808 // for AbortVMOnException flag
2809 NOT_PRODUCT(Exceptions::debug_check_abort(except_oop));
2810 // No handler in this activation, unwind and try again
2811 THREAD->set_pending_exception(except_oop(), NULL, 0);
2812 goto handle_return;
2813 } // handle_exception:
2814
2815 // Return from an interpreter invocation with the result of the interpretation
2816 // on the top of the Java Stack (or a pending exception)
2817
2818 handle_Pop_Frame: {
2819
2820 // We don't really do anything special here except we must be aware
2821 // that we can get here without ever locking the method (if sync).
2822 // Also we skip the notification of the exit.
2823
2824 istate->set_msg(popping_frame);
2825 // Clear pending so while the pop is in process
2826 // we don't start another one if a call_vm is done.
2827 THREAD->clr_pop_frame_pending();
2828 // Let interpreter (only) see the we're in the process of popping a frame
2829 THREAD->set_pop_frame_in_process();
2830
2831 goto handle_return;
2832
2833 } // handle_Pop_Frame
2834
2835 // ForceEarlyReturn ends a method, and returns to the caller with a return value
2836 // given by the invoker of the early return.
2837 handle_Early_Return: {
2838
2839 istate->set_msg(early_return);
2840
2841 // Clear expression stack.
2842 topOfStack = istate->stack_base() - Interpreter::stackElementWords;
2843
2844 JvmtiThreadState *ts = THREAD->jvmti_thread_state();
2845
2846 // Push the value to be returned.
2847 switch (istate->method()->result_type()) {
2848 case T_BOOLEAN:
2849 case T_SHORT:
2850 case T_BYTE:
2851 case T_CHAR:
2852 case T_INT:
2853 SET_STACK_INT(ts->earlyret_value().i, 0);
2854 MORE_STACK(1);
2855 break;
2856 case T_LONG:
2857 SET_STACK_LONG(ts->earlyret_value().j, 1);
2858 MORE_STACK(2);
2859 break;
2860 case T_FLOAT:
2861 SET_STACK_FLOAT(ts->earlyret_value().f, 0);
2862 MORE_STACK(1);
2863 break;
2864 case T_DOUBLE:
2865 SET_STACK_DOUBLE(ts->earlyret_value().d, 1);
2866 MORE_STACK(2);
2867 break;
2868 case T_ARRAY:
2869 case T_OBJECT:
2870 SET_STACK_OBJECT(ts->earlyret_oop(), 0);
2871 MORE_STACK(1);
2872 break;
2873 }
2874
2875 ts->clr_earlyret_value();
2876 ts->set_earlyret_oop(NULL);
2877 ts->clr_earlyret_pending();
2878
2879 // Fall through to handle_return.
2880
2881 } // handle_Early_Return
2882
2883 handle_return: {
2884 // A storestore barrier is required to order initialization of
2885 // final fields with publishing the reference to the object that
2886 // holds the field. Without the barrier the value of final fields
2887 // can be observed to change.
2888 OrderAccess::storestore();
2889
2890 DECACHE_STATE();
2891
2892 bool suppress_error = istate->msg() == popping_frame || istate->msg() == early_return;
2893 bool suppress_exit_event = THREAD->has_pending_exception() || istate->msg() == popping_frame;
2894 Handle original_exception(THREAD, THREAD->pending_exception());
2895 Handle illegal_state_oop(THREAD, NULL);
2896
2897 // We'd like a HandleMark here to prevent any subsequent HandleMarkCleaner
2898 // in any following VM entries from freeing our live handles, but illegal_state_oop
2899 // isn't really allocated yet and so doesn't become live until later and
2900 // in unpredicatable places. Instead we must protect the places where we enter the
2901 // VM. It would be much simpler (and safer) if we could allocate a real handle with
2902 // a NULL oop in it and then overwrite the oop later as needed. This isn't
2903 // unfortunately isn't possible.
2904
2905 THREAD->clear_pending_exception();
2906
2907 //
2908 // As far as we are concerned we have returned. If we have a pending exception
2909 // that will be returned as this invocation's result. However if we get any
2910 // exception(s) while checking monitor state one of those IllegalMonitorStateExceptions
2911 // will be our final result (i.e. monitor exception trumps a pending exception).
2912 //
2913
2914 // If we never locked the method (or really passed the point where we would have),
2915 // there is no need to unlock it (or look for other monitors), since that
2916 // could not have happened.
2917
2918 if (THREAD->do_not_unlock()) {
2919
2920 // Never locked, reset the flag now because obviously any caller must
2921 // have passed their point of locking for us to have gotten here.
2922
2923 THREAD->clr_do_not_unlock();
2924 } else {
2925 // At this point we consider that we have returned. We now check that the
2926 // locks were properly block structured. If we find that they were not
2927 // used properly we will return with an illegal monitor exception.
2928 // The exception is checked by the caller not the callee since this
2929 // checking is considered to be part of the invocation and therefore
2930 // in the callers scope (JVM spec 8.13).
2931 //
2932 // Another weird thing to watch for is if the method was locked
2933 // recursively and then not exited properly. This means we must
2934 // examine all the entries in reverse time(and stack) order and
2935 // unlock as we find them. If we find the method monitor before
2936 // we are at the initial entry then we should throw an exception.
2937 // It is not clear the template based interpreter does this
2938 // correctly
2939
2940 BasicObjectLock* base = istate->monitor_base();
2941 BasicObjectLock* end = (BasicObjectLock*) istate->stack_base();
2942 bool method_unlock_needed = METHOD->is_synchronized();
2943 // We know the initial monitor was used for the method don't check that
2944 // slot in the loop
2945 if (method_unlock_needed) base--;
2946
2947 // Check all the monitors to see they are unlocked. Install exception if found to be locked.
2948 while (end < base) {
2949 oop lockee = end->obj();
2950 if (lockee != NULL) {
2951 BasicLock* lock = end->lock();
2952 markOop header = lock->displaced_header();
2953 end->set_obj(NULL);
2954
2955 if (!lockee->mark()->has_bias_pattern()) {
2956 // If it isn't recursive we either must swap old header or call the runtime
2957 if (header != NULL) {
2958 if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
2959 // restore object for the slow case
2960 end->set_obj(lockee);
2961 {
2962 // Prevent any HandleMarkCleaner from freeing our live handles
2963 HandleMark __hm(THREAD);
2964 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end));
2965 }
2966 }
2967 }
2968 }
2969 // One error is plenty
2970 if (illegal_state_oop() == NULL && !suppress_error) {
2971 {
2972 // Prevent any HandleMarkCleaner from freeing our live handles
2973 HandleMark __hm(THREAD);
2974 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
2975 }
2976 assert(THREAD->has_pending_exception(), "Lost our exception!");
2977 illegal_state_oop = THREAD->pending_exception();
2978 THREAD->clear_pending_exception();
2979 }
2980 }
2981 end++;
2982 }
2983 // Unlock the method if needed
2984 if (method_unlock_needed) {
2985 if (base->obj() == NULL) {
2986 // The method is already unlocked this is not good.
2987 if (illegal_state_oop() == NULL && !suppress_error) {
2988 {
2989 // Prevent any HandleMarkCleaner from freeing our live handles
2990 HandleMark __hm(THREAD);
2991 CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
2992 }
2993 assert(THREAD->has_pending_exception(), "Lost our exception!");
2994 illegal_state_oop = THREAD->pending_exception();
2995 THREAD->clear_pending_exception();
2996 }
2997 } else {
2998 //
2999 // The initial monitor is always used for the method
3000 // However if that slot is no longer the oop for the method it was unlocked
3001 // and reused by something that wasn't unlocked!
3002 //
3003 // deopt can come in with rcvr dead because c2 knows
3004 // its value is preserved in the monitor. So we can't use locals[0] at all
3005 // and must use first monitor slot.
3006 //
3007 oop rcvr = base->obj();
3008 if (rcvr == NULL) {
3009 if (!suppress_error) {
3010 VM_JAVA_ERROR_NO_JUMP(vmSymbols::java_lang_NullPointerException(), "", note_nullCheck_trap);
3011 illegal_state_oop = THREAD->pending_exception();
3012 THREAD->clear_pending_exception();
3013 }
3014 } else if (UseHeavyMonitors) {
3015 {
3016 // Prevent any HandleMarkCleaner from freeing our live handles.
3017 HandleMark __hm(THREAD);
3018 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
3019 }
3020 if (THREAD->has_pending_exception()) {
3021 if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
3022 THREAD->clear_pending_exception();
3023 }
3024 } else {
3025 BasicLock* lock = base->lock();
3026 markOop header = lock->displaced_header();
3027 base->set_obj(NULL);
3028
3029 if (!rcvr->mark()->has_bias_pattern()) {
3030 base->set_obj(NULL);
3031 // If it isn't recursive we either must swap old header or call the runtime
3032 if (header != NULL) {
3033 if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
3034 // restore object for the slow case
3035 base->set_obj(rcvr);
3036 {
3037 // Prevent any HandleMarkCleaner from freeing our live handles
3038 HandleMark __hm(THREAD);
3039 CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
3040 }
3041 if (THREAD->has_pending_exception()) {
3042 if (!suppress_error) illegal_state_oop = THREAD->pending_exception();
3043 THREAD->clear_pending_exception();
3044 }
3045 }
3046 }
3047 }
3048 }
3049 }
3050 }
3051 }
3052 // Clear the do_not_unlock flag now.
3053 THREAD->clr_do_not_unlock();
3054
3055 //
3056 // Notify jvmti/jvmdi
3057 //
3058 // NOTE: we do not notify a method_exit if we have a pending exception,
3059 // including an exception we generate for unlocking checks. In the former
3060 // case, JVMDI has already been notified by our call for the exception handler
3061 // and in both cases as far as JVMDI is concerned we have already returned.
3062 // If we notify it again JVMDI will be all confused about how many frames
3063 // are still on the stack (4340444).
3064 //
3065 // NOTE Further! It turns out the the JVMTI spec in fact expects to see
3066 // method_exit events whenever we leave an activation unless it was done
3067 // for popframe. This is nothing like jvmdi. However we are passing the
3068 // tests at the moment (apparently because they are jvmdi based) so rather
3069 // than change this code and possibly fail tests we will leave it alone
3070 // (with this note) in anticipation of changing the vm and the tests
3071 // simultaneously.
3072
3073
3074 //
3075 suppress_exit_event = suppress_exit_event || illegal_state_oop() != NULL;
3076
3077
3078
3079 #ifdef VM_JVMTI
3080 if (_jvmti_interp_events) {
3081 // Whenever JVMTI puts a thread in interp_only_mode, method
3082 // entry/exit events are sent for that thread to track stack depth.
3083 if ( !suppress_exit_event && THREAD->is_interp_only_mode() ) {
3084 {
3085 // Prevent any HandleMarkCleaner from freeing our live handles
3086 HandleMark __hm(THREAD);
3087 CALL_VM_NOCHECK(InterpreterRuntime::post_method_exit(THREAD));
3088 }
3089 }
3090 }
3091 #endif /* VM_JVMTI */
3092
3093 //
3094 // See if we are returning any exception
3095 // A pending exception that was pending prior to a possible popping frame
3096 // overrides the popping frame.
3097 //
3098 assert(!suppress_error || (suppress_error && illegal_state_oop() == NULL), "Error was not suppressed");
3099 if (illegal_state_oop() != NULL || original_exception() != NULL) {
3100 // Inform the frame manager we have no result.
3101 istate->set_msg(throwing_exception);
3102 if (illegal_state_oop() != NULL)
3103 THREAD->set_pending_exception(illegal_state_oop(), NULL, 0);
3104 else
3105 THREAD->set_pending_exception(original_exception(), NULL, 0);
3106 UPDATE_PC_AND_RETURN(0);
3107 }
3108
3109 if (istate->msg() == popping_frame) {
3110 // Make it simpler on the assembly code and set the message for the frame pop.
3111 // returns
3112 if (istate->prev() == NULL) {
3113 // We must be returning to a deoptimized frame (because popframe only happens between
3114 // two interpreted frames). We need to save the current arguments in C heap so that
3115 // the deoptimized frame when it restarts can copy the arguments to its expression
3116 // stack and re-execute the call. We also have to notify deoptimization that this
3117 // has occurred and to pick the preserved args copy them to the deoptimized frame's
3118 // java expression stack. Yuck.
3119 //
3120 THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize),
3121 LOCALS_SLOT(METHOD->size_of_parameters() - 1));
3122 THREAD->set_popframe_condition_bit(JavaThread::popframe_force_deopt_reexecution_bit);
3123 }
3124 } else {
3125 istate->set_msg(return_from_method);
3126 }
3127
3128 // Normal return
3129 // Advance the pc and return to frame manager
3130 UPDATE_PC_AND_RETURN(1);
3131 } /* handle_return: */
3132
3133 // This is really a fatal error return
3134
3135 finish:
3136 DECACHE_TOS();
3137 DECACHE_PC();
3138
3139 return;
3140 }
3141
3142 /*
3143 * All the code following this point is only produced once and is not present
3144 * in the JVMTI version of the interpreter
3145 */
3146
3147 #ifndef VM_JVMTI
3148
3149 // This constructor should only be used to contruct the object to signal
3150 // interpreter initialization. All other instances should be created by
3151 // the frame manager.
3152 BytecodeInterpreter::BytecodeInterpreter(messages msg) {
3153 if (msg != initialize) ShouldNotReachHere();
3154 _msg = msg;
3155 _self_link = this;
3156 _prev_link = NULL;
3157 }
3158
3159 // Inline static functions for Java Stack and Local manipulation
3160
3161 // The implementations are platform dependent. We have to worry about alignment
3162 // issues on some machines which can change on the same platform depending on
3163 // whether it is an LP64 machine also.
3164 address BytecodeInterpreter::stack_slot(intptr_t *tos, int offset) {
3165 return (address) tos[Interpreter::expr_index_at(-offset)];
3166 }
3167
3168 jint BytecodeInterpreter::stack_int(intptr_t *tos, int offset) {
3169 return *((jint*) &tos[Interpreter::expr_index_at(-offset)]);
3170 }
3171
3172 jfloat BytecodeInterpreter::stack_float(intptr_t *tos, int offset) {
3173 return *((jfloat *) &tos[Interpreter::expr_index_at(-offset)]);
3174 }
3175
3176 oop BytecodeInterpreter::stack_object(intptr_t *tos, int offset) {
3177 return cast_to_oop(tos [Interpreter::expr_index_at(-offset)]);
3178 }
3179
3180 jdouble BytecodeInterpreter::stack_double(intptr_t *tos, int offset) {
3181 return ((VMJavaVal64*) &tos[Interpreter::expr_index_at(-offset)])->d;
3182 }
3183
3184 jlong BytecodeInterpreter::stack_long(intptr_t *tos, int offset) {
3185 return ((VMJavaVal64 *) &tos[Interpreter::expr_index_at(-offset)])->l;
3186 }
3187
3188 // only used for value types
3189 void BytecodeInterpreter::set_stack_slot(intptr_t *tos, address value,
3190 int offset) {
3191 *((address *)&tos[Interpreter::expr_index_at(-offset)]) = value;
3192 }
3193
3194 void BytecodeInterpreter::set_stack_int(intptr_t *tos, int value,
3195 int offset) {
3196 *((jint *)&tos[Interpreter::expr_index_at(-offset)]) = value;
3197 }
3198
3199 void BytecodeInterpreter::set_stack_float(intptr_t *tos, jfloat value,
3200 int offset) {
3201 *((jfloat *)&tos[Interpreter::expr_index_at(-offset)]) = value;
3202 }
3203
3204 void BytecodeInterpreter::set_stack_object(intptr_t *tos, oop value,
3205 int offset) {
3206 *((oop *)&tos[Interpreter::expr_index_at(-offset)]) = value;
3207 }
3208
3209 // needs to be platform dep for the 32 bit platforms.
3210 void BytecodeInterpreter::set_stack_double(intptr_t *tos, jdouble value,
3211 int offset) {
3212 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d = value;
3213 }
3214
3215 void BytecodeInterpreter::set_stack_double_from_addr(intptr_t *tos,
3216 address addr, int offset) {
3217 (((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->d =
3218 ((VMJavaVal64*)addr)->d);
3219 }
3220
3221 void BytecodeInterpreter::set_stack_long(intptr_t *tos, jlong value,
3222 int offset) {
3223 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb;
3224 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l = value;
3225 }
3226
3227 void BytecodeInterpreter::set_stack_long_from_addr(intptr_t *tos,
3228 address addr, int offset) {
3229 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset+1)])->l = 0xdeedbeeb;
3230 ((VMJavaVal64*)&tos[Interpreter::expr_index_at(-offset)])->l =
3231 ((VMJavaVal64*)addr)->l;
3232 }
3233
3234 // Locals
3235
3236 address BytecodeInterpreter::locals_slot(intptr_t* locals, int offset) {
3237 return (address)locals[Interpreter::local_index_at(-offset)];
3238 }
3239 jint BytecodeInterpreter::locals_int(intptr_t* locals, int offset) {
3240 return (jint)locals[Interpreter::local_index_at(-offset)];
3241 }
3242 jfloat BytecodeInterpreter::locals_float(intptr_t* locals, int offset) {
3243 return (jfloat)locals[Interpreter::local_index_at(-offset)];
3244 }
3245 oop BytecodeInterpreter::locals_object(intptr_t* locals, int offset) {
3246 return cast_to_oop(locals[Interpreter::local_index_at(-offset)]);
3247 }
3248 jdouble BytecodeInterpreter::locals_double(intptr_t* locals, int offset) {
3249 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d;
3250 }
3251 jlong BytecodeInterpreter::locals_long(intptr_t* locals, int offset) {
3252 return ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l;
3253 }
3254
3255 // Returns the address of locals value.
3256 address BytecodeInterpreter::locals_long_at(intptr_t* locals, int offset) {
3257 return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
3258 }
3259 address BytecodeInterpreter::locals_double_at(intptr_t* locals, int offset) {
3260 return ((address)&locals[Interpreter::local_index_at(-(offset+1))]);
3261 }
3262
3263 // Used for local value or returnAddress
3264 void BytecodeInterpreter::set_locals_slot(intptr_t *locals,
3265 address value, int offset) {
3266 *((address*)&locals[Interpreter::local_index_at(-offset)]) = value;
3267 }
3268 void BytecodeInterpreter::set_locals_int(intptr_t *locals,
3269 jint value, int offset) {
3270 *((jint *)&locals[Interpreter::local_index_at(-offset)]) = value;
3271 }
3272 void BytecodeInterpreter::set_locals_float(intptr_t *locals,
3273 jfloat value, int offset) {
3274 *((jfloat *)&locals[Interpreter::local_index_at(-offset)]) = value;
3275 }
3276 void BytecodeInterpreter::set_locals_object(intptr_t *locals,
3277 oop value, int offset) {
3278 *((oop *)&locals[Interpreter::local_index_at(-offset)]) = value;
3279 }
3280 void BytecodeInterpreter::set_locals_double(intptr_t *locals,
3281 jdouble value, int offset) {
3282 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = value;
3283 }
3284 void BytecodeInterpreter::set_locals_long(intptr_t *locals,
3285 jlong value, int offset) {
3286 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = value;
3287 }
3288 void BytecodeInterpreter::set_locals_double_from_addr(intptr_t *locals,
3289 address addr, int offset) {
3290 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->d = ((VMJavaVal64*)addr)->d;
3291 }
3292 void BytecodeInterpreter::set_locals_long_from_addr(intptr_t *locals,
3293 address addr, int offset) {
3294 ((VMJavaVal64*)&locals[Interpreter::local_index_at(-(offset+1))])->l = ((VMJavaVal64*)addr)->l;
3295 }
3296
3297 void BytecodeInterpreter::astore(intptr_t* tos, int stack_offset,
3298 intptr_t* locals, int locals_offset) {
3299 intptr_t value = tos[Interpreter::expr_index_at(-stack_offset)];
3300 locals[Interpreter::local_index_at(-locals_offset)] = value;
3301 }
3302
3303
3304 void BytecodeInterpreter::copy_stack_slot(intptr_t *tos, int from_offset,
3305 int to_offset) {
3306 tos[Interpreter::expr_index_at(-to_offset)] =
3307 (intptr_t)tos[Interpreter::expr_index_at(-from_offset)];
3308 }
3309
3310 void BytecodeInterpreter::dup(intptr_t *tos) {
3311 copy_stack_slot(tos, -1, 0);
3312 }
3313 void BytecodeInterpreter::dup2(intptr_t *tos) {
3314 copy_stack_slot(tos, -2, 0);
3315 copy_stack_slot(tos, -1, 1);
3316 }
3317
3318 void BytecodeInterpreter::dup_x1(intptr_t *tos) {
3319 /* insert top word two down */
3320 copy_stack_slot(tos, -1, 0);
3321 copy_stack_slot(tos, -2, -1);
3322 copy_stack_slot(tos, 0, -2);
3323 }
3324
3325 void BytecodeInterpreter::dup_x2(intptr_t *tos) {
3326 /* insert top word three down */
3327 copy_stack_slot(tos, -1, 0);
3328 copy_stack_slot(tos, -2, -1);
3329 copy_stack_slot(tos, -3, -2);
3330 copy_stack_slot(tos, 0, -3);
3331 }
3332 void BytecodeInterpreter::dup2_x1(intptr_t *tos) {
3333 /* insert top 2 slots three down */
3334 copy_stack_slot(tos, -1, 1);
3335 copy_stack_slot(tos, -2, 0);
3336 copy_stack_slot(tos, -3, -1);
3337 copy_stack_slot(tos, 1, -2);
3338 copy_stack_slot(tos, 0, -3);
3339 }
3340 void BytecodeInterpreter::dup2_x2(intptr_t *tos) {
3341 /* insert top 2 slots four down */
3342 copy_stack_slot(tos, -1, 1);
3343 copy_stack_slot(tos, -2, 0);
3344 copy_stack_slot(tos, -3, -1);
3345 copy_stack_slot(tos, -4, -2);
3346 copy_stack_slot(tos, 1, -3);
3347 copy_stack_slot(tos, 0, -4);
3348 }
3349
3350
3351 void BytecodeInterpreter::swap(intptr_t *tos) {
3352 // swap top two elements
3353 intptr_t val = tos[Interpreter::expr_index_at(1)];
3354 // Copy -2 entry to -1
3355 copy_stack_slot(tos, -2, -1);
3356 // Store saved -1 entry into -2
3357 tos[Interpreter::expr_index_at(2)] = val;
3358 }
3359 // --------------------------------------------------------------------------------
3360 // Non-product code
3361 #ifndef PRODUCT
3362
3363 const char* BytecodeInterpreter::C_msg(BytecodeInterpreter::messages msg) {
3364 switch (msg) {
3365 case BytecodeInterpreter::no_request: return("no_request");
3366 case BytecodeInterpreter::initialize: return("initialize");
3367 // status message to C++ interpreter
3368 case BytecodeInterpreter::method_entry: return("method_entry");
3369 case BytecodeInterpreter::method_resume: return("method_resume");
3370 case BytecodeInterpreter::got_monitors: return("got_monitors");
3371 case BytecodeInterpreter::rethrow_exception: return("rethrow_exception");
3372 // requests to frame manager from C++ interpreter
3373 case BytecodeInterpreter::call_method: return("call_method");
3374 case BytecodeInterpreter::return_from_method: return("return_from_method");
3375 case BytecodeInterpreter::more_monitors: return("more_monitors");
3376 case BytecodeInterpreter::throwing_exception: return("throwing_exception");
3377 case BytecodeInterpreter::popping_frame: return("popping_frame");
3378 case BytecodeInterpreter::do_osr: return("do_osr");
3379 // deopt
3380 case BytecodeInterpreter::deopt_resume: return("deopt_resume");
3381 case BytecodeInterpreter::deopt_resume2: return("deopt_resume2");
3382 default: return("BAD MSG");
3383 }
3384 }
3385 void
3386 BytecodeInterpreter::print() {
3387 tty->print_cr("thread: " INTPTR_FORMAT, (uintptr_t) this->_thread);
3388 tty->print_cr("bcp: " INTPTR_FORMAT, (uintptr_t) this->_bcp);
3389 tty->print_cr("locals: " INTPTR_FORMAT, (uintptr_t) this->_locals);
3390 tty->print_cr("constants: " INTPTR_FORMAT, (uintptr_t) this->_constants);
3391 {
3392 ResourceMark rm;
3393 char *method_name = _method->name_and_sig_as_C_string();
3394 tty->print_cr("method: " INTPTR_FORMAT "[ %s ]", (uintptr_t) this->_method, method_name);
3395 }
3396 tty->print_cr("mdx: " INTPTR_FORMAT, (uintptr_t) this->_mdx);
3397 tty->print_cr("stack: " INTPTR_FORMAT, (uintptr_t) this->_stack);
3398 tty->print_cr("msg: %s", C_msg(this->_msg));
3399 tty->print_cr("result_to_call._callee: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee);
3400 tty->print_cr("result_to_call._callee_entry_point: " INTPTR_FORMAT, (uintptr_t) this->_result._to_call._callee_entry_point);
3401 tty->print_cr("result_to_call._bcp_advance: %d ", this->_result._to_call._bcp_advance);
3402 tty->print_cr("osr._osr_buf: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_buf);
3403 tty->print_cr("osr._osr_entry: " INTPTR_FORMAT, (uintptr_t) this->_result._osr._osr_entry);
3404 tty->print_cr("prev_link: " INTPTR_FORMAT, (uintptr_t) this->_prev_link);
3405 tty->print_cr("native_mirror: " INTPTR_FORMAT, (uintptr_t) this->_oop_temp);
3406 tty->print_cr("stack_base: " INTPTR_FORMAT, (uintptr_t) this->_stack_base);
3407 tty->print_cr("stack_limit: " INTPTR_FORMAT, (uintptr_t) this->_stack_limit);
3408 tty->print_cr("monitor_base: " INTPTR_FORMAT, (uintptr_t) this->_monitor_base);
3409 #ifdef SPARC
3410 tty->print_cr("last_Java_pc: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_pc);
3411 tty->print_cr("frame_bottom: " INTPTR_FORMAT, (uintptr_t) this->_frame_bottom);
3412 tty->print_cr("&native_fresult: " INTPTR_FORMAT, (uintptr_t) &this->_native_fresult);
3413 tty->print_cr("native_lresult: " INTPTR_FORMAT, (uintptr_t) this->_native_lresult);
3414 #endif
3415 #if !defined(ZERO) && defined(PPC)
3416 tty->print_cr("last_Java_fp: " INTPTR_FORMAT, (uintptr_t) this->_last_Java_fp);
3417 #endif // !ZERO
3418 tty->print_cr("self_link: " INTPTR_FORMAT, (uintptr_t) this->_self_link);
3419 }
3420
3421 extern "C" {
3422 void PI(uintptr_t arg) {
3423 ((BytecodeInterpreter*)arg)->print();
3424 }
3425 }
3426 #endif // PRODUCT
3427
3428 #endif // JVMTI
3429 #endif // CC_INTERP