changeset 3829:38f1b987027f

Merge jdk7u25-b16
author andrew
date Fri, 09 Aug 2013 12:21:36 +0100
parents a6502a8a6b31 (diff) 40acb370626f (current diff)
children 4b04ad70d347
files .hgtags make/hotspot_version src/share/vm/memory/allocation.cpp src/share/vm/memory/allocation.hpp src/share/vm/opto/library_call.cpp
diffstat 82 files changed, 25918 insertions(+), 233 deletions(-) [+]
line wrap: on
line diff
--- a/.hgtags	Tue Jun 04 10:47:35 2013 -0700
+++ b/.hgtags	Fri Aug 09 12:21:36 2013 +0100
@@ -50,6 +50,7 @@
 faf94d94786b621f8e13cbcc941ca69c6d967c3f jdk7-b73
 f4b900403d6e4b0af51447bd13bbe23fe3a1dac7 jdk7-b74
 d8dd291a362acb656026a9c0a9da48501505a1e7 jdk7-b75
+b4ab978ce52c41bb7e8ee86285e6c9f28122bbe1 icedtea7-1.12
 9174bb32e934965288121f75394874eeb1fcb649 jdk7-b76
 455105fc81d941482f8f8056afaa7aa0949c9300 jdk7-b77
 e703499b4b51e3af756ae77c3d5e8b3058a14e4e jdk7-b78
@@ -87,6 +88,7 @@
 07226e9eab8f74b37346b32715f829a2ef2c3188 hs18-b01
 e7e7e36ccdb5d56edd47e5744351202d38f3b7ad jdk7-b87
 4b60f23c42231f7ecd62ad1fcb6a9ca26fa57d1b jdk7-b88
+a393ff93e7e54dd94cc4211892605a32f9c77dad icedtea7-1.13
 15836273ac2494f36ef62088bc1cb6f3f011f565 jdk7-b89
 4b60f23c42231f7ecd62ad1fcb6a9ca26fa57d1b hs18-b02
 605c9707a766ff518cd841fc04f9bb4b36a3a30b jdk7-b90
@@ -160,6 +162,7 @@
 b898f0fc3cedc972d884d31a751afd75969531cf hs21-b05
 bd586e392d93b7ed7a1636dcc8da2b6a4203a102 jdk7-b136
 bd586e392d93b7ed7a1636dcc8da2b6a4203a102 hs21-b06
+591c7dc0b2ee879f87a7b5519a5388e0d81520be icedtea-1.14
 2dbcb4a4d8dace5fe78ceb563b134f1fb296cd8f jdk7-b137
 2dbcb4a4d8dace5fe78ceb563b134f1fb296cd8f hs21-b07
 0930dc920c185afbf40fed9a655290b8e5b16783 jdk7-b138
@@ -182,6 +185,7 @@
 38fa55e5e79232d48f1bb8cf27d88bc094c9375a hs21-b16
 81d815b05abb564aa1f4100ae13491c949b9a07e jdk7-b147
 81d815b05abb564aa1f4100ae13491c949b9a07e hs21-b17
+7693eb0fce1f6b484cce96c233ea20bdad8a09e0 icedtea-2.0-branchpoint
 9b0ca45cd756d538c4c30afab280a91868eee1a5 jdk7u2-b01
 0cc8a70952c368e06de2adab1f2649a408f5e577 jdk8-b01
 31e253c1da429124bb87570ab095d9bc89850d0a jdk8-b02
@@ -210,6 +214,7 @@
 3ba0bb2e7c8ddac172f5b995aae57329cdd2dafa hs22-b10
 f17fe2f4b6aacc19cbb8ee39476f2f13a1c4d3cd jdk7u2-b13
 0744602f85c6fe62255326df595785eb2b32166d jdk7u2-b21
+f8f4d3f9b16567b91bcef4caaa8417c8de8015f0 icedtea-2.1-branchpoint
 a40d238623e5b1ab1224ea6b36dc5b23d0a53880 jdk7u3-b02
 6986bfb4c82e00b938c140f2202133350e6e73f8 jdk7u3-b03
 8e6375b46717d74d4885f839b4e72d03f357a45f jdk7u3-b04
@@ -264,6 +269,7 @@
 f92a171cf0071ca6c3fa8231d7d570377f8b2f4d hs23-b16
 f92a171cf0071ca6c3fa8231d7d570377f8b2f4d hs23-b16
 931e5f39e365a0d550d79148ff87a7f9e864d2e1 hs23-b16
+a2c5354863dcb3d147b7b6f55ef514b1bfecf920 icedtea-2.2-branchpoint
 efb5f2662c96c472caa3327090268c75a86dd9c0 jdk7u4-b13
 82e719a2e6416838b4421637646cbfd7104c7716 jdk7u4-b14
 e5f7f95411fb9e837800b4152741c962118e5d7a jdk7u5-b01
@@ -311,6 +317,7 @@
 cefe884c708aa6dfd63aff45f6c698a6bc346791 jdk7u6-b16
 270a40a57b3d05ca64070208dcbb895b5b509d8e hs23.2-b08
 7a37cec9d0d44ae6ea3d26a95407e42d99af6843 jdk7u6-b17
+354cfde7db2f1fd46312d883a63c8a76d5381bab icedtea-2.3-branchpoint
 df0df4ae5af2f40b7f630c53a86e8c3d68ef5b66 jdk7u6-b18
 1257f4373a06f788bd656ae1c7a953a026a285b9 jdk7u6-b19
 a0c2fa4baeb6aad6f33dc87b676b21345794d61e hs23.2-b09
@@ -320,6 +327,15 @@
 df57f6208cb76b4e8d1a0bd0eea3d2ad577cb79b jdk7u6-b23
 b03c2687fb16514652e79261ad68d2c601dcee62 jdk7u6-b24
 cffde29ea7cc8647f17002a4d0e94065dcd82839 jdk7u6-b30
+eede732f62dd73953dce03e003415729c6c335b2 icedtea-2.3
+c798442fa4c00ad251f6cbe989d32485845bf247 icedtea-2.3.1
+2a413d946cb1acdcbe1110098f79b7a1f267bf75 icedtea-2.3.2
+0885feeea95caa8b92f46234872f0c3839d8850b icedtea-2.3.3
+87f0aa7a8f8f0209148ad0746a3486d9cf16ebaa icedtea-2.3.4
+f2b98ad97b3c9d2d9fb98b0dd9b82506a5688e55 icedtea-2.3.5
+ad27d7c42279783903b26cd42b09741f5640f6db icedtea-2.3.6
+ad27d7c42279783903b26cd42b09741f5640f6db icedtea-2.3.6
+0000000000000000000000000000000000000000 icedtea-2.3.6
 7566374c3c89b7d99be9bcdb9342283a3bea6930 jdk7u6-b31
 f7933fecea9aa494e4032e17ff07e5fcec4b5961 jdk7u7-b10
 eeef33dc4b40f9ba50c4c9b1ac61b30f0f2523bf jdk7u7-b30
@@ -358,6 +374,14 @@
 94bf1e3dafef3cc06d3f97f81d304313ccd999ee jdk7u11-b07
 2b543aa340e4a75671fe05803fcee08bf3e136db jdk7u11-b08
 34a7b6dda06e2ff6f7e9ad563e3fc3ecd8993579 jdk7u13-b09
+0000000000000000000000000000000000000000 icedtea-2.3.6
+849ee586b0be87d4f03ed566a75e5352b647605d icedtea-2.3.6
+849ee586b0be87d4f03ed566a75e5352b647605d icedtea-2.3.6
+0000000000000000000000000000000000000000 icedtea-2.3.6
+0000000000000000000000000000000000000000 icedtea-2.3.6
+19e37453ab1be02d146ace33f19d73719c119eaf icedtea-2.3.6
+bc0de5a0ece28d5e8035cc7659cd6f71a838882a icedtea-2.3.7
+104e2c65892dc642bcad698d0b21cecb08e67c9e icedtea-2.3.8
 e0e52e35e0c53a84daadae95f626e36fd74f3eba jdk7u13-b10
 be57a8d7a1a75971c3b1e7777dcacd20f3d33264 jdk7u13-b30
 e0e52e35e0c53a84daadae95f626e36fd74f3eba jdk7u13-b20
@@ -369,6 +393,10 @@
 5b55cef461b034766f05a46640caa123aa4247d4 jdk7u15-b03
 34a7b6dda06e2ff6f7e9ad563e3fc3ecd8993579 jdk7u15-b32
 a4dfda7a2655209abb170b2fa4914dbbba89bcd3 jdk7u17-b01
+ad5a321edea22c86af5c7844ed49f7b6105ab7ff icedtea-2.3.9
+4e374ade4066e340199c6f2371769e9aa2852282 icedtea-2.3.10
+4413a836dcc7022fa64f60d7faa491aba1248edc icedtea-2.3.11
+37b254871acbef27e5f29c73982ac324e7df2b9a icedtea-2.3.12
 0d82bf449a610602b6e9ddcc9e076839d5351449 jdk7u17-b02
 7b357c079370e2fd324c229f2e24c982915c80a0 jdk7u17-b30
 22b6fd616cfe61774525a944f162bf5e7c418f03 jdk7u17-b31
--- a/.jcheck/conf	Tue Jun 04 10:47:35 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,2 +0,0 @@
-project=jdk7
-bugids=dup
--- a/agent/src/os/linux/Makefile	Tue Jun 04 10:47:35 2013 -0700
+++ b/agent/src/os/linux/Makefile	Fri Aug 09 12:21:36 2013 +0100
@@ -23,7 +23,12 @@
 #
 
 ARCH := $(shell if ([ `uname -m` = "ia64" ])  ; then echo ia64 ; elif ([ `uname -m` = "x86_64" ]) ; then echo amd64; elif ([ `uname -m` = "sparc64" ]) ; then echo sparc; else echo i386 ; fi )
-GCC      = gcc
+
+ifndef BUILD_GCC
+BUILD_GCC = gcc
+endif
+
+GCC      = $(BUILD_GCC)
 
 JAVAH    = ${JAVA_HOME}/bin/javah
 
@@ -40,7 +45,7 @@
 
 LIBS     = -lthread_db
 
-CFLAGS   = -c -fPIC -g -D_GNU_SOURCE -D$(ARCH) $(INCLUDES) -D_FILE_OFFSET_BITS=64
+CFLAGS   = -c -fPIC -g -D_GNU_SOURCE -D_$(ARCH)_ $(if $(filter $(ARCH),alpha),,-D$(ARCH)) $(INCLUDES) -D_FILE_OFFSET_BITS=64
 
 LIBSA = $(ARCH)/libsaproc.so
 
@@ -73,7 +78,7 @@
 	$(GCC) -shared $(LFLAGS_LIBSA) -o $(LIBSA) $(OBJS) $(LIBS)
 
 test.o: test.c
-	$(GCC) -c -o test.o -g -D_GNU_SOURCE -D$(ARCH) $(INCLUDES) test.c
+	$(GCC) -c -o test.o -g -D_GNU_SOURCE -D_$(ARCH)_ $(if $(filter $(ARCH),alpha),,-D$(ARCH)) $(INCLUDES) test.c
 
 test: test.o
 	$(GCC) -o test test.o -L$(ARCH) -lsaproc $(LIBS)
--- a/agent/src/os/linux/libproc.h	Tue Jun 04 10:47:35 2013 -0700
+++ b/agent/src/os/linux/libproc.h	Fri Aug 09 12:21:36 2013 +0100
@@ -36,9 +36,34 @@
   These two files define pt_regs structure differently
 */
 #ifdef _LP64
-#include "asm-sparc64/ptrace.h"
+struct pt_regs {
+	unsigned long u_regs[16]; /* globals and ins */
+	unsigned long tstate;
+	unsigned long tpc;
+	unsigned long tnpc;
+	unsigned int y;
+
+	/* We encode a magic number, PT_REGS_MAGIC, along
+	 * with the %tt (trap type) register value at trap
+	 * entry time.  The magic number allows us to identify
+	 * accurately a trap stack frame in the stack
+	 * unwinder, and the %tt value allows us to test
+	 * things like "in a system call" etc. for an arbitray
+	 * process.
+	 *
+	 * The PT_REGS_MAGIC is chosen such that it can be
+	 * loaded completely using just a sethi instruction.
+	 */
+	unsigned int magic;
+};
 #else
-#include "asm-sparc/ptrace.h"
+struct pt_regs {
+	unsigned long psr;
+	unsigned long pc;
+	unsigned long npc;
+	unsigned long y;
+	unsigned long u_regs[16]; /* globals and ins */
+};
 #endif
 
 #endif //sparc or sparcv9
--- a/make/linux/makefiles/adlc.make	Tue Jun 04 10:47:35 2013 -0700
+++ b/make/linux/makefiles/adlc.make	Fri Aug 09 12:21:36 2013 +0100
@@ -1,5 +1,5 @@
 #
-# Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
+# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 #
 # This code is free software; you can redistribute it and/or modify it
@@ -68,7 +68,9 @@
 
 # CFLAGS_WARN holds compiler options to suppress/enable warnings.
 # Compiler warnings are treated as errors
+ifneq ($(COMPILER_WARNINGS_FATAL),false)
 CFLAGS_WARN = -Werror
+endif
 CFLAGS += $(CFLAGS_WARN)
 
 OBJECTNAMES = \
@@ -133,8 +135,10 @@
 # Note that product files are updated via "mv", which is atomic.
 TEMPDIR := $(OUTDIR)/mktmp$(shell echo $$$$)
 
-# Debuggable by default
-CFLAGS += -g
+ifneq ($(DEBUG_BINARIES), true)
+  # Debuggable by default (unless already done by DEBUG_BINARIES)
+  CFLAGS += -g
+endif
 
 # Pass -D flags into ADLC.
 ADLCFLAGS += $(SYSDEFS)
--- a/make/linux/makefiles/defs.make	Tue Jun 04 10:47:35 2013 -0700
+++ b/make/linux/makefiles/defs.make	Fri Aug 09 12:21:36 2013 +0100
@@ -228,10 +228,14 @@
 # client and server subdirectories have symbolic links to ../libjsig.so
 EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-  ifeq ($(ZIP_DEBUGINFO_FILES),1)
-    EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.diz
-  else
-    EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.debuginfo
+  ifneq ($(ZERO_BUILD), true)
+    ifneq ($(STRIP_POLICY),no_strip)
+      ifeq ($(ZIP_DEBUGINFO_FILES),1)
+        EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.diz
+      else
+        EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.debuginfo
+      endif
+    endif
   endif
 endif
 EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
@@ -240,11 +244,15 @@
 ifndef BUILD_CLIENT_ONLY
 EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
 EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX)
-  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-    ifeq ($(ZIP_DEBUGINFO_FILES),1)
-      EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.diz
-    else
-      EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.debuginfo
+  ifneq ($(ZERO_BUILD), true)
+    ifneq ($(STRIP_POLICY),no_strip)
+      ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+        ifeq ($(ZIP_DEBUGINFO_FILES),1)
+          EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.diz
+        else
+          EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.debuginfo
+        endif
+      endif
     endif
   endif
 endif
@@ -253,11 +261,13 @@
   ifeq ($(ARCH_DATA_MODEL), 32)
     EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
     EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX)
-    ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-      ifeq ($(ZIP_DEBUGINFO_FILES),1)
-        EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.diz
-      else
-        EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.debuginfo
+    ifneq ($(STRIP_POLICY),no_strip)
+      ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+        ifeq ($(ZIP_DEBUGINFO_FILES),1)
+          EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.diz
+        else
+          EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.debuginfo
+	endif
       endif
     endif
   endif
@@ -270,12 +280,16 @@
 ADD_SA_BINARIES/sparc = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
                         $(EXPORT_LIB_DIR)/sa-jdi.jar 
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-  ifeq ($(ZIP_DEBUGINFO_FILES),1)
-    ADD_SA_BINARIES/x86   += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz
-    ADD_SA_BINARIES/sparc += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz
-  else
-    ADD_SA_BINARIES/x86   += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
-    ADD_SA_BINARIES/sparc += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
+  ifneq ($(ZERO_BUILD), true)
+    ifneq ($(STRIP_POLICY),no_strip)
+      ifeq ($(ZIP_DEBUGINFO_FILES),1)
+        ADD_SA_BINARIES/x86   += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz
+        ADD_SA_BINARIES/sparc += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz
+      else
+        ADD_SA_BINARIES/x86   += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
+        ADD_SA_BINARIES/sparc += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
+      endif
+    endif
   endif
 endif
 ADD_SA_BINARIES/ppc   = 
--- a/make/linux/makefiles/gcc.make	Tue Jun 04 10:47:35 2013 -0700
+++ b/make/linux/makefiles/gcc.make	Fri Aug 09 12:21:36 2013 +0100
@@ -25,20 +25,43 @@
 #------------------------------------------------------------------------
 # CC, CXX & AS
 
-# When cross-compiling the ALT_COMPILER_PATH points
-# to the cross-compilation toolset
+ifndef HOST_GCC
+ifdef CROSS_COMPILE_ARCH
+HOST_GCC = gcc
+else
+HOST_GCC = $(CC)
+endif
+endif
+
+ifndef HOST_CXX
+ifdef CROSS_COMPILE_ARCH
+HOST_CXX = g++
+else
+HOST_CXX = $(CXX)
+endif
+endif
+
+ifndef BUILD_GCC
 ifdef CROSS_COMPILE_ARCH
-CXX = $(ALT_COMPILER_PATH)/g++
-CC  = $(ALT_COMPILER_PATH)/gcc
-HOSTCXX = g++
-HOSTCC  = gcc
+BUILD_GCC  = $(ALT_COMPILER_PATH)/gcc
 else
-CXX = g++
-CC  = gcc
-HOSTCXX = $(CXX)
-HOSTCC  = $(CC)
+BUILD_GCC = gcc
+endif
 endif
 
+ifndef BUILD_CXX
+ifdef CROSS_COMPILE_ARCH
+BUILD_CXX = $(ALT_COMPILER_PATH)/g++
+else
+BUILD_CXX = g++
+endif
+endif
+
+CXX = $(BUILD_CXX)
+CC = $(BUILD_GCC)
+HOSTCXX = $(HOST_CXX)
+HOSTCC  = $(HOST_GCC)
+
 AS  = $(CC) -c
 
 # -dumpversion in gcc-2.91 shows "egcs-2.91.66". In later version, it only
@@ -61,7 +84,11 @@
 # Compiler flags
 
 # position-independent code
+ifneq ($(filter parisc ppc ppc64 s390 s390x sparc sparc64 sparcv9,$(ZERO_LIBARCH)),)
 PICFLAG = -fPIC
+else
+PICFLAG = -fpic
+endif
 
 VM_PICFLAG/LIBJVM = $(PICFLAG)
 VM_PICFLAG/AOUT   =
@@ -123,7 +150,9 @@
 endif
 
 # Compiler warnings are treated as errors
+ifneq ($(COMPILER_WARNINGS_FATAL),false)
 WARNINGS_ARE_ERRORS = -Werror
+endif
 
 # Except for a few acceptable ones
 # Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
@@ -209,47 +238,48 @@
 #------------------------------------------------------------------------
 # Debug flags
 
-# Use the stabs format for debugging information (this is the default
-# on gcc-2.91). It's good enough, has all the information about line
-# numbers and local variables, and libjvm_g.so is only about 16M.
-# Change this back to "-g" if you want the most expressive format.
-# (warning: that could easily inflate libjvm_g.so to 150M!)
-# Note: The Itanium gcc compiler crashes when using -gstabs.
-DEBUG_CFLAGS/ia64  = -g
-DEBUG_CFLAGS/amd64 = -g
-DEBUG_CFLAGS/arm   = -g
-DEBUG_CFLAGS/ppc   = -g
-DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
-ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
-DEBUG_CFLAGS += -gstabs
-endif
-
-ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
-  FASTDEBUG_CFLAGS/ia64  = -g
-  FASTDEBUG_CFLAGS/amd64 = -g
-  FASTDEBUG_CFLAGS/arm   = -g
-  FASTDEBUG_CFLAGS/ppc   = -g
-  FASTDEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
-  ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
-    FASTDEBUG_CFLAGS += -gstabs
+# DEBUG_BINARIES uses full -g debug information for all configs
+ifeq ($(DEBUG_BINARIES), true)
+  CFLAGS += -g
+else
+  # Use the stabs format for debugging information (this is the default
+  # on gcc-2.91). It's good enough, has all the information about line
+  # numbers and local variables, and libjvm_g.so is only about 16M.
+  # Change this back to "-g" if you want the most expressive format.
+  # (warning: that could easily inflate libjvm_g.so to 150M!)
+  # Note: The Itanium gcc compiler crashes when using -gstabs.
+  DEBUG_CFLAGS/ia64  = -g
+  DEBUG_CFLAGS/amd64 = -g
+  DEBUG_CFLAGS/arm   = -g
+  DEBUG_CFLAGS/ppc   = -g
+  DEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
+  ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
+    DEBUG_CFLAGS += -gstabs
   endif
-
-  OPT_CFLAGS/ia64  = -g
-  OPT_CFLAGS/amd64 = -g
-  OPT_CFLAGS/arm   = -g
-  OPT_CFLAGS/ppc   = -g
-  OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
-  ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
-    OPT_CFLAGS += -gstabs
+  
+  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+    FASTDEBUG_CFLAGS/ia64  = -g
+    FASTDEBUG_CFLAGS/amd64 = -g
+    FASTDEBUG_CFLAGS/arm   = -g
+    FASTDEBUG_CFLAGS/ppc   = -g
+    FASTDEBUG_CFLAGS/zero  = -g
+    FASTDEBUG_CFLAGS += $(DEBUG_CFLAGS/$(BUILDARCH))
+    ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
+      FASTDEBUG_CFLAGS += -gstabs
+    endif
+  
+    OPT_CFLAGS/ia64  = -g
+    OPT_CFLAGS/amd64 = -g
+    OPT_CFLAGS/arm   = -g
+    OPT_CFLAGS/ppc   = -g
+    OPT_CFLAGS/zero  = -g
+    OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
+    ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
+      OPT_CFLAGS += -gstabs
+    endif
   endif
 endif
 
-# DEBUG_BINARIES overrides everything, use full -g debug information
-ifeq ($(DEBUG_BINARIES), true)
-  DEBUG_CFLAGS = -g
-  CFLAGS += $(DEBUG_CFLAGS)
-endif
-
 # If we are building HEADLESS, pass on to VM
 # so it can set the java.awt.headless property
 ifdef HEADLESS
--- a/make/linux/makefiles/jsig.make	Tue Jun 04 10:47:35 2013 -0700
+++ b/make/linux/makefiles/jsig.make	Fri Aug 09 12:21:36 2013 +0100
@@ -62,8 +62,10 @@
                          $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $@ $< -ldl
 	$(QUIETLY) [ -f $(LIBJSIG_G) ] || { ln -s $@ $(LIBJSIG_G); }
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifneq ($(STRIP_POLICY),no_strip)
 	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO)
 	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@
+  endif
   ifeq ($(STRIP_POLICY),all_strip)
 	$(QUIETLY) $(STRIP) $@
   else
@@ -73,10 +75,12 @@
     endif
   endif
 	[ -f $(LIBJSIG_G_DEBUGINFO) ] || { ln -s $(LIBJSIG_DEBUGINFO) $(LIBJSIG_G_DEBUGINFO); }
-  ifeq ($(ZIP_DEBUGINFO_FILES),1)
+  ifneq ($(STRIP_POLICY),no_strip)
+    ifeq ($(ZIP_DEBUGINFO_FILES),1)
 	$(ZIPEXE) -q -y $(LIBJSIG_DIZ) $(LIBJSIG_DEBUGINFO) $(LIBJSIG_G_DEBUGINFO)
 	$(RM) $(LIBJSIG_DEBUGINFO) $(LIBJSIG_G_DEBUGINFO)
 	[ -f $(LIBJSIG_G_DIZ) ] || { ln -s $(LIBJSIG_DIZ) $(LIBJSIG_G_DIZ); }
+    endif
   endif
 endif
 
--- a/make/linux/makefiles/rules.make	Tue Jun 04 10:47:35 2013 -0700
+++ b/make/linux/makefiles/rules.make	Fri Aug 09 12:21:36 2013 +0100
@@ -31,7 +31,10 @@
 DEMANGLE        = $(DEMANGLER) < $@ > .$@ && mv -f .$@ $@
 
 # $(CC) is the c compiler (cc/gcc), $(CXX) is the c++ compiler (CC/g++).
-CC_COMPILE       = $(CC) $(CXXFLAGS) $(CFLAGS)
+# FIXME: $(CXXFLAGS) currently only includes preprocessor flags while
+# $(CFLAGS) includes C and C++ flags.  Ideally, there should be three
+# variables: $(CFLAGS), $(CXXFLAGS) and $(CPPFLAGS).
+CC_COMPILE       = $(CC) $(CXXFLAGS)
 CXX_COMPILE      = $(CXX) $(CXXFLAGS) $(CFLAGS)
 
 AS.S            = $(AS) $(ASFLAGS)
@@ -143,20 +146,10 @@
 
 include $(GAMMADIR)/make/altsrc.make
 
-# The non-PIC object files are only generated for 32 bit platforms.
-ifdef LP64
 %.o: %.cpp
 	@echo Compiling $<
 	$(QUIETLY) $(REMOVE_TARGET)
 	$(QUIETLY) $(COMPILE.CXX) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE)
-else
-%.o: %.cpp
-	@echo Compiling $<
-	$(QUIETLY) $(REMOVE_TARGET)
-	$(QUIETLY) $(if $(findstring $@, $(NONPIC_OBJ_FILES)), \
-	   $(subst $(VM_PICFLAG), ,$(COMPILE.CXX)) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE), \
-	   $(COMPILE.CXX) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE))
-endif
 
 %.o: %.s
 	@echo Assembling $<
--- a/make/linux/makefiles/saproc.make	Tue Jun 04 10:47:35 2013 -0700
+++ b/make/linux/makefiles/saproc.make	Fri Aug 09 12:21:36 2013 +0100
@@ -91,8 +91,10 @@
 	           -lthread_db
 	$(QUIETLY) [ -f $(LIBSAPROC_G) ] || { ln -s $@ $(LIBSAPROC_G); }
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifneq ($(STRIP_POLICY),no_strip)
 	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBSAPROC_DEBUGINFO)
 	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@
+  endif
   ifeq ($(STRIP_POLICY),all_strip)
 	$(QUIETLY) $(STRIP) $@
   else
@@ -103,9 +105,11 @@
   endif
 	[ -f $(LIBSAPROC_G_DEBUGINFO) ] || { ln -s $(LIBSAPROC_DEBUGINFO) $(LIBSAPROC_G_DEBUGINFO); }
   ifeq ($(ZIP_DEBUGINFO_FILES),1)
+    ifneq ($(STRIP_POLICY),no_strip)
 	$(ZIPEXE) -q -y $(LIBSAPROC_DIZ) $(LIBSAPROC_DEBUGINFO) $(LIBSAPROC_G_DEBUGINFO)
 	$(RM) $(LIBSAPROC_DEBUGINFO) $(LIBSAPROC_G_DEBUGINFO)
 	[ -f $(LIBSAPROC_G_DIZ) ] || { ln -s $(LIBSAPROC_DIZ) $(LIBSAPROC_G_DIZ); }
+    endif
   endif
 endif
 
--- a/make/linux/makefiles/vm.make	Tue Jun 04 10:47:35 2013 -0700
+++ b/make/linux/makefiles/vm.make	Fri Aug 09 12:21:36 2013 +0100
@@ -97,6 +97,10 @@
   ${HS_LIB_ARCH}     \
   ${VM_DISTRO}
 
+ifdef DERIVATIVE_ID
+CPPFLAGS += -DDERIVATIVE_ID="\"$(DERIVATIVE_ID)\""
+endif
+
 # This is VERY important! The version define must only be supplied to vm_version.o
 # If not, ccache will not re-use the cache at all, since the version string might contain
 # a time and date. 
@@ -108,6 +112,10 @@
 endif
 endif
 
+ifdef DISTRIBUTION_ID
+CPPFLAGS += -DDISTRIBUTION_ID="\"$(DISTRIBUTION_ID)\""
+endif
+
 # CFLAGS_WARN holds compiler options to suppress/enable warnings.
 CFLAGS += $(CFLAGS_WARN/BYFILE)
 
@@ -215,7 +223,7 @@
 # Locate all source files in the given directory, excluding files in Src_Files_EXCLUDE.
 define findsrc
 	$(notdir $(shell find $(1)/. ! -name . -prune \
-		-a \( -name \*.c -o -name \*.cpp -o -name \*.s \) \
+		-a \( -name \*.c -o -name \*.cpp -o -name \*.s -o -name \*.S \) \
 		-a ! \( -name DUMMY $(addprefix -o -name ,$(Src_Files_EXCLUDE)) \)))
 endef
 
@@ -242,13 +250,15 @@
 vm.def: $(Res_Files) $(Obj_Files)
 	sh $(GAMMADIR)/make/linux/makefiles/build_vm_def.sh *.o > $@
 
-ifeq ($(SHARK_BUILD), true)
-  STATIC_CXX = false
-else
-  ifeq ($(ZERO_LIBARCH), ppc64)
+ifeq ($(STATIC_CXX),)
+  ifeq ($(SHARK_BUILD), true)
     STATIC_CXX = false
   else
-    STATIC_CXX = true
+    ifeq ($(ZERO_LIBARCH), ppc64)
+      STATIC_CXX = false
+    else
+      STATIC_CXX = true
+    endif
   endif
 endif
 
@@ -338,8 +348,10 @@
 	}
 ifeq ($(CROSS_COMPILE_ARCH),)
   ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+    ifneq ($(STRIP_POLICY),no_strip)
 	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DEBUGINFO)
 	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DEBUGINFO) $@
+    endif
     ifeq ($(STRIP_POLICY),all_strip)
 	$(QUIETLY) $(STRIP) $@
     else
@@ -350,9 +362,11 @@
     endif
 	$(QUIETLY) [ -f $(LIBJVM_G_DEBUGINFO) ] || ln -s $(LIBJVM_DEBUGINFO) $(LIBJVM_G_DEBUGINFO)
     ifeq ($(ZIP_DEBUGINFO_FILES),1)
+      ifneq ($(STRIP_POLICY),no_strip)
 	$(ZIPEXE) -q -y $(LIBJVM_DIZ) $(LIBJVM_DEBUGINFO) $(LIBJVM_G_DEBUGINFO)
 	$(RM) $(LIBJVM_DEBUGINFO) $(LIBJVM_G_DEBUGINFO)
 	[ -f $(LIBJVM_G_DIZ) ] || { ln -s $(LIBJVM_DIZ) $(LIBJVM_G_DIZ); }
+      endif
     endif
   endif
 endif
--- a/make/linux/makefiles/zeroshark.make	Tue Jun 04 10:47:35 2013 -0700
+++ b/make/linux/makefiles/zeroshark.make	Fri Aug 09 12:21:36 2013 +0100
@@ -25,6 +25,41 @@
 
 # Setup common to Zero (non-Shark) and Shark versions of VM
 
+ifeq ($(ZERO_LIBARCH),arm)
+
+Obj_Files += asm_helper.o
+Obj_Files += cppInterpreter_arm.o
+Obj_Files += thumb2.o
+
+CFLAGS += -DHOTSPOT_ASM
+
+cppInterpreter_arm.o:	offsets_arm.s bytecodes_arm.s
+thumb2.o:		offsets_arm.s
+
+offsets_arm.s:	mkoffsets
+	@echo Generating assembler offsets
+	./mkoffsets > $@
+
+bytecodes_arm.s: bytecodes_arm.def mkbc
+	@echo Generating ARM assembler bytecode sequences
+	$(CC_COMPILE) $(CFLAGS) -E -x c++ - < $< | ./mkbc - $@ $(COMPILE_DONE)
+
+mkbc:	$(GAMMADIR)/tools/mkbc.c
+	@echo Compiling mkbc tool
+	$(CC_COMPILE) -o $@ $< $(COMPILE_DONE)
+
+mkoffsets:	asm_helper.cpp
+	@echo Compiling offset generator
+	$(QUIETLY) $(REMOVE_TARGET)
+	$(CC_COMPILE) $(CFLAGS) -DSTATIC_OFFSETS -o $@ $< $(COMPILE_DONE)
+
+endif
+
+%.o: %.S
+	@echo Assembling $<
+	$(QUIETLY) $(REMOVE_TARGET)
+	$(COMPILE.CC) $(CFLAGS) -o $@ $< $(COMPILE_DONE)
+
 # The copied fdlibm routines in sharedRuntimeTrig.o must not be optimized
 OPT_CFLAGS/sharedRuntimeTrig.o = $(OPT_CFLAGS/NOOPT)
 # The copied fdlibm routines in sharedRuntimeTrans.o must not be optimized
--- a/make/linux/platform_zero.in	Tue Jun 04 10:47:35 2013 -0700
+++ b/make/linux/platform_zero.in	Fri Aug 09 12:21:36 2013 +0100
@@ -14,4 +14,4 @@
 
 gnu_dis_arch = zero
 
-sysdefs = -DLINUX -D_GNU_SOURCE -DCC_INTERP -DZERO -D@ZERO_ARCHDEF@ -DZERO_LIBARCH=\"@ZERO_LIBARCH@\"
+sysdefs = -DLINUX -D_GNU_SOURCE -DCC_INTERP -DZERO -DTARGET_ARCH_NYI_6939861=1 -D@ZERO_ARCHDEF@ -DZERO_LIBARCH=\"@ZERO_LIBARCH@\"
--- a/make/solaris/makefiles/adlc.make	Tue Jun 04 10:47:35 2013 -0700
+++ b/make/solaris/makefiles/adlc.make	Fri Aug 09 12:21:36 2013 +0100
@@ -75,8 +75,10 @@
 
 # CFLAGS_WARN holds compiler options to suppress/enable warnings.
 # Compiler warnings are treated as errors
-ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
-  CFLAGS_WARN = +w -errwarn
+ifneq ($(COMPILER_WARNINGS_FATAL),false)
+  ifeq ($(shell expr $(COMPILER_REV_NUMERIC) \>= 509), 1)
+    CFLAGS_WARN = +w -errwarn
+  endif
 endif
 CFLAGS += $(CFLAGS_WARN)
 
--- a/make/solaris/makefiles/dtrace.make	Tue Jun 04 10:47:35 2013 -0700
+++ b/make/solaris/makefiles/dtrace.make	Fri Aug 09 12:21:36 2013 +0100
@@ -116,6 +116,7 @@
 		$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
 	[ -f $(XLIBJVM_DB_G) ] || { ln -s $(LIBJVM_DB) $(XLIBJVM_DB_G); }
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifneq ($(STRIP_POLICY),no_strip)
 # gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set.
 # Clear the SHF_ALLOC flag (if set) from empty section headers.
 # An empty section header has sh_addr == 0 and sh_size == 0.
@@ -129,6 +130,7 @@
 # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR) is not
 # in the link name:
 	( cd $(XLIBJVM_DIR) && $(ADD_GNU_DEBUGLINK) $(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB) )
+  endif
   ifeq ($(STRIP_POLICY),all_strip)
 	$(QUIETLY) $(STRIP) $@
   else
@@ -139,11 +141,13 @@
   endif
 	[ -f $(XLIBJVM_DB_G_DEBUGINFO) ] || { cd $(XLIBJVM_DIR) && ln -s $(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB_G_DEBUGINFO); }
   ifeq ($(ZIP_DEBUGINFO_FILES),1)
+    ifneq ($(STRIP_POLICY),no_strip)
 # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR) is not
 # in the archived name:
 	( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB_G_DEBUGINFO) )
 	$(RM) $(XLIBJVM_DB_DEBUGINFO) $(XLIBJVM_DB_G_DEBUGINFO)
 	[ -f $(XLIBJVM_DB_G_DIZ) ] || { cd $(XLIBJVM_DIR) && ln -s $(LIBJVM_DB_DIZ) $(LIBJVM_DB_G_DIZ); }
+    endif
   endif
 endif
 
@@ -154,6 +158,7 @@
 		$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
 	[ -f $(XLIBJVM_DTRACE_G) ] || { ln -s $(LIBJVM_DTRACE) $(XLIBJVM_DTRACE_G); }
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifneq ($(STRIP_POLICY),no_strip)
 # Clear the SHF_ALLOC flag (if set) from empty section headers.
 	$(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
 	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DTRACE_DEBUGINFO)
@@ -162,6 +167,7 @@
 # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR) is not
 # in the link name:
 	( cd $(XLIBJVM_DIR) && $(ADD_GNU_DEBUGLINK) $(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE) )
+  endif
   ifeq ($(STRIP_POLICY),all_strip)
 	$(QUIETLY) $(STRIP) $@
   else
@@ -172,11 +178,13 @@
   endif
 	[ -f $(XLIBJVM_DTRACE_G_DEBUGINFO) ] || { cd $(XLIBJVM_DIR) && ln -s $(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE_G_DEBUGINFO); }
   ifeq ($(ZIP_DEBUGINFO_FILES),1)
+    ifneq ($(STRIP_POLICY),no_strip)
 # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR) is not
 # in the archived name:
 	( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE_G_DEBUGINFO) )
 	$(RM) $(XLIBJVM_DTRACE_DEBUGINFO) $(XLIBJVM_DTRACE_G_DEBUGINFO)
 	[ -f $(XLIBJVM_DTRACE_G_DIZ) ] || { cd $(XLIBJVM_DIR) && ln -s $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_G_DIZ); }
+    endif
   endif
 endif
 
@@ -226,12 +234,14 @@
 		$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
 	[ -f $(LIBJVM_DB_G) ] || { ln -s $@ $(LIBJVM_DB_G); }
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifneq ($(STRIP_POLICY),no_strip)
 # Clear the SHF_ALLOC flag (if set) from empty section headers.
 	$(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
 	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DB_DEBUGINFO)
 # $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
 #	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $@
 	$(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBJVM_DB_DEBUGINFO) $@
+  endif
   ifeq ($(STRIP_POLICY),all_strip)
 	$(QUIETLY) $(STRIP) $@
   else
@@ -242,9 +252,11 @@
   endif
 	[ -f $(LIBJVM_DB_G_DEBUGINFO) ] || { ln -s $(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB_G_DEBUGINFO); }
   ifeq ($(ZIP_DEBUGINFO_FILES),1)
+    ifneq ($(STRIP_POLICY),no_strip)
 	$(ZIPEXE) -q -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB_G_DEBUGINFO)
 	$(RM) $(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB_G_DEBUGINFO)
 	[ -f $(LIBJVM_DB_G_DIZ) ] || { ln -s $(LIBJVM_DB_DIZ) $(LIBJVM_DB_G_DIZ); }
+    endif
   endif
 endif
 
@@ -254,12 +266,14 @@
 		$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
 	[ -f $(LIBJVM_DTRACE_G) ] || { ln -s $@ $(LIBJVM_DTRACE_G); }
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifneq ($(STRIP_POLICY),no_strip)
 # Clear the SHF_ALLOC flag (if set) from empty section headers.
 	$(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
 	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DTRACE_DEBUGINFO)
 # $(OBJCOPY) --add-gnu-debuglink=... corrupts SUNW_* sections.
 #	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $@
 	$(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBJVM_DTRACE_DEBUGINFO) $@
+  endif
   ifeq ($(STRIP_POLICY),all_strip)
 	$(QUIETLY) $(STRIP) $@
   else
@@ -270,9 +284,11 @@
   endif
 	[ -f $(LIBJVM_DTRACE_G_DEBUGINFO) ] || { ln -s $(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE_G_DEBUGINFO); }
   ifeq ($(ZIP_DEBUGINFO_FILES),1)
+    ifneq ($(STRIP_POLICY),no_strip)
 	$(ZIPEXE) -q -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE_G_DEBUGINFO)
 	$(RM) $(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE_G_DEBUGINFO)
 	[ -f $(LIBJVM_DTRACE_G_DIZ) ] || { ln -s $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_G_DIZ); }
+    endif
   endif
 endif
 
--- a/make/solaris/makefiles/gcc.make	Tue Jun 04 10:47:35 2013 -0700
+++ b/make/solaris/makefiles/gcc.make	Fri Aug 09 12:21:36 2013 +0100
@@ -112,7 +112,9 @@
 
 
 # Compiler warnings are treated as errors 
-WARNINGS_ARE_ERRORS = -Werror 
+ifneq ($(COMPILER_WARNINGS_FATAL),false)
+WARNINGS_ARE_ERRORS = -Werror
+endif
 # Enable these warnings. See 'info gcc' about details on these options
 ADDITIONAL_WARNINGS = -Wpointer-arith -Wconversion -Wsign-compare 
 CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(ADDITIONAL_WARNINGS) 
--- a/make/solaris/makefiles/jsig.make	Tue Jun 04 10:47:35 2013 -0700
+++ b/make/solaris/makefiles/jsig.make	Fri Aug 09 12:21:36 2013 +0100
@@ -58,6 +58,7 @@
                          $(LFLAGS_JSIG) -o $@ $(JSIGSRCDIR)/jsig.c -ldl
 	[ -f $(LIBJSIG_G) ] || { ln -s $@ $(LIBJSIG_G); }
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifneq ($(STRIP_POLICY),no_strip)
 # gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set.
 # Clear the SHF_ALLOC flag (if set) from empty section headers.
 # An empty section header has sh_addr == 0 and sh_size == 0.
@@ -69,6 +70,7 @@
 # Use $(ADD_GNU_DEBUGLINK) until a fixed $(OBJCOPY) is available.
 #	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@
 	$(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBJSIG_DEBUGINFO) $@
+  endif
   ifeq ($(STRIP_POLICY),all_strip)
 	$(QUIETLY) $(STRIP) $@
   else
@@ -79,9 +81,11 @@
   endif
 	[ -f $(LIBJSIG_G_DEBUGINFO) ] || { ln -s $(LIBJSIG_DEBUGINFO) $(LIBJSIG_G_DEBUGINFO); }
   ifeq ($(ZIP_DEBUGINFO_FILES),1)
+    ifneq ($(STRIP_POLICY),no_strip)
 	$(ZIPEXE) -q -y $(LIBJSIG_DIZ) $(LIBJSIG_DEBUGINFO) $(LIBJSIG_G_DEBUGINFO)
 	$(RM) $(LIBJSIG_DEBUGINFO) $(LIBJSIG_G_DEBUGINFO)
 	[ -f $(LIBJSIG_G_DIZ) ] || { ln -s $(LIBJSIG_DIZ) $(LIBJSIG_G_DIZ); }
+    endif
   endif
 endif
 
--- a/make/solaris/makefiles/rules.make	Tue Jun 04 10:47:35 2013 -0700
+++ b/make/solaris/makefiles/rules.make	Fri Aug 09 12:21:36 2013 +0100
@@ -135,20 +135,10 @@
 
 include $(GAMMADIR)/make/altsrc.make
 
-# Sun compiler for 64 bit Solaris does not support building non-PIC object files.
-ifdef LP64
 %.o: %.cpp
 	@echo Compiling $<
 	$(QUIETLY) $(REMOVE_TARGET)
 	$(QUIETLY) $(COMPILE.CXX) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE)
-else
-%.o: %.cpp
-	@echo Compiling $<
-	$(QUIETLY) $(REMOVE_TARGET)
-	$(QUIETLY) $(if $(findstring $@, $(NONPIC_OBJ_FILES)), \
-	   $(subst $(VM_PICFLAG), ,$(COMPILE.CXX)) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE), \
-	   $(COMPILE.CXX) $(DEPFLAGS) -o $@ $< $(COMPILE_DONE))
-endif
 
 %.o: %.s
 	@echo Assembling $<
--- a/make/solaris/makefiles/saproc.make	Tue Jun 04 10:47:35 2013 -0700
+++ b/make/solaris/makefiles/saproc.make	Fri Aug 09 12:21:36 2013 +0100
@@ -109,6 +109,7 @@
 	           -ldl -ldemangle -lthread -lc
 	[ -f $(LIBSAPROC_G) ] || { ln -s $@ $(LIBSAPROC_G); }
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifneq ($(STRIP_POLICY),no_strip)
 # gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set.
 # Clear the SHF_ALLOC flag (if set) from empty section headers.
 # An empty section header has sh_addr == 0 and sh_size == 0.
@@ -120,6 +121,7 @@
 # Use $(ADD_GNU_DEBUGLINK) until a fixed $(OBJCOPY) is available.
 #	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@
 	$(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBSAPROC_DEBUGINFO) $@
+  endif
   ifeq ($(STRIP_POLICY),all_strip)
 	$(QUIETLY) $(STRIP) $@
   else
@@ -130,9 +132,11 @@
   endif
 	[ -f $(LIBSAPROC_G_DEBUGINFO) ] || { ln -s $(LIBSAPROC_DEBUGINFO) $(LIBSAPROC_G_DEBUGINFO); }
   ifeq ($(ZIP_DEBUGINFO_FILES),1)
+    ifneq ($(STRIP_POLICY),no_strip)
 	$(ZIPEXE) -q -y $(LIBSAPROC_DIZ) $(LIBSAPROC_DEBUGINFO) $(LIBSAPROC_G_DEBUGINFO)
 	$(RM) $(LIBSAPROC_DEBUGINFO) $(LIBSAPROC_G_DEBUGINFO)
 	[ -f $(LIBSAPROC_G_DIZ) ] || { ln -s $(LIBSAPROC_DIZ) $(LIBSAPROC_G_DIZ); }
+    endif
   endif
 endif
 
--- a/make/solaris/makefiles/vm.make	Tue Jun 04 10:47:35 2013 -0700
+++ b/make/solaris/makefiles/vm.make	Fri Aug 09 12:21:36 2013 +0100
@@ -85,11 +85,19 @@
   ${HS_LIB_ARCH}     \
   ${VM_DISTRO}
 
+ifdef DERIVATIVE_ID
+CPPFLAGS += -DDERIVATIVE_ID="\"$(DERIVATIVE_ID)\""
+endif
+
 # This is VERY important! The version define must only be supplied to vm_version.o
 # If not, ccache will not re-use the cache at all, since the version string might contain
 # a time and date. 
 vm_version.o: CXXFLAGS += ${JRE_VERSION} 
 
+ifdef DISTRIBUTION_ID
+CPPFLAGS += -DDISTRIBUTION_ID="\"$(DISTRIBUTION_ID)\""
+endif
+
 # CFLAGS_WARN holds compiler options to suppress/enable warnings.
 CFLAGS += $(CFLAGS_WARN)
 
@@ -294,6 +302,7 @@
 	$(QUIETLY) [ -f $(LIBJVM_G) ] || ln -s $@ $(LIBJVM_G)
 	$(QUIETLY) [ -f $(LIBJVM_G).1 ] || ln -s $@.1 $(LIBJVM_G).1
 ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
+  ifneq ($(STRIP_POLICY),no_strip)
 # gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set.
 # Clear the SHF_ALLOC flag (if set) from empty section headers.
 # An empty section header has sh_addr == 0 and sh_size == 0.
@@ -305,6 +314,7 @@
 # Use $(ADD_GNU_DEBUGLINK) until a fixed $(OBJCOPY) is available.
 #	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DEBUGINFO) $@
 	$(QUIETLY) $(ADD_GNU_DEBUGLINK) $(LIBJVM_DEBUGINFO) $@
+  endif
   ifeq ($(STRIP_POLICY),all_strip)
 	$(QUIETLY) $(STRIP) $@
   else
@@ -315,9 +325,11 @@
   endif
 	$(QUIETLY) [ -f $(LIBJVM_G_DEBUGINFO) ] || ln -s $(LIBJVM_DEBUGINFO) $(LIBJVM_G_DEBUGINFO)
   ifeq ($(ZIP_DEBUGINFO_FILES),1)
+    ifneq ($(STRIP_POLICY),no_strip)
 	$(ZIPEXE) -q -y $(LIBJVM_DIZ) $(LIBJVM_DEBUGINFO) $(LIBJVM_G_DEBUGINFO)
 	$(RM) $(LIBJVM_DEBUGINFO) $(LIBJVM_G_DEBUGINFO)
 	[ -f $(LIBJVM_G_DIZ) ] || { ln -s $(LIBJVM_DIZ) $(LIBJVM_G_DIZ); }
+    endif
   endif
 endif
 endif # filter -sbfast -xsbfast
--- a/make/windows/makefiles/vm.make	Tue Jun 04 10:47:35 2013 -0700
+++ b/make/windows/makefiles/vm.make	Fri Aug 09 12:21:36 2013 +0100
@@ -83,6 +83,14 @@
 # Define that so jni.h is on correct side
 CXX_FLAGS=$(CXX_FLAGS) /D "_JNI_IMPLEMENTATION_"
 
+!ifdef DERIVATIVE_ID
+CPP_FLAGS = $(CPP_FLAGS) /D "DERIVATIVE_ID=\"$(DERIVATIVE_ID)\""
+!endif
+
+!ifdef DISTRIBUTION_ID
+CPP_FLAGS = $(CPP_FLAGS) /D "DISTRIBUTION_ID=\"$(DISTRIBUTION_ID)\""
+!endif
+
 !if "$(BUILDARCH)" == "ia64"
 STACK_SIZE="/STACK:1048576,262144"
 !else
--- a/src/cpu/x86/vm/c2_globals_x86.hpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/cpu/x86/vm/c2_globals_x86.hpp	Fri Aug 09 12:21:36 2013 +0100
@@ -89,7 +89,7 @@
 
 // Heap related flags
 define_pd_global(uintx,PermSize,    ScaleForWordSize(16*M));
-define_pd_global(uintx,MaxPermSize, ScaleForWordSize(64*M));
+define_pd_global(uintx,MaxPermSize, ScaleForWordSize(128*M));
 
 // Ergonomics related flags
 define_pd_global(bool, NeverActAsServerClassMachine, false);
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/zero/vm/arm_cas.S	Fri Aug 09 12:21:36 2013 +0100
@@ -0,0 +1,31 @@
+#ifdef __ARM_ARCH_7A__
+@	jlong
+@	arm_val_compare_and_swap_long(volatile void *ptr,
+@				 jlong oldval,
+@				 jlong newval) {
+	.pushsection .text
+	.global arm_val_compare_and_swap_long
+#ifdef __thumb__
+	.syntax	unified
+	.thumb_func
+#endif
+	.type arm_val_compare_and_swap_long, %function
+arm_val_compare_and_swap_long:
+	stmfd	sp!, {r4, r5, r6, r7}
+	ldrd	r4, [sp, #16]
+	dmb	sy
+0:	ldrexd	r6, [r0]
+	cmp	r6, r2
+	it	eq
+	cmpeq	r7, r3
+	bne	1f
+	strexd	r1, r4, [r0]
+	cmp	r1, #0
+	bne	0b
+	dmb	sy
+1:	mov	r0, r6
+	mov	r1, r7
+	ldmfd	sp!, {r4, r5, r6, r7}
+	bx	lr
+	.popsection
+#endif // __ARM_ARCH_7A__
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/zero/vm/asm_helper.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -0,0 +1,730 @@
+/*
+ * Copyright 2009, 2010 Edward Nevill
+ * Copyright 2011, Red Hat
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifdef __arm__
+
+#define	ARCH_THUMB2	(1<<16)
+#define ARCH_VFP	(1<<17)
+#define ARCH_CLZ	(1<<18)
+
+#include "precompiled.hpp"
+#include "asm/assembler.hpp"
+#include "interp_masm_zero.hpp"
+#include "interpreter/bytecodeInterpreter.hpp"
+#include "interpreter/bytecodeInterpreter.inline.hpp"
+#include "interpreter/interpreter.hpp"
+#include "interpreter/interpreterRuntime.hpp"
+#include "oops/methodDataOop.hpp"
+#include "oops/methodOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "prims/jvmtiExport.hpp"
+#include "prims/jvmtiThreadState.hpp"
+#include "runtime/deoptimization.hpp"
+#include "runtime/frame.inline.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "runtime/stubRoutines.hpp"
+#include "runtime/synchronizer.hpp"
+#include "runtime/vframeArray.hpp"
+#include "utilities/debug.hpp"
+
+#ifndef STATIC_OFFSETS
+
+#include <linux/auxvec.h>
+#include <asm/hwcap.h>
+
+#define VECBUFF_SIZE 64
+
+static char valuebuf[128];
+
+// Return the name of the current method.  Not multi-thread safe.
+extern "C" char*
+meth(interpreterState istate) {
+  istate->method()->name_and_sig_as_C_string(valuebuf, sizeof valuebuf);
+  char *p = valuebuf + strlen(valuebuf);
+  sprintf(p, ": " PTR_FORMAT " (bci %d)",
+	  (intptr_t) istate->bcp(),
+	  istate->method()->bci_from(istate->bcp()));
+  return valuebuf;
+}
+
+// Used for debugging the interpreter.  The macro TRACE in
+// cppInterpreter_arm.S calls this routine, and you can trap on a
+// particular method.
+#define NAME1 "sun.nio.ch.FileChannelImpl$Unmapper.run()V"
+#define EQ(S1, S2) (S1 && (strncmp(S1, S2, strlen(S2)) == 0))
+extern "C" void my_trace(void *jpc, void *istate)
+{
+  char *name = meth((interpreterState)istate);
+  if (EQ(name, NAME1));
+    asm volatile("nop");  // Somewhere to put a breakpoint
+}
+
+extern "C" unsigned hwcap(void)
+{
+  int fd;
+  unsigned vecs[VECBUFF_SIZE];
+  unsigned *p;
+  int i, n;
+  unsigned rc = 0;
+  unsigned arch = 4;
+ 
+  fd = open("/proc/self/auxv", O_RDONLY);
+  if (fd < 0) return 0;
+  do {
+    n = read(fd, vecs, VECBUFF_SIZE * sizeof(unsigned));
+    p = vecs;
+    i = n/8;
+    while (--i >= 0) {
+      unsigned tag = *p++;
+      unsigned value = *p++;
+      if (tag == 0) goto fini;
+      if (tag == AT_HWCAP) {
+	if (value & HWCAP_THUMBEE) rc |= ARCH_THUMB2;
+	if (value & HWCAP_VFP) rc |= ARCH_VFP;
+      } else if (tag == AT_PLATFORM) {
+	const char *s = (const char *)value;
+	int c;
+
+	if (*s++ == 'v') {
+	  arch = 0;
+	  while ((isdigit)(c = *s++)) arch = arch * 10 + c - '0';
+	}
+      }
+    }
+  } while (n == VECBUFF_SIZE * sizeof(unsigned));
+fini:
+  close(fd);
+//  printf("arch = %d, rc = 0x%08x\n", arch, rc);
+  if (arch >= 5) rc |= ARCH_CLZ;
+  if (arch >= 7) rc |= ARCH_THUMB2;
+  return rc | (1<<arch);
+}
+
+/* Thease functions allow the ASM interpreter to call CPP virtual functions.
+ * Otherwise the ASM interpreter has to grup around in the VTABLE which is
+ * not very portable.
+ */
+extern "C" bool JavaThread_is_lock_owned(JavaThread *r0, address r1)
+{
+	return r0->is_lock_owned(r1);
+}
+
+extern "C" HeapWord **CollectedHeap_top_addr(CollectedHeap *r0)
+{
+	return r0->top_addr();
+}
+
+extern "C" HeapWord **CollectedHeap_end_addr(CollectedHeap *r0)
+{
+	return r0->end_addr();
+}
+
+extern "C" char *SharedRuntime_generate_class_cast_message(const char *name, const char *klass)
+{
+	return SharedRuntime::generate_class_cast_message(name, klass);
+}
+
+#define HELPER_THROW(thread, name, msg) Exceptions::_throw_msg(thread, __FILE__, __LINE__, name, msg)
+
+class VMStructs {
+public:
+  static inline klassOop klass_at_addr(constantPoolOop constants, u2 index) {
+    return (klassOop) *constants->obj_at_addr_raw(index);
+  }
+};
+
+extern "C" oop Helper_new(interpreterState istate, unsigned index)
+{
+    JavaThread *thread = istate->thread();
+
+    constantPoolOop constants = istate->method()->constants();
+    oop result = NULL;
+    if (!constants->tag_at(index).is_unresolved_klass()) {
+      // Make sure klass is initialized and doesn't have a finalizer
+      oop entry = VMStructs::klass_at_addr(constants, index);
+      klassOop k_entry = (klassOop) entry;
+      instanceKlass* ik = (instanceKlass*) k_entry->klass_part();
+      if ( ik->is_initialized() && ik->can_be_fastpath_allocated() ) {
+	size_t obj_size = ik->size_helper();
+	// If the TLAB isn't pre-zeroed then we'll have to do it
+	bool need_zero = !ZeroTLAB;
+	if (UseTLAB) {
+	  result = (oop) thread->tlab().allocate(obj_size);
+	}
+	if (result == NULL && !CMSIncrementalMode) {
+	  need_zero = true;
+	  // Try allocate in shared eden
+    retry:
+	  HeapWord* compare_to = *Universe::heap()->top_addr();
+	  HeapWord* new_top = compare_to + obj_size;
+	  if (new_top <= *Universe::heap()->end_addr()) {
+	    if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
+	      goto retry;
+	    }
+	    result = (oop) compare_to;
+	  }
+	}
+	if (result != NULL) {
+	  // Initialize object (if nonzero size and need) and then the header
+	  if (need_zero ) {
+	    HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize;
+	    obj_size -= sizeof(oopDesc) / oopSize;
+	    if (obj_size > 0 ) {
+	      memset(to_zero, 0, obj_size * HeapWordSize);
+	    }
+	  }
+	  if (UseBiasedLocking) {
+	    result->set_mark(ik->prototype_header());
+	  } else {
+	    result->set_mark(markOopDesc::prototype());
+	  }
+	  result->set_klass_gap(0);
+	  result->set_klass(k_entry);
+	  return result;
+	}
+      }
+    }
+    // Slow case allocation
+    InterpreterRuntime::_new(thread, istate->method()->constants(), index);
+    result = thread->vm_result();
+    thread->set_vm_result(NULL);
+    return result;
+}
+
+extern "C" int Helper_instanceof(interpreterState istate, unsigned index, oop tos)
+{
+    if (tos == NULL) return 0;
+
+    // Constant pool may have actual klass or unresolved klass. If it is
+    // unresolved we must resolve it
+    if (istate->method()->constants()->tag_at(index).is_unresolved_klass()) {
+      InterpreterRuntime::quicken_io_cc(istate->thread());
+      if (istate->thread()->has_pending_exception()) return 0;
+    }
+    klassOop klassOf = VMStructs::klass_at_addr(istate->method()->constants(), index);
+    klassOop objKlassOop = tos->klass();
+    //
+    // Check for compatibilty. This check must not GC!!
+    // Seems way more expensive now that we must dispatch
+    //
+    return objKlassOop == klassOf || objKlassOop->klass_part()->is_subtype_of(klassOf);
+}
+
+extern "C" oop Helper_checkcast(interpreterState istate, unsigned index, oop tos)
+{
+    if (tos == NULL) return NULL;
+
+    // Constant pool may have actual klass or unresolved klass. If it is
+    // unresolved we must resolve it
+    if (istate->method()->constants()->tag_at(index).is_unresolved_klass()) {
+      oop except_oop;
+      InterpreterRuntime::quicken_io_cc(istate->thread());
+      if (except_oop = istate->thread()->pending_exception()) return except_oop;
+    }
+    klassOop klassOf = VMStructs::klass_at_addr(istate->method()->constants(), index);
+    klassOop objKlassOop = tos->klass(); //ebx
+    //
+    // Check for compatibilty. This check must not GC!!
+    // Seems way more expensive now that we must dispatch
+    //
+    if (objKlassOop != klassOf && !objKlassOop->klass_part()->is_subtype_of(klassOf)) {
+      ResourceMark rm(istate->thread());
+      const char* objName = Klass::cast(objKlassOop)->external_name();
+      const char* klassName = Klass::cast(klassOf)->external_name();
+      char* message = SharedRuntime::generate_class_cast_message(objName, klassName);
+      ThreadInVMfromJava trans(istate->thread());
+      HELPER_THROW(istate->thread(), vmSymbols::java_lang_ClassCastException(), message);
+    }
+    return istate->thread()->pending_exception();
+}
+
+extern "C" oop Helper_monitorenter(interpreterState istate, oop lockee)
+{
+    BasicObjectLock* limit = istate->monitor_base();
+    BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
+    BasicObjectLock* entry = NULL;
+    markOop displaced;
+    JavaThread *thread = istate->thread();
+
+    if (lockee == NULL) {
+      HELPER_THROW(istate->thread(), vmSymbols::java_lang_NullPointerException(), "");
+      goto handle_exception;
+    }
+    while (most_recent != limit ) {
+      if (most_recent->obj() == NULL) entry = most_recent;
+      else if (most_recent->obj() == lockee) break;
+      most_recent++;
+    }
+    if (entry == NULL) {
+      int monitor_words = frame::interpreter_frame_monitor_size();
+      ZeroStack *stack = thread->zero_stack();
+
+      if (monitor_words > stack->available_words()) {
+        InterpreterRuntime::throw_StackOverflowError(thread);
+	goto handle_exception;
+      } else {
+	stack->alloc(monitor_words * wordSize);
+
+	for (intptr_t *p = istate->stack() + 1; p < istate->stack_base(); p++)
+	  *(p - monitor_words) = *p;
+
+	istate->set_stack_limit(istate->stack_limit() - monitor_words);
+	istate->set_stack(istate->stack() - monitor_words);
+	istate->set_stack_base(istate->stack_base() - monitor_words);
+
+	entry = (BasicObjectLock *) istate->stack_base();
+      }
+    }
+    entry->set_obj(lockee);
+    displaced = lockee->mark()->set_unlocked();
+    entry->lock()->set_displaced_header(displaced);
+    if (Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
+      // Is it simple recursive case?
+      if (thread->is_lock_owned((address) displaced->clear_lock_bits())) {
+	entry->lock()->set_displaced_header(NULL);
+      } else {
+	InterpreterRuntime::monitorenter(thread, entry);
+      }
+    }
+handle_exception:
+    return thread->pending_exception();
+}
+
+extern "C" oop Helper_monitorexit(interpreterState istate, oop lockee)
+{
+    BasicObjectLock* limit = istate->monitor_base();
+    BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
+    JavaThread *thread = istate->thread();
+
+    if (lockee == NULL) {
+      HELPER_THROW(istate->thread(), vmSymbols::java_lang_NullPointerException(), "");
+      goto handle_exception;
+    }
+    while (most_recent != limit ) {
+      if ((most_recent)->obj() == lockee) {
+	BasicLock* lock = most_recent->lock();
+	markOop header = lock->displaced_header();
+	most_recent->set_obj(NULL);
+	if (header != NULL) {
+	  if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
+	    // restore object for the slow case
+	    most_recent->set_obj(lockee);
+	    InterpreterRuntime::monitorexit(thread, most_recent);
+	  }
+	}
+	return thread->pending_exception();
+      }
+      most_recent++;
+    }
+    InterpreterRuntime::throw_illegal_monitor_state_exception(thread);
+handle_exception:
+    return thread->pending_exception();
+}
+
+extern "C" oop Helper_aastore(interpreterState istate, oop value, int index, arrayOop arrayref)
+{
+    if (arrayref == NULL) {
+      ThreadInVMfromJava trans(istate->thread());
+      HELPER_THROW(istate->thread(), vmSymbols::java_lang_NullPointerException(), "");
+    } else if ((uint32_t)index >= (uint32_t)arrayref->length()) {
+      char message[jintAsStringSize];
+      sprintf(message, "%d", index);
+      HELPER_THROW(istate->thread(), vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message);
+    } else {
+      if (value != NULL) {
+	/* Check assignability of value into arrayref */
+	klassOop rhsKlassOop = value->klass(); // EBX (subclass)
+	klassOop elemKlassOop = ((objArrayKlass*) arrayref->klass()->klass_part())->element_klass();
+	//
+	// Check for compatibilty. This check must not GC!!
+	// Seems way more expensive now that we must dispatch
+	//
+	if (rhsKlassOop != elemKlassOop && !rhsKlassOop->klass_part()->is_subtype_of(elemKlassOop)) {
+	  HELPER_THROW(istate->thread(), vmSymbols::java_lang_ArrayStoreException(), "");
+	  goto handle_exception;
+	}
+      }
+      oop* elem_loc = (oop*)(((address) arrayref->base(T_OBJECT)) + index * sizeof(oop));
+      // *(oop*)(((address) arrayref->base(T_OBJECT)) + index * sizeof(oop)) = value;
+      *elem_loc = value;
+      // Mark the card
+      BarrierSet* bs = Universe::heap()->barrier_set();
+      static volatile jbyte* _byte_map_base = (volatile jbyte*)(((CardTableModRefBS*)bs)->byte_map_base);
+      OrderAccess::release_store(&_byte_map_base[(uintptr_t)elem_loc >> CardTableModRefBS::card_shift], 0);
+    }
+handle_exception:
+    return istate->thread()->pending_exception();
+}
+
+extern "C" void Helper_aputfield(oop obj)
+{
+      BarrierSet* bs = Universe::heap()->barrier_set();
+      static volatile jbyte* _byte_map_base = (volatile jbyte*)(((CardTableModRefBS*)bs)->byte_map_base);
+      OrderAccess::release_store(&_byte_map_base[(uintptr_t)obj >> CardTableModRefBS::card_shift], 0);
+}
+
+extern "C" oop Helper_synchronized_enter(JavaThread *thread, BasicObjectLock *mon)
+{
+    BasicLock *lock = mon->lock();
+    markOop displaced = lock->displaced_header();
+
+    if (thread->is_lock_owned((address)displaced->clear_lock_bits()))
+      lock->set_displaced_header(NULL);
+    else
+      InterpreterRuntime::monitorenter(thread, mon);
+    return thread->pending_exception();
+}
+
+extern "C" oop Helper_synchronized_exit(JavaThread *thread, BasicObjectLock *mon)
+{
+    {
+      HandleMark __hm(thread);
+      if (mon->obj() == NULL)
+	InterpreterRuntime::throw_illegal_monitor_state_exception(thread);
+      else
+        InterpreterRuntime::monitorexit(thread, mon);
+    }
+    return thread->pending_exception();
+}
+
+extern "C" oop Helper_SafePoint(JavaThread *thread)
+{
+    {
+      HandleMarkCleaner __hmc(thread);
+    }
+    SafepointSynchronize::block(thread);
+    return thread->pending_exception();
+}
+
+extern "C" void Helper_RaiseArrayBoundException(JavaThread *thread, int index)
+{
+  char message[jintAsStringSize];
+  sprintf(message, "%d", index);
+  {
+       ThreadInVMfromJava trans(thread);
+       Exceptions::_throw_msg(thread, "[Bytecoce Interpreter]", 99,
+			vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message);
+  }
+}
+
+extern "C" void Helper_Raise(JavaThread *thread, Symbol *name, char const *msg)
+{
+   ThreadInVMfromJava trans(thread);
+   Exceptions::_throw_msg(thread, "[Bytecoce Interpreter]", 99, name, msg);
+}
+
+extern "C" void Helper_RaiseIllegalMonitorException(JavaThread *thread)
+{
+    HandleMark __hm(thread);
+    thread->clear_pending_exception();
+    InterpreterRuntime::throw_illegal_monitor_state_exception(thread);
+}
+
+extern "C" address Helper_HandleException(interpreterState istate, JavaThread *thread)
+{
+    HandleMarkCleaner __hmc(thread);
+    Handle except_oop(thread, thread->pending_exception());
+    HandleMark __hm(thread);
+    intptr_t continuation_bci;
+    intptr_t *topOfStack;
+    address pc;
+
+    thread->clear_pending_exception();
+    continuation_bci = (intptr_t)InterpreterRuntime::exception_handler_for_exception(thread, except_oop());
+    except_oop = (oop) thread->vm_result();
+    thread->set_vm_result(NULL);
+    if (continuation_bci >= 0) {
+      topOfStack = (intptr_t *)istate->stack();
+      *topOfStack-- = (intptr_t)except_oop();
+      istate->set_stack(topOfStack);
+      pc = istate->method()->code_base() + continuation_bci;
+#if 0
+        tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", Klass::cast(except_oop->klass())->external_name(), except_oop());
+        tty->print_cr(" thrown in interpreter method <%s>", istate->method()->name_and_sig_as_C_string());
+        tty->print_cr(" at bci %d, continuing at %d for thread " INTPTR_FORMAT,
+                      pc - (intptr_t)istate->method()->code_base(),
+                      continuation_bci, thread);
+#endif
+      return pc;
+    }
+#if 0
+      tty->print_cr("Exception <%s> (" INTPTR_FORMAT ")", Klass::cast(except_oop->klass())->external_name(), except_oop());
+      tty->print_cr(" thrown in interpreter method <%s>", istate->method()->name_and_sig_as_C_string());
+      tty->print_cr(" at bci %d, unwinding for thread " INTPTR_FORMAT,
+                    pc  - (intptr_t) istate->method()->code_base(),
+                    thread);
+#endif
+    thread->set_pending_exception(except_oop(), NULL, 0);
+    return 0;
+}
+
+extern "C" void Helper_report_fatal(char *filename, int line,
+				    char *msg, int opcode, char *name)
+{
+  report_fatal(filename, line,
+	       err_msg(msg, opcode, name));
+}
+
+extern "C" int Helper_target_offset_in_bytes()
+{
+  return java_lang_invoke_CallSite::target_offset_in_bytes();
+}
+
+#endif // STATIC_OFFSETS
+
+#ifdef STATIC_OFFSETS
+
+class VMStructs {
+public:
+	static void print_vm_offsets(void);
+};
+
+#define outfile	stdout
+
+void print_def(const char *s, int v)
+{
+	fprintf(outfile, "#undef %-40s\n", s);
+	fprintf(outfile, "#define %-40s 0x%02x\n", s, v);
+}
+
+void nl(void)
+{
+	fputc('\n', outfile);
+}
+
+// ZeroFrame is not friends with VMStructs, but it is with ZeroStackPrinter
+class ZeroStackPrinter {
+public:
+  static void print_vm_offsets(void);
+};
+
+void ZeroStackPrinter::print_vm_offsets(void)
+{
+    print_def("INTERPRETER_FRAME", ZeroFrame::INTERPRETER_FRAME);
+}
+
+void VMStructs::print_vm_offsets(void)
+{
+  print_def("ISTATE_THREAD",    offset_of(BytecodeInterpreter, _thread));
+  print_def("ISTATE_BCP",       offset_of(BytecodeInterpreter, _bcp));
+  print_def("ISTATE_LOCALS",    offset_of(BytecodeInterpreter, _locals));
+  print_def("ISTATE_CONSTANTS", offset_of(BytecodeInterpreter, _constants));
+  print_def("ISTATE_METHOD",    offset_of(BytecodeInterpreter, _method));
+  print_def("ISTATE_STACK",     offset_of(BytecodeInterpreter, _stack));
+  print_def("ISTATE_MSG",       offset_of(BytecodeInterpreter, _msg));
+  print_def("ISTATE_OOP_TEMP",	offset_of(BytecodeInterpreter, _oop_temp));
+  print_def("ISTATE_STACK_BASE",offset_of(BytecodeInterpreter, _stack_base));
+  print_def("ISTATE_STACK_LIMIT",offset_of(BytecodeInterpreter, _stack_limit));
+  print_def("ISTATE_MONITOR_BASE",offset_of(BytecodeInterpreter, _monitor_base));
+  print_def("ISTATE_SELF_LINK",	offset_of(BytecodeInterpreter, _self_link));
+  print_def("ISTATE_FRAME_TYPE", sizeof(BytecodeInterpreter) + 0);
+  print_def("ISTATE_NEXT_FRAME", sizeof(BytecodeInterpreter) + 4);
+  print_def("FRAME_SIZE", sizeof(BytecodeInterpreter) + 8);
+  nl();
+  ZeroStackPrinter::print_vm_offsets();
+  nl();
+  print_def("THREAD_PENDING_EXC", offset_of(JavaThread, _pending_exception));
+  print_def("THREAD_SUSPEND_FLAGS", offset_of(JavaThread, _suspend_flags));
+  print_def("THREAD_ACTIVE_HANDLES", offset_of(JavaThread, _active_handles));
+  print_def("THREAD_LAST_HANDLE_MARK", offset_of(JavaThread, _last_handle_mark));
+  print_def("THREAD_TLAB_TOP", offset_of(JavaThread, _tlab) + offset_of(ThreadLocalAllocBuffer, _top));
+  print_def("THREAD_TLAB_END", offset_of(JavaThread, _tlab) + offset_of(ThreadLocalAllocBuffer, _end));
+  print_def("THREAD_RESOURCEAREA", offset_of(JavaThread, _resource_area));
+  print_def("THREAD_HANDLE_AREA", offset_of(JavaThread, _handle_area));
+  print_def("THREAD_STACK_BASE", offset_of(JavaThread, _stack_base));
+  print_def("THREAD_STACK_SIZE", offset_of(JavaThread, _stack_size));
+  print_def("THREAD_LAST_JAVA_SP", offset_of(JavaThread, _anchor)
+	    + offset_of(JavaFrameAnchor, _last_Java_sp));
+  print_def("THREAD_LAST_JAVA_FP", offset_of(JavaThread, _anchor)
+	    + offset_of(JavaFrameAnchor, _last_Java_fp));
+  print_def("THREAD_JNI_ENVIRONMENT", offset_of(JavaThread, _jni_environment));
+  print_def("THREAD_VM_RESULT", offset_of(JavaThread, _vm_result));
+  print_def("THREAD_STATE", offset_of(JavaThread, _thread_state));
+  print_def("THREAD_DO_NOT_UNLOCK", offset_of(JavaThread, _do_not_unlock_if_synchronized));
+
+  print_def("THREAD_JAVA_STACK_BASE", offset_of(JavaThread, _zero_stack) + in_bytes(ZeroStack::base_offset()));
+  print_def("THREAD_JAVA_SP", offset_of(JavaThread, _zero_stack) + in_bytes(ZeroStack::sp_offset()));
+  print_def("THREAD_TOP_ZERO_FRAME", offset_of(JavaThread, _top_zero_frame));
+  print_def("THREAD_SPECIALRUNTIMEEXITCONDITION", offset_of(JavaThread, _special_runtime_exit_condition));
+  nl();
+  print_def("_thread_external_suspend",	Thread::_external_suspend);
+  print_def("_thread_ext_suspended",	Thread::_ext_suspended);
+  print_def("_thread_deopt_suspend",	Thread::_deopt_suspend);
+  nl();
+  print_def("METHOD_CONSTMETHOD", offset_of(methodOopDesc, _constMethod));
+  print_def("METHOD_CONSTANTS", offset_of(methodOopDesc, _constants));
+  print_def("METHOD_METHODDATA", offset_of(methodOopDesc, _method_data));
+  print_def("METHOD_INVOKECOUNT", offset_of(methodOopDesc, _interpreter_invocation_count));
+  print_def("METHOD_ACCESSFLAGS", offset_of(methodOopDesc, _access_flags));
+  print_def("METHOD_VTABLEINDEX", offset_of(methodOopDesc, _vtable_index));
+  print_def("METHOD_RESULTINDEX", offset_of(methodOopDesc, _result_index));
+  print_def("METHOD_METHODSIZE", offset_of(methodOopDesc, _method_size));
+  print_def("METHOD_MAXSTACK", offset_of(methodOopDesc, _max_stack));
+  print_def("METHOD_MAXLOCALS", offset_of(methodOopDesc, _max_locals));
+  print_def("METHOD_SIZEOFPARAMETERS", offset_of(methodOopDesc, _size_of_parameters));
+  print_def("METHOD_INVOCATIONCOUNTER", offset_of(methodOopDesc, _invocation_counter));
+  print_def("METHOD_BACKEDGECOUNTER", offset_of(methodOopDesc, _backedge_counter));
+  print_def("METHOD_FROM_INTERPRETED", offset_of(methodOopDesc, _from_interpreted_entry));
+  // ECN: These two appear to be just tagged onto the end of the class
+  print_def("METHOD_NATIVEHANDLER", sizeof(methodOopDesc));
+  print_def("METHOD_SIGNATUREHANDLER", sizeof(methodOopDesc)+4);
+  nl();
+  print_def("CONSTMETHOD_CODESIZE", offset_of(constMethodOopDesc, _code_size));
+  print_def("CONSTMETHOD_CODEOFFSET", sizeof(constMethodOopDesc));
+  nl();
+  print_def("JNIHANDLEBLOCK_TOP", offset_of(JNIHandleBlock, _top));
+  nl();
+  print_def("KLASS_PART", sizeof(klassOopDesc));
+  print_def("KLASS_ACCESSFLAGS", offset_of(Klass, _access_flags));
+  print_def("KLASS_JAVA_MIRROR", offset_of(Klass, _java_mirror));
+  print_def("INSTANCEKLASS_INITSTATE", offset_of(instanceKlass, _init_state));
+  print_def("INSTANCEKLASS_VTABLE_LEN", offset_of(instanceKlass, _vtable_len));
+  print_def("INSTANCEKLASS_ITABLE_LEN", offset_of(instanceKlass, _itable_len));
+  print_def("INSTANCEKLASS_VTABLE_OFFSET", instanceKlass::vtable_start_offset() * sizeof(int *));
+  print_def("OBJARRAYKLASS_ELEMENTKLASS", offset_of(objArrayKlass, _element_klass));
+  nl();
+  print_def("CONSTANTPOOL_TAGS", offset_of(constantPoolOopDesc, _tags));
+  print_def("CONSTANTPOOL_CACHE", offset_of(constantPoolOopDesc, _cache));
+  print_def("CONSTANTPOOL_POOL_HOLDER", offset_of(constantPoolOopDesc, _pool_holder));
+  print_def("CONSTANTPOOL_BASE", sizeof(constantPoolOopDesc));
+  print_def("CP_CACHE_VOLATILE_FIELD_FLAG_BIT", ConstantPoolCacheEntry::volatileField);
+  print_def("CP_CACHE_FLAGS", offset_of(ConstantPoolCacheEntry, _flags));
+  nl();
+  print_def("CP_OFFSET", in_bytes(constantPoolCacheOopDesc::base_offset()));
+  nl();
+  print_def("BASE_OFFSET_BYTE", arrayOopDesc::base_offset_in_bytes(T_BYTE));
+  print_def("BASE_OFFSET_SHORT", arrayOopDesc::base_offset_in_bytes(T_SHORT));
+  print_def("BASE_OFFSET_WORD", arrayOopDesc::base_offset_in_bytes(T_INT));
+  print_def("BASE_OFFSET_LONG", arrayOopDesc::base_offset_in_bytes(T_LONG));
+  nl();
+  print_def("SIZEOF_HANDLEMARK", sizeof(HandleMark));
+  print_def("SIZEOF_FFI_CIF", sizeof(ffi_cif));
+}
+
+int main(void)
+{
+	print_def("ARCH_VFP",			ARCH_VFP);
+	print_def("ARCH_THUMB2",		ARCH_THUMB2);
+	print_def("ARCH_CLZ",			ARCH_CLZ);
+	nl();
+	print_def("JVM_CONSTANT_Utf8",		JVM_CONSTANT_Utf8);
+	print_def("JVM_CONSTANT_Unicode",	JVM_CONSTANT_Unicode);
+	print_def("JVM_CONSTANT_Integer",	JVM_CONSTANT_Integer);
+	print_def("JVM_CONSTANT_Float",		JVM_CONSTANT_Float);
+	print_def("JVM_CONSTANT_Long",		JVM_CONSTANT_Long);
+	print_def("JVM_CONSTANT_Double",	JVM_CONSTANT_Double);
+	print_def("JVM_CONSTANT_Class",		JVM_CONSTANT_Class);
+	print_def("JVM_CONSTANT_String",	JVM_CONSTANT_String);
+	print_def("JVM_CONSTANT_Fieldref",	JVM_CONSTANT_Fieldref);
+	print_def("JVM_CONSTANT_Methodref",	JVM_CONSTANT_Methodref);
+	print_def("JVM_CONSTANT_InterfaceMethodref", JVM_CONSTANT_InterfaceMethodref);
+	print_def("JVM_CONSTANT_NameAndType",	JVM_CONSTANT_NameAndType);
+	nl();
+	print_def("JVM_CONSTANT_UnresolvedClass",	JVM_CONSTANT_UnresolvedClass);
+	print_def("JVM_CONSTANT_ClassIndex",		JVM_CONSTANT_ClassIndex);
+	print_def("JVM_CONSTANT_UnresolvedString",	JVM_CONSTANT_UnresolvedString);
+	print_def("JVM_CONSTANT_StringIndex",		JVM_CONSTANT_StringIndex);
+	print_def("JVM_CONSTANT_UnresolvedClassInError",JVM_CONSTANT_UnresolvedClassInError);
+	nl();
+	print_def("JVM_ACC_PUBLIC",	JVM_ACC_PUBLIC);
+	print_def("JVM_ACC_PRIVATE",	JVM_ACC_PRIVATE);
+	print_def("JVM_ACC_PROTECTED",	JVM_ACC_PROTECTED);
+	print_def("JVM_ACC_STATIC",	JVM_ACC_STATIC);
+	print_def("JVM_ACC_FINAL",	JVM_ACC_FINAL);
+	print_def("JVM_ACC_SYNCHRONIZED",	JVM_ACC_SYNCHRONIZED);
+	print_def("JVM_ACC_SUPER",	JVM_ACC_SUPER);
+	print_def("JVM_ACC_VOLATILE",	JVM_ACC_VOLATILE);
+	print_def("JVM_ACC_BRIDGE",	JVM_ACC_BRIDGE);
+	print_def("JVM_ACC_TRANSIENT",	JVM_ACC_TRANSIENT);
+	print_def("JVM_ACC_VARARGS",	JVM_ACC_VARARGS);
+	print_def("JVM_ACC_NATIVE",	JVM_ACC_NATIVE);
+	print_def("JVM_ACC_INTERFACE",	JVM_ACC_INTERFACE);
+	print_def("JVM_ACC_ABSTRACT",	JVM_ACC_ABSTRACT);
+	print_def("JVM_ACC_STRICT",	JVM_ACC_STRICT);
+	print_def("JVM_ACC_SYNTHETIC",	JVM_ACC_SYNTHETIC);
+	print_def("JVM_ACC_ANNOTATION",	JVM_ACC_ANNOTATION);
+	print_def("JVM_ACC_ENUM",	JVM_ACC_ENUM);
+	print_def("JVM_ACC_HAS_FINALIZER", JVM_ACC_HAS_FINALIZER);
+	nl();
+	print_def("T_BOOLEAN",	T_BOOLEAN);
+	print_def("T_CHAR",	T_CHAR);
+	print_def("T_FLOAT",	T_FLOAT);
+	print_def("T_DOUBLE",	T_DOUBLE);
+	print_def("T_BYTE",	T_BYTE);
+	print_def("T_SHORT",	T_SHORT);
+	print_def("T_INT",	T_INT);
+	print_def("T_LONG",	T_LONG);
+	print_def("T_OBJECT",	T_OBJECT);
+	print_def("T_ARRAY",	T_ARRAY);
+	print_def("T_VOID",	T_VOID);
+	nl();
+	print_def("tos_btos",	btos);
+	print_def("tos_ctos",	ctos);
+	print_def("tos_stos",	stos);
+	print_def("tos_itos",	itos);
+	print_def("tos_ltos",	ltos);
+	print_def("tos_ftos",	ftos);
+	print_def("tos_dtos",	dtos);
+	print_def("tos_atos",	atos);
+	nl();
+	print_def("_thread_uninitialized",	_thread_uninitialized);
+	print_def("_thread_new",		_thread_new);
+	print_def("_thread_new_trans",		_thread_new_trans);
+	print_def("_thread_in_native",		_thread_in_native);
+	print_def("_thread_in_native_trans",	_thread_in_native_trans);
+	print_def("_thread_in_vm",		_thread_in_vm);
+	print_def("_thread_in_vm_trans",	_thread_in_vm_trans);
+	print_def("_thread_in_Java",		_thread_in_Java);
+	print_def("_thread_in_Java_trans",	_thread_in_Java_trans);
+	print_def("_thread_blocked",		_thread_blocked);
+	print_def("_thread_blocked_trans",	_thread_blocked_trans);
+	print_def("_thread_max_state",		_thread_max_state);
+	nl();
+	print_def("class_unparsable_by_gc",	instanceKlass::unparsable_by_gc);
+	print_def("class_allocated",		instanceKlass::allocated);
+	print_def("class_loaded",		instanceKlass::loaded);
+	print_def("class_linked",		instanceKlass::linked);
+	print_def("class_being_initialized",	instanceKlass::being_initialized);
+	print_def("class_fully_initialized",	instanceKlass::fully_initialized);
+	print_def("class_init_error",		instanceKlass::initialization_error);
+	nl();
+	print_def("flag_methodInterface",	1 << ConstantPoolCacheEntry::methodInterface);
+	print_def("flag_volatileField",		1 << ConstantPoolCacheEntry::volatileField);
+	print_def("flag_vfinalMethod",		1 << ConstantPoolCacheEntry::vfinalMethod);
+	print_def("flag_finalField",		1 << ConstantPoolCacheEntry::finalField);
+	nl();
+	print_def("INVOCATIONCOUNTER_COUNTINCREMENT", InvocationCounter::count_increment);
+	nl();
+	VMStructs::print_vm_offsets();
+	nl();
+	print_def("VMSYMBOLS_ArithmeticException", vmSymbols::java_lang_ArithmeticException_enum);
+	print_def("VMSYMBOLS_ArrayIndexOutOfBounds", vmSymbols::java_lang_ArrayIndexOutOfBoundsException_enum);
+	print_def("VMSYMBOLS_ArrayStoreException", vmSymbols::java_lang_ArrayStoreException_enum);
+	print_def("VMSYMBOLS_ClassCastException", vmSymbols::java_lang_ClassCastException_enum);
+	print_def("VMSYMBOLS_NullPointerException", vmSymbols::java_lang_NullPointerException_enum);
+	print_def("VMSYMBOLS_AbstractMethodError", vmSymbols::java_lang_AbstractMethodError_enum);
+	print_def("VMSYMBOLS_IncompatibleClassChangeError", vmSymbols::java_lang_IncompatibleClassChangeError_enum);
+	print_def("VMSYMBOLS_InternalError", vmSymbols::java_lang_InternalError_enum);
+
+	return 0;
+}
+
+#endif // STATIC_OFFSETS
+
+#endif // __arm__
--- a/src/cpu/zero/vm/assembler_zero.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/cpu/zero/vm/assembler_zero.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -91,3 +91,11 @@
 address ShouldNotCallThisEntry() {
   return (address) should_not_call;
 }
+
+static void zero_null_fn() {
+  return;
+}
+
+address ZeroNullStubEntry(address fn) {
+  return (address) fn;
+}
--- a/src/cpu/zero/vm/assembler_zero.hpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/cpu/zero/vm/assembler_zero.hpp	Fri Aug 09 12:21:36 2013 +0100
@@ -65,5 +65,6 @@
 
 address ShouldNotCallThisStub();
 address ShouldNotCallThisEntry();
+address ZeroNullStubEntry(address fn);
 
 #endif // CPU_ZERO_VM_ASSEMBLER_ZERO_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/zero/vm/bytecodes_arm.def	Fri Aug 09 12:21:36 2013 +0100
@@ -0,0 +1,7847 @@
+@ Copyright 2009, 2010 Edward Nevill
+@ DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@
+@ This code is free software; you can redistribute it and/or modify it
+@ under the terms of the GNU General Public License version 2 only, as
+@ published by the Free Software Foundation.
+@
+@ This code is distributed in the hope that it will be useful, but WITHOUT
+@ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+@ FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+@ version 2 for more details (a copy is included in the LICENSE file that
+@ accompanied this code).
+@
+@ You should have received a copy of the GNU General Public License version
+@ 2 along with this work; if not, write to the Free Software Foundation,
+@ Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+
+#ifdef SHARK
+#define DISABLE_NOTICE_SAFEPOINTS
+#endif
+
+#ifndef DISABLE_HW_FP
+#define HW_FP
+#endif
+#ifndef DISABLE_NOTICE_SAFEPOINTS
+#define NOTICE_SAFEPOINTS
+#endif
+#ifndef DISABLE_FAST_BYTECODES
+#define FAST_BYTECODES
+#endif
+
+/*  WARNING: If you change any of these bytecodes, you must also
+    change the safe_dispatch_table in cppInterpreter_arm.S to make it
+    match.  */
+
+nop                  = 0x00, 1
+aconst_null          = 0x01, 1
+iconst_m1            = 0x02, 1
+iconst_0             = 0x03, 1
+iconst_1             = 0x04, 1
+iconst_2             = 0x05, 1
+iconst_3             = 0x06, 1
+iconst_4             = 0x07, 1
+iconst_5             = 0x08, 1
+lconst_0             = 0x09, 1
+lconst_1             = 0x0a, 1
+fconst_0             = 0x0b, 1
+fconst_1             = 0x0c, 1
+fconst_2             = 0x0d, 1
+dconst_0             = 0x0e, 1
+dconst_1             = 0x0f, 1
+bipush               = 0x10, 2
+sipush               = 0x11, 3
+ldc                  = 0x12, 2
+ldc_w                = 0x13, 3
+ldc2_w               = 0x14, 3
+iload                = 0x15, 2
+lload                = 0x16, 2
+fload                = 0x17, 2
+dload                = 0x18, 2
+aload                = 0x19, 2
+iload_0              = 0x1a, 1
+iload_1              = 0x1b, 1
+iload_2              = 0x1c, 1
+iload_3              = 0x1d, 1
+lload_0              = 0x1e, 1
+lload_1              = 0x1f, 1
+lload_2              = 0x20, 1
+lload_3              = 0x21, 1
+fload_0              = 0x22, 1
+fload_1              = 0x23, 1
+fload_2              = 0x24, 1
+fload_3              = 0x25, 1
+dload_0              = 0x26, 1
+dload_1              = 0x27, 1
+dload_2              = 0x28, 1
+dload_3              = 0x29, 1
+aload_0              = 0x2a, 1
+aload_1              = 0x2b, 1
+aload_2              = 0x2c, 1
+aload_3              = 0x2d, 1
+iaload               = 0x2e, 1
+laload               = 0x2f, 1
+faload               = 0x30, 1
+daload               = 0x31, 1
+aaload               = 0x32, 1
+baload               = 0x33, 1
+caload               = 0x34, 1
+saload               = 0x35, 1
+istore               = 0x36, 2
+lstore               = 0x37, 2
+fstore               = 0x38, 2
+dstore               = 0x39, 2
+astore               = 0x3a, 2
+istore_0             = 0x3b, 1
+istore_1             = 0x3c, 1
+istore_2             = 0x3d, 1
+istore_3             = 0x3e, 1
+lstore_0             = 0x3f, 1
+lstore_1             = 0x40, 1
+lstore_2             = 0x41, 1
+lstore_3             = 0x42, 1
+fstore_0             = 0x43, 1
+fstore_1             = 0x44, 1
+fstore_2             = 0x45, 1
+fstore_3             = 0x46, 1
+dstore_0             = 0x47, 1
+dstore_1             = 0x48, 1
+dstore_2             = 0x49, 1
+dstore_3             = 0x4a, 1
+astore_0             = 0x4b, 1
+astore_1             = 0x4c, 1
+astore_2             = 0x4d, 1
+astore_3             = 0x4e, 1
+iastore              = 0x4f, 1
+lastore              = 0x50, 1
+fastore              = 0x51, 1
+dastore              = 0x52, 1
+aastore              = 0x53, 1
+bastore              = 0x54, 1
+castore              = 0x55, 1
+sastore              = 0x56, 1
+pop                  = 0x57, 1
+pop2                 = 0x58, 1
+dup                  = 0x59, 1
+dup_x1               = 0x5a, 1
+dup_x2               = 0x5b, 1
+dup2                 = 0x5c, 1
+dup2_x1              = 0x5d, 1
+dup2_x2              = 0x5e, 1
+swap                 = 0x5f, 1
+iadd                 = 0x60, 1
+ladd                 = 0x61, 1
+fadd                 = 0x62, 1
+dadd                 = 0x63, 1
+isub                 = 0x64, 1
+lsub                 = 0x65, 1
+fsub                 = 0x66, 1
+dsub                 = 0x67, 1
+imul                 = 0x68, 1
+lmul                 = 0x69, 1
+fmul                 = 0x6a, 1
+dmul                 = 0x6b, 1
+idiv                 = 0x6c, 1
+ldiv                 = 0x6d, 1
+fdiv                 = 0x6e, 1
+ddiv                 = 0x6f, 1
+irem                 = 0x70, 1
+lrem                 = 0x71, 1
+frem                 = 0x72, 1
+drem                 = 0x73, 1
+ineg                 = 0x74, 1
+lneg                 = 0x75, 1
+fneg                 = 0x76, 1
+dneg                 = 0x77, 1
+ishl                 = 0x78, 1
+lshl                 = 0x79, 1
+ishr                 = 0x7a, 1
+lshr                 = 0x7b, 1
+iushr                = 0x7c, 1
+lushr                = 0x7d, 1
+iand                 = 0x7e, 1
+land                 = 0x7f, 1
+ior                  = 0x80, 1
+lor                  = 0x81, 1
+ixor                 = 0x82, 1
+lxor                 = 0x83, 1
+iinc                 = 0x84, 3
+i2l                  = 0x85, 1
+i2f                  = 0x86, 1
+i2d                  = 0x87, 1
+l2i                  = 0x88, 1
+l2f                  = 0x89, 1
+l2d                  = 0x8a, 1
+f2i                  = 0x8b, 1
+f2l                  = 0x8c, 1
+f2d                  = 0x8d, 1
+d2i                  = 0x8e, 1
+d2l                  = 0x8f, 1
+d2f                  = 0x90, 1
+i2b                  = 0x91, 1
+i2c                  = 0x92, 1
+i2s                  = 0x93, 1
+lcmp                 = 0x94, 1
+fcmpl                = 0x95, 1
+fcmpg                = 0x96, 1
+dcmpl                = 0x97, 1
+dcmpg                = 0x98, 1
+ifeq                 = 0x99, 0
+ifne                 = 0x9a, 0
+iflt                 = 0x9b, 0
+ifge                 = 0x9c, 0
+ifgt                 = 0x9d, 0
+ifle                 = 0x9e, 0
+if_icmpeq            = 0x9f, 0
+if_icmpne            = 0xa0, 0
+if_icmplt            = 0xa1, 0
+if_icmpge            = 0xa2, 0
+if_icmpgt            = 0xa3, 0
+if_icmple            = 0xa4, 0
+if_acmpeq            = 0xa5, 0
+if_acmpne            = 0xa6, 0
+goto                 = 0xa7, 0
+jsr                  = 0xa8, 0
+ret                  = 0xa9, 0
+tableswitch          = 0xaa, 0
+lookupswitch         = 0xab, 0
+ireturn              = 0xac, 0
+lreturn              = 0xad, 0
+freturn              = 0xae, 0
+dreturn              = 0xaf, 0
+areturn              = 0xb0, 0
+return               = 0xb1, 0
+getstatic            = 0xb2, 3
+putstatic            = 0xb3, 3
+getfield             = 0xb4, 3
+putfield             = 0xb5, 3
+invokevirtual        = 0xb6, 3
+invokespecial        = 0xb7, 3
+invokestatic         = 0xb8, 3
+invokeinterface      = 0xb9, 0
+invokedynamic        = 0xba, 0
+new                  = 0xbb, 3
+newarray             = 0xbc, 2
+anewarray            = 0xbd, 3
+arraylength          = 0xbe, 1
+athrow               = 0xbf, 0
+checkcast            = 0xc0, 3
+instanceof           = 0xc1, 3
+monitorenter         = 0xc2, 1
+monitorexit          = 0xc3, 1
+wide                 = 0xc4, 1
+multianewarray       = 0xc5, 4
+ifnull               = 0xc6, 0
+ifnonnull            = 0xc7, 0
+goto_w               = 0xc8, 0
+jsr_w                = 0xc9, 0
+breakpoint           = 0xca, 0
+
+#ifdef FAST_BYTECODES
+
+@agetfield	= 0xcb, 3
+bgetfield	= 0xcc, 3
+cgetfield	= 0xcd, 3
+@dgetfield	= 0xce, 3
+@fgetfield	= 0xcf, 3
+igetfield	= 0xd0, 3
+lgetfield	= 0xd1, 3
+sgetfield	= 0xd2, 3
+
+aputfield	= 0xd3, 3
+bputfield	= 0xd4, 3
+cputfield	= 0xd5, 3
+@dputfield	= 0xd6, 3
+@fputfield	= 0xd7, 3
+iputfield	= 0xd8, 3
+lputfield	= 0xd9, 3
+@sputfield	= 0xda, 3
+
+iaccess_0	= 0xdb, 4
+iaccess_1	= 0xdc, 4
+iaccess_2	= 0xdd, 4
+iaccess_3	= 0xde, 4
+
+invokeresolved		= 0xdf, 3
+invokespecialresolved	= 0xe0, 3
+invokestaticresolved	= 0xe1, 3
+invokevfinal 		= 0xe2, 3
+
+iload_iload	= 0xe3, 4
+iload_iload_N	= 0xe4, 3
+
+fast_aldc	= 0xe5, 2
+fast_aldc_w	= 0xe6, 3
+@return_register_finalizer = 0xe7, 1
+
+iload_0_iconst_N        = 0xe9, 2
+iload_1_iconst_N        = 0xea, 2
+iload_2_iconst_N        = 0xeb, 2
+iload_3_iconst_N        = 0xec, 2
+iload_iconst_N          = 0xed, 3
+iadd_istore_N           = 0xee, 2
+isub_istore_N           = 0xef, 2
+iand_istore_N           = 0xf0, 2
+ior_istore_N            = 0xf1, 2
+ixor_istore_N           = 0xf2, 2
+iadd_u4store            = 0xf3, 3
+isub_u4store            = 0xf4, 3
+iand_u4store            = 0xf5, 3
+ior_u4store             = 0xf6, 3
+ixor_u4store            = 0xf7, 3
+iload_0_iload           = 0xf8, 3
+iload_1_iload           = 0xf9, 3
+iload_2_iload           = 0xfa, 3
+iload_3_iload           = 0xfb, 3
+iload_0_iload_N         = 0xfc, 2
+iload_1_iload_N         = 0xfd, 2
+iload_2_iload_N         = 0xfe, 2
+iload_3_iload_N         = 0xff, 2
+
+#endif
+
+return_register_finalizer = 0xe7, 1
+
+(nop) {
+	DISPATCH	\seq_len
+}
+
+(aconst_null,fconst_0) u4const_0 {
+	DISPATCH_START_R2
+        mov     lr, #0
+	DISPATCH_NEXT
+	PUSH	lr
+	DISPATCH_FINISH
+}
+
+(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5) iconst_N {
+	sub	lr, r0, #opc_iconst_0
+	DISPATCH_START_R2
+	PUSH	lr
+	DISPATCH_FINISH
+}
+
+(lconst_0,dconst_0) u8const_0 {
+	DISPATCH_START_R2
+        mov     tmp1, #0
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(lconst_1) lconst_1 {
+	DISPATCH_START_R2
+        mov     r3, #1
+	DISPATCH_NEXT
+        mov     tmp1, #0
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r3, tmp1
+	DISPATCH_FINISH
+}
+
+(fconst_1) fconst_1 {
+	DISPATCH_START_R2
+        mov     tmp1, #0x3f800000
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(fconst_2) fconst_2 {
+	DISPATCH_START_R2
+        mov     r2, #0x40000000
+	DISPATCH_NEXT
+	PUSH	r2
+	DISPATCH_FINISH
+}
+
+(dconst_1) dconst_1 {
+	DISPATCH_START_R2
+        mov     tmp1, #0x3f000000
+	DISPATCH_NEXT
+        orr     tmp1, tmp1, #0x00f00000
+	DISPATCH_NEXT
+        mov     r3, #0
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r3, tmp1
+	DISPATCH_FINISH
+}
+
+(bipush) bipush {
+	DISPATCH_START	\seq_len
+	mov	tmp1, r2, lsl #24
+	DISPATCH_NEXT
+	mov	tmp1, tmp1, asr #24
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+@ r1 = [jpc, #2]
+(sipush) sipush {
+	ldrb	r1, [jpc, #2]
+	DISPATCH_START	\seq_len
+	DISPATCH_NEXT
+	mov	r2, r2, lsl #24
+        orr     tmp1, r1, r2, asr #16
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(iload,fload,aload) u4load {
+	DISPATCH_START	\seq_len
+	rsb	tmp1, r2, #0
+	DISPATCH_NEXT
+        ldr     tmp1, [locals, tmp1, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(lload,dload) u8load {
+	DISPATCH_START	\seq_len
+	sub	r3, locals, r2, lsl #2
+	DISPATCH_NEXT
+	ldmda	r3, {r3, tmp1}
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r3, tmp1
+	DISPATCH_FINISH
+}
+
+(aload_0,aload_1,aload_2,aload_3) {
+	rsb	tmp1, r0, #opc_aload_0
+	DISPATCH_START_R2
+	DISPATCH_NEXT
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3) {
+	rsb	tmp1, r0, #opc_iload_0
+	DISPATCH_START_R2
+	DISPATCH_NEXT
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(fload_0,fload_1,fload_2,fload_3) {
+	rsb	tmp1, r0, #opc_fload_0
+	DISPATCH_START_R2
+	DISPATCH_NEXT
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(lload_0,dload_0) u8load_0 {
+	DISPATCH_START_R2
+        ldmda   locals, {r3, tmp1}
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r3
+	DISPATCH_FINISH
+}
+
+(lload_1,dload_1) u8load_1 {
+	DISPATCH_START_R2
+        ldmdb   locals, {r3, tmp1}
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r3
+	DISPATCH_FINISH
+}
+
+(lload_2,dload_2) u8load_2 {
+	DISPATCH_START_R2
+	ldr	r3, [locals, #-12]
+	DISPATCH_NEXT
+	ldr	tmp1, [locals, #-8]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r3, tmp1
+	DISPATCH_FINISH
+}
+
+(lload_3,dload_3) u8load_3 {
+	DISPATCH_START_R2
+	ldr	r3, [locals, #-16]
+	DISPATCH_NEXT
+	ldr	tmp1, [locals, #-12]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r3, tmp1
+	DISPATCH_FINISH
+}
+
+(iaload,faload,aaload) u4aload {
+	GET_STACK	1, r3
+	DISPATCH_START_R2_R0
+	POP	r2
+	DISPATCH_START_R2_JPC
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry5:
+	ldr	lr, [r3, #8]		@ lr = length
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	DISPATCH_NEXT
+	add	r3, r3, r2, lsl #2
+	DISPATCH_NEXT
+	ldr	tmp1, [r3, #BASE_OFFSET_WORD]
+	DISPATCH_NEXT
+	PUT_STACK	0, tmp1
+	DISPATCH_FINISH
+}
+
+(laload,daload) u8aload {
+	GET_STACK	1, r3
+	DISPATCH_START_R2_R0
+	GET_STACK	0, r2
+	DISPATCH_START_R2_JPC
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry6:
+	ldr	lr, [r3, #8]		@ lr = length
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	DISPATCH_NEXT
+	add	r3, r3, r2, lsl #3
+	ldr	r1, [r3, #BASE_OFFSET_LONG]
+	DISPATCH_NEXT
+	ldr	tmp1, [r3, #20]
+	DISPATCH_NEXT
+	PUT_STACK	1, tmp1
+	PUT_STACK	0, r1
+	DISPATCH_FINISH
+}
+
+(baload) baload {
+	GET_STACK	1, r3
+	DISPATCH_START_R2_R0
+	POP	r2
+	DISPATCH_START_R2_JPC
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry7:
+	ldr	lr, [r3, #8]		@ lr = length
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	DISPATCH_NEXT
+	add	r3, r3, r2
+	DISPATCH_NEXT
+	ldrsb	tmp1, [r3, #BASE_OFFSET_BYTE]
+	DISPATCH_NEXT
+	PUT_STACK	0, tmp1
+	DISPATCH_FINISH
+}
+
+(caload) caload {
+	GET_STACK	1, r3
+	DISPATCH_START_R2_R0
+	POP	r2
+	DISPATCH_START_R2_JPC
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry8:
+	ldr	lr, [r3, #8]		@ lr = length
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	DISPATCH_NEXT
+	add	r3, r3, r2, lsl #1
+	DISPATCH_NEXT
+	ldrh	tmp1, [r3, #BASE_OFFSET_SHORT]
+	DISPATCH_NEXT
+	PUT_STACK	0, tmp1
+	DISPATCH_FINISH
+}
+
+(saload) saload {
+	GET_STACK	1, r3
+	DISPATCH_START_R2_R0
+	POP	r2
+	DISPATCH_START_R2_JPC
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry9:
+	ldr	lr, [r3, #8]		@ lr = length
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	DISPATCH_NEXT
+	add	r3, r3, r2, lsl #1
+	DISPATCH_NEXT
+	ldrsh	tmp1, [r3, #BASE_OFFSET_SHORT]
+	DISPATCH_NEXT
+	PUT_STACK	0, tmp1
+	DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(astore,istore,fstore) u4store {
+	DISPATCH_START	\seq_len
+	rsb	tmp1, r2, #0
+	DISPATCH_NEXT
+	POP	r3
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(lstore,dstore) u8store {
+	DISPATCH_START	\seq_len
+	POP	r1, tmp1
+	DISPATCH_NEXT
+	sub	r3, locals, r2, lsl #2
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	stmda	r3, {r1, tmp1}
+	DISPATCH_FINISH
+}
+
+(astore_0,istore_0,fstore_0) u4store_0 {
+	DISPATCH_START_R2
+	DISPATCH_NEXT
+	POP	tmp1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        str     tmp1, [locals, #0]
+	DISPATCH_FINISH
+}
+
+(astore_1,istore_1,fstore_1) u4store_1 {
+	DISPATCH_START_R2
+	DISPATCH_NEXT
+	POP	tmp1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        str     tmp1, [locals, #-4]
+	DISPATCH_FINISH
+}
+
+(astore_2,istore_2,fstore_2) u4store_2 {
+	DISPATCH_START_R2
+	DISPATCH_NEXT
+	POP	tmp1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        str     tmp1, [locals, #-8]
+	DISPATCH_FINISH
+}
+
+(astore_3,istore_3,fstore_3) u4store_3 {
+	DISPATCH_START_R2
+	DISPATCH_NEXT
+	POP	tmp1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        str     tmp1, [locals, #-12]
+	DISPATCH_FINISH
+}
+
+(lstore_0,dstore_0) u8store_0 {
+	DISPATCH_START_R2
+	DISPATCH_NEXT
+	POP	r1, tmp1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        stmda   locals, {r1, tmp1}
+	DISPATCH_FINISH
+}
+
+(lstore_1,dstore_1) u8store_1 {
+	DISPATCH_START_R2
+	DISPATCH_NEXT
+	POP	r1, tmp1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        stmdb   locals, {r1, tmp1}
+	DISPATCH_FINISH
+}
+
+(lstore_2,dstore_2) u8store_2 {
+	DISPATCH_START_R2
+	POP	r1, tmp1
+	DISPATCH_NEXT
+        str     r1, [locals, #-12]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        str     tmp1, [locals, #-8]
+	DISPATCH_FINISH
+}
+
+(lstore_3,dstore_3) u8store_3 {
+	DISPATCH_START_R2
+	POP	r1, tmp1
+	DISPATCH_NEXT
+        str     r1, [locals, #-16]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        str     tmp1, [locals, #-12]
+	DISPATCH_FINISH
+}
+
+(iastore,fastore) u4astore {
+	POP	r1, tmp1, lr		@ r1 = value, tmp1 = index, lr = arrayref
+	DISPATCH_START_R2
+	SW_NPC	cmp	lr, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry10:
+	ldr	ip, [lr, #8]		@ lr = limit
+	DISPATCH_NEXT
+	cmp	tmp1, ip
+	bcs	array_bound_exception_jpc_1_tmp1
+	DISPATCH_NEXT
+	add	lr, lr, tmp1, lsl #2
+	DISPATCH_NEXT
+	str	r1, [lr, #BASE_OFFSET_WORD]
+	DISPATCH_FINISH
+}
+
+(lastore,dastore) u8astore {
+	POP	r1, r3, tmp1, lr		@ r1,r3 = value, tmp1 = index, lr = arrayref
+	DISPATCH_START_R2
+	SW_NPC	cmp	lr, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry11:
+	ldr	ip, [lr, #8]		@ ip = limit
+	cmp	tmp1, ip
+	DISPATCH_NEXT
+	bcs	array_bound_exception_jpc_1_tmp1
+	DISPATCH_NEXT
+	add	tmp1, lr, tmp1, lsl #3
+	str	r1, [tmp1, #BASE_OFFSET_LONG]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	r3, [tmp1, #BASE_OFFSET_LONG+4]
+	DISPATCH_FINISH
+}
+
+(bastore) bastore {
+	POP	r3, tmp1, lr		@ r3 = value, tmp1 = index, lr = arrayref
+	DISPATCH_START_R2
+	SW_NPC	cmp	lr, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry12:
+	ldr	ip, [lr, #8]		@ ip = limit
+	DISPATCH_NEXT
+	cmp	tmp1, ip
+	bcs	array_bound_exception_jpc_1_tmp1
+	DISPATCH_NEXT
+	add	lr, lr, tmp1
+	DISPATCH_NEXT
+	strb	r3, [lr, #BASE_OFFSET_BYTE]
+	DISPATCH_FINISH
+}
+
+(castore,sastore) u2astore {
+	POP	r3, tmp1, lr		@ r3 = value, tmp1 = index, lr = arrayref
+	DISPATCH_START_R2
+	SW_NPC	cmp	lr, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry13:
+	ldr	ip, [lr, #8]		@ ip = limit
+	DISPATCH_NEXT
+	cmp	tmp1, ip
+	bcs	array_bound_exception_jpc_1_tmp1
+	DISPATCH_NEXT
+	add	lr, lr, tmp1, lsl #1
+	DISPATCH_NEXT
+	strh	r3, [lr, #BASE_OFFSET_SHORT]
+	DISPATCH_FINISH
+}
+
+(pop) jpop {
+	DISPATCH_START_R2
+        add     stack, stack, #4
+	DISPATCH_FINISH
+}
+
+(pop2) jpop2 {
+	DISPATCH_START_R2
+        add     stack, stack, #8
+	DISPATCH_FINISH
+}
+
+(dup) dup {
+	DISPATCH_START_R2
+	ldr	lr, [stack, #4]
+	DISPATCH_NEXT
+	PUSH	lr
+	DISPATCH_FINISH
+}
+
+(dup_x1) dup_x1 {
+	DISPATCH_START_R2
+	POP	r2, r3
+	DISPATCH_NEXT
+	PUSH	r2
+	DISPATCH_NEXT
+	PUSH	r2, r3
+	DISPATCH_FINISH
+}
+
+(dup_x2) dup_x2 {
+	DISPATCH_START_R2
+	POP	r2, r3, lr
+	DISPATCH_NEXT
+	PUSH	r2
+	DISPATCH_NEXT
+	PUSH	r2, r3, lr
+	DISPATCH_FINISH
+}
+
+(dup2) dup2 {
+	DISPATCH_START_R2
+	ldmib	stack, {r2, r3}
+	DISPATCH_NEXT
+	PUSH	r2, r3
+	DISPATCH_FINISH
+}
+
+(dup2_x1) dup2_x1 {
+	DISPATCH_START_R2
+	POP	r2, r3, lr
+	DISPATCH_NEXT
+	PUSH	r2, r3
+	DISPATCH_NEXT
+	PUSH	r2, r3, lr
+	DISPATCH_FINISH
+}
+
+(dup2_x2) dup2_x2 {
+	DISPATCH_START_R2
+	POP	r2, r3, tmp1, lr
+	DISPATCH_NEXT
+	PUSH	r2, r3
+	PUSH	r2, r3, tmp1, lr
+	DISPATCH_FINISH
+}
+
+(swap) swap {
+	DISPATCH_START_R2
+	POP	r2, r3
+	DISPATCH_NEXT
+	PUSH	r2
+	DISPATCH_NEXT
+	PUSH	r3
+	DISPATCH_FINISH
+}
+
+
+(iadd) iadd {
+	DISPATCH_START_R2
+	POP	r1, tmp1
+	DISPATCH_NEXT
+	add	r1, r1, tmp1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(ladd) ladd {
+	DISPATCH_START_R2
+	POP	r2, r3, tmp1, lr
+	DISPATCH_NEXT
+	adds	r2, r2, tmp1
+	adc	r3, r3, lr
+	DISPATCH_NEXT
+	PUSH	r2, r3
+	DISPATCH_FINISH
+}
+
+(isub) isub {
+	DISPATCH_START_R2
+	POP	r1, tmp1
+	DISPATCH_NEXT
+	sub	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(lsub) lsub {
+	DISPATCH_START_R2
+	POP	r2, r3, tmp1, lr
+	DISPATCH_NEXT
+	subs	r2, tmp1, r2
+	sbc	r3, lr, r3
+	DISPATCH_NEXT
+	PUSH	r2, r3
+	DISPATCH_FINISH
+}
+
+(imul) imul {
+	DISPATCH_START_R2
+	POP	r2, tmp1
+	DISPATCH_NEXT
+	mul	r1, r2, tmp1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(lmul) lmul {
+	POP	r1, tmp1, ip, lr
+        umull   r3, r0, ip, r1
+        mla     tmp1, ip, tmp1, r0
+	DISPATCH_START_R2
+        mla     tmp1, lr, r1, tmp1
+	DISPATCH_NEXT
+	PUSH	r3, tmp1
+	DISPATCH_FINISH
+}
+
+(ldiv) ldiv {
+	POP	r2, r3
+	POP	r0, r1
+	orrs	lr, r2, r3
+	beq	.ldiv_0
+	bl	__aeabi_ldivmod
+	PUSH	r0, r1
+	DISPATCH	\seq_len
+}
+
+(lrem) lrem {
+	POP	r2, r3
+	POP	r0, r1
+	orrs	lr, r2, r3
+	beq	.lrem_0
+	bl	__aeabi_ldivmod
+	PUSH	r2, r3
+	DISPATCH	\seq_len
+}
+
+(frem) frem {
+	POPF1
+	POPF0
+        bl      fmodf
+	PUSHF0
+	DISPATCH	\seq_len
+}
+
+(drem) drem {
+	POPD1
+        POPD0
+        bl      fmod
+	PUSHD0
+	DISPATCH	\seq_len
+}
+
+(ineg) ineg {
+	DISPATCH_START_R2
+	POP	tmp1
+	DISPATCH_NEXT
+        rsb     tmp1, tmp1, #0
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(lneg) lneg {
+	DISPATCH_START_R2
+	POP	r2, r3
+	DISPATCH_NEXT
+	rsbs	r2, r2, #0
+	rsc	r3, r3, #0
+	DISPATCH_NEXT
+	PUSH	r2, r3
+	DISPATCH_FINISH
+}
+
+(fneg) fneg {
+	DISPATCH_START_R2
+	POP	r2
+	DISPATCH_NEXT
+	eor	r2, r2, #0x80000000
+	DISPATCH_NEXT
+	PUSH	r2
+	DISPATCH_FINISH
+}
+
+(dneg) dneg {
+	DISPATCH_START_R2
+	GET_STACK	1, r2
+	DISPATCH_NEXT
+	eor	r2, r2, #0x80000000
+	DISPATCH_NEXT
+	PUT_STACK	1, r2
+	DISPATCH_FINISH
+}
+
+(ishl) ishl {
+	DISPATCH_START_R2
+	POP	r2, r3
+	DISPATCH_NEXT
+	and	r2, r2, #31
+	mov	r2, r3, lsl r2
+	DISPATCH_NEXT
+	PUSH	r2
+	DISPATCH_FINISH
+}
+
+(lshl) lshl {
+	DISPATCH_START_R2
+	POP	r2, r3, lr
+	DISPATCH_NEXT
+	tst	r2, #32
+	and	r2, r2, #31
+	movne	tmp1, #0
+	movne	lr, r3, lsl r2
+	moveq	tmp1, r3, lsl r2
+	moveq	lr, lr, lsl r2
+	rsbeq	r2, r2, #32
+	orreq	lr, lr, r3, lsr r2
+	PUSH	tmp1, lr
+	DISPATCH_FINISH
+}
+
+(ishr) ishr {
+	DISPATCH_START_R2
+	POP	r2, r3
+	DISPATCH_NEXT
+	and	r2, r2, #31
+	mov	r2, r3, asr r2
+	DISPATCH_NEXT
+	PUSH	r2
+	DISPATCH_FINISH
+}
+
+(lshr) lshr {
+	DISPATCH_START_R2
+	POP	r2, r3, tmp1
+	DISPATCH_NEXT
+	tst	r2, #32
+	and	r2, r2, #31
+	movne	lr, tmp1, asr #31
+	movne	r3, tmp1, asr r2
+	moveq	lr, tmp1, asr r2
+	moveq	r3, r3, lsr r2
+	rsbeq	r2, r2, #32
+	orreq	r3, r3, tmp1, lsl r2
+	PUSH	r3, lr
+	DISPATCH_FINISH
+}
+
+(iushr) iushr {
+	DISPATCH_START_R2
+	POP	r2, r3
+	DISPATCH_NEXT
+	and	r2, r2, #31
+	mov	r2, r3, lsr r2
+	DISPATCH_NEXT
+	PUSH	r2
+	DISPATCH_FINISH
+}
+
+(lushr) lushr {
+	DISPATCH_START_R2
+	POP	r2, r3, tmp1
+	DISPATCH_NEXT
+	tst	r2, #32
+	and	r2, r2, #31
+	movne	lr, #0
+	movne	r3, tmp1, lsr r2
+	moveq	lr, tmp1, lsr r2
+	moveq	r3, r3, lsr r2
+	rsbeq	r2, r2, #32
+	orreq	r3, r3, tmp1, lsl r2
+	PUSH	r3, lr
+	DISPATCH_FINISH
+}
+
+(iand) iand {
+	DISPATCH_START_R2
+	POP	r1, tmp1
+	DISPATCH_NEXT
+	and	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(land) land {
+	DISPATCH_START_R2
+	POP	r2, r3, tmp1, lr
+	DISPATCH_NEXT
+	and	r2, tmp1, r2
+	and	r3, lr, r3
+	DISPATCH_NEXT
+	PUSH	r2, r3
+	DISPATCH_FINISH
+}
+
+(ior) ior {
+	DISPATCH_START_R2
+	POP	r1, tmp1
+	DISPATCH_NEXT
+	orr	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(lor) lor {
+	DISPATCH_START_R2
+	POP	r2, r3, tmp1, lr
+	DISPATCH_NEXT
+	orr	r2, tmp1, r2
+	orr	r3, lr, r3
+	DISPATCH_NEXT
+	PUSH	r2, r3
+	DISPATCH_FINISH
+}
+
+(ixor) ixor {
+	DISPATCH_START_R2
+	POP	r1, tmp1
+	DISPATCH_NEXT
+	eor	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(lxor) lxor {
+	DISPATCH_START_R2
+	POP	r2, r3, tmp1, lr
+	DISPATCH_NEXT
+	eor	r2, tmp1, r2
+	eor	r3, lr, r3
+	DISPATCH_NEXT
+	PUSH	r2, r3
+	DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(iinc) iinc {
+	ldrsb	lr, [jpc, #2]
+	DISPATCH_START	\seq_len
+	rsb	tmp1, r2, #0
+	DISPATCH_NEXT
+	ldr	r3, [locals, tmp1, lsl #2]
+	DISPATCH_NEXT
+	add	r3, r3, lr
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(i2l) i2l {
+	DISPATCH_START_R2
+	POP	r2
+	DISPATCH_NEXT
+	mov	r3, r2, asr #31
+	DISPATCH_NEXT
+	PUSH	r2, r3
+	DISPATCH_FINISH
+}
+
+(i2f) i2f {
+	POP	r0
+        bl      __aeabi_i2f
+	PUSH	r0
+	DISPATCH	\seq_len
+}
+
+(i2d) i2d {
+	POP	r0
+        bl      __aeabi_i2d
+	PUSH	r0, r1
+	DISPATCH	\seq_len
+}
+
+(l2i) l2i {
+	DISPATCH_START_R2
+	POP	r3
+	DISPATCH_NEXT
+	PUT_STACK	0, r3
+	DISPATCH_FINISH
+}
+
+(l2f) l2f {
+	POP	r0, r1
+        bl      __aeabi_l2f
+	PUSH	r0
+	DISPATCH	\seq_len
+}
+
+(l2d) l2d {
+	POP	r0, r1
+        bl      __aeabi_l2d
+	PUSH	r0, r1
+	DISPATCH	\seq_len
+}
+
+(f2i) f2i {
+	POPF0
+        bl      _ZN13SharedRuntime3f2iEf
+	PUSH	r0
+	DISPATCH	\seq_len
+}
+
+(f2l) f2l {
+	POPF0
+        bl      _ZN13SharedRuntime3f2lEf
+	PUSH	r0, r1
+	DISPATCH	\seq_len
+}
+
+(f2d) f2d {
+	POP	r0
+        bl      __aeabi_f2d
+	PUSH	r0, r1
+	DISPATCH	\seq_len
+}
+
+(d2i) d2i {
+	POPD0
+        bl      _ZN13SharedRuntime3d2iEd
+	PUSH	r0
+	DISPATCH	\seq_len
+}
+
+(d2l) d2l {
+	POPD0
+        bl      _ZN13SharedRuntime3d2lEd
+	PUSH	r0, r1
+	DISPATCH	\seq_len
+}
+
+(d2f) d2f {
+	POP	r0, r1
+        bl      __aeabi_d2f
+	PUSH	r0
+	DISPATCH	\seq_len
+}
+
+(i2b) i2b {
+	DISPATCH_START_R2
+	POP	r3
+	DISPATCH_NEXT
+        mov     r3, r3, asl #24
+        mov     r3, r3, asr #24
+	DISPATCH_NEXT
+	PUSH	r3
+	DISPATCH_FINISH
+}
+
+(i2c) i2c {
+	DISPATCH_START_R2
+	POP	r3
+	DISPATCH_NEXT
+        mov     r3, r3, asl #16
+        mov     r3, r3, lsr #16
+	DISPATCH_NEXT
+	PUSH	r3
+	DISPATCH_FINISH
+}
+
+(i2s) i2s {
+	DISPATCH_START_R2
+	POP	r3
+	DISPATCH_NEXT
+        mov     r3, r3, asl #16
+        mov     r3, r3, asr #16
+	DISPATCH_NEXT
+	PUSH	r3
+	DISPATCH_FINISH
+}
+
+(lcmp) lcmp {
+	POP	r1, r3, tmp1, lr
+	DISPATCH_START_R2
+	subs	r1, tmp1, r1
+	movne	r1, #1
+	sbcs	lr, lr, r3
+	DISPATCH_NEXT
+	movne	r1, #1
+	rsblt	r1, r1, #0
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+#ifdef NOTICE_SAFEPOINTS
+
+@ r2 = [jpc, #1]
+@ r1 = [jpc, #2]
+(ifeq,ifnull) ifeq_unsafe {
+	POP	r3
+	ldrb	r1, [jpc, #2]
+        cmp     r3, #0
+	beq	branch_taken_unsafe
+	DISPATCH 3
+}
+
+(ifne,ifnonnull) ifne_unsafe {
+	POP	r3
+	ldrb	r1, [jpc, #2]
+        cmp     r3, #0
+	bne	branch_taken_unsafe
+	DISPATCH 3
+}
+
+(iflt) iflt_unsafe {
+	POP	r3
+	ldrb	r1, [jpc, #2]
+        cmp     r3, #0
+	blt	branch_taken_unsafe
+	DISPATCH 3
+}
+
+(ifge) ifge_unsafe {
+	POP	r3
+	ldrb	r1, [jpc, #2]
+        cmp     r3, #0
+	bge	branch_taken_unsafe
+	DISPATCH 3
+}
+
+(ifgt) ifgt_unsafe {
+	POP	r3
+	ldrb	r1, [jpc, #2]
+        cmp     r3, #0
+	bgt	branch_taken_unsafe
+	DISPATCH 3
+}
+
+(ifle) ifle_unsafe {
+	POP	r3
+	ldrb	r1, [jpc, #2]
+        cmp     r3, #0
+	ble	branch_taken_unsafe
+	DISPATCH 3
+}
+
+(if_icmpeq,if_acmpeq) if_icmpeq_unsafe {
+	POP	r3, tmp1
+	ldrb	r1, [jpc, #2]
+        cmp     tmp1, r3
+	beq	branch_taken_unsafe
+	DISPATCH 3
+}
+
+(if_icmpne,if_acmpne) if_icmpne_unsafe {
+	POP	r3, tmp1
+	ldrb	r1, [jpc, #2]
+        cmp     tmp1, r3
+	bne	branch_taken_unsafe
+	DISPATCH 3
+}
+
+(if_icmplt) if_icmplt_unsafe {
+	POP	r3, tmp1
+	ldrb	r1, [jpc, #2]
+        cmp     tmp1, r3
+	blt	branch_taken_unsafe
+	DISPATCH 3
+}
+
+(if_icmpge) if_icmpge_unsafe {
+	POP	r3, tmp1
+	ldrb	r1, [jpc, #2]
+        cmp     tmp1, r3
+	bge	branch_taken_unsafe
+	DISPATCH 3
+}
+
+(if_icmpgt) if_icmpgt_unsafe {
+	POP	r3, tmp1
+	ldrb	r1, [jpc, #2]
+        cmp     tmp1, r3
+	bgt	branch_taken_unsafe
+	DISPATCH 3
+}
+
+(if_icmple) if_icmple_unsafe {
+	POP	r3, tmp1
+	ldrb	r1, [jpc, #2]
+        cmp     tmp1, r3
+	ble	branch_taken_unsafe
+	DISPATCH 3
+}
+
+(goto) goto_unsafe {
+	ldrb	r1, [jpc, #2]
+	mov	r2, r2, lsl #24
+        orr     tmp1, r1, r2, asr #16
+        DISPATCH_START_REG	tmp1
+  USEC  cmp     tmp1, #0
+  USEC  ble     do_backedge
+	DISPATCH_BYTECODE
+}
+
+#endif // NOTICE_SAFEPOINTS
+
+(jsr) jsr {
+	ldr	r3, [istate, #ISTATE_METHOD]
+	ldr	r1, [r3, #METHOD_CONSTMETHOD]
+	rsb	r2, r1, jpc
+	sub	r2, r2, #CONSTMETHOD_CODEOFFSET - 3
+	PUSH	r2
+	b	do_goto
+}
+
+@ r2 = [jpc, #1]
+@ r1 = [jpc, #2]
+(ret) ret {
+	ldr	r0, [istate, #ISTATE_METHOD]
+	ldr	r3, [r0, #METHOD_CONSTMETHOD]
+	ldr	r1, [locals, -r2, lsl #2]
+	add	jpc, r3, r1
+	DISPATCH	CONSTMETHOD_CODEOFFSET
+}
+
+@ We dont do safe and unsafe versions of tableswitch and lookupswitch
+(tableswitch) tableswitch {
+	POP	a2
+        bic     a1, jpc, #3
+        ldr     a4,[a1,#8]
+@       BYTESEX_REVERSE a3, a4, a3
+
+        eor     a3, a4, a4, ror #16
+        bic     a3, a3, #0xff0000
+        mov     a4, a4, ror #8
+        eor     a3, a4, a3, lsr #8
+
+        ldr     a4,[a1,#12]
+@       BYTESEX_REVERSE a4, a4, ip
+
+        eor     ip, a4, a4, ror #16
+        bic     ip, ip, #0xff0000
+        mov     a4, a4, ror #8
+        eor     a4, a4, ip, lsr #8
+
+        sub     a2,a2,a3
+        sub     a4,a4,a3
+        cmp     a4,a2
+        ldrcc   a1,[a1,#4]
+        addcs   a1,a1,a2,LSL #2
+        ldrcs   a1,[a1,#16]
+@       BYTESEX_REVERSE a1, a1, a4
+
+        eor     a4, a1, a1, ror #16
+        bic     a4, a4, #0xff0000
+        mov     a1, a1, ror #8
+        eors    ip, a1, a4, lsr #8
+
+        DISPATCH_START_REG	ip
+	DISPATCH_BYTECODE
+}
+
+(lookupswitch) lookupswitch {
+	POP	a2
+        bic     a1, jpc, #3
+@       BYTESEX_REVERSE a2, a2, ip
+
+        eor     ip, a2, a2, ror #16
+        bic     ip, ip, #0xff0000
+        mov     a2, a2, ror #8
+        eor     a2, a2, ip, lsr #8
+
+        ldr     a3,[a1,#8]
+@       BYTESEX_REVERSE a3, a3, ip
+
+        eor     ip, a3, a3, ror #16
+        bic     ip, ip, #0xff0000
+        mov     a3, a3, ror #8
+        eor     a3, a3, ip, lsr #8
+
+        subs    a4,a3,#1
+        bmi     1f
+        add     a1, a1, #4
+0:
+        ldr      a3,[a1,#8]!
+        cmp     a3,a2
+        beq     2f
+        subs    a4,a4,#1
+        bpl     0b
+1:
+        bic     a1, jpc, #3
+2:
+        ldr      a2,[a1,#4]
+@       BYTESEX_REVERSE a2, a2, ip
+
+        eor     ip, a2, a2, ror #16
+        bic     ip, ip, #0xff0000
+        mov     a2, a2, ror #8
+        eors    ip, a2, ip, lsr #8
+
+        DISPATCH_START_REG	ip
+	DISPATCH_BYTECODE
+}
+
+#ifdef FAST_BYTECODES
+(igetfield) igetfield {
+	ldrb	r1, [jpc, #2]
+	DISPATCH_START	3
+	POP	tmp1
+        add     r1, constpool, r1, lsl #12
+	add	r1, r1, r2, lsl #4
+	DISPATCH_NEXT
+	SW_NPC	cmp	tmp1, #0
+	SW_NPC	beq	null_ptr_exception_jpc_3
+	GO_IF_VOLATILE	r3, r1, 3f
+	ldr	r1, [r1, #CP_OFFSET+8]
+	DISPATCH_NEXT
+.abortentry78:
+	ldr	r1, [tmp1, r1]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+3:
+	VOLATILE_VERSION
+	ldr	r1, [r1, #CP_OFFSET+8]
+	DISPATCH_NEXT
+.abortentry78_v:
+	ldr	r1, [tmp1, r1]
+	FullBarrier
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(bgetfield) bgetfield {
+	ldrb	r1, [jpc, #2]
+	DISPATCH_START	3
+	POP	tmp1
+        add     r1, constpool, r1, lsl #12
+	add	r1, r1, r2, lsl #4
+	DISPATCH_NEXT
+	SW_NPC	cmp	tmp1, #0
+	SW_NPC	beq	null_ptr_exception_jpc_3
+	GO_IF_VOLATILE	r3, r1, 3f
+	ldr	r1, [r1, #CP_OFFSET+8]
+	DISPATCH_NEXT
+.abortentry79:
+	ldrsb	r1, [tmp1, r1]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+3:
+	VOLATILE_VERSION
+	ldr	r1, [r1, #CP_OFFSET+8]
+	DISPATCH_NEXT
+.abortentry79_v:
+	ldrsb	r1, [tmp1, r1]
+	FullBarrier
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(cgetfield) cgetfield {
+	ldrb	r1, [jpc, #2]
+	DISPATCH_START	3
+	POP	tmp1
+        add     r1, constpool, r1, lsl #12
+	add	r1, r1, r2, lsl #4
+	DISPATCH_NEXT
+	SW_NPC	cmp	tmp1, #0
+	SW_NPC	beq	null_ptr_exception_jpc_3
+	GO_IF_VOLATILE	r3, r1, 3f
+	ldr	r1, [r1, #CP_OFFSET+8]
+	DISPATCH_NEXT
+.abortentry80:
+	ldrh	r1, [tmp1, r1]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+3:
+	VOLATILE_VERSION
+	ldr	r1, [r1, #CP_OFFSET+8]
+	DISPATCH_NEXT
+.abortentry80_v:
+	ldrh	r1, [tmp1, r1]
+	FullBarrier
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(sgetfield) sgetfield {
+	ldrb	r1, [jpc, #2]
+	DISPATCH_START	3
+	POP	tmp1
+        add     r1, constpool, r1, lsl #12
+	add	r1, r1, r2, lsl #4
+	DISPATCH_NEXT
+	SW_NPC	cmp	tmp1, #0
+	SW_NPC	beq	null_ptr_exception_jpc_3
+	GO_IF_VOLATILE	r3, r1, 3f
+	ldr	r1, [r1, #CP_OFFSET+8]
+	DISPATCH_NEXT
+.abortentry81:
+	ldrsh	r1, [tmp1, r1]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+3:
+	VOLATILE_VERSION
+	ldr	r1, [r1, #CP_OFFSET+8]
+	DISPATCH_NEXT
+.abortentry81_v:
+	ldrsh	r1, [tmp1, r1]
+	FullBarrier
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(lgetfield) lgetfield {
+	ldrb	r1, [jpc, #2]
+	DISPATCH_START	3
+	POP	tmp1
+        add     r1, constpool, r1, lsl #12
+	add	r1, r1, r2, lsl #4
+	DISPATCH_NEXT
+	SW_NPC	cmp	tmp1, #0
+	SW_NPC	beq	null_ptr_exception_jpc_3
+	GO_IF_VOLATILE	r3, r1, 3f
+	ldr	r1, [r1, #CP_OFFSET+8]
+	DISPATCH_NEXT
+	add	r1, tmp1, r1
+	DISPATCH_NEXT
+.abortentry82:
+	ldmia	r1, {r1, tmp1}
+	DISPATCH_NEXT
+	PUSH	r1, tmp1
+	DISPATCH_FINISH
+3:
+	VOLATILE_VERSION
+	ldr	r1, [r1, #CP_OFFSET+8]
+	add	r1, tmp1, r1
+#ifndef	__ARM_ARCH_7A__
+.abortentry82_v:
+	ldmia	r1, {r2, r3}
+#else
+.abortentry82_v:
+	ldrexd	r2, r3 , [r1]
+#endif
+	// Be very careful here: you must be certain that
+	// DISPATCH_NEXT does not corrupt R2 or R3.
+	DISPATCH_NEXT
+	FullBarrier
+	PUSH	r2, r3
+	DISPATCH_FINISH
+}
+
+(iputfield) iputfield {
+	ldrb	r1, [jpc, #2]
+	DISPATCH_START	3
+	POP	r3, tmp1		@ r3 = value, tmp1 = object
+        add     r1, constpool, r1, lsl #12
+	add	r1, r1, r2, lsl #4
+	DISPATCH_NEXT
+	SW_NPC	cmp	tmp1, #0
+	SW_NPC	beq	null_ptr_exception_jpc_3
+	GO_IF_VOLATILE	r2, r1, 3f
+	ldr	r1, [r1, #CP_OFFSET+8]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+.abortentry83:
+	str	r3, [tmp1, r1]
+	DISPATCH_FINISH
+3:
+	VOLATILE_VERSION
+	ldr	r1, [r1, #CP_OFFSET+8]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	StoreStoreBarrier
+.abortentry83_v:
+	str	r3, [tmp1, r1]
+	StoreLoadBarrier
+	DISPATCH_FINISH
+}
+
+(cputfield) cputfield {
+	ldrb	r1, [jpc, #2]
+	DISPATCH_START	3
+	POP	r3, tmp1		@ r3 = value, tmp1 = object
+        add     r1, constpool, r1, lsl #12
+	add	r1, r1, r2, lsl #4
+	DISPATCH_NEXT
+	SW_NPC	cmp	tmp1, #0
+	SW_NPC	beq	null_ptr_exception_jpc_3
+	GO_IF_VOLATILE	r2, r1, 3f
+	ldr	r1, [r1, #CP_OFFSET+8]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+.abortentry84:
+	strh	r3, [tmp1, r1]
+	DISPATCH_FINISH
+3:
+	VOLATILE_VERSION
+	ldr	r1, [r1, #CP_OFFSET+8]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	StoreStoreBarrier
+.abortentry84_v:
+	strh	r3, [tmp1, r1]
+	StoreLoadBarrier
+	DISPATCH_FINISH
+}
+
+(bputfield) bputfield {
+	ldrb	r1, [jpc, #2]
+	DISPATCH_START	3
+	POP	r3, tmp1		@ r3 = value, tmp1 = object
+        add     r1, constpool, r1, lsl #12
+	add	r1, r1, r2, lsl #4
+	DISPATCH_NEXT
+	SW_NPC	cmp	tmp1, #0
+	SW_NPC	beq	null_ptr_exception_jpc_3
+	GO_IF_VOLATILE	r2, r1, 3f
+	ldr	r1, [r1, #CP_OFFSET+8]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+.abortentry85:
+	strb	r3, [tmp1, r1]
+	DISPATCH_FINISH
+3:
+	VOLATILE_VERSION
+	ldr	r1, [r1, #CP_OFFSET+8]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	StoreStoreBarrier
+.abortentry85_v:
+	strb	r3, [tmp1, r1]
+	StoreLoadBarrier
+	DISPATCH_FINISH
+}
+
+(aputfield) aputfield {
+	ldrb	r1, [jpc, #2]
+	POP	r3, tmp1		@ r3 = value, tmp1 = object
+        add     r1, constpool, r1, lsl #12
+	add	r1, r1, r2, lsl #4
+	SW_NPC	cmp	tmp1, #0
+	SW_NPC	beq	null_ptr_exception_jpc_3
+	GO_IF_VOLATILE	r2, r1, 3f
+	ldr	r1, [r1, #CP_OFFSET+8]
+.abortentry113:
+	str	r3, [tmp1, r1]
+	mov	r0, tmp1
+	bl	Helper_aputfield
+	DISPATCH 3
+3:
+	VOLATILE_VERSION
+	ldr	r1, [r1, #CP_OFFSET+8]
+	StoreStoreBarrier
+.abortentry113_v:
+	str	r3, [tmp1, r1]
+	StoreLoadBarrier
+	mov	r0, tmp1
+	bl	Helper_aputfield
+	DISPATCH 3
+}
+
+(lputfield) lputfield {
+	ldrb	r1, [jpc, #2]
+	POP	r3, tmp1, lr		@ r3, tmp1 = value, lr = object
+        add     r1, constpool, r1, lsl #12
+	add	r1, r1, r2, lsl #4
+	SW_NPC	cmp	lr, #0
+	SW_NPC	beq	null_ptr_exception_jpc_3
+	GO_IF_VOLATILE	r2, r1, 3f
+	DISPATCH_START	3
+	ldr	r1, [r1, #CP_OFFSET+8]
+	DISPATCH_NEXT
+	add	r1, lr, r1
+	DISPATCH_NEXT
+.abortentry86:
+	stm	r1, {r3, tmp1}
+	DISPATCH_FINISH
+3:
+	VOLATILE_VERSION
+	ldr	r1, [r1, #CP_OFFSET+8]
+	add	r1, lr, r1
+	StoreStoreBarrier
+#ifndef	__ARM_ARCH_7A__
+.abortentry86_v:
+	stm	r1, {r3, tmp1}
+#else
+	mov	ip, r1
+	mov	r1, r3
+	// Data in tmp1 & r1, address in ip, r2 & r3 scratch
+	mov	r0, r1
+	mov	r1, tmp1
+.abortentry86_v:
+	ldrexd	r2, r3, [ip]
+	strexd	r2, r0, r1, [ip]
+	teq	r2, #0
+	bne	.abortentry86_v
+#endif
+	DISPATCH_START	3
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	StoreLoadBarrier
+	DISPATCH_FINISH
+}
+
+#endif // FAST_BYTECODES
+
+@ r2 = [jpc, #1]
+@ r1 = [jpc, #2]
+(getstatic) getstatic {
+	ldrb	r1, [jpc, #2]
+        add     tmp1, constpool, r1, lsl #12
+	add	tmp1, tmp1, r2, lsl #4
+        ldr     r3, [tmp1, #CP_OFFSET]
+	and	r3, r3, #0x00ff0000
+	cmp	r3, #opc_getstatic << 16
+	blne	resolve_get_put
+	GO_IF_VOLATILE	r2, tmp1, 3f
+	ldr	r3, [tmp1, #CP_OFFSET+4]
+	ldr	r2, [tmp1, #CP_OFFSET+12]
+        ldr     lr, [tmp1, #CP_OFFSET+8]
+        movs    r2, r2, lsr #29
+	bhi	getstatic_w		@ C = 1, Z = 0 => R2 == 3, 5, 7
+	bcs	getstatic_h		@ C = 1 => R2 = 1
+	beq	getstatic_sb		@ Z = 1 => R2 = 0
+	tst	r2, #2
+	bne	getstatic_dw
+	b	getstatic_sh
+3:
+	VOLATILE_VERSION
+	ldr	r3, [tmp1, #CP_OFFSET+4]
+	ldr	r2, [tmp1, #CP_OFFSET+12]
+        ldr     lr, [tmp1, #CP_OFFSET+8]
+        movs    r2, r2, lsr #29
+	bhi	getstatic_volatile_w		@ C = 1, Z = 0 => R2 == 3, 5, 7
+	bcs	getstatic_volatile_h		@ C = 1 => R2 = 1
+	beq	getstatic_volatile_sb		@ Z = 1 => R2 = 0
+	tst	r2, #2
+	bne	getstatic_volatile_dw
+	b	getstatic_volatile_sh
+}
+
+@ r2 = [jpc, #1]
+@ r1 = [jpc, #2]
+(putstatic) putstatic {
+	ldrb	r1, [jpc, #2]
+        add     tmp1, constpool, r1, lsl #12
+	add	tmp1, tmp1, r2, lsl #4
+        ldr     r3, [tmp1, #CP_OFFSET]
+        and     r3, r3, #0xff000000
+        cmp     r3, #opc_putstatic << 24
+	blne	resolve_get_put
+	GO_IF_VOLATILE	r2, tmp1, 3f
+	ldr	r3, [tmp1, #CP_OFFSET+4]		@ r3 = object
+        ldr     lr, [tmp1, #CP_OFFSET+12]           @ lr = tos_type
+        ldr     r2, [tmp1, #CP_OFFSET+8]            @ r2 = offset
+	movs	lr, lr, lsr #29
+	bhi	putstatic_w		@ C = 1, Z = 0 => LR == 3, 5, 7
+	bcs	putstatic_h		@ C = 1 => LR = 1
+	beq	putstatic_sb		@ Z = 1 => LR = 0
+	tst	lr, #2
+	bne	putstatic_dw
+	b	putstatic_sh
+3:
+	VOLATILE_VERSION
+	ldr	r3, [tmp1, #CP_OFFSET+4]		@ r3 = object
+        ldr     lr, [tmp1, #CP_OFFSET+12]           @ lr = tos_type
+        ldr     r2, [tmp1, #CP_OFFSET+8]            @ r2 = offset
+	movs	lr, lr, lsr #29
+	bhi	putstatic_volatile_w		@ C = 1, Z = 0 => LR == 3, 5, 7
+	bcs	putstatic_volatile_h		@ C = 1 => LR = 1
+	beq	putstatic_volatile_sb		@ Z = 1 => LR = 0
+	tst	lr, #2
+	bne	putstatic_volatile_dw
+	b	putstatic_volatile_sh
+}
+
+#ifdef NOTICE_SAFEPOINTS
+
+(return) return_unsafe {
+
+	ldr	r9, [istate, #ISTATE_MONITOR_BASE]	@ r9 = base
+	ldr	tmp1, [istate, #ISTATE_STACK_BASE]	@ tmp1 = end
+
+	cmp	tmp1, r9
+	bcc	1f
+2:
+	mov	r3, #0
+
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	str	r3, [thread, #THREAD_LAST_JAVA_SP]
+	ldr	r0, [istate, #ISTATE_METHOD]
+	ldr	r3, [r2, #0]
+	ldrh	r0, [r0, #40]
+	add	r1, r2, #4
+	str	r3, [thread, #THREAD_TOP_ZERO_FRAME]
+
+	add	stack, r1, r0, lsl #2
+
+	str	stack, [thread, #THREAD_JAVA_SP]
+
+	mov	r0, #0	@ deoptimized_frames = 0
+	ldmfd	arm_sp!, {fast_regset, pc}
+1:
+	bl	return_check_monitors
+	b	2b
+}
+
+(ireturn,areturn,freturn) ireturn_unsafe {
+
+	ldr	r9, [istate, #ISTATE_MONITOR_BASE]	@ r9 = base
+	ldr	tmp1, [istate, #ISTATE_STACK_BASE]	@ tmp1 = end
+
+	cmp	tmp1, r9
+	bcc	1f
+2:
+	mov	r3, #0
+
+	POP	r1					@ pop result before we lose stack
+
+	ldr	stack, [thread, #THREAD_TOP_ZERO_FRAME]
+	str	r3, [thread, #THREAD_LAST_JAVA_SP]
+	ldr	r0, [istate, #ISTATE_METHOD]
+	ldr	r3, [stack, #0]
+	ldrh	r0, [r0, #40]
+
+	str	r3, [thread, #THREAD_TOP_ZERO_FRAME]
+	str	r1, [stack, r0, lsl #2]!
+
+	str	stack, [thread, #THREAD_JAVA_SP]
+
+	mov	r0, #0	@ deoptimized_frames = 0
+	ldmfd	arm_sp!, {fast_regset, pc}
+1:
+	bl	return_check_monitors
+	b	2b
+}
+
+(lreturn,dreturn) lreturn_unsafe {
+
+	ldr	r9, [istate, #ISTATE_MONITOR_BASE]	@ r9 = base
+	ldr	tmp1, [istate, #ISTATE_STACK_BASE]	@ tmp1 = end
+	cmp	tmp1, r9
+	bcc	1f
+2:
+	mov	r3, #0
+
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	str	r3, [thread, #THREAD_LAST_JAVA_SP]
+	ldr	r0, [istate, #ISTATE_METHOD]
+	ldr	r3, [r2, #0]
+	ldrh	r0, [r0, #40]
+	add	r1, r2, #4
+	str	r3, [thread, #THREAD_TOP_ZERO_FRAME]
+
+	POP	r2, r3
+
+	add	stack, r1, r0, lsl #2
+	stmdb	stack!, {r2, r3}
+
+	str	stack, [thread, #THREAD_JAVA_SP]
+
+	mov	r0, #0	@ deoptimized_frames = 0
+	ldmfd	arm_sp!, {fast_regset, pc}
+1:
+	bl	return_check_monitors
+	b	2b
+}
+
+#endif // NOTICE_SAFEPOINTS
+
+(ldc) ldc {
+	ldr	r3, [istate, #ISTATE_METHOD]			@ method
+	ldrb	lr, [jpc, #1]
+
+	ldr	tmp1, [r3, #METHOD_CONSTANTS]			@ constants
+
+	DISPATCH_START	\seq_len
+
+	ldr	r3, [tmp1, #8]
+	DISPATCH_NEXT
+	add	r3, r3, #12
+	ldrb	r3, [r3, lr]
+
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+
+	cmp	r3, #JVM_CONSTANT_Integer
+	cmpne	r3, #JVM_CONSTANT_Float
+	cmpne	r3, #JVM_CONSTANT_String
+	bne	1f
+
+	add	r3, tmp1, lr, lsl #2
+	ldr	r3, [r3, #CONSTANTPOOL_BASE]
+	DISPATCH_NEXT
+	PUSH	r3
+	DISPATCH_FINISH
+1:
+	cmp	r3, #JVM_CONSTANT_Class
+	bne	2f
+	add	r0, tmp1, #CONSTANTPOOL_BASE
+	ldr	r0, [r0, lr, lsl #2]
+	ldr	r1, [r0, #KLASS_PART + KLASS_JAVA_MIRROR]
+	PUSH	r1
+	DISPATCH	0
+2:
+	sub	jpc, jpc, #\seq_len
+	mov	r0, thread
+        DECACHE_JPC
+        DECACHE_STACK
+	mov	r1, #0
+        bl      _ZN18InterpreterRuntime3ldcEP10JavaThreadb
+	CACHE_CP
+	ldr	r1, [thread, #THREAD_PENDING_EXC]
+	CACHE_JPC
+	cmp	r1, #0
+	bne	handle_exception
+	ldr	r3, [thread, #THREAD_VM_RESULT]
+	mov	r2, #0
+	PUSH	r3
+	str	r2, [thread, #THREAD_VM_RESULT]
+	DISPATCH	\seq_len
+}
+
+(ldc_w) ldc_w {
+	ldrb	lr, [jpc, #1]
+
+	ldr	r3, [istate, #ISTATE_METHOD]			@ method
+	ldrb	ip, [jpc, #2]
+	ldr	r2, [r3, #METHOD_CONSTANTS]			@ constants
+
+	DISPATCH_START	\seq_len
+
+	ldr	r3, [r2, #8]
+	orr	lr, ip, lr, lsl #8
+	add	r3, r3, #12
+	ldrb	r3, [r3, lr]
+
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+
+	cmp	r3, #JVM_CONSTANT_Integer
+	cmpne	r3, #JVM_CONSTANT_Float
+	cmpne	r3, #JVM_CONSTANT_String
+	bne	1f
+
+	add	r3, r2, lr, lsl #2
+	ldr	r3, [r3, #CONSTANTPOOL_BASE]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r3
+	DISPATCH_FINISH
+1:
+	cmp	r3, #JVM_CONSTANT_Class
+	bne	2f
+	add	r0, r2, #CONSTANTPOOL_BASE
+	ldr	r0, [r0, lr, lsl #2]
+	ldr	r1, [r0, #KLASS_PART + KLASS_JAVA_MIRROR]
+	PUSH	r1
+	DISPATCH	0
+2:
+	sub	jpc, jpc, #\seq_len
+	mov	r0, thread
+        DECACHE_JPC
+        DECACHE_STACK
+	mov	r1, #1
+        bl      _ZN18InterpreterRuntime3ldcEP10JavaThreadb
+	CACHE_CP
+	ldr	r1, [thread, #THREAD_PENDING_EXC]
+	CACHE_JPC
+	cmp	r1, #0
+	ldr	r3, [thread, #THREAD_VM_RESULT]
+	bne	handle_exception
+	mov	r2, #0
+	PUSH	r3
+	str	r2, [thread, #THREAD_VM_RESULT]
+	DISPATCH	\seq_len
+}
+
+(ldc2_w) ldc2_w {
+	ldrb	r3, [jpc, #1]
+
+	ldr	tmp1, [istate, #ISTATE_METHOD]			@ method
+	ldrb	lr, [jpc, #2]
+	ldr	r2, [tmp1, #METHOD_CONSTANTS]			@ constants
+
+	DISPATCH_START	\seq_len
+
+	ldr	tmp1, [r2, #8]
+	orr	r3, lr, r3, lsl #8
+	add	tmp1, tmp1, #12
+	ldrb	tmp1, [tmp1, r3]
+
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+
+	add	tmp1, r2, r3, lsl #2
+	ldr	r3, [tmp1, #CONSTANTPOOL_BASE]
+	ldr	tmp1, [tmp1, #CONSTANTPOOL_BASE+4]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r3, tmp1
+	DISPATCH_FINISH
+}
+
+#ifdef FAST_BYTECODES
+(iadd_u4store) {
+	ldrb	r3, [jpc, #2]
+	DISPATCH_START	\seq_len
+	POP	r1, tmp1
+	DISPATCH_NEXT
+	add	r1, tmp1, r1
+	DISPATCH_NEXT
+	rsb	r3, r3, #0
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	r1, [locals, r3, lsl #2]
+	DISPATCH_FINISH
+}
+
+(isub_u4store) {
+	ldrb	r3, [jpc, #2]
+	DISPATCH_START	\seq_len
+	POP	r1, tmp1
+	DISPATCH_NEXT
+	sub	r1, tmp1, r1
+	DISPATCH_NEXT
+	rsb	r3, r3, #0
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	r1, [locals, r3, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iand_u4store) {
+	ldrb	r3, [jpc, #2]
+	DISPATCH_START	\seq_len
+	POP	r1, tmp1
+	DISPATCH_NEXT
+	and	r1, tmp1, r1
+	DISPATCH_NEXT
+	rsb	r3, r3, #0
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	r1, [locals, r3, lsl #2]
+	DISPATCH_FINISH
+}
+
+(ior_u4store) {
+	ldrb	r3, [jpc, #2]
+	DISPATCH_START	\seq_len
+	POP	r1, tmp1
+	DISPATCH_NEXT
+	orr	r1, tmp1, r1
+	DISPATCH_NEXT
+	rsb	r3, r3, #0
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	r1, [locals, r3, lsl #2]
+	DISPATCH_FINISH
+}
+
+(ixor_u4store) {
+	ldrb	r3, [jpc, #2]
+	DISPATCH_START	\seq_len
+	POP	r1, tmp1
+	DISPATCH_NEXT
+	eor	r1, tmp1, r1
+	DISPATCH_NEXT
+	rsb	r3, r3, #0
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	r1, [locals, r3, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iadd_istore_N) {
+	ldrb	lr, [jpc, #1]
+	DISPATCH_START	\seq_len
+	DISPATCH_NEXT
+	rsb	r3, lr, #opc_istore_0
+	POP	r1, tmp1
+	DISPATCH_NEXT
+	add	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	r1, [locals, r3, lsl #2]
+	DISPATCH_FINISH
+}
+
+(isub_istore_N) {
+	ldrb	lr, [jpc, #1]
+	DISPATCH_START	\seq_len
+	DISPATCH_NEXT
+	rsb	r3, lr, #opc_istore_0
+	POP	r1, tmp1
+	DISPATCH_NEXT
+	sub	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	r1, [locals, r3, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iand_istore_N) {
+	ldrb	lr, [jpc, #1]
+	DISPATCH_START	\seq_len
+	DISPATCH_NEXT
+	rsb	r3, lr, #opc_istore_0
+	POP	r1, tmp1
+	DISPATCH_NEXT
+	and	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	r1, [locals, r3, lsl #2]
+	DISPATCH_FINISH
+}
+
+(ior_istore_N) {
+	ldrb	lr, [jpc, #1]
+	DISPATCH_START	\seq_len
+	DISPATCH_NEXT
+	rsb	r3, lr, #opc_istore_0
+	POP	r1, tmp1
+	DISPATCH_NEXT
+	orr	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	r1, [locals, r3, lsl #2]
+	DISPATCH_FINISH
+}
+
+(ixor_istore_N) {
+	ldrb	lr, [jpc, #1]
+	DISPATCH_START	\seq_len
+	DISPATCH_NEXT
+	rsb	r3, lr, #opc_istore_0
+	POP	r1, tmp1
+	DISPATCH_NEXT
+	eor	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	r1, [locals, r3, lsl #2]
+	DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(iload_iconst_N)
+{
+        ldrb    r3, [jpc, #2]
+        DISPATCH_START  \seq_len
+	rsb	r2, r2, #0
+        DISPATCH_NEXT
+        sub     r3, r3, #opc_iconst_0
+        ldr     tmp1, [locals, r2, lsl #2]
+        DISPATCH_NEXT
+        DISPATCH_NEXT
+        DISPATCH_NEXT
+        PUSH   r3, tmp1
+        DISPATCH_FINISH
+}
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+{
+        rsb     r3, r0, #opc_iload_0_iconst_N
+        DISPATCH_START  \seq_len
+        ldrb    r2, [jpc, #1-\seq_len]
+        DISPATCH_NEXT
+        ldr     tmp1, [locals, r3, lsl #2]
+        DISPATCH_NEXT
+        sub     r1, r2, #opc_iconst_0
+        DISPATCH_NEXT
+        DISPATCH_NEXT
+        PUSH   r1, tmp1
+        DISPATCH_FINISH
+}
+
+@ (aload_N)(getfield)
+(iaccess_0,iaccess_1,iaccess_2,iaccess_3)
+{
+	ldrb	r2, [jpc, #3]
+	rsb	tmp1, r0, #opc_iaccess_0
+	ldrb	r3, [jpc, #2]
+	add	r1, constpool, r2, lsl #12
+	DISPATCH_START	4
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	add	r1, r3, lsl #4
+	DISPATCH_NEXT
+	SW_NPC	cmp	tmp1, #0
+	SW_NPC	beq	null_ptr_exception_jpc_3
+	GO_IF_VOLATILE r3, r1, 3f
+	DISPATCH_NEXT
+        ldr     r1, [r1, #CP_OFFSET+8]
+	DISPATCH_NEXT
+.abortentry87:
+	ldr	r1, [tmp1, r1]
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+3:
+	VOLATILE_VERSION
+	DISPATCH_NEXT
+        ldr     r1, [r1, #CP_OFFSET+8]
+	DISPATCH_NEXT
+.abortentry87_v:
+	ldr	r1, [tmp1, r1]
+	FullBarrier
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(iload_iload) fast_iload_iload
+{
+	ldrb	r3, [jpc, #3]
+	DISPATCH_START	\seq_len
+	rsb	r2, r2, #0
+	DISPATCH_NEXT
+	rsb	r3, r3, #0
+	DISPATCH_NEXT
+	ldr	tmp1, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	PUSH	r3, tmp1
+	DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(iload_iload_N) fast_iload_iload_N
+{
+	ldrb	r3, [jpc, #2]
+	DISPATCH_START	\seq_len
+	rsb	r2, r2, #0
+	DISPATCH_NEXT
+	rsb	r3, r3, #opc_iload_0
+	DISPATCH_NEXT
+	ldr	tmp1, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	PUSH	r3, tmp1
+	DISPATCH_FINISH
+}
+
+(iload_0_iload,iload_1_iload,iload_2_iload,iload_3_iload) fast_iload_N_iload
+{
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r0, #opc_iload_0_iload
+	DISPATCH_START	\seq_len
+	rsb	r3, r3, #0
+	DISPATCH_NEXT
+	ldr	tmp1, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r3, tmp1
+	DISPATCH_FINISH
+}
+
+(iload_0_iload_N,iload_1_iload_N,iload_2_iload_N,iload_3_iload_N) fast_iload_N_iload_N
+{
+	rsb	r3, r0, #opc_iload_0_iload_N
+	DISPATCH_START	\seq_len
+	rsb	r2, r2, #opc_iload_0
+	DISPATCH_NEXT
+	ldr	tmp1, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	ldr	r3, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r3, tmp1
+	DISPATCH_FINISH
+}
+
+#endif // FAST_BYTECODES
+
+#ifdef HW_FP
+
+(fadd) fadd_vfp {
+	DISPATCH_START_R2
+	vldr	s15, [stack, #8]
+	vldr	s14, [stack, #4]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	fadds	s15, s15, s14
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	vstr	s15, [stack, #8]
+	add	stack, stack, #4
+	DISPATCH_FINISH
+}
+
+(dadd) dadd_vfp {
+	DISPATCH_START_R2
+	vldr	d7, [stack, #12]
+	vldr	d6, [stack, #4]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	faddd	d0, d7, d6
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	vstr	d0, [stack, #12]
+	add	stack, stack, #8
+	DISPATCH_FINISH
+}
+
+(fsub) fsub_vfp {
+	DISPATCH_START_R2
+	vldr	s15, [stack, #8]
+	vldr	s14, [stack, #4]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	fsubs	s15, s15, s14
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	vstr	s15, [stack, #8]
+	add	stack, stack, #4
+	DISPATCH_FINISH
+}
+
+(fmul) fmul_vfp {
+	DISPATCH_START_R2
+	vldr	s15, [stack, #8]
+	vldr	s14, [stack, #4]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	fmuls	s15, s15, s14
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	vstr	s15, [stack, #8]
+	add	stack, stack, #4
+	DISPATCH_FINISH
+}
+
+(dmul) dmul_vfp {
+	DISPATCH_START_R2
+	vldr	d7, [stack, #12]
+	vldr	d6, [stack, #4]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	fmuld	d0, d7, d6
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	vstr	d0, [stack, #12]
+	add	stack, stack, #8
+	DISPATCH_FINISH
+}
+
+(fdiv) fdiv_vfp {
+	DISPATCH_START_R2
+	vldr	s15, [stack, #8]
+	vldr	s14, [stack, #4]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	fdivs	s15, s15, s14
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	vstr	s15, [stack, #8]
+	add	stack, stack, #4
+	DISPATCH_FINISH
+}
+
+(ddiv) ddiv_vfp {
+	DISPATCH_START_R2
+	vldr	d7, [stack, #12]
+	vldr	d6, [stack, #4]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	fdivd	d0, d7, d6
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	vstr	d0, [stack, #12]
+	add	stack, stack, #8
+	DISPATCH_FINISH
+}
+
+(fcmpl) fcmpl_vfp {
+	DISPATCH_START_R2
+	flds	s14, [stack, #8]
+	flds	s15, [stack, #4]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	fcmpes	s14, s15
+	add	stack, stack, #8
+	fmstat
+	mvnmi	r3, #0
+	bmi	1f
+	movgt	r3, #1
+	bgt	1f
+	fcmps	s14, s15
+	fmstat
+	moveq	r3, #0
+	mvnne	r3, #0
+1:
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r3
+	DISPATCH_FINISH
+}
+
+(fcmpg) fcmpg_vfp {
+	DISPATCH_START_R2
+	flds	s14, [stack, #8]
+	flds	s15, [stack, #4]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	fcmpes	s14, s15
+	add	stack, stack, #8
+	fmstat
+	mvnmi	r3, #0
+	bmi	1f
+	movgt	r3, #1
+	bgt	1f
+	fcmps	s14, s15
+	fmstat
+	moveq	r3, #0
+	movne	r3, #1
+1:
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r3
+	DISPATCH_FINISH
+}
+
+(dcmpl) dcmpl_vfp {
+	DISPATCH_START_R2
+	fldd	d6, [stack, #12]
+	fldd	d7, [stack, #4]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	fcmped	d6, d7
+	fmstat
+	mvnmi	r3, #0
+	bmi	1f
+	movgt	r3, #1
+	bgt	1f
+	fcmpd	d6, d7
+	fmstat
+	moveq	r3, #0
+	mvnne	r3, #0
+1:
+	add	stack, stack, #16
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r3
+	DISPATCH_FINISH
+}
+
+(dcmpg) dcmpg_vfp {
+	DISPATCH_START_R2
+	fldd	d6, [stack, #12]
+	fldd	d7, [stack, #4]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	fcmped	d6, d7
+	fmstat
+	mvnmi	r3, #0
+	bmi	1f
+	movgt	r3, #1
+	bgt	1f
+	fcmpd	d6, d7
+	fmstat
+	moveq	r3, #0
+	movne	r3, #1
+1:
+	add	stack, stack, #16
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r3
+	DISPATCH_FINISH
+}
+
+#endif // HW_FP
+
+#ifdef FAST_BYTECODES
+
+@##############################################################################
+@ Optimised bytecode pairs
+@##############################################################################
+
+@ --- load; iaccess ------------------------------------------------------
+
+(iload_0,iload_1,iload_2,iload_3)
+(iaccess_0,iaccess_1,iaccess_2,iaccess_3)
+{
+	rsb	lr, r0, #opc_iload_0
+	ldrb	r2, [jpc, #4]
+	rsb	tmp1, r1, #opc_iaccess_0
+	ldrb	r3, [jpc, #3]
+	ldr	lr, [locals, lr, lsl #2]
+	add	r1, constpool, r2, lsl #12
+	DISPATCH_START	\seq_len
+	PUSH	lr
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	add	r1, r3, lsl #4
+	DISPATCH_NEXT
+	SW_NPC	cmp	tmp1, #0
+	SW_NPC	beq	null_ptr_exception_jpc_3
+	DISPATCH_NEXT
+        ldr     r1, [r1, #CP_OFFSET+8]
+	DISPATCH_NEXT
+.abortentry111:
+	ldr	r1, [tmp1, r1]
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(iload,aload,fload)
+(iaccess_0,iaccess_1,iaccess_2,iaccess_3)
+{
+	rsb	lr, r2, #0
+	ldrb	r2, [jpc, #5]
+	rsb	tmp1, r1, #opc_iaccess_0
+	ldrb	r3, [jpc, #4]
+	ldr	lr, [locals, lr, lsl #2]
+	add	r1, constpool, r2, lsl #12
+	DISPATCH_START	\seq_len
+	PUSH	lr
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	add	r1, r3, lsl #4
+	DISPATCH_NEXT
+	SW_NPC	cmp	tmp1, #0
+	SW_NPC	beq	null_ptr_exception_jpc_3
+	DISPATCH_NEXT
+        ldr     r1, [r1, #CP_OFFSET+8]
+	DISPATCH_NEXT
+.abortentry112:
+	ldr	r1, [tmp1, r1]
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+@ --- load; load ---------------------------------------------------------
+
+(aload_0,aload_1,aload_2,aload_3)
+(aload_0,aload_1,aload_2,aload_3)
+{
+	rsb	tmp1, r0, #opc_aload_0
+	DISPATCH_START	\seq_len
+	rsb	r1, r1, #opc_aload_0
+	DISPATCH_NEXT
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	ldr	r1, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1, tmp1
+	DISPATCH_FINISH
+}
+
+(aload_0,aload_1,aload_2,aload_3)
+(iload_0,iload_1,iload_2,iload_3)
+{
+	rsb	tmp1, r0, #opc_aload_0
+	DISPATCH_START	\seq_len
+	rsb	r1, r1, #opc_iload_0
+	DISPATCH_NEXT
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	ldr	r1, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1, tmp1
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(aload_0,aload_1,aload_2,aload_3)
+{
+	rsb	tmp1, r0, #opc_iload_0
+        DISPATCH_START  \seq_len
+	rsb	r1, r1, #opc_aload_0
+        DISPATCH_NEXT
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	cmp	r0, #opc_igetfield
+	ldr	r1, [locals, r1, lsl #2]
+        beq     1f
+2:
+        DISPATCH_NEXT
+        DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1, tmp1
+        DISPATCH_FINISH
+1:
+	ldrb	lr, [jpc, #-1]
+	add	lr, lr, #opc_iaccess_0-opc_aload_0
+	REWRITE_PAIRS	strb	lr, [jpc, #-1]
+	b	2b
+}
+
+@ 7 cycles
+(iload_0,iload_1,iload_2,iload_3)
+(iload_0,iload_1,iload_2,iload_3)
+{
+	add	r0, r0, #opc_iload_0_iload_N-opc_iload_0
+	REWRITE_PAIRS	strb	r0, [jpc]
+	b	do_fast_iload_N_iload_N
+}
+
+(aload_0,aload_1,aload_2,aload_3)
+(iload,aload,fload)
+{
+	ldrb	r3, [jpc, #2]
+	rsb	tmp1, r0, #opc_aload_0
+	DISPATCH_START	\seq_len
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	rsb	r3, r3, #0
+	DISPATCH_NEXT
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r3, tmp1
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(aload,fload)
+{
+	ldrb	r3, [jpc, #2]
+	rsb	tmp1, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	rsb	r3, r3, #0
+	DISPATCH_NEXT
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r3, tmp1
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(iload)
+{
+	add	r0, r0, #opc_iload_0_iload-opc_iload_0
+	REWRITE_PAIRS	strb	r0, [jpc]
+	b	do_fast_iload_N_iload
+}
+
+@ r2 = [jpc, #1]
+(iload,aload,fload)
+(aload_0,aload_1,aload_2,aload_3)
+{
+	DISPATCH_START	\seq_len
+	rsb	r1, r1, #opc_aload_0
+	DISPATCH_NEXT
+	rsb	tmp1, r2, #0
+	ldr	r1, [locals, r1, lsl #2]
+	cmp	r0, #opc_igetfield
+	DISPATCH_NEXT
+	beq	1f
+2:
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1, tmp1
+	DISPATCH_FINISH
+1:
+	ldrb	lr, [jpc, #-1]
+	add	lr, lr, #opc_iaccess_0-opc_aload_0
+	REWRITE_PAIRS	strb	lr, [jpc, #-1]
+	b	2b
+}
+
+@ r2 = [jpc, #1]
+(aload,fload)
+(iload_0,iload_1,iload_2,iload_3)
+{
+	DISPATCH_START	\seq_len
+	rsb	r1, r1, #opc_iload_0
+	DISPATCH_NEXT
+	rsb	tmp1, r2, #0
+	ldr	r1, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1, tmp1
+	DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(iload)
+(iload_0,iload_1,iload_2,iload_3)
+{
+	mov	r0, #opc_iload_iload_N
+	REWRITE_PAIRS	strb	r0, [jpc]
+	b	do_fast_iload_iload_N
+}
+
+@ r2 = [jpc, #1]
+(aload,fload)(iload,aload,fload) {
+	ldrb	r1, [jpc, #3]
+	rsb	tmp1, r2, #0
+	DISPATCH_START	\seq_len
+	rsb	r1, r1, #0
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	DISPATCH_NEXT
+	ldr	r1, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1, tmp1
+	DISPATCH_FINISH
+}
+
+(iload)(iload) {
+	mov	r0, #opc_iload_iload
+	REWRITE_PAIRS	strb	r0, [jpc]
+	b	do_fast_iload_iload
+}
+
+(iload)(aload,fload) {
+	ldrb	r1, [jpc, #3]
+	rsb	tmp1, r2, #0
+	DISPATCH_START	\seq_len
+	rsb	r1, r1, #0
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	DISPATCH_NEXT
+	ldr	r1, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1, tmp1
+	DISPATCH_FINISH
+}
+
+@ --- load; store --------------------------------------------------------
+
+(aload_0,aload_1,aload_2,aload_3)
+(astore_0,astore_1,astore_2,astore_3)
+{
+	rsb	tmp1, r0, #opc_aload_0
+	DISPATCH_START	\seq_len
+	rsb	r1, r1, #opc_astore_0
+	DISPATCH_NEXT
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	tmp1, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(istore_0,istore_1,istore_2,istore_3)
+{
+	rsb	tmp1, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	rsb	r1, r1, #opc_istore_0
+	DISPATCH_NEXT
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	tmp1, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(aload_0,aload_1,aload_2,aload_3)
+(astore,istore,fstore)
+{
+	ldrb	r1, [jpc, #2]
+	rsb	tmp1, r0, #opc_aload_0
+	DISPATCH_START	\seq_len
+	rsb	r1, r1, #0
+	DISPATCH_NEXT
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	tmp1, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(astore,istore,fstore)
+{
+	ldrb	r1, [jpc, #2]
+	rsb	tmp1, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	rsb	r1, r1, #0
+	DISPATCH_NEXT
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	tmp1, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(iload,aload,fload)
+(astore_0,astore_1,astore_2,astore_3)
+{
+	DISPATCH_START	\seq_len
+	rsb	tmp1, r1, #opc_astore_0
+	DISPATCH_NEXT
+	rsb	r1, r2, #0
+	ldr	r1, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	r1, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(iload,aload,fload)
+(istore_0,istore_1,istore_2,istore_3)
+{
+	DISPATCH_START	\seq_len
+	rsb	tmp1, r1, #opc_istore_0
+	DISPATCH_NEXT
+	rsb	r1, r2, #0
+	ldr	r1, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	r1, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(iload,aload,fload)(astore,istore,fstore) {
+	ldrb	tmp1, [jpc, #3]
+	rsb	r1, r2, #0
+	DISPATCH_START	\seq_len
+	rsb	tmp1, tmp1, #0
+	DISPATCH_NEXT
+	ldr	r1, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	r1, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+@ --- load; const -------------------------------------------------------
+
+(aload_0,aload_1,aload_2,aload_3)
+(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5)
+{
+	rsb	tmp1, r0, #opc_aload_0
+	DISPATCH_START	\seq_len
+	sub	r1, r1, #opc_iconst_0
+	DISPATCH_NEXT
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5)
+{
+        add     r0, r0, #opc_iload_0_iconst_N-opc_iload_0
+        REWRITE_PAIRS	strb    r0, [jpc]
+	b	do_iload_0_iconst_N
+}
+
+@ r2 = [jpc, #1]
+(iload,aload,fload)
+(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5)
+{
+        cmp     r0, #opc_iload
+        DISPATCH_START  \seq_len
+        sub     r1, r1, #opc_iconst_0
+        DISPATCH_NEXT
+        ldr     r3, [locals, -r2, lsl #2]
+        DISPATCH_NEXT
+        beq     1f
+        DISPATCH_NEXT
+        DISPATCH_NEXT
+        PUSH   r1, r3
+        DISPATCH_FINISH
+1:
+        mov     tmp1, #opc_iload_iconst_N
+        REWRITE_PAIRS	strb    tmp1, [jpc, #-\seq_len]
+	add	jpc, #-\seq_len
+	b	do_iload_iconst_N
+}
+
+(aload_0,aload_1,aload_2,aload_3)
+(bipush)
+{
+	ldrsb	r2, [jpc, #2]
+	rsb	r3, r0, #opc_aload_0
+	DISPATCH_START	\seq_len
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	PUSH	r2, r3
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(bipush)
+{
+	ldrsb	r2, [jpc, #2]
+	rsb	r3, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	PUSH	r2, r3
+	DISPATCH_FINISH
+}
+
+(aload_0,aload_1,aload_2,aload_3)
+(sipush)
+{
+        ldrsb   r2, [jpc, #2]   @ sign-extend high byte of sipush operand
+        ldrb    lr, [jpc, #3]   @ zero_extendqisi2
+	rsb	r3, r0, #opc_aload_0
+	DISPATCH_START	\seq_len
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+        orr     r2, lr, r2, asl #8
+	DISPATCH_NEXT
+	PUSH	r2, r3
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(sipush)
+{
+        ldrsb   r2, [jpc, #2]   @ sign-extend high byte of sipush operand
+        ldrb    lr, [jpc, #3]   @ zero_extendqisi2
+	rsb	r3, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+        orr     r2, lr, r2, asl #8
+	DISPATCH_NEXT
+	PUSH	r2, r3
+	DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(iload,aload,fload)(bipush) {
+	ldrsb	r3, [jpc, #3]
+	DISPATCH_START	\seq_len
+	ldr	lr, [locals, -r2, lsl #2]
+	DISPATCH_NEXT
+	PUSH	r3, lr
+	DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(iload,aload,fload)(sipush) {
+	ldrsb	r3, [jpc, #3]
+	ldrb	lr, [jpc, #4]
+	DISPATCH_START	\seq_len
+	ldr	tmp1, [locals, -r2, lsl #2]
+        orr     r3, lr, r3, asl #8
+	DISPATCH_NEXT
+	PUSH	r3, tmp1
+	DISPATCH_FINISH
+}
+
+@ --- load; Xaload -------------------------------------------------------
+
+(iload_0,iload_1,iload_2,iload_3)
+(iaload,aaload,faload)
+{
+	POP	r3
+	rsb	r2, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	ldr	r2, [locals, r2, lsl #2]
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry19:
+	ldr	lr, [r3, #8]		@ lr = length
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	add	r3, r3, r2, lsl #2
+	ldr	lr, [r3, #BASE_OFFSET_WORD]
+	DISPATCH_NEXT
+	PUSH	lr
+	DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(iload,aload,fload)(iaload,aaload,faload) {
+	POP	r3
+	ldr	r2, [locals, -r2, lsl #2]
+	DISPATCH_START	\seq_len
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry20:
+	ldr	lr, [r3, #8]		@ lr = length
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	add	r3, r3, r2, lsl #2
+	ldr	lr, [r3, #BASE_OFFSET_WORD]
+	DISPATCH_NEXT
+	PUSH	lr
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(baload)
+{
+	POP	r3
+	rsb	r2, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	ldr	r2, [locals, r2, lsl #2]
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry21:
+	ldr	lr, [r3, #8]		@ lr = length
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	add	r3, r3, r2
+	ldrsb	lr, [r3, #BASE_OFFSET_BYTE]
+	DISPATCH_NEXT
+	PUSH	lr
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(caload)
+{
+	POP	r3
+	rsb	r2, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	ldr	r2, [locals, r2, lsl #2]
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry22:
+	ldr	lr, [r3, #8]		@ lr = length
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	add	r3, r3, r2, lsl #1
+	ldrh	lr, [r3, #BASE_OFFSET_SHORT]
+	DISPATCH_NEXT
+	PUSH	lr
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(saload)
+{
+	POP	r3
+	rsb	r2, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	ldr	r2, [locals, r2, lsl #2]
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry23:
+	ldr	lr, [r3, #8]		@ lr = length
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	add	r3, r3, r2, lsl #1
+	ldrsh	lr, [r3, #BASE_OFFSET_SHORT]
+	DISPATCH_NEXT
+	PUSH	lr
+	DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(iload,aload,fload)(baload) {
+	POP	r3
+	ldr	r2, [locals, -r2, lsl #2]
+	DISPATCH_START	\seq_len
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry24:
+	ldr	lr, [r3, #8]		@ lr = length
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	add	r3, r3, r2
+	ldrsb	lr, [r3, #BASE_OFFSET_BYTE]
+	DISPATCH_NEXT
+	PUSH	lr
+	DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(iload,aload,fload)(caload) {
+	POP	r3
+	ldr	r2, [locals, -r2, lsl #2]
+	DISPATCH_START	\seq_len
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry25:
+	ldr	lr, [r3, #8]		@ lr = length
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	add	r3, r3, r2, lsl #1
+	ldrh	lr, [r3, #BASE_OFFSET_SHORT]
+	DISPATCH_NEXT
+	PUSH	lr
+	DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(iload,aload,fload)(saload) {
+	POP	r3
+	ldr	r2, [locals, -r2, lsl #2]
+	DISPATCH_START	\seq_len
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry26:
+	ldr	lr, [r3, #8]		@ lr = length
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	add	r3, r3, r2, lsl #1
+	ldrsh	lr, [r3, #BASE_OFFSET_SHORT]
+	DISPATCH_NEXT
+	PUSH	lr
+	DISPATCH_FINISH
+}
+
+@ --- load; Xastore -------------------------------------------------------
+
+(iload_0,iload_1,iload_2,iload_3)
+(iastore,fastore)
+{
+	POP	r2, r3
+	rsb	tmp1, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry27:
+	ldr	lr, [r3, #8]		@ lr = limit
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	DISPATCH_NEXT
+	add	r3, r3, r2, lsl #2
+	str	tmp1, [r3, #BASE_OFFSET_WORD]
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(bastore)
+{
+	POP	r2, r3
+	rsb	tmp1, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry28:
+	ldr	lr, [r3, #8]		@ lr = limit
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	DISPATCH_NEXT
+	add	r3, r3, r2
+	strb	tmp1, [r3, #BASE_OFFSET_BYTE]
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(castore,sastore)
+{
+	POP	r2, r3
+	rsb	tmp1, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry29:
+	ldr	lr, [r3, #8]		@ lr = limit
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	DISPATCH_NEXT
+	add	r3, r3, r2, lsl #1
+	strh	tmp1, [r3, #BASE_OFFSET_SHORT]
+	DISPATCH_FINISH
+}
+
+(iload,aload,fload)(iastore,fastore) {
+	POP	r3, tmp1
+	DISPATCH_START	\seq_len
+	ldr	r2, [locals, -r2, lsl #2]
+	SW_NPC	cmp	tmp1, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry30:
+	ldr	lr, [tmp1, #8]		@ lr = limit
+	DISPATCH_NEXT
+	cmp	r3, lr
+	bcs	array_bound_exception_jpc_1_r3
+	DISPATCH_NEXT
+	add	tmp1, tmp1, r3, lsl #2
+	str	r2, [tmp1, #BASE_OFFSET_WORD]
+	DISPATCH_FINISH
+}
+
+(iload,aload,fload)(bastore) {
+	POP	r3, tmp1
+	DISPATCH_START	\seq_len
+	ldr	r2, [locals, -r2, lsl #2]
+	SW_NPC	cmp	tmp1, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry31:
+	ldr	lr, [tmp1, #8]		@ lr = limit
+	DISPATCH_NEXT
+	cmp	r3, lr
+	bcs	array_bound_exception_jpc_1_r3
+	DISPATCH_NEXT
+	add	tmp1, tmp1, r3
+	strb	r2, [tmp1, #BASE_OFFSET_BYTE]
+	DISPATCH_FINISH
+}
+
+(iload,aload,fload)(castore,sastore) {
+	POP	r3, tmp1
+	DISPATCH_START	\seq_len
+	ldr	r2, [locals, -r2, lsl #2]
+	SW_NPC	cmp	tmp1, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry32:
+	ldr	lr, [tmp1, #8]		@ lr = limit
+	DISPATCH_NEXT
+	cmp	r3, lr
+	bcs	array_bound_exception_jpc_1_r3
+	DISPATCH_NEXT
+	add	tmp1, tmp1, r3, lsl #1
+	strh	r2, [tmp1, #BASE_OFFSET_SHORT]
+	DISPATCH_FINISH
+}
+
+@ --- load; dataop -------------------------------------------------------
+
+(iload_0,iload_1,iload_2,iload_3)
+(iadd)
+{
+	POP	r1
+	rsb	tmp1, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	DISPATCH_NEXT
+	add	tmp1, r1, tmp1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(iload,aload,fload)(iadd) {
+	DISPATCH_START	\seq_len
+	rsb	r1, r2, #0
+	POP	tmp1
+	DISPATCH_NEXT
+	ldr	r1, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	add	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(isub)
+{
+	POP	r1
+	rsb	tmp1, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	DISPATCH_NEXT
+	sub	tmp1, r1, tmp1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(iload,aload,fload)(isub) {
+	DISPATCH_START	\seq_len
+	rsb	r1, r2, #0
+	POP	tmp1
+	DISPATCH_NEXT
+	ldr	r1, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	sub	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(imul)
+{
+	POP	r2
+	rsb	lr, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	ldr	lr, [locals, lr, lsl #2]
+	DISPATCH_NEXT
+	mul	r3, r2, lr
+	DISPATCH_NEXT
+	PUSH	r3
+	DISPATCH_FINISH
+}
+
+(iload,aload,fload)(imul) {
+	DISPATCH_START	\seq_len
+	POP	r3
+	ldr	r2, [locals, -r2, lsl #2]
+	DISPATCH_NEXT
+	mul	lr, r3, r2
+	DISPATCH_NEXT
+	PUSH	lr
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(ineg)
+{
+	rsb	lr, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	ldr	lr, [locals, lr, lsl #2]
+	DISPATCH_NEXT
+	rsb	lr, lr, #0
+	PUSH	lr
+	DISPATCH_FINISH
+}
+
+(iload,aload,fload)(ineg) {
+	DISPATCH_START	\seq_len
+	ldr	r2, [locals, -r2, lsl #2]
+	DISPATCH_NEXT
+	rsb	r2, r2, #0
+	PUSH	r2
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(ishl)
+{
+	POP	r2
+	rsb	lr, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	ldr	lr, [locals, lr, lsl #2]
+	DISPATCH_NEXT
+	mov	lr, r2, lsl lr
+	DISPATCH_NEXT
+	PUSH	lr
+	DISPATCH_FINISH
+}
+
+(iload,aload,fload)(ishl) {
+	DISPATCH_START	\seq_len
+	POP	r3
+	ldr	r2, [locals, -r2, lsl #2]
+	DISPATCH_NEXT
+	mov	r2, r3, lsl r2
+	DISPATCH_NEXT
+	PUSH	r2
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(ishr)
+{
+	POP	r2
+	rsb	lr, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	ldr	lr, [locals, lr, lsl #2]
+	DISPATCH_NEXT
+	mov	lr, r2, asr lr
+	DISPATCH_NEXT
+	PUSH	lr
+	DISPATCH_FINISH
+}
+
+(iload,aload,fload)(ishr) {
+	DISPATCH_START	\seq_len
+	POP	r3
+	ldr	r2, [locals, -r2, lsl #2]
+	DISPATCH_NEXT
+	mov	r2, r3, asr r2
+	DISPATCH_NEXT
+	PUSH	r2
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(iushr)
+{
+	POP	r2
+	rsb	lr, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	ldr	lr, [locals, lr, lsl #2]
+	DISPATCH_NEXT
+	mov	lr, r2, lsr lr
+	DISPATCH_NEXT
+	PUSH	lr
+	DISPATCH_FINISH
+}
+
+(iload,aload,fload)(iushr) {
+	ldrb	r2, [jpc, #1]	@ NOTE(review): sibling handlers (ishl/ishr) rely on r2 = [jpc, #1] preloaded by the dispatcher; this reload looks redundant — confirm against the dispatch macro before removing
+	DISPATCH_START	\seq_len
+	POP	r3		@ r3 = value to shift
+	ldr	r2, [locals, -r2, lsl #2]	@ r2 = local (shift amount)
+	DISPATCH_NEXT
+	mov	r2, r3, lsr r2	@ logical (unsigned) right shift = iushr
+	DISPATCH_NEXT
+	PUSH	r2
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(iand)
+{
+	POP	r1
+	rsb	tmp1, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	DISPATCH_NEXT
+	and	tmp1, r1, tmp1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(iload,aload,fload)(iand) {
+	DISPATCH_START	\seq_len
+	rsb	r1, r2, #0
+	POP	tmp1
+	DISPATCH_NEXT
+	ldr	r1, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	and	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(ior)
+{
+	POP	r1
+	rsb	tmp1, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	DISPATCH_NEXT
+	orr	tmp1, r1, tmp1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(iload,aload,fload)(ior) {
+	DISPATCH_START	\seq_len
+	rsb	r1, r2, #0
+	POP	tmp1
+	DISPATCH_NEXT
+	ldr	r1, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	orr	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(ixor)
+{
+	POP	r1
+	rsb	tmp1, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	DISPATCH_NEXT
+	eor	tmp1, r1, tmp1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(iload,aload,fload)(ixor) {
+	DISPATCH_START	\seq_len
+	rsb	r1, r2, #0
+	POP	tmp1
+	DISPATCH_NEXT
+	ldr	r1, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	eor	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(i2c)
+{
+	rsb	lr, r0, #opc_iload_0
+	DISPATCH_START	\seq_len
+	ldr	lr, [locals, lr, lsl #2]
+	DISPATCH_NEXT
+        mov     lr, lr, asl #16
+        mov     lr, lr, lsr #16
+	PUSH	lr
+	DISPATCH_FINISH
+}
+
+(iload,aload,fload)(i2c) {
+	DISPATCH_START	\seq_len
+	ldr	r2, [locals, -r2, lsl #2]
+	DISPATCH_NEXT
+        mov     r2, r2, asl #16
+        mov     r2, r2, lsr #16
+	PUSH	r2
+	DISPATCH_FINISH
+}
+
+#ifdef NOTICE_SAFEPOINTS
+
+@ --- load; branch -------------------------------------------------------
+
+(iload_0,iload_1,iload_2,iload_3)
+(ifeq,ifnull)
+{
+	rsb	r3, r0, #opc_iload_0
+        ldrsb   r1, [jpc, #2]
+	ldr	r3, [locals, r3, lsl #2]
+        ldrb    ip, [jpc, #3]
+        cmp     r3, #0
+	beq	branch_taken_unsafe_1
+	DISPATCH 4
+}
+
+(iload,aload,fload)(ifeq,ifnull) {
+	rsb	r2, r2, #0
+        ldrsb   r1, [jpc, #3]
+	ldr	r3, [locals, r2, lsl #2]
+        ldrb    ip, [jpc, #4]
+        cmp     r3, #0
+	beq	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(ifne,ifnonnull)
+{
+	rsb	r3, r0, #opc_iload_0
+        ldrsb   r1, [jpc, #2]
+	ldr	r3, [locals, r3, lsl #2]
+        ldrb    ip, [jpc, #3]
+        cmp     r3, #0
+	bne	branch_taken_unsafe_1
+	DISPATCH 4
+}
+
+(iload,aload,fload)(ifne,ifnonnull) {
+	rsb	r2, r2, #0
+        ldrsb   r1, [jpc, #3]
+	ldr	r3, [locals, r2, lsl #2]
+        ldrb    ip, [jpc, #4]
+        cmp     r3, #0
+	bne	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(iflt)
+{
+	rsb	r3, r0, #opc_iload_0
+        ldrsb   r1, [jpc, #2]
+	ldr	r3, [locals, r3, lsl #2]
+        ldrb    ip, [jpc, #3]
+        cmp     r3, #0
+	blt	branch_taken_unsafe_1
+	DISPATCH 4
+}
+
+(iload,aload,fload)(iflt) {
+	rsb	r2, r2, #0
+        ldrsb   r1, [jpc, #3]
+	ldr	r3, [locals, r2, lsl #2]
+        ldrb    ip, [jpc, #4]
+        cmp     r3, #0
+	blt	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(ifge)
+{
+	rsb	r3, r0, #opc_iload_0
+        ldrsb   r1, [jpc, #2]
+	ldr	r3, [locals, r3, lsl #2]
+        ldrb    ip, [jpc, #3]
+        cmp     r3, #0
+	bge	branch_taken_unsafe_1
+	DISPATCH 4
+}
+
+(iload,aload,fload)(ifge) {
+	rsb	r2, r2, #0
+        ldrsb   r1, [jpc, #3]
+	ldr	r3, [locals, r2, lsl #2]
+        ldrb    ip, [jpc, #4]
+        cmp     r3, #0
+	bge	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(ifgt)
+{
+	rsb	r3, r0, #opc_iload_0
+        ldrsb   r1, [jpc, #2]
+	ldr	r3, [locals, r3, lsl #2]
+        ldrb    ip, [jpc, #3]
+        cmp     r3, #0
+	bgt	branch_taken_unsafe_1
+	DISPATCH 4
+}
+
+(iload,aload,fload)(ifgt) {
+	rsb	r2, r2, #0
+        ldrsb   r1, [jpc, #3]
+	ldr	r3, [locals, r2, lsl #2]
+        ldrb    ip, [jpc, #4]
+        cmp     r3, #0
+	bgt	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(ifle)
+{
+	rsb	r3, r0, #opc_iload_0
+        ldrsb   r1, [jpc, #2]
+	ldr	r3, [locals, r3, lsl #2]
+        ldrb    ip, [jpc, #3]
+        cmp     r3, #0
+	ble	branch_taken_unsafe_1
+	DISPATCH 4
+}
+
+(iload,aload,fload)(ifle) {
+	rsb	r2, r2, #0
+        ldrsb   r1, [jpc, #3]
+	ldr	r3, [locals, r2, lsl #2]
+        ldrb    ip, [jpc, #4]
+        cmp     r3, #0
+	ble	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(if_icmpeq,if_acmpeq)
+{
+	POP	r3
+	rsb	r2, r0, #opc_iload_0
+        ldrsb   r1, [jpc, #2]
+	ldr	r2, [locals, r2, lsl #2]
+        ldrb    ip, [jpc, #3]
+        cmp     r3, r2
+	beq	branch_taken_unsafe_1
+	DISPATCH 4
+}
+
+(iload,aload,fload)(if_icmpeq,if_acmpeq) {
+	POP	r3
+	rsb	r2, r2, #0
+        ldrsb   r1, [jpc, #3]
+	ldr	r2, [locals, r2, lsl #2]
+        ldrb    ip, [jpc, #4]
+        cmp     r3, r2
+	beq	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(if_icmpne,if_acmpne)
+{
+	POP	r3
+	rsb	r2, r0, #opc_iload_0
+        ldrsb   r1, [jpc, #2]
+	ldr	r2, [locals, r2, lsl #2]
+        ldrb    ip, [jpc, #3]
+        cmp     r3, r2
+	bne	branch_taken_unsafe_1
+	DISPATCH 4
+}
+
+(iload,aload,fload)(if_icmpne,if_acmpne) {
+	POP	r3
+	rsb	r2, r2, #0
+        ldrsb   r1, [jpc, #3]
+	ldr	r2, [locals, r2, lsl #2]
+        ldrb    ip, [jpc, #4]
+        cmp     r3, r2
+	bne	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(if_icmplt)
+{
+	POP	r3
+	rsb	r2, r0, #opc_iload_0
+        ldrsb   r1, [jpc, #2]
+	ldr	r2, [locals, r2, lsl #2]
+        ldrb    ip, [jpc, #3]
+        cmp     r3, r2
+	blt	branch_taken_unsafe_1
+	DISPATCH 4
+}
+
+(iload,aload,fload)(if_icmplt) {
+	POP	r3
+	rsb	r2, r2, #0
+        ldrsb   r1, [jpc, #3]
+	ldr	r2, [locals, r2, lsl #2]
+        ldrb    ip, [jpc, #4]
+        cmp     r3, r2
+	blt	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(if_icmpge)
+{
+	POP	r3
+	rsb	r2, r0, #opc_iload_0
+        ldrsb   r1, [jpc, #2]
+	ldr	r2, [locals, r2, lsl #2]
+        ldrb    ip, [jpc, #3]
+        cmp     r3, r2
+	bge	branch_taken_unsafe_1
+	DISPATCH 4
+}
+
+(iload,aload,fload)(if_icmpge) {
+	POP	r3
+	rsb	r2, r2, #0
+        ldrsb   r1, [jpc, #3]
+	ldr	r2, [locals, r2, lsl #2]
+        ldrb    ip, [jpc, #4]
+        cmp     r3, r2
+	bge	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(if_icmpgt)
+{
+	POP	r3
+	rsb	r2, r0, #opc_iload_0
+        ldrsb   r1, [jpc, #2]
+	ldr	r2, [locals, r2, lsl #2]
+        ldrb    ip, [jpc, #3]
+        cmp     r3, r2
+	bgt	branch_taken_unsafe_1
+	DISPATCH 4
+}
+
+(iload,aload,fload)(if_icmpgt) {
+	POP	r3
+	rsb	r2, r2, #0
+        ldrsb   r1, [jpc, #3]
+	ldr	r2, [locals, r2, lsl #2]
+        ldrb    ip, [jpc, #4]
+        cmp     r3, r2
+	bgt	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(if_icmple)
+{
+	POP	r3
+	rsb	r2, r0, #opc_iload_0
+        ldrsb   r1, [jpc, #2]
+	ldr	r2, [locals, r2, lsl #2]
+        ldrb    ip, [jpc, #3]
+        cmp     r3, r2
+	ble	branch_taken_unsafe_1
+	DISPATCH 4
+}
+
+(iload,aload,fload)(if_icmple) {
+	POP	r3
+	rsb	r2, r2, #0
+        ldrsb   r1, [jpc, #3]
+	ldr	r2, [locals, r2, lsl #2]
+        ldrb    ip, [jpc, #4]
+        cmp     r3, r2
+	ble	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+@ --- load; return/invoke -------------------------------------------------
+
+(iload_0,iload_1,iload_2,iload_3)
+(ireturn,areturn,freturn)
+{
+	@ Fused pair: load the local to be returned, then pop the interpreter frame.
+	rsb	r0, r0, #opc_iload_0
+	ldr	r9, [istate, #ISTATE_MONITOR_BASE]	@ r9 = base
+	ldr	tmp1, [istate, #ISTATE_STACK_BASE]	@ tmp1 = end
+	ldr	r1, [locals, r0, lsl #2]	@ r1 = return value
+	cmp	tmp1, r9
+	bcc	1f		@ monitors outstanding: go unlock/check them first
+2:
+	mov	r3, #0
+	ldr	stack, [thread, #THREAD_TOP_ZERO_FRAME]
+	str	r3, [thread, #THREAD_LAST_JAVA_SP]	@ clear last_Java_sp
+	ldr	r0, [istate, #ISTATE_METHOD]
+	ldr	r3, [stack, #0]
+	ldrh	r0, [r0, #40]	@ presumably method's parameter-slot count — TODO confirm offset 40
+	str	r3, [thread, #THREAD_TOP_ZERO_FRAME]	@ unlink this frame
+	str	r1, [stack, r0, lsl #2]!	@ place return value over the args
+
+	str	stack, [thread, #THREAD_JAVA_SP]
+
+	mov	r0, #0	@ deoptimized_frames = 0
+	ldmfd	arm_sp!, {fast_regset, pc}	@ return to C++ caller
+1:
+	PUSH	r1		@ save return value across the call
+	add	jpc, jpc, #1
+	bl	return_check_monitors
+	POP	r1
+	b	2b
+}
+
+(iload,aload,fload)(ireturn,areturn,freturn) {
+
+	ldr	r9, [istate, #ISTATE_MONITOR_BASE]	@ r9 = base
+	ldr	tmp1, [istate, #ISTATE_STACK_BASE]	@ tmp1 = end
+	ldr	r1, [locals, -r2, lsl #2]
+	cmp	tmp1, r9
+	bcc	1f
+2:
+	mov	r3, #0
+	ldr	stack, [thread, #THREAD_TOP_ZERO_FRAME]
+	str	r3, [thread, #THREAD_LAST_JAVA_SP]
+	ldr	r0, [istate, #ISTATE_METHOD]
+	ldr	r3, [stack, #0]
+	ldrh	r0, [r0, #40]
+	str	r3, [thread, #THREAD_TOP_ZERO_FRAME]
+	str	r1, [stack, r0, lsl #2]!
+
+	str	stack, [thread, #THREAD_JAVA_SP]
+
+	mov	r0, #0	@ deoptimized_frames = 0
+	ldmfd	arm_sp!, {fast_regset, pc}
+1:
+	PUSH	r1
+	add	jpc, jpc, #2
+	bl	return_check_monitors
+	POP	r1
+	b	2b
+}
+
+#endif // NOTICE_SAFEPOINTS
+
+(iload_0,iload_1,iload_2,iload_3)
+(invokeresolved)
+{
+	add	jpc, jpc, #1
+	rsb	r0, r0, #opc_iload_0
+	ldr	r0, [locals, r0, lsl #2]
+	ldrb	r2, [jpc, #1]
+	ldrb	r1, [jpc, #2]
+	PUSH	r0
+	b	do_invokeresolved
+}
+
+(aload_0,aload_1,aload_2,aload_3)
+(invokeresolved)
+{
+	add	jpc, jpc, #1
+	rsb	r0, r0, #opc_aload_0
+	ldr	r0, [locals, r0, lsl #2]
+	ldrb	r2, [jpc, #1]
+	ldrb	r1, [jpc, #2]
+	PUSH	r0
+	b	do_invokeresolved
+}
+
+(iload,aload,fload)(invokeresolved) {
+	ldr	r0, [locals, -r2, lsl #2]
+	add	jpc, jpc, #2
+        ldrb     r2, [jpc, #1]
+        ldrb     r1, [jpc, #2]
+	PUSH	r0
+	b	do_invokeresolved
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(invokevfinal)
+{
+	add	jpc, jpc, #1
+	rsb	r0, r0, #opc_iload_0
+	ldr	r0, [locals, r0, lsl #2]
+	ldrb	r2, [jpc, #1]
+	ldrb	r1, [jpc, #2]
+	PUSH	r0
+	b	do_invokevfinal
+}
+
+(aload_0,aload_1,aload_2,aload_3)
+(invokevfinal)
+{
+	add	jpc, jpc, #1
+	rsb	r0, r0, #opc_aload_0
+	ldr	r0, [locals, r0, lsl #2]
+	ldrb	r2, [jpc, #1]
+	ldrb	r1, [jpc, #2]
+	PUSH	r0
+	b	do_invokevfinal
+}
+
+(iload,aload,fload)(invokevfinal) {
+	ldr	r0, [locals, -r2, lsl #2]
+	add	jpc, jpc, #2
+        ldrb     r2, [jpc, #1]
+        ldrb     r1, [jpc, #2]
+	PUSH	r0
+	b	do_invokevfinal
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(invokespecialresolved)
+{
+	add	jpc, jpc, #1
+	rsb	r0, r0, #opc_iload_0
+	ldr	r0, [locals, r0, lsl #2]
+	ldrb	r2, [jpc, #1]
+	ldrb	r1, [jpc, #2]
+	PUSH	r0
+	b	do_invokespecialresolved
+}
+
+(aload_0,aload_1,aload_2,aload_3)
+(invokespecialresolved)
+{
+	add	jpc, jpc, #1
+	rsb	r0, r0, #opc_aload_0
+	ldr	r0, [locals, r0, lsl #2]
+	ldrb	r2, [jpc, #1]
+	ldrb	r1, [jpc, #2]
+	PUSH	r0
+	b	do_invokespecialresolved
+}
+
+(iload,aload,fload)(invokespecialresolved) {
+	ldr	r0, [locals, -r2, lsl #2]
+	add	jpc, jpc, #2
+        ldrb     r2, [jpc, #1]
+        ldrb     r1, [jpc, #2]
+	PUSH	r0
+	b	do_invokespecialresolved
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(invokestaticresolved)
+{
+	add	jpc, jpc, #1
+	rsb	r0, r0, #opc_iload_0
+	ldr	r0, [locals, r0, lsl #2]
+	ldrb	r2, [jpc, #1]
+	ldrb	r1, [jpc, #2]
+	PUSH	r0
+	b	do_invokestaticresolved
+}
+
+(aload_0,aload_1,aload_2,aload_3)
+(invokestaticresolved)
+{
+	add	jpc, jpc, #1
+	rsb	r0, r0, #opc_aload_0
+	ldr	r0, [locals, r0, lsl #2]
+	ldrb	r2, [jpc, #1]
+	ldrb	r1, [jpc, #2]
+	PUSH	r0
+	b	do_invokestaticresolved
+}
+
+(iload,aload,fload)(invokestaticresolved) {
+	ldr	r0, [locals, -r2, lsl #2]
+	add	jpc, jpc, #2
+        ldrb     r2, [jpc, #1]
+        ldrb     r1, [jpc, #2]
+	PUSH	r0
+	b	do_invokestaticresolved
+
+}
+
+(iload_0,iload_1,iload_2,iload_3)
+(invokeinterface)
+{
+	add	jpc, jpc, #1
+	rsb	r0, r0, #opc_iload_0
+	ldr	r0, [locals, r0, lsl #2]
+        ldrb     r2, [jpc, #1]
+        ldrb     r1, [jpc, #2]
+	PUSH	r0
+	b	do_invokeinterface
+}
+
+(aload_0,aload_1,aload_2,aload_3)
+(invokeinterface)
+{
+	add	jpc, jpc, #1
+	rsb	r0, r0, #opc_aload_0
+	ldr	r0, [locals, r0, lsl #2]
+        ldrb     r2, [jpc, #1]
+        ldrb     r1, [jpc, #2]
+	PUSH	r0
+	b	do_invokeinterface
+}
+
+(iload,aload,fload)(invokeinterface) {
+	ldr	r0, [locals, -r2, lsl #2]
+	add	jpc, jpc, #2
+        ldrb     r2, [jpc, #1]
+        ldrb     r1, [jpc, #2]
+	PUSH	r0
+	b	do_invokeinterface
+}
+
+(aload_0,aload_1,aload_2,aload_3)
+(igetfield)
+{
+	add	r0, r0, #opc_iaccess_0-opc_aload_0
+	REWRITE_PAIRS	strb	r0, [jpc]
+	DISPATCH_BYTECODE
+}
+
+@ 13 cycles
+(iload,aload,fload)(igetfield) {
+	@ Fused pair: load an object local, then read an int field through the
+	@ constant-pool cache entry; separate fall-through paths for non-volatile
+	@ and volatile fields (volatile adds a FullBarrier after the load).
+	ldrb	ip, [jpc, #4]		@ cp-cache index, high byte
+	rsb	tmp1, r2, #0
+	ldrb	r3, [jpc, #3]		@ cp-cache index, low byte
+	add	r1, constpool, ip, lsl #12
+	DISPATCH_START	5
+	ldr	tmp1, [locals, tmp1, lsl #2]	@ tmp1 = object ref
+	add	r1, r3, lsl #4		@ r1 = &cp-cache entry
+	DISPATCH_NEXT
+	SW_NPC	cmp	tmp1, #0
+	SW_NPC	beq	null_ptr_exception_jpc_3
+	DISPATCH_NEXT
+	GO_IF_VOLATILE r3, r1, 3f
+        ldr     r1, [r1, #CP_OFFSET+8]	@ r1 = field offset from cache entry
+	DISPATCH_NEXT
+.abortentry88:
+	ldr	r1, [tmp1, r1]		@ r1 = field value
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+3:
+	VOLATILE_VERSION
+        ldr     r1, [r1, #CP_OFFSET+8]
+	DISPATCH_NEXT
+.abortentry88_v:
+	ldr	r1, [tmp1, r1]
+	FullBarrier		@ volatile read: order against subsequent accesses
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+@ --- iconst; store -------------------------------------------------
+
+(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5)
+(istore_0,istore_1,istore_2,istore_3)
+{
+	sub	r3, r0, #opc_iconst_0
+	DISPATCH_START	\seq_len
+	rsb	r2, r1, #opc_istore_0
+	str	r3, [locals, r2, lsl #2]
+	DISPATCH_BYTECODE
+}
+
+(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5)(astore,istore,fstore) {
+	ldrb	r2, [jpc, #2]
+	sub	r3, r0, #opc_iconst_0
+	DISPATCH_START	\seq_len
+	str	r3, [locals, -r2, lsl #2]
+	DISPATCH_BYTECODE
+}
+
+@ --- iconst; dataop -------------------------------------------------
+
+(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5)(iadd) {
+	sub	tmp1, r0, #opc_iconst_0
+	DISPATCH_START	\seq_len
+	POP	r1
+	DISPATCH_NEXT
+	add	tmp1, r1, tmp1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5)(isub) {
+	sub	tmp1, r0, #opc_iconst_0
+	DISPATCH_START	\seq_len
+	POP	r1
+	DISPATCH_NEXT
+	sub	tmp1, r1, tmp1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5)(idiv) {
+	@ Fused pair: division by a small constant, strength-reduced per divisor.
+	subs	lr, r0, #opc_iconst_2	@ lr = divisor - 2; flags select the case below
+	DISPATCH_START	\seq_len
+	POP	tmp1			@ tmp1 = dividend
+	DISPATCH_NEXT
+	beq	5f			@ divisor == 2
+	bcc	3f			@ divisor in {-1, 0, 1}
+	cmp	lr, #(opc_iconst_4-opc_iconst_2)
+	beq	4f			@ divisor == 4
+	bcc	2f			@ divisor == 3
+@ divide by 5
+1:
+	mvn	lr, #0x198		@ Form 0x66666667 in lr
+	bic	lr, lr, #0x9800
+	add	lr, lr, lr, lsl #16	@ lr = 0x66666667, the signed reciprocal of 5
+	smull	a3, a4, tmp1, lr	@ a4 = (tmp1 * 0x66666667) >> 32
+	mov	a3, tmp1, asr #31	@ a3 = sign correction term
+	rsb	tmp1, a3, a4, asr #1	@ quotient = (hi >> 1) - sign
+	b	6f
+@ divide by 3
+2:
+	mvn	lr, #0xa9		@ Form 0x55555556 in lr
+	bic	lr, lr, #0xaa00
+	add	lr, lr, lr, lsl #16	@ lr = 0x55555556, the signed reciprocal of 3
+        smull   a3, a4, tmp1, lr
+        sub     tmp1, a4, tmp1, asr #31	@ quotient = hi - sign
+	b	6f
+3:
+	cmp	lr, #(opc_iconst_0-opc_iconst_2)
+	beq	div_zero_jpc_1		@ divisor == 0: throw ArithmeticException
+	rsbcc	tmp1, tmp1, #0		@ Divide by -1 or 1
+	b	6f
+@ divide by 4
+4:	movs	a4, tmp1
+	addmi	a4, a4, #3		@ bias negative dividends so asr rounds toward zero
+	mov	tmp1, a4, asr #2
+	b	6f
+@ divide by 2
+5:
+	add	tmp1, tmp1, tmp1, lsr #31	@ bias negative dividends by 1
+	mov	tmp1, tmp1, asr #1
+6:
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5)(ishl) {
+	sub	tmp1, r0, #opc_iconst_0
+	DISPATCH_START	\seq_len
+	POP	r2
+	DISPATCH_NEXT
+	mov	tmp1, r2, lsl tmp1
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+@ --- iconst; branch -------------------------------------------------
+
+#ifdef NOTICE_SAFEPOINTS
+
+(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5)(if_icmpeq) {
+	POP	r3
+	sub	r2, r0, #opc_iconst_0
+        ldrsb   r1, [jpc, #2]
+        cmp     r3, r2
+        ldrb    ip, [jpc, #3]
+	beq	branch_taken_unsafe_1
+	DISPATCH 4
+}
+
+(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5)(if_icmpne) {
+	POP	r3
+	sub	r2, r0, #opc_iconst_0
+        ldrsb   r1, [jpc, #2]
+        cmp     r3, r2
+        ldrb    ip, [jpc, #3]
+	bne	branch_taken_unsafe_1
+	DISPATCH 4
+}
+
+(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5)(if_icmplt) {
+	POP	r3
+	sub	r2, r0, #opc_iconst_0
+        ldrsb   r1, [jpc, #2]
+        cmp     r3, r2
+        ldrb    ip, [jpc, #3]
+	blt	branch_taken_unsafe_1
+	DISPATCH 4
+}
+
+(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5)(if_icmpge) {
+	POP	r3
+	sub	r2, r0, #opc_iconst_0
+        ldrsb   r1, [jpc, #2]
+        cmp     r3, r2
+        ldrb    ip, [jpc, #3]
+	bge	branch_taken_unsafe_1
+	DISPATCH 4
+}
+
+(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5)(if_icmpgt) {
+	POP	r3
+	sub	r2, r0, #opc_iconst_0
+        ldrsb   r1, [jpc, #2]
+        cmp     r3, r2
+        ldrb    ip, [jpc, #3]
+	bgt	branch_taken_unsafe_1
+	DISPATCH 4
+}
+
+(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5)(if_icmple) {
+	POP	r3
+	sub	r2, r0, #opc_iconst_0
+        ldrsb   r1, [jpc, #2]
+        cmp     r3, r2
+        ldrb    ip, [jpc, #3]
+	ble	branch_taken_unsafe_1
+	DISPATCH 4
+}
+
+(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5)(ireturn) {
+	ldr	r9, [istate, #ISTATE_MONITOR_BASE]	@ r9 = base
+	ldr	tmp1, [istate, #ISTATE_STACK_BASE]	@ tmp1 = end
+	sub	r1, r0, #opc_iconst_0
+	cmp	tmp1, r9
+	bcc	1f
+2:
+	mov	r3, #0
+	ldr	stack, [thread, #THREAD_TOP_ZERO_FRAME]
+	str	r3, [thread, #THREAD_LAST_JAVA_SP]
+	ldr	r0, [istate, #ISTATE_METHOD]
+	ldr	r3, [stack, #0]
+	ldrh	r0, [r0, #40]
+	str	r3, [thread, #THREAD_TOP_ZERO_FRAME]
+	str	r1, [stack, r0, lsl #2]!
+
+	str	stack, [thread, #THREAD_JAVA_SP]
+
+	mov	r0, #0	@ deoptimized_frames = 0
+	ldmfd	arm_sp!, {fast_regset, pc}
+1:
+	PUSH	r1
+	add	jpc, jpc, #1
+	bl	return_check_monitors
+	POP	r1
+	b	2b
+}
+
+#endif // NOTICE_SAFEPOINTS
+
+(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5)(invokeresolved) {
+	add	jpc, jpc, #1
+	sub	r0, r0, #opc_iconst_0
+	ldrb	r2, [jpc, #1]
+	ldrb	r1, [jpc, #2]
+	PUSH	r0
+	b	do_invokeresolved
+}
+
+(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5)(invokevfinal) {
+	add	jpc, jpc, #1
+	sub	r0, r0, #opc_iconst_0
+	ldrb	r2, [jpc, #1]
+	ldrb	r1, [jpc, #2]
+	PUSH	r0
+	b	do_invokevfinal
+}
+
+(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5)(invokestaticresolved) {
+	add	jpc, jpc, #1
+	sub	r0, r0, #opc_iconst_0
+	ldrb	r2, [jpc, #1]
+	ldrb	r1, [jpc, #2]
+	PUSH	r0
+	b	do_invokestaticresolved
+}
+
+(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5)(invokespecialresolved) {
+	add	jpc, jpc, #1
+	sub	r0, r0, #opc_iconst_0
+	ldrb	r2, [jpc, #1]
+	ldrb	r1, [jpc, #2]
+	PUSH	r0
+	b	do_invokespecialresolved
+}
+
+@# --- Bytecode sequences iaload; xxx -----------------------------------------------
+
+(iaload,faload,aaload)(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5) {
+	@ Fused pair: word-array load followed by an iconst push.
+	sub	r2, r1, #opc_iconst_0	@ r2 = constant value (-1..5) from second opcode
+	POP	r3, lr			@ r3 = index, lr = arrayref
+	DISPATCH_START	\seq_len
+	SW_NPC	cmp	lr, #0
+	SW_NPC	beq	null_ptr_exception_jpc_2
+.abortentry38:
+	ldr	tmp1, [lr, #8]		@ tmp1 = length
+	DISPATCH_NEXT
+	cmp	r3, tmp1
+	bcs	array_bound_exception_jpc_2_r3
+	add	lr, lr, r3, lsl #2
+	ldr	r3, [lr, #BASE_OFFSET_WORD]	@ use the named base-offset macro (was hardcoded #12)
+	DISPATCH_NEXT
+	PUSH	r2, r3
+	DISPATCH_FINISH
+}
+
+(iaload,faload,aaload)(bipush) {
+	@ Fused pair: word-array load followed by a bipush.
+	ldrsb	r2, [jpc, #2]		@ r2 = sign-extended bipush immediate
+	POP	r3, lr			@ r3 = index, lr = arrayref
+	DISPATCH_START	\seq_len
+	SW_NPC	cmp	lr, #0
+	SW_NPC	beq	null_ptr_exception_jpc_3
+.abortentry39:
+	ldr	tmp1, [lr, #8]		@ tmp1 = length
+	DISPATCH_NEXT
+	cmp	r3, tmp1
+	bcs	array_bound_exception_jpc_3_r3
+	add	lr, lr, r3, lsl #2
+	ldr	r3, [lr, #BASE_OFFSET_WORD]	@ use the named base-offset macro (was hardcoded #12)
+	DISPATCH_NEXT
+	PUSH	r2, r3
+	DISPATCH_FINISH
+}
+
+(iaload,faload,aaload)(sipush) {
+	@ Fused pair: word-array load followed by a sipush.
+	ldrsb	r2, [jpc, #2]		@ high byte of sipush immediate (sign-extended)
+	ldrb	tmp1, [jpc, #3]		@ low byte
+	POP	r3, lr			@ r3 = index, lr = arrayref
+	DISPATCH_START	\seq_len
+	orr	r2, tmp1, r2, lsl #8	@ r2 = 16-bit signed immediate
+	SW_NPC	cmp	lr, #0
+	SW_NPC	beq	null_ptr_exception_jpc_4
+.abortentry40:
+	ldr	tmp1, [lr, #8]		@ tmp1 = length
+	DISPATCH_NEXT
+	cmp	r3, tmp1
+	bcs	array_bound_exception_jpc_4_r3
+	add	lr, lr, r3, lsl #2
+	ldr	r3, [lr, #BASE_OFFSET_WORD]	@ use the named base-offset macro (was hardcoded #12)
+	DISPATCH_NEXT
+	PUSH	r2, r3
+	DISPATCH_FINISH
+}
+
+(iaload,faload,aaload)(iload,fload,aload) {
+	ldrb	r2, [jpc, #2]
+	POP	r3, lr			@ r3 = index, lr = arrayref
+	DISPATCH_START	\seq_len
+	ldr	r2, [locals, -r2, lsl #2]
+	SW_NPC	cmp	lr, #0
+	SW_NPC	beq	null_ptr_exception_jpc_3
+.abortentry41:
+	ldr	tmp1, [lr, #8]		@ tmp1 = length
+	DISPATCH_NEXT
+	cmp	r3, tmp1
+	bcs	array_bound_exception_jpc_3_r3
+	add	lr, lr, r3, lsl #2
+	ldr	r3, [lr, #BASE_OFFSET_WORD]
+	DISPATCH_NEXT
+	PUSH	r2, r3
+	DISPATCH_FINISH
+}
+
+(iaload,faload,aaload)
+(iload_0,iload_1,iload_2,iload_3)
+{
+	rsb	r2, r1, #opc_iload_0
+	POP	r3, lr			@ r3 = index, lr = arrayref
+	DISPATCH_START	\seq_len
+	ldr	r2, [locals, r2, lsl #2]
+	SW_NPC	cmp	lr, #0
+	SW_NPC	beq	null_ptr_exception_jpc_2
+.abortentry42:
+	ldr	tmp1, [lr, #8]		@ tmp1 = length
+	DISPATCH_NEXT
+	cmp	r3, tmp1
+	bcs	array_bound_exception_jpc_2_r3
+	add	lr, lr, r3, lsl #2
+	ldr	r3, [lr, #BASE_OFFSET_WORD]
+	DISPATCH_NEXT
+	PUSH	r2, r3
+	DISPATCH_FINISH
+}
+
+(iaload,faload,aaload)
+(aload_0,aload_1,aload_2,aload_3)
+{
+	rsb	r2, r1, #opc_aload_0
+	POP	r3, lr			@ r3 = index, lr = arrayref
+	DISPATCH_START	\seq_len
+	ldr	r2, [locals, r2, lsl #2]
+	SW_NPC	cmp	lr, #0
+	SW_NPC	beq	null_ptr_exception_jpc_2
+.abortentry42_1:
+	ldr	tmp1, [lr, #8]		@ tmp1 = length
+	DISPATCH_NEXT
+	cmp	r3, tmp1
+	bcs	array_bound_exception_jpc_2_r3
+	add	lr, lr, r3, lsl #2
+	ldr	r3, [lr, #BASE_OFFSET_WORD]
+	DISPATCH_NEXT
+	PUSH	r2, r3
+	DISPATCH_FINISH
+}
+
+(iaload,faload,aaload)(iaload,faload,aaload)
+{
+	@ Fused pair: two consecutive word-array loads; the first load's result
+	@ feeds the second access (e.g. a[b[i]] / a[i][j] patterns).
+	POP	r2, r3			@ r2 = index, r3 = arrayref
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_0
+.abortentry43:
+	ldr	tmp1, [r3, #8]		@ tmp1 = length
+	DISPATCH_START	\seq_len
+	cmp	r2, tmp1
+	bcs	array_bound_exception_jpc_2
+	add	r3, r3, r2, lsl #2
+	POP	lr			@ r2 = index, lr = arrayref
+	ldr	r2, [r3, #BASE_OFFSET_WORD]	@ r2 = first element; becomes index of second load
+	SW_NPC	cmp	lr, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry44:
+	ldr	tmp1, [lr, #8]		@ tmp1 = length
+	DISPATCH_NEXT
+	cmp	r2, tmp1
+	bcs	array_bound_exception_jpc_1
+	add	lr, lr, r2, lsl #2
+	ldr	r2, [lr, #BASE_OFFSET_WORD]
+	DISPATCH_NEXT
+	PUSH	r2
+	DISPATCH_FINISH
+}
+
+(iaload,faload,aaload)(astore,istore,fstore) {
+	ldrb	r2, [jpc, #2]
+	POP	r3, lr			@ r3 = index, lr = arrayref
+	DISPATCH_START	\seq_len
+	SW_NPC	cmp	lr, #0
+	SW_NPC	beq	null_ptr_exception_jpc_3
+.abortentry45:
+	ldr	tmp1, [lr, #8]		@ tmp1 = length
+	DISPATCH_NEXT
+	cmp	r3, tmp1
+	bcs	array_bound_exception_jpc_3_r3
+	add	lr, lr, r3, lsl #2
+	ldr	r3, [lr, #BASE_OFFSET_WORD]
+	DISPATCH_NEXT
+	str	r3, [locals, -r2, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iaload,faload,aaload)
+(istore_0,istore_1,istore_2,istore_3) {
+	rsb	r2, r1, #opc_istore_0
+	POP	r3, lr			@ r3 = index, lr = arrayref
+	DISPATCH_START	\seq_len
+	SW_NPC	cmp	lr, #0
+	SW_NPC	beq	null_ptr_exception_jpc_2
+.abortentry46:
+	ldr	tmp1, [lr, #8]		@ tmp1 = length
+	DISPATCH_NEXT
+	cmp	r3, tmp1
+	bcs	array_bound_exception_jpc_2_r3
+	add	lr, lr, r3, lsl #2
+	ldr	r3, [lr, #BASE_OFFSET_WORD]
+	DISPATCH_NEXT
+	str	r3, [locals, r2, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iaload,faload,aaload)(iastore,fastore) {
+	POP	r3, lr			@ r3 = index, lr = arrayref
+	SW_NPC	cmp	lr, #0
+	SW_NPC	beq	null_ptr_exception_jpc_0
+.abortentry47:
+	ldr	tmp1, [lr, #8]		@ tmp1 = length
+	DISPATCH_START	\seq_len
+	cmp	r3, tmp1
+	bcs	array_bound_exception_jpc_2_r3
+	add	lr, lr, r3, lsl #2
+	ldr	tmp1, [lr, #BASE_OFFSET_WORD]
+
+	POP	r2, r3		@ tmp1 = value, r2 = index, r3 = arrayref
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry48:
+	ldr	lr, [r3, #8]		@ lr = limit
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	DISPATCH_NEXT
+	add	r3, r3, r2, lsl #2
+	str	tmp1, [r3, #BASE_OFFSET_WORD]
+	DISPATCH_FINISH
+}
+
+(iaload,faload,aaload)(bastore) {
+	POP	r3, lr			@ r3 = index, lr = arrayref
+	SW_NPC	cmp	lr, #0
+	SW_NPC	beq	null_ptr_exception_jpc_0
+.abortentry49:
+	ldr	tmp1, [lr, #8]		@ tmp1 = length
+	DISPATCH_START	\seq_len
+	cmp	r3, tmp1
+	bcs	array_bound_exception_jpc_2_r3
+	add	lr, lr, r3, lsl #2
+	ldr	tmp1, [lr, #BASE_OFFSET_WORD]
+
+	POP	r2, r3		@ tmp1 = value, r2 = index, r3 = arrayref
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry50:
+	ldr	lr, [r3, #8]		@ lr = limit
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	DISPATCH_NEXT
+	add	r3, r3, r2
+	strb	tmp1, [r3, #BASE_OFFSET_BYTE]
+	DISPATCH_FINISH
+}
+
+(iaload,faload,aaload)(castore,sastore) {
+	POP	r3, lr			@ r3 = index, lr = arrayref
+	SW_NPC	cmp	lr, #0
+	SW_NPC	beq	null_ptr_exception_jpc_0
+.abortentry51:
+	ldr	tmp1, [lr, #8]		@ tmp1 = length
+	DISPATCH_START	\seq_len
+	cmp	r3, tmp1
+	bcs	array_bound_exception_jpc_2_r3
+	add	lr, lr, r3, lsl #2
+	ldr	tmp1, [lr, #BASE_OFFSET_WORD]
+
+	POP	r2, r3		@ tmp1 = value, r2 = index, r3 = arrayref
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry52:
+	ldr	lr, [r3, #8]		@ lr = limit
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	DISPATCH_NEXT
+	add	r3, r3, r2, lsl #1
+	strh	tmp1, [r3, #BASE_OFFSET_BYTE]
+	DISPATCH_FINISH
+}
+
+(iaload,faload,aaload)(iand) {
+	POP	r2, r3			@ r2 = index, r3 = arrayref
+	DISPATCH_START	\seq_len
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_2
+.abortentry58:
+	ldr	tmp1, [r3, #8]		@ tmp1 = length
+	DISPATCH_NEXT
+	cmp	r2, tmp1
+	bcs	array_bound_exception_jpc_2
+	add	r3, r3, r2, lsl #2
+	ldr	tmp1, [r3, #BASE_OFFSET_WORD]		@ tmp1 = tos
+	POP	r2			@ r2 = tosm1
+	DISPATCH_NEXT
+	and	tmp1, r2, tmp1		@ tosm1 <dop> tos
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(iaload,faload,aaload)(ior) {
+	POP	r2, r3			@ r2 = index, r3 = arrayref
+	DISPATCH_START	\seq_len
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_2
+.abortentry59:
+	ldr	tmp1, [r3, #8]		@ tmp1 = length
+	DISPATCH_NEXT
+	cmp	r2, tmp1
+	bcs	array_bound_exception_jpc_2
+	add	r3, r3, r2, lsl #2
+	ldr	tmp1, [r3, #BASE_OFFSET_WORD]		@ tmp1 = tos
+	POP	r2			@ r2 = tosm1
+	DISPATCH_NEXT
+	orr	tmp1, r2, tmp1		@ tosm1 <dop> tos
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(iaload,faload,aaload)(ixor) {
+	POP	r2, r3			@ r2 = index, r3 = arrayref
+	DISPATCH_START	\seq_len
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_2
+.abortentry60:
+	ldr	tmp1, [r3, #8]		@ tmp1 = length
+	DISPATCH_NEXT
+	cmp	r2, tmp1
+	bcs	array_bound_exception_jpc_2
+	add	r3, r3, r2, lsl #2
+	ldr	tmp1, [r3, #BASE_OFFSET_WORD]		@ tmp1 = tos
+	POP	r2			@ r2 = tosm1
+	DISPATCH_NEXT
+	eor	tmp1, r2, tmp1		@ tosm1 <dop> tos
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+@ ---- iadd; xxx ------------------------------------------------------------
+
+(iadd)(iload,fload,aload) {
+	ldrb	r1, [jpc, #2]
+	DISPATCH_START	\seq_len
+	POP	r3, tmp1
+	DISPATCH_NEXT
+	rsb	r1, r1, #0
+	DISPATCH_NEXT
+	ldr	r1, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	add	r3, tmp1, r3
+	DISPATCH_NEXT
+	PUSH	r1, r3
+	DISPATCH_FINISH
+}
+
+(iadd)
+(iload_0,iload_1,iload_2,iload_3)
+{
+	DISPATCH_START	\seq_len
+	rsb	r1, r1, #opc_iload_0
+	POP	r3, tmp1
+	DISPATCH_NEXT
+	ldr	r1, [locals, r1, lsl #2]
+	add	r3, tmp1, r3
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1, r3
+	DISPATCH_FINISH
+}
+
+(iadd)(iaload,aaload,faload) {
+	POP	r2, r3, lr		@ lr = ref
+	DISPATCH_START	\seq_len
+	add	r2, r3, r2		@ r2 = index
+	SW_NPC	cmp	lr, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry73:
+	ldr	tmp1, [lr, #8]		@ tmp1 = length
+	DISPATCH_NEXT
+	cmp	r2, tmp1
+	bcs	array_bound_exception_jpc_1
+	add	lr, lr, r2, lsl #2
+	ldr	tmp1, [lr, #BASE_OFFSET_WORD]
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(iadd)(istore) {
+	mov	r0, #opc_iadd_u4store
+	REWRITE_PAIRS	strb	r0, [jpc]
+	b	do_iadd_u4store
+}
+
+(iadd)
+(istore_0,istore_1,istore_2,istore_3) {
+	mov	r0, #opc_iadd_istore_N
+	REWRITE_PAIRS	strb	r0, [jpc]
+	b	do_iadd_istore_N
+}
+
+(iadd)(iastore,fastore) {
+	POP	r2, r3
+	DISPATCH_START	\seq_len
+	add	tmp1, r3, r2		@ tmp1 = value
+	POP	r2, r3			@ r2, index, r3 = ref
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry106:
+	ldr	lr, [r3, #8]		@ lr = limit
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	DISPATCH_NEXT
+	add	r3, r3, r2, lsl #2
+	str	tmp1, [r3, #BASE_OFFSET_WORD]
+	DISPATCH_FINISH
+}
+
+(iadd)(iadd) {
+	DISPATCH_START	\seq_len
+	POP	r2, r3, tmp1
+	DISPATCH_NEXT
+	add	r1, r3, r2
+	DISPATCH_NEXT
+	add	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(iadd)(isub) {
+	DISPATCH_START	\seq_len
+	POP	r2, r3, tmp1
+	DISPATCH_NEXT
+	add	r1, r3, r2
+	DISPATCH_NEXT
+	sub	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(iadd)(iinc) {
+	POP	tmp1, lr
+	DISPATCH_START	\seq_len
+	add	tmp1, lr, tmp1
+        ldrb    r3, [jpc, #-2]	@ jpc now points to next bc
+        ldrsb   r2, [jpc, #-1]
+	DISPATCH_NEXT
+	PUSH	tmp1
+	ldr	tmp1, [locals, -r3, lsl #2]
+	DISPATCH_NEXT
+	add	tmp1, tmp1, r2
+	str	tmp1, [locals, -r3, lsl #2]
+	DISPATCH_FINISH
+}
+@ ---- sub; xxx ------------------------------------------------------------
+
+(isub)(iload,fload,aload) {
+	ldrb	r1, [jpc, #2]
+	DISPATCH_START	\seq_len
+	POP	r3, tmp1
+	DISPATCH_NEXT
+	rsb	r1, r1, #0
+	DISPATCH_NEXT
+	ldr	r1, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	sub	r3, tmp1, r3
+	DISPATCH_NEXT
+	PUSH	r1, r3
+	DISPATCH_FINISH
+}
+
+(isub)
+(iload_0,iload_1,iload_2,iload_3)
+{
+	DISPATCH_START	\seq_len
+	rsb	r1, r1, #opc_iload_0
+	POP	r3, tmp1
+	DISPATCH_NEXT
+	ldr	r1, [locals, r1, lsl #2]
+	sub	r3, tmp1, r3
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1, r3
+	DISPATCH_FINISH
+}
+
+(isub)(iaload,aaload,faload) {
+	POP	r2, r3, lr		@ lr = ref
+	DISPATCH_START	\seq_len
+	sub	r2, r3, r2		@ r2 = index
+	SW_NPC	cmp	lr, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry74:
+	ldr	tmp1, [lr, #8]		@ tmp1 = length
+	DISPATCH_NEXT
+	cmp	r2, tmp1
+	bcs	array_bound_exception_jpc_1
+	add	lr, lr, r2, lsl #2
+	ldr	tmp1, [lr, #BASE_OFFSET_WORD]
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(isub)(istore) {
+	mov	r0, #opc_isub_u4store
+	REWRITE_PAIRS	strb	r0, [jpc]
+	b	do_isub_u4store
+}
+
+(isub)
+(istore_0,istore_1,istore_2,istore_3) {
+	mov	r0, #opc_isub_istore_N
+	REWRITE_PAIRS	strb	r0, [jpc]
+	b	do_isub_istore_N
+}
+
+(isub)(iastore,fastore) {
+	POP	r2, r3
+	DISPATCH_START	\seq_len
+	sub	tmp1, r3, r2		@ tmp1 = value
+	POP	r2, r3			@ r2, index, r3 = ref
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry105:
+	ldr	lr, [r3, #8]		@ lr = limit
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	DISPATCH_NEXT
+	add	r3, r3, r2, lsl #2
+	str	tmp1, [r3, #BASE_OFFSET_WORD]
+	DISPATCH_FINISH
+}
+
+(isub)(iadd) {
+	DISPATCH_START	\seq_len
+	POP	r2, r3, tmp1
+	DISPATCH_NEXT
+	sub	r1, r3, r2
+	DISPATCH_NEXT
+	add	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(isub)(isub) {
+	DISPATCH_START	\seq_len
+	POP	r2, r3, tmp1
+	DISPATCH_NEXT
+	sub	r1, r3, r2
+	DISPATCH_NEXT
+	sub	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(isub)(iinc) {
+	POP	tmp1, lr
+	DISPATCH_START	\seq_len
+	sub	tmp1, lr, tmp1
+        ldrb    r3, [jpc, #-2]	@ jpc now points to next bc
+        ldrsb   r2, [jpc, #-1]
+	DISPATCH_NEXT
+	PUSH	tmp1
+	ldr	tmp1, [locals, -r3, lsl #2]
+	DISPATCH_NEXT
+	add	tmp1, tmp1, r2
+	str	tmp1, [locals, -r3, lsl #2]
+	DISPATCH_FINISH
+}
+@ ---- iand; xxx ------------------------------------------------------------
+
+(iand)(iload,fload,aload) {
+	ldrb	r1, [jpc, #2]
+	DISPATCH_START	\seq_len
+	POP	r3, tmp1
+	DISPATCH_NEXT
+	rsb	r1, r1, #0
+	DISPATCH_NEXT
+	ldr	r1, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	and	r3, tmp1, r3
+	DISPATCH_NEXT
+	PUSH	r1, r3
+	DISPATCH_FINISH
+}
+
+(iand)
+(iload_0,iload_1,iload_2,iload_3)
+{
+	DISPATCH_START	\seq_len
+	rsb	r1, r1, #opc_iload_0
+	POP	r3, tmp1
+	DISPATCH_NEXT
+	ldr	r1, [locals, r1, lsl #2]
+	and	r3, tmp1, r3
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1, r3
+	DISPATCH_FINISH
+}
+
+(iand)(iaload,aaload,faload) {
+	POP	r2, r3, lr		@ lr = ref
+	DISPATCH_START	\seq_len
+	and	r2, r3, r2		@ r2 = index
+	SW_NPC	cmp	lr, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry75:
+	ldr	tmp1, [lr, #8]		@ tmp1 = length
+	DISPATCH_NEXT
+	cmp	r2, tmp1
+	bcs	array_bound_exception_jpc_1
+	add	lr, lr, r2, lsl #2
+	ldr	tmp1, [lr, #BASE_OFFSET_WORD]
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(iand)(istore) {
+	mov	r0, #opc_iand_u4store
+	REWRITE_PAIRS	strb	r0, [jpc]
+	b	do_iand_u4store
+}
+
+(iand)
+(istore_0,istore_1,istore_2,istore_3) {
+	mov	r0, #opc_iand_istore_N
+	REWRITE_PAIRS	strb	r0, [jpc]
+	b	do_iand_istore_N
+}
+
+(iand)(iastore,fastore) {
+	POP	r2, r3
+	DISPATCH_START	\seq_len
+	and	tmp1, r3, r2		@ tmp1 = value
+	POP	r2, r3			@ r2, index, r3 = ref
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry107:
+	ldr	lr, [r3, #8]		@ lr = limit
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	DISPATCH_NEXT
+	add	r3, r3, r2, lsl #2
+	str	tmp1, [r3, #BASE_OFFSET_WORD]
+	DISPATCH_FINISH
+}
+
+(iand)(iadd) {
+	DISPATCH_START	\seq_len
+	POP	r2, r3, tmp1
+	DISPATCH_NEXT
+	and	r1, r3, r2
+	DISPATCH_NEXT
+	add	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(iand)(isub) {
+	DISPATCH_START	\seq_len
+	POP	r2, r3, tmp1
+	DISPATCH_NEXT
+	and	r1, r3, r2
+	DISPATCH_NEXT
+	sub	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(iand)(iinc) {
+	POP	tmp1, lr
+	DISPATCH_START	\seq_len
+	and	tmp1, lr, tmp1
+        ldrb    r3, [jpc, #-2]	@ jpc now points to next bc
+        ldrsb   r2, [jpc, #-1]
+	DISPATCH_NEXT
+	PUSH	tmp1
+	ldr	tmp1, [locals, -r3, lsl #2]
+	DISPATCH_NEXT
+	add	tmp1, tmp1, r2
+	str	tmp1, [locals, -r3, lsl #2]
+	DISPATCH_FINISH
+}
+@ ---- ior; xxx ------------------------------------------------------------
+
+(ior)(iload,fload,aload) {
+	ldrb	r1, [jpc, #2]
+	DISPATCH_START	\seq_len
+	POP	r3, tmp1
+	DISPATCH_NEXT
+	rsb	r1, r1, #0
+	DISPATCH_NEXT
+	ldr	r1, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	orr	r3, tmp1, r3
+	DISPATCH_NEXT
+	PUSH	r1, r3
+	DISPATCH_FINISH
+}
+
+(ior)
+(iload_0,iload_1,iload_2,iload_3)
+{
+	DISPATCH_START	\seq_len
+	rsb	r1, r1, #opc_iload_0
+	POP	r3, tmp1
+	DISPATCH_NEXT
+	ldr	r1, [locals, r1, lsl #2]
+	orr	r3, tmp1, r3
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1, r3
+	DISPATCH_FINISH
+}
+
+(ior)(iaload,aaload,faload) {
+	POP	r2, r3, lr		@ lr = ref
+	DISPATCH_START	\seq_len
+	orr	r2, r3, r2		@ r2 = index
+	SW_NPC	cmp	lr, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry76:
+	ldr	tmp1, [lr, #8]		@ tmp1 = length
+	DISPATCH_NEXT
+	cmp	r2, tmp1
+	bcs	array_bound_exception_jpc_1
+	add	lr, lr, r2, lsl #2
+	ldr	tmp1, [lr, #BASE_OFFSET_WORD]
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(ior)(istore) {
+	mov	r0, #opc_ior_u4store
+	REWRITE_PAIRS	strb	r0, [jpc]
+	b	do_ior_u4store
+}
+
+(ior)
+(istore_0,istore_1,istore_2,istore_3) {
+	mov	r0, #opc_ior_istore_N
+	REWRITE_PAIRS	strb	r0, [jpc]
+	b	do_ior_istore_N
+}
+
+(ior)(iastore,fastore) {
+	POP	r2, r3
+	DISPATCH_START	\seq_len
+	orr	tmp1, r3, r2		@ tmp1 = value
+	POP	r2, r3			@ r2, index, r3 = ref
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry108:
+	ldr	lr, [r3, #8]		@ lr = limit
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	DISPATCH_NEXT
+	add	r3, r3, r2, lsl #2
+	str	tmp1, [r3, #BASE_OFFSET_WORD]
+	DISPATCH_FINISH
+}
+
+(ior)(iadd) {
+	DISPATCH_START	\seq_len
+	POP	r2, r3, tmp1
+	DISPATCH_NEXT
+	orr	r1, r3, r2
+	DISPATCH_NEXT
+	add	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(ior)(isub) {
+	DISPATCH_START	\seq_len
+	POP	r2, r3, tmp1
+	DISPATCH_NEXT
+	orr	r1, r3, r2
+	DISPATCH_NEXT
+	sub	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(ior)(iinc) {
+	POP	tmp1, lr
+	DISPATCH_START	\seq_len
+	orr	tmp1, lr, tmp1
+        ldrb    r3, [jpc, #-2]	@ jpc now points to next bc
+        ldrsb   r2, [jpc, #-1]
+	DISPATCH_NEXT
+	PUSH	tmp1
+	ldr	tmp1, [locals, -r3, lsl #2]
+	DISPATCH_NEXT
+	add	tmp1, tmp1, r2
+	str	tmp1, [locals, -r3, lsl #2]
+	DISPATCH_FINISH
+}
+
+@ ---- ixor; xxx ------------------------------------------------------------
+
+(ixor)(iload,fload,aload) {
+	ldrb	r1, [jpc, #2]
+	DISPATCH_START	\seq_len
+	POP	r3, tmp1
+	DISPATCH_NEXT
+	rsb	r1, r1, #0
+	DISPATCH_NEXT
+	ldr	r1, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	eor	r3, tmp1, r3
+	DISPATCH_NEXT
+	PUSH	r1, r3
+	DISPATCH_FINISH
+}
+
+(ixor)
+(iload_0,iload_1,iload_2,iload_3)
+{
+	DISPATCH_START	\seq_len
+	rsb	r1, r1, #opc_iload_0
+	POP	r3, tmp1
+	DISPATCH_NEXT
+	ldr	r1, [locals, r1, lsl #2]
+	eor	r3, tmp1, r3
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1, r3
+	DISPATCH_FINISH
+}
+
+(ixor)(iaload,aaload,faload) {
+	POP	r2, r3, lr		@ lr = ref
+	DISPATCH_START	\seq_len
+	eor	r2, r3, r2		@ r2 = index
+	SW_NPC	cmp	lr, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry77:
+	ldr	tmp1, [lr, #8]		@ tmp1 = length
+	DISPATCH_NEXT
+	cmp	r2, tmp1
+	bcs	array_bound_exception_jpc_1
+	add	lr, lr, r2, lsl #2
+	ldr	tmp1, [lr, #BASE_OFFSET_WORD]
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(ixor)(istore) {
+	mov	r0, #opc_ixor_u4store
+	REWRITE_PAIRS	strb	r0, [jpc]
+	b	do_ixor_u4store
+}
+
+(ixor)
+(istore_0,istore_1,istore_2,istore_3) {
+	mov	r0, #opc_ixor_istore_N
+	REWRITE_PAIRS	strb	r0, [jpc]
+	b	do_ixor_istore_N
+}
+
+(ixor)(iastore,fastore) {
+	POP	r2, r3
+	DISPATCH_START	\seq_len
+	eor	tmp1, r3, r2		@ tmp1 = value
+	POP	r2, r3			@ r2, index, r3 = ref
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry109:
+	ldr	lr, [r3, #8]		@ lr = limit
+	DISPATCH_NEXT
+	cmp	r2, lr
+	bcs	array_bound_exception_jpc_1
+	DISPATCH_NEXT
+	add	r3, r3, r2, lsl #2
+	str	tmp1, [r3, #BASE_OFFSET_WORD]
+	DISPATCH_FINISH
+}
+
+(ixor)(iadd) {
+	DISPATCH_START	\seq_len
+	POP	r2, r3, tmp1
+	DISPATCH_NEXT
+	eor	r1, r3, r2
+	DISPATCH_NEXT
+	add	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+        DISPATCH_FINISH
+}
+
+(ixor)(isub) {
+	DISPATCH_START	\seq_len
+	POP	r2, r3, tmp1
+	DISPATCH_NEXT
+	eor	r1, r3, r2
+	DISPATCH_NEXT
+	sub	r1, tmp1, r1
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(ixor)(iinc) {
+	POP	tmp1, lr
+	DISPATCH_START	\seq_len
+	eor	tmp1, lr, tmp1
+        ldrb    r3, [jpc, #-2]	@ jpc now points to next bc
+        ldrsb   r2, [jpc, #-1]
+	DISPATCH_NEXT
+	PUSH	tmp1
+	ldr	tmp1, [locals, -r3, lsl #2]
+	DISPATCH_NEXT
+	add	tmp1, tmp1, r2
+	str	tmp1, [locals, -r3, lsl #2]
+	DISPATCH_FINISH
+}
+
+@ --- iinc; xxx --------------------------------------------------------------
+
+(iinc)(iconst_m1,iconst_0,iconst_1,iconst_2,iconst_3,iconst_4,iconst_5) {
+        ldrsb   tmp1, [jpc, #2]
+	sub	lr, r1, #opc_iconst_0
+	DISPATCH_START	\seq_len
+	rsb	r1, r2, #0
+	ldr	r3, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	add	r3, r3, tmp1
+	DISPATCH_NEXT
+	PUSH	lr
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iinc)(iload,aload,fload) {
+	ldrb	lr, [jpc, #4]
+        ldrsb   tmp1, [jpc, #2]
+	DISPATCH_START	\seq_len
+	rsb	r1, r2, #0
+	ldr	r3, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	rsb	lr, lr, #0
+	DISPATCH_NEXT
+	add	r3, r3, tmp1
+	DISPATCH_NEXT
+	str	r3, [locals, r1, lsl #2]
+	ldr	tmp1, [locals, lr, lsl #2]
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(iinc)
+(iload_0,iload_1,iload_2,iload_3)
+{
+	rsb	lr, r1, #opc_iload_0
+        ldrsb   tmp1, [jpc, #2]
+	DISPATCH_START	\seq_len
+	rsb	r1, r2, #0
+	ldr	r3, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	add	r3, r3, tmp1
+	DISPATCH_NEXT
+	str	r3, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	ldr	tmp1, [locals, lr, lsl #2]
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+@###############################################################################
+@# Optimised bytecode triples
+@###############################################################################
+
+(iaccess_0,iaccess_1,iaccess_2,iaccess_3)
+(iload,fload,aload) {
+	ldrb	r2, [jpc, #3]
+	rsb	tmp1, r0, #opc_iaccess_0
+	ldrb	r1, [jpc, #2]
+	add	r3, constpool, r2, lsl #12
+	DISPATCH_START	6
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	add	r3, r1, lsl #4
+	ldrb	r1, [jpc, #-1]
+	DISPATCH_NEXT
+	SW_NPC	cmp	tmp1, #0
+	SW_NPC	beq	null_ptr_exception_jpc_5
+	DISPATCH_NEXT
+        ldr     r3, [r3, #CP_OFFSET+8]
+	DISPATCH_NEXT
+	rsb	r1, r1, #0
+.abortentry89:
+	ldr	r3, [tmp1, r3]
+	ldr	r1, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	PUSH	r1, r3
+	DISPATCH_FINISH
+}
+
+(iaccess_0,iaccess_1,iaccess_2,iaccess_3)
+(iload_0,iload_1,iload_2,iload_3) {
+	ldrb	r2, [jpc, #3]
+	rsb	tmp1, r0, #opc_iaccess_0
+	ldrb	ip, [jpc, #2]
+	add	r3, constpool, r2, lsl #12
+	DISPATCH_START	5
+	rsb	r1, r1, #opc_iload_0
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	add	r3, ip, lsl #4
+	DISPATCH_NEXT
+	SW_NPC	cmp	tmp1, #0
+	SW_NPC	beq	null_ptr_exception_jpc_4
+	DISPATCH_NEXT
+        ldr     r3, [r3, #CP_OFFSET+8]
+	DISPATCH_NEXT
+.abortentry90:
+	ldr	r3, [tmp1, r3]
+	ldr	r1, [locals, r1, lsl #2]
+	DISPATCH_NEXT
+	PUSH	r1, r3
+	DISPATCH_FINISH
+}
+
+(iaccess_0,iaccess_1,iaccess_2,iaccess_3)
+(iadd) {
+	ldrb	r2, [jpc, #3]
+	rsb	tmp1, r0, #opc_iaccess_0
+	ldrb	ip, [jpc, #2]
+	add	r1, constpool, r2, lsl #12
+	DISPATCH_START	5
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	add	r1, ip, lsl #4
+	DISPATCH_NEXT
+	SW_NPC	cmp	tmp1, #0
+	SW_NPC	beq	null_ptr_exception_jpc_4
+	DISPATCH_NEXT
+        ldr     r1, [r1, #CP_OFFSET+8]
+	DISPATCH_NEXT
+	POP	r3
+.abortentry91:
+	ldr	r1, [tmp1, r1]
+	DISPATCH_NEXT
+	add	r1, r1, r3
+	PUSH	r1
+	DISPATCH_FINISH
+}
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+(iadd)
+{
+        rsb     r3, r0, #opc_iload_0_iconst_N
+        DISPATCH_START  \seq_len
+        ldrb    r2, [jpc, #1-\seq_len]
+        DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+        DISPATCH_NEXT
+        sub     tmp1, r2, #opc_iconst_0
+        DISPATCH_NEXT
+        add     r3, r3, tmp1
+        DISPATCH_NEXT
+        PUSH    r3
+        DISPATCH_FINISH
+}
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+(isub)
+{
+        rsb     r3, r0, #opc_iload_0_iconst_N
+        DISPATCH_START  \seq_len
+        ldrb    r2, [jpc, #1-\seq_len]
+        DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+        DISPATCH_NEXT
+        sub     tmp1, r2, #opc_iconst_0
+        DISPATCH_NEXT
+        sub     r3, r3, tmp1
+        DISPATCH_NEXT
+        PUSH    r3
+        DISPATCH_FINISH
+}
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+(iand)
+{
+        rsb     r3, r0, #opc_iload_0_iconst_N
+        DISPATCH_START  \seq_len
+        ldrb    r2, [jpc, #1-\seq_len]
+        DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+        DISPATCH_NEXT
+        sub     tmp1, r2, #opc_iconst_0
+        DISPATCH_NEXT
+        and     r3, r3, tmp1
+        DISPATCH_NEXT
+        PUSH    r3
+        DISPATCH_FINISH
+}
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+(ior)
+{
+        rsb     r3, r0, #opc_iload_0_iconst_N
+        DISPATCH_START  \seq_len
+        ldrb    r2, [jpc, #1-\seq_len]
+        DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+        DISPATCH_NEXT
+        sub     tmp1, r2, #opc_iconst_0
+        DISPATCH_NEXT
+        orr     r3, r3, tmp1
+        DISPATCH_NEXT
+        PUSH    r3
+        DISPATCH_FINISH
+}
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+(ixor)
+{
+        rsb     r3, r0, #opc_iload_0_iconst_N
+        DISPATCH_START  \seq_len
+        ldrb    r2, [jpc, #1-\seq_len]
+        DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+        DISPATCH_NEXT
+        sub     tmp1, r2, #opc_iconst_0
+        DISPATCH_NEXT
+        eor     r3, r3, tmp1
+        DISPATCH_NEXT
+        PUSH    r3
+        DISPATCH_FINISH
+}
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+(idiv)
+{
+	rsb	tmp1, r0, #opc_iload_0_iconst_N
+	subs	lr, r2, #opc_iconst_2
+	DISPATCH_START	\seq_len
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	DISPATCH_NEXT
+	beq	5f
+	bcc	3f
+	cmp	lr, #(opc_iconst_4-opc_iconst_2)
+	beq	4f
+	bcc	2f
+@ divide by 5
+1:
+	mvn	lr, #0x198		@ Form 0x66666667 in lr
+	bic	lr, lr, #0x9800
+	add	lr, lr, lr, lsl #16
+	smull	a3, a4, tmp1, lr
+	mov	a3, tmp1, asr #31
+	rsb	tmp1, a3, a4, asr #1
+	b	6f
+@ divide by 3
+2:
+	mvn	lr, #0xa9		@ Form 0x55555556 in lr
+	bic	lr, lr, #0xaa00
+	add	lr, lr, lr, lsl #16
+        smull   a3, a4, tmp1, lr
+        sub     tmp1, a4, tmp1, asr #31
+	b	6f
+3:
+	cmp	lr, #(opc_iconst_0-opc_iconst_2)
+	beq	div_zero_jpc_1
+	rsbcc	tmp1, tmp1, #0		@ Divide by -1 or 1
+	b	6f
+@ divide by 4
+4:	movs	a4, tmp1
+	addmi	a4, a4, #3
+	mov	tmp1, a4, asr #2
+	b	6f
+@ divide by 2
+5:
+	add	tmp1, tmp1, tmp1, lsr #31
+	mov	tmp1, tmp1, asr #1
+6:
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(iload_iconst_N)
+(iadd)
+{
+        ldrb    r3, [jpc, #2]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+        DISPATCH_NEXT
+        ldr     r2, [locals, r2, lsl #2]
+        sub     r3, r3, #opc_iconst_0
+        DISPATCH_NEXT
+        add     r3, r2, r3
+        DISPATCH_NEXT
+        DISPATCH_NEXT
+        PUSH    r3
+        DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(iload_iconst_N)
+(isub)
+{
+        ldrb    r3, [jpc, #2]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+        DISPATCH_NEXT
+        ldr     r2, [locals, r2, lsl #2]
+        sub     r3, r3, #opc_iconst_0
+        DISPATCH_NEXT
+        sub     r3, r2, r3
+        DISPATCH_NEXT
+        DISPATCH_NEXT
+        PUSH    r3
+        DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(iload_iconst_N)
+(iand)
+{
+        ldrb    r3, [jpc, #2]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+        DISPATCH_NEXT
+        ldr     r2, [locals, r2, lsl #2]
+        sub     r3, r3, #opc_iconst_0
+        DISPATCH_NEXT
+        and     r3, r2, r3
+        DISPATCH_NEXT
+        DISPATCH_NEXT
+        PUSH    r3
+        DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(iload_iconst_N)
+(ior)
+{
+        ldrb    r3, [jpc, #2]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+        DISPATCH_NEXT
+        ldr     r2, [locals, r2, lsl #2]
+        sub     r3, r3, #opc_iconst_0
+        DISPATCH_NEXT
+        orr     r3, r2, r3
+        DISPATCH_NEXT
+        DISPATCH_NEXT
+        PUSH    r3
+        DISPATCH_FINISH
+}
+
+@ r2 = [jpc, #1]
+(iload_iconst_N)
+(ixor)
+{
+        ldrb    r3, [jpc, #2]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+        DISPATCH_NEXT
+        ldr     r2, [locals, r2, lsl #2]
+        sub     r3, r3, #opc_iconst_0
+        DISPATCH_NEXT
+        eor     r3, r2, r3
+        DISPATCH_NEXT
+        DISPATCH_NEXT
+        PUSH    r3
+        DISPATCH_FINISH
+}
+
+(iload_iconst_N)
+(idiv)
+{
+	ldrb	lr, [jpc, #2]
+	rsb	tmp1, r2, #0
+        DISPATCH_START  \seq_len
+	ldr	tmp1, [locals, tmp1, lsl #2]
+	subs	lr, lr, #opc_iconst_2
+	DISPATCH_NEXT
+	beq	5f
+	bcc	3f
+	cmp	lr, #(opc_iconst_4-opc_iconst_2)
+	beq	4f
+	bcc	2f
+@ divide by 5
+1:
+	mvn	lr, #0x198		@ Form 0x66666667 in lr
+	bic	lr, lr, #0x9800
+	add	lr, lr, lr, lsl #16
+	smull	a3, a4, tmp1, lr
+	mov	a3, tmp1, asr #31
+	rsb	tmp1, a3, a4, asr #1
+	b	6f
+@ divide by 3
+2:
+	mvn	lr, #0xa9		@ Form 0x55555556 in lr
+	bic	lr, lr, #0xaa00
+	add	lr, lr, lr, lsl #16
+        smull   a3, a4, tmp1, lr
+        sub     tmp1, a4, tmp1, asr #31
+	b	6f
+3:
+	cmp	lr, #(opc_iconst_0-opc_iconst_2)
+	beq	div_zero_jpc_1
+	rsbcc	tmp1, tmp1, #0		@ Divide by -1 or 1
+	b	6f
+@ divide by 4
+4:	movs	a4, tmp1
+	addmi	a4, a4, #3
+	mov	tmp1, a4, asr #2
+	b	6f
+@ divide by 2
+5:
+	add	tmp1, tmp1, tmp1, lsr #31
+	mov	tmp1, tmp1, asr #1
+6:
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+}
+
+(iload_iload)
+(iadd)
+{
+        ldrb    r3, [jpc, #3]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+        rsb     r3, r3, #0
+        ldr     r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        add     r3, r2, r3
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        PUSH    r3
+	DISPATCH_FINISH
+}
+
+(iload_iload_N)
+(iadd)
+{
+        ldrb    r3, [jpc, #2]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+        rsb     r3, r3, #opc_iload_0
+        ldr     r2, [locals, r2, lsl #2]
+        ldr     r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        add     r3, r2, r3
+	DISPATCH_NEXT
+        DISPATCH_NEXT
+        PUSH    r3
+	DISPATCH_FINISH
+}
+
+(iload_0_iload,iload_1_iload,iload_2_iload,iload_3_iload)
+(iadd)
+{
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r0, #opc_iload_0_iload
+        DISPATCH_START  \seq_len
+        rsb     r3, r3, #0
+        ldr     r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        add     r3, r2, r3
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        PUSH    r3
+	DISPATCH_FINISH
+}
+
+(iload_0_iload_N,iload_1_iload_N,iload_2_iload_N,iload_3_iload_N)
+(iadd)
+{
+	rsb	r3, r2, #opc_iload_0
+	rsb	r2, r0, #opc_iload_0_iload_N
+        DISPATCH_START  \seq_len
+        ldr     r2, [locals, r2, lsl #2]
+        ldr     r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        add     r3, r2, r3
+	DISPATCH_NEXT
+        DISPATCH_NEXT
+        PUSH    r3
+	DISPATCH_FINISH
+}
+
+(iload_iload)
+(isub)
+{
+        ldrb    r3, [jpc, #3]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+        rsb     r3, r3, #0
+        ldr     r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        sub     r3, r2, r3
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        PUSH    r3
+	DISPATCH_FINISH
+}
+
+(iload_iload_N)
+(isub)
+{
+        ldrb    r3, [jpc, #2]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+        rsb     r3, r3, #opc_iload_0
+        ldr     r2, [locals, r2, lsl #2]
+        ldr     r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        sub     r3, r2, r3
+	DISPATCH_NEXT
+        DISPATCH_NEXT
+        PUSH    r3
+	DISPATCH_FINISH
+}
+
+(iload_0_iload,iload_1_iload,iload_2_iload,iload_3_iload)
+(isub)
+{
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r0, #opc_iload_0_iload
+        DISPATCH_START  \seq_len
+        rsb     r3, r3, #0
+        ldr     r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        sub     r3, r2, r3
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        PUSH    r3
+	DISPATCH_FINISH
+}
+
+(iload_0_iload_N,iload_1_iload_N,iload_2_iload_N,iload_3_iload_N)
+(isub)
+{
+	rsb	r3, r2, #opc_iload_0
+	rsb	r2, r0, #opc_iload_0_iload_N
+        DISPATCH_START  \seq_len
+        ldr     r2, [locals, r2, lsl #2]
+        ldr     r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        sub     r3, r2, r3
+	DISPATCH_NEXT
+        DISPATCH_NEXT
+        PUSH    r3
+	DISPATCH_FINISH
+}
+
+(iload_iload)
+(iand)
+{
+        ldrb    r3, [jpc, #3]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+        rsb     r3, r3, #0
+        ldr     r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        and     r3, r2, r3
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        PUSH    r3
+	DISPATCH_FINISH
+}
+
+(iload_iload_N)
+(iand)
+{
+        ldrb    r3, [jpc, #2]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+        rsb     r3, r3, #opc_iload_0
+        ldr     r2, [locals, r2, lsl #2]
+        ldr     r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        and     r3, r2, r3
+	DISPATCH_NEXT
+        DISPATCH_NEXT
+        PUSH    r3
+	DISPATCH_FINISH
+}
+
+(iload_0_iload,iload_1_iload,iload_2_iload,iload_3_iload)
+(iand)
+{
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r0, #opc_iload_0_iload
+        DISPATCH_START  \seq_len
+        rsb     r3, r3, #0
+        ldr     r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        and     r3, r2, r3
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        PUSH    r3
+	DISPATCH_FINISH
+}
+
+(iload_0_iload_N,iload_1_iload_N,iload_2_iload_N,iload_3_iload_N)
+(iand)
+{
+	rsb	r3, r2, #opc_iload_0
+	rsb	r2, r0, #opc_iload_0_iload_N
+        DISPATCH_START  \seq_len
+        ldr     r2, [locals, r2, lsl #2]
+        ldr     r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        and     r3, r2, r3
+	DISPATCH_NEXT
+        DISPATCH_NEXT
+        PUSH    r3
+	DISPATCH_FINISH
+}
+
+(iload_iload)
+(ior)
+{
+        ldrb    r3, [jpc, #3]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+        rsb     r3, r3, #0
+        ldr     r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        orr     r3, r2, r3
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        PUSH    r3
+	DISPATCH_FINISH
+}
+
+(iload_iload_N)
+(ior)
+{
+        ldrb    r3, [jpc, #2]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+        rsb     r3, r3, #opc_iload_0
+        ldr     r2, [locals, r2, lsl #2]
+        ldr     r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        orr     r3, r2, r3
+	DISPATCH_NEXT
+        DISPATCH_NEXT
+        PUSH    r3
+	DISPATCH_FINISH
+}
+
+(iload_0_iload,iload_1_iload,iload_2_iload,iload_3_iload)
+(ior)
+{
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r0, #opc_iload_0_iload
+        DISPATCH_START  \seq_len
+        rsb     r3, r3, #0
+        ldr     r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        orr     r3, r2, r3
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        PUSH    r3
+	DISPATCH_FINISH
+}
+
+(iload_0_iload_N,iload_1_iload_N,iload_2_iload_N,iload_3_iload_N)
+(ior)
+{
+	rsb	r3, r2, #opc_iload_0
+	rsb	r2, r0, #opc_iload_0_iload_N
+        DISPATCH_START  \seq_len
+        ldr     r2, [locals, r2, lsl #2]
+        ldr     r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        orr     r3, r2, r3
+	DISPATCH_NEXT
+        DISPATCH_NEXT
+        PUSH    r3
+	DISPATCH_FINISH
+}
+
+(iload_iload)
+(ixor)
+{
+        ldrb    r3, [jpc, #3]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+        rsb     r3, r3, #0
+        ldr     r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        eor     r3, r2, r3
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+        PUSH    r3
+        DISPATCH_FINISH
+}
+
+(iload_iload_N)
+(ixor)
+{
+        ldrb    r3, [jpc, #2]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+        rsb     r3, r3, #opc_iload_0
+        ldr     r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+        DISPATCH_NEXT
+        DISPATCH_NEXT
+        eor     r3, r2, r3
+        DISPATCH_NEXT
+        DISPATCH_NEXT
+        PUSH    r3
+        DISPATCH_FINISH
+}
+
+(iload_0_iload,iload_1_iload,iload_2_iload,iload_3_iload)
+(ixor)
+{
+        ldrb    r3, [jpc, #2]
+	rsb	r2, r0, #opc_iload_0_iload
+        DISPATCH_START  \seq_len
+        rsb     r3, r3, #0
+        ldr     r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+        DISPATCH_NEXT
+        DISPATCH_NEXT
+        eor     r3, r2, r3
+        DISPATCH_NEXT
+        DISPATCH_NEXT
+        PUSH    r3
+        DISPATCH_FINISH
+}
+
+(iload_0_iload_N,iload_1_iload_N,iload_2_iload_N,iload_3_iload_N)
+(ixor)
+{
+	rsb	r3, r2, #opc_iload_0
+	rsb	r2, r0, #opc_iload_0_iload_N
+        DISPATCH_START  \seq_len
+        ldr     r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+        DISPATCH_NEXT
+        DISPATCH_NEXT
+        eor     r3, r2, r3
+        DISPATCH_NEXT
+        DISPATCH_NEXT
+        PUSH    r3
+	DISPATCH_FINISH
+}
+
+@ Former quads
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+(iadd_u4store)
+{
+        rsb     r3, r0, #opc_iload_0_iconst_N
+        DISPATCH_START  \seq_len
+	sub	r2, r2, #opc_iconst_0
+	DISPATCH_NEXT
+        ldrb    tmp1, [jpc, #-1]
+        ldr     r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+        add     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iconst_N)
+(iadd_u4store)
+{
+	ldrb	r3, [jpc, #2]
+        ldrb    lr, [jpc, #5]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+	sub	r3, r3, #opc_iconst_0
+	DISPATCH_NEXT
+        rsb     r1, lr, #0
+	DISPATCH_NEXT
+        ldr     tmp1, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        add     r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+(iadd_istore_N)
+{
+        rsb     r3, r0, #opc_iload_0_iconst_N
+        DISPATCH_START  \seq_len
+	sub	r2, r2, #opc_iconst_0
+	DISPATCH_NEXT
+        ldrb    tmp1, [jpc, #-1]
+        ldr     r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+        add     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #opc_istore_0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iconst_N)
+(iadd_istore_N)
+{
+	ldrb	r3, [jpc, #2]
+        ldrb    lr, [jpc, #4]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+	sub	r3, r3, #opc_iconst_0
+	DISPATCH_NEXT
+        rsb     r1, lr, #opc_istore_0
+	DISPATCH_NEXT
+        ldr     tmp1, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        add     r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+(isub_u4store)
+{
+        rsb     r3, r0, #opc_iload_0_iconst_N
+        DISPATCH_START  \seq_len
+	sub	r2, r2, #opc_iconst_0
+	DISPATCH_NEXT
+        ldrb    tmp1, [jpc, #-1]
+        ldr     r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+        sub     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iconst_N)
+(isub_u4store)
+{
+	ldrb	r3, [jpc, #2]
+        ldrb    lr, [jpc, #5]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+	sub	r3, r3, #opc_iconst_0
+	DISPATCH_NEXT
+        rsb     r1, lr, #0
+	DISPATCH_NEXT
+        ldr     tmp1, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        sub     r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+(isub_istore_N)
+{
+        rsb     r3, r0, #opc_iload_0_iconst_N
+        DISPATCH_START  \seq_len
+	sub	r2, r2, #opc_iconst_0
+	DISPATCH_NEXT
+        ldrb    tmp1, [jpc, #-1]
+        ldr     r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+        sub     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #opc_istore_0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iconst_N)
+(isub_istore_N)
+{
+	ldrb	r3, [jpc, #2]
+        ldrb    lr, [jpc, #4]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+	sub	r3, r3, #opc_iconst_0
+	DISPATCH_NEXT
+        rsb     r1, lr, #opc_istore_0
+	DISPATCH_NEXT
+        ldr     tmp1, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        sub     r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+(iand_u4store)
+{
+        rsb     r3, r0, #opc_iload_0_iconst_N
+        DISPATCH_START  \seq_len
+	sub	r2, r2, #opc_iconst_0
+	DISPATCH_NEXT
+        ldrb    tmp1, [jpc, #-1]
+        ldr     r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+        and     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iconst_N)
+(iand_u4store)
+{
+	ldrb	r3, [jpc, #2]
+        ldrb    lr, [jpc, #5]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+	sub	r3, r3, #opc_iconst_0
+	DISPATCH_NEXT
+        rsb     r1, lr, #0
+	DISPATCH_NEXT
+        ldr     tmp1, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        and     r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+(iand_istore_N)
+{
+        rsb     r3, r0, #opc_iload_0_iconst_N
+        DISPATCH_START  \seq_len
+	sub	r2, r2, #opc_iconst_0
+	DISPATCH_NEXT
+        ldrb    tmp1, [jpc, #-1]
+        ldr     r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+        and     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #opc_istore_0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iconst_N)
+(iand_istore_N)
+{
+	ldrb	r3, [jpc, #2]
+        ldrb    lr, [jpc, #4]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+	sub	r3, r3, #opc_iconst_0
+	DISPATCH_NEXT
+        rsb     r1, lr, #opc_istore_0
+	DISPATCH_NEXT
+        ldr     tmp1, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        and     r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+(ior_u4store)
+{
+        rsb     r3, r0, #opc_iload_0_iconst_N
+        DISPATCH_START  \seq_len
+	sub	r2, r2, #opc_iconst_0
+	DISPATCH_NEXT
+        ldrb    tmp1, [jpc, #-1]
+        ldr     r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+        orr     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iconst_N)
+(ior_u4store)
+{
+	ldrb	r3, [jpc, #2]
+        ldrb    lr, [jpc, #5]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+	sub	r3, r3, #opc_iconst_0
+	DISPATCH_NEXT
+        rsb     r1, lr, #0
+	DISPATCH_NEXT
+        ldr     tmp1, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        orr     r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+(ior_istore_N)
+{
+        rsb     r3, r0, #opc_iload_0_iconst_N
+        DISPATCH_START  \seq_len
+	sub	r2, r2, #opc_iconst_0
+	DISPATCH_NEXT
+        ldrb    tmp1, [jpc, #-1]
+        ldr     r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+        orr     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #opc_istore_0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iconst_N)
+(ior_istore_N)
+{
+	ldrb	r3, [jpc, #2]
+        ldrb    lr, [jpc, #4]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+	sub	r3, r3, #opc_iconst_0
+	DISPATCH_NEXT
+        rsb     r1, lr, #opc_istore_0
+	DISPATCH_NEXT
+        ldr     tmp1, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        orr     r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+(ixor_u4store)
+{
+        rsb     r3, r0, #opc_iload_0_iconst_N
+        DISPATCH_START  \seq_len
+	sub	r2, r2, #opc_iconst_0
+	DISPATCH_NEXT
+        ldrb    tmp1, [jpc, #-1]
+        ldr     r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+        eor     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iconst_N)
+(ixor_u4store)
+{
+	ldrb	r3, [jpc, #2]
+        ldrb    lr, [jpc, #5]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+	sub	r3, r3, #opc_iconst_0
+	DISPATCH_NEXT
+        rsb     r1, lr, #0
+	DISPATCH_NEXT
+        ldr     tmp1, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        eor     r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+(ixor_istore_N)
+{
+        rsb     r3, r0, #opc_iload_0_iconst_N
+        DISPATCH_START  \seq_len
+	sub	r2, r2, #opc_iconst_0
+	DISPATCH_NEXT
+        ldrb    tmp1, [jpc, #-1]
+        ldr     r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+        eor     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #opc_istore_0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iconst_N)
+(ixor_istore_N)
+{
+	ldrb	r3, [jpc, #2]
+        ldrb    lr, [jpc, #4]
+        rsb     r2, r2, #0
+        DISPATCH_START  \seq_len
+	sub	r3, r3, #opc_iconst_0
+	DISPATCH_NEXT
+        rsb     r1, lr, #opc_istore_0
+	DISPATCH_NEXT
+        ldr     tmp1, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        eor     r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+#ifdef NOTICE_SAFEPOINTS
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+(if_icmpeq,if_acmpeq)
+{
+	ldrb	r3, [jpc, #1]
+	rsb	r2, r0, #opc_iload_0_iconst_N
+	ldrsb	r1, [jpc, #3]
+	ldr	r2, [locals, r2, lsl #2]
+	sub	r3, r3, #opc_iconst_0
+	ldrb	ip, [jpc, #4]
+	cmp	r2, r3
+	beq	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+(iload_iconst_N)
+(if_icmpeq,if_acmpeq)
+{
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r2, #0
+	ldrsb	r1, [jpc, #4]
+	ldr	r2, [locals, r2, lsl #2]
+	sub	r3, r3, #opc_iconst_0
+	ldrb	ip, [jpc, #5]
+	cmp	r2, r3
+	beq	branch_taken_unsafe_3
+	DISPATCH 6
+}
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+(if_icmpne,if_acmpne)
+{
+	ldrb	r3, [jpc, #1]
+	rsb	r2, r0, #opc_iload_0_iconst_N
+	ldrsb	r1, [jpc, #3]
+	ldr	r2, [locals, r2, lsl #2]
+	sub	r3, r3, #opc_iconst_0
+	ldrb	ip, [jpc, #4]
+	cmp	r2, r3
+	bne	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+(iload_iconst_N)
+(if_icmpne,if_acmpne)
+{
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r2, #0
+	ldrsb	r1, [jpc, #4]
+	ldr	r2, [locals, r2, lsl #2]
+	sub	r3, r3, #opc_iconst_0
+	ldrb	ip, [jpc, #5]
+	cmp	r2, r3
+	bne	branch_taken_unsafe_3
+	DISPATCH 6
+}
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+(if_icmplt)
+{
+	ldrb	r3, [jpc, #1]
+	rsb	r2, r0, #opc_iload_0_iconst_N
+	ldrsb	r1, [jpc, #3]
+	ldr	r2, [locals, r2, lsl #2]
+	sub	r3, r3, #opc_iconst_0
+	ldrb	ip, [jpc, #4]
+	cmp	r2, r3
+	blt	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+(iload_iconst_N)
+(if_icmplt)
+{
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r2, #0
+	ldrsb	r1, [jpc, #4]
+	ldr	r2, [locals, r2, lsl #2]
+	sub	r3, r3, #opc_iconst_0
+	ldrb	ip, [jpc, #5]
+	cmp	r2, r3
+	blt	branch_taken_unsafe_3
+	DISPATCH 6
+}
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+(if_icmpge)
+{
+	ldrb	r3, [jpc, #1]
+	rsb	r2, r0, #opc_iload_0_iconst_N
+	ldrsb	r1, [jpc, #3]
+	ldr	r2, [locals, r2, lsl #2]
+	sub	r3, r3, #opc_iconst_0
+	ldrb	ip, [jpc, #4]
+	cmp	r2, r3
+	bge	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+(iload_iconst_N)
+(if_icmpge)
+{
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r2, #0
+	ldrsb	r1, [jpc, #4]
+	ldr	r2, [locals, r2, lsl #2]
+	sub	r3, r3, #opc_iconst_0
+	ldrb	ip, [jpc, #5]
+	cmp	r2, r3
+	bge	branch_taken_unsafe_3
+	DISPATCH 6
+}
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+(if_icmpgt)
+{
+	ldrb	r3, [jpc, #1]
+	rsb	r2, r0, #opc_iload_0_iconst_N
+	ldrsb	r1, [jpc, #3]
+	ldr	r2, [locals, r2, lsl #2]
+	sub	r3, r3, #opc_iconst_0
+	ldrb	ip, [jpc, #4]
+	cmp	r2, r3
+	bgt	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+(iload_iconst_N)
+(if_icmpgt)
+{
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r2, #0
+	ldrsb	r1, [jpc, #4]
+	ldr	r2, [locals, r2, lsl #2]
+	sub	r3, r3, #opc_iconst_0
+	ldrb	ip, [jpc, #5]
+	cmp	r2, r3
+	bgt	branch_taken_unsafe_3
+	DISPATCH 6
+}
+
+(iload_0_iconst_N,iload_1_iconst_N,iload_2_iconst_N,iload_3_iconst_N)
+(if_icmple)
+{
+	ldrb	r3, [jpc, #1]
+	rsb	r2, r0, #opc_iload_0_iconst_N
+	ldrsb	r1, [jpc, #3]
+	ldr	r2, [locals, r2, lsl #2]
+	sub	r3, r3, #opc_iconst_0
+	ldrb	ip, [jpc, #4]
+	cmp	r2, r3
+	ble	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+(iload_iconst_N)
+(if_icmple)
+{
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r2, #0
+	ldrsb	r1, [jpc, #4]
+	ldr	r2, [locals, r2, lsl #2]
+	sub	r3, r3, #opc_iconst_0
+	ldrb	ip, [jpc, #5]
+	cmp	r2, r3
+	ble	branch_taken_unsafe_3
+	DISPATCH 6
+}
+
+#endif // NOTICE_SAFEPOINTS
+
+(iload_iload)
+(iadd_istore_N)
+{
+	ldrb	r3, [jpc, #3]
+	ldrb	lr, [jpc, #5]
+	rsb	r2, r2, #0
+        DISPATCH_START  \seq_len
+	rsb	r3, r3, #0
+	DISPATCH_NEXT
+	rsb	r1, lr, #opc_istore_0
+	ldr	tmp1, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	add	r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iload_N)
+(iadd_istore_N)
+{
+	ldrb	r3, [jpc, #2]
+	ldrb	lr, [jpc, #4]
+	rsb	r2, r2, #0
+        DISPATCH_START  \seq_len
+	rsb	r3, r3, #opc_iload_0
+	DISPATCH_NEXT
+	rsb	r1, lr, #opc_istore_0
+	ldr	tmp1, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	add	r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iload,iload_1_iload,iload_2_iload,iload_3_iload)
+(iadd_istore_N)
+{
+        rsb     r3, r0, #opc_iload_0_iload
+	ldrb	r2, [jpc, #2]
+	ldrb	tmp1, [jpc, #4]
+        DISPATCH_START  \seq_len
+	rsb	r2, r2, #0
+	DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        add     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #opc_istore_0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iload_N,iload_1_iload_N,iload_2_iload_N,iload_3_iload_N)
+(iadd_istore_N)
+{
+	ldrb	tmp1, [jpc, #3]
+	rsb	r3, r0, #opc_iload_0_iload_N
+        DISPATCH_START  \seq_len
+	rsb	r2, r2, #opc_iload_0
+	DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        add     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #opc_istore_0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iload)
+(isub_istore_N)
+{
+	ldrb	r3, [jpc, #3]
+	ldrb	lr, [jpc, #5]
+	rsb	r2, r2, #0
+        DISPATCH_START  \seq_len
+	rsb	r3, r3, #0
+	DISPATCH_NEXT
+	rsb	r1, lr, #opc_istore_0
+	ldr	tmp1, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	sub	r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iload_N)
+(isub_istore_N)
+{
+	ldrb	r3, [jpc, #2]
+	ldrb	lr, [jpc, #4]
+	rsb	r2, r2, #0
+        DISPATCH_START  \seq_len
+	rsb	r3, r3, #opc_iload_0
+	DISPATCH_NEXT
+	rsb	r1, lr, #opc_istore_0
+	ldr	tmp1, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	sub	r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iload,iload_1_iload,iload_2_iload,iload_3_iload)
+(isub_istore_N)
+{
+        rsb     r3, r0, #opc_iload_0_iload
+	ldrb	r2, [jpc, #2]
+	ldrb	tmp1, [jpc, #4]
+        DISPATCH_START  \seq_len
+	rsb	r2, r2, #0
+	DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        sub     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #opc_istore_0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iload_N,iload_1_iload_N,iload_2_iload_N,iload_3_iload_N)
+(isub_istore_N)
+{
+	ldrb	tmp1, [jpc, #3]
+	rsb	r3, r0, #opc_iload_0_iload_N
+        DISPATCH_START  \seq_len
+	rsb	r2, r2, #opc_iload_0
+	DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        sub     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #opc_istore_0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iload)
+(iand_istore_N)
+{
+	ldrb	r3, [jpc, #3]
+	ldrb	lr, [jpc, #5]
+	rsb	r2, r2, #0
+        DISPATCH_START  \seq_len
+	rsb	r3, r3, #0
+	DISPATCH_NEXT
+	rsb	r1, lr, #opc_istore_0
+	ldr	tmp1, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	and	r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iload_N)
+(iand_istore_N)
+{
+	ldrb	r3, [jpc, #2]
+	ldrb	lr, [jpc, #4]
+	rsb	r2, r2, #0
+        DISPATCH_START  \seq_len
+	rsb	r3, r3, #opc_iload_0
+	DISPATCH_NEXT
+	rsb	r1, lr, #opc_istore_0
+	ldr	tmp1, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	and	r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iload,iload_1_iload,iload_2_iload,iload_3_iload)
+(iand_istore_N)
+{
+        rsb     r3, r0, #opc_iload_0_iload
+	ldrb	r2, [jpc, #2]
+	ldrb	tmp1, [jpc, #4]
+        DISPATCH_START  \seq_len
+	rsb	r2, r2, #0
+	DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        and     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #opc_istore_0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iload_N,iload_1_iload_N,iload_2_iload_N,iload_3_iload_N)
+(iand_istore_N)
+{
+	ldrb	tmp1, [jpc, #3]
+	rsb	r3, r0, #opc_iload_0_iload_N
+        DISPATCH_START  \seq_len
+	rsb	r2, r2, #opc_iload_0
+	DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        and     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #opc_istore_0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iload)
+(ior_istore_N)
+{
+	ldrb	r3, [jpc, #3]
+	ldrb	lr, [jpc, #5]
+	rsb	r2, r2, #0
+        DISPATCH_START  \seq_len
+	rsb	r3, r3, #0
+	DISPATCH_NEXT
+	rsb	r1, lr, #opc_istore_0
+	ldr	tmp1, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	orr	r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iload_N)
+(ior_istore_N)
+{
+	ldrb	r3, [jpc, #2]
+	ldrb	lr, [jpc, #4]
+	rsb	r2, r2, #0
+        DISPATCH_START  \seq_len
+	rsb	r3, r3, #opc_iload_0
+	DISPATCH_NEXT
+	rsb	r1, lr, #opc_istore_0
+	ldr	tmp1, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	orr	r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iload,iload_1_iload,iload_2_iload,iload_3_iload)
+(ior_istore_N)
+{
+        rsb     r3, r0, #opc_iload_0_iload
+	ldrb	r2, [jpc, #2]
+	ldrb	tmp1, [jpc, #4]
+        DISPATCH_START  \seq_len
+	rsb	r2, r2, #0
+	DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        orr     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #opc_istore_0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iload_N,iload_1_iload_N,iload_2_iload_N,iload_3_iload_N)
+(ior_istore_N)
+{
+	ldrb	tmp1, [jpc, #3]
+	rsb	r3, r0, #opc_iload_0_iload_N
+        DISPATCH_START  \seq_len
+	rsb	r2, r2, #opc_iload_0
+	DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        orr     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #opc_istore_0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iload)
+(ixor_istore_N)
+{
+	ldrb	r3, [jpc, #3]
+	ldrb	lr, [jpc, #5]
+	rsb	r2, r2, #0
+        DISPATCH_START  \seq_len
+	rsb	r3, r3, #0
+	DISPATCH_NEXT
+	rsb	r1, lr, #opc_istore_0
+	ldr	tmp1, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	eor	r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iload_N)
+(ixor_istore_N)
+{
+	ldrb	r3, [jpc, #2]
+	ldrb	lr, [jpc, #4]
+	rsb	r2, r2, #0
+        DISPATCH_START  \seq_len
+	rsb	r3, r3, #opc_iload_0
+	DISPATCH_NEXT
+	rsb	r1, lr, #opc_istore_0
+	ldr	tmp1, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	eor	r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iload,iload_1_iload,iload_2_iload,iload_3_iload)
+(ixor_istore_N)
+{
+        rsb     r3, r0, #opc_iload_0_iload
+	ldrb	r2, [jpc, #2]
+	ldrb	tmp1, [jpc, #4]
+        DISPATCH_START  \seq_len
+	rsb	r2, r2, #0
+	DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        eor     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #opc_istore_0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iload_N,iload_1_iload_N,iload_2_iload_N,iload_3_iload_N)
+(ixor_istore_N)
+{
+	ldrb	tmp1, [jpc, #3]
+	rsb	r3, r0, #opc_iload_0_iload_N
+        DISPATCH_START  \seq_len
+	rsb	r2, r2, #opc_iload_0
+	DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        eor     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #opc_istore_0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iload)
+(iadd_u4store)
+{
+	ldrb	r3, [jpc, #3]
+	ldrb	lr, [jpc, #6]
+	rsb	r2, r2, #0
+        DISPATCH_START  \seq_len
+	rsb	r3, r3, #0
+	DISPATCH_NEXT
+	rsb	r1, lr, #0
+	ldr	tmp1, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	add	r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iload_N)
+(iadd_u4store)
+{
+	ldrb	r3, [jpc, #2]
+	ldrb	lr, [jpc, #5]
+	rsb	r2, r2, #0
+        DISPATCH_START  \seq_len
+	rsb	r3, r3, #opc_iload_0
+	DISPATCH_NEXT
+	rsb	r1, lr, #0
+	ldr	tmp1, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	add	r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iload,iload_1_iload,iload_2_iload,iload_3_iload)
+(iadd_u4store)
+{
+        rsb     r3, r0, #opc_iload_0_iload
+	ldrb	r2, [jpc, #2]
+	ldrb	tmp1, [jpc, #5]
+        DISPATCH_START  \seq_len
+	rsb	r2, r2, #0
+	DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        add     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iload_N,iload_1_iload_N,iload_2_iload_N,iload_3_iload_N)
+(iadd_u4store)
+{
+	ldrb	tmp1, [jpc, #4]
+	rsb	r3, r0, #opc_iload_0_iload_N
+        DISPATCH_START  \seq_len
+	rsb	r2, r2, #opc_iload_0
+	DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        add     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iload)
+(isub_u4store)
+{
+	ldrb	r3, [jpc, #3]
+	ldrb	lr, [jpc, #6]
+	rsb	r2, r2, #0
+        DISPATCH_START  \seq_len
+	rsb	r3, r3, #0
+	DISPATCH_NEXT
+	rsb	r1, lr, #0
+	ldr	tmp1, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	sub	r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iload_N)
+(isub_u4store)
+{
+	ldrb	r3, [jpc, #2]
+	ldrb	lr, [jpc, #5]
+	rsb	r2, r2, #0
+        DISPATCH_START  \seq_len
+	rsb	r3, r3, #opc_iload_0
+	DISPATCH_NEXT
+	rsb	r1, lr, #0
+	ldr	tmp1, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	sub	r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iload,iload_1_iload,iload_2_iload,iload_3_iload)
+(isub_u4store)
+{
+        rsb     r3, r0, #opc_iload_0_iload
+	ldrb	r2, [jpc, #2]
+	ldrb	tmp1, [jpc, #5]
+        DISPATCH_START  \seq_len
+	rsb	r2, r2, #0
+	DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        sub     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iload_N,iload_1_iload_N,iload_2_iload_N,iload_3_iload_N)
+(isub_u4store)
+{
+	ldrb	tmp1, [jpc, #4]
+	rsb	r3, r0, #opc_iload_0_iload_N
+        DISPATCH_START  \seq_len
+	rsb	r2, r2, #opc_iload_0
+	DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        sub     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iload)
+(iand_u4store)
+{
+	ldrb	r3, [jpc, #3]
+	ldrb	lr, [jpc, #6]
+	rsb	r2, r2, #0
+        DISPATCH_START  \seq_len
+	rsb	r3, r3, #0
+	DISPATCH_NEXT
+	rsb	r1, lr, #0
+	ldr	tmp1, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	and	r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iload_N)
+(iand_u4store)
+{
+	ldrb	r3, [jpc, #2]
+	ldrb	lr, [jpc, #5]
+	rsb	r2, r2, #0
+        DISPATCH_START  \seq_len
+	rsb	r3, r3, #opc_iload_0
+	DISPATCH_NEXT
+	rsb	r1, lr, #0
+	ldr	tmp1, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	and	r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iload,iload_1_iload,iload_2_iload,iload_3_iload)
+(iand_u4store)
+{
+        rsb     r3, r0, #opc_iload_0_iload
+	ldrb	r2, [jpc, #2]
+	ldrb	tmp1, [jpc, #5]
+        DISPATCH_START  \seq_len
+	rsb	r2, r2, #0
+	DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        and     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iload_N,iload_1_iload_N,iload_2_iload_N,iload_3_iload_N)
+(iand_u4store)
+{
+	ldrb	tmp1, [jpc, #4]
+	rsb	r3, r0, #opc_iload_0_iload_N
+        DISPATCH_START  \seq_len
+	rsb	r2, r2, #opc_iload_0
+	DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        and     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iload)
+(ior_u4store)
+{
+	ldrb	r3, [jpc, #3]
+	ldrb	lr, [jpc, #6]
+	rsb	r2, r2, #0
+        DISPATCH_START  \seq_len
+	rsb	r3, r3, #0
+	DISPATCH_NEXT
+	rsb	r1, lr, #0
+	ldr	tmp1, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	orr	r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iload_N)
+(ior_u4store)
+{
+	ldrb	r3, [jpc, #2]
+	ldrb	lr, [jpc, #5]
+	rsb	r2, r2, #0
+        DISPATCH_START  \seq_len
+	rsb	r3, r3, #opc_iload_0
+	DISPATCH_NEXT
+	rsb	r1, lr, #0
+	ldr	tmp1, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	orr	r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iload,iload_1_iload,iload_2_iload,iload_3_iload)
+(ior_u4store)
+{
+        rsb     r3, r0, #opc_iload_0_iload
+	ldrb	r2, [jpc, #2]
+	ldrb	tmp1, [jpc, #5]
+        DISPATCH_START  \seq_len
+	rsb	r2, r2, #0
+	DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        orr     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iload_N,iload_1_iload_N,iload_2_iload_N,iload_3_iload_N)
+(ior_u4store)
+{
+	ldrb	tmp1, [jpc, #4]
+	rsb	r3, r0, #opc_iload_0_iload_N
+        DISPATCH_START  \seq_len
+	rsb	r2, r2, #opc_iload_0
+	DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        orr     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iload)
+(ixor_u4store)
+{
+	ldrb	r3, [jpc, #3]
+	ldrb	lr, [jpc, #6]
+	rsb	r2, r2, #0
+        DISPATCH_START  \seq_len
+	rsb	r3, r3, #0
+	DISPATCH_NEXT
+	rsb	r1, lr, #0
+	ldr	tmp1, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	eor	r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_iload_N)
+(ixor_u4store)
+{
+	ldrb	r3, [jpc, #2]
+	ldrb	lr, [jpc, #5]
+	rsb	r2, r2, #0
+        DISPATCH_START  \seq_len
+	rsb	r3, r3, #opc_iload_0
+	DISPATCH_NEXT
+	rsb	r1, lr, #0
+	ldr	tmp1, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	eor	r3, tmp1, r3
+	DISPATCH_NEXT
+        str     r3, [locals, r1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iload,iload_1_iload,iload_2_iload,iload_3_iload)
+(ixor_u4store)
+{
+        rsb     r3, r0, #opc_iload_0_iload
+	ldrb	r2, [jpc, #2]
+	ldrb	tmp1, [jpc, #5]
+        DISPATCH_START  \seq_len
+	rsb	r2, r2, #0
+	DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        eor     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+(iload_0_iload_N,iload_1_iload_N,iload_2_iload_N,iload_3_iload_N)
+(ixor_u4store)
+{
+	ldrb	tmp1, [jpc, #4]
+	rsb	r3, r0, #opc_iload_0_iload_N
+        DISPATCH_START  \seq_len
+	rsb	r2, r2, #opc_iload_0
+	DISPATCH_NEXT
+        ldr     r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	DISPATCH_NEXT
+        eor     r3, r3, r2
+	DISPATCH_NEXT
+	rsb	tmp1, tmp1, #0
+	DISPATCH_NEXT
+        str     r3, [locals, tmp1, lsl #2]
+	DISPATCH_FINISH
+}
+
+#ifdef NOTICE_SAFEPOINTS
+
+(iload_iload)
+(if_icmpeq,if_acmpeq) {
+	ldrb	r3, [jpc, #3]
+	rsb	r2, r2, #0
+	ldrsb	r1, [jpc, #5]
+	rsb	r3, r3, #0
+	ldr	r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	ldrb	ip, [jpc, #6]
+	cmp	r2, r3
+	beq	branch_taken_unsafe_4
+	DISPATCH 7
+}
+
+(iload_iload_N)
+(if_icmpeq,if_acmpeq) {
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r2, #0
+	ldrsb	r1, [jpc, #4]
+	rsb	r3, r3, #opc_iload_0
+	ldr	r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	ldrb	ip, [jpc, #5]
+	cmp	r2, r3
+	beq	branch_taken_unsafe_3
+	DISPATCH 6
+}
+
+(iload_0_iload,iload_1_iload,iload_2_iload,iload_3_iload)
+(if_icmpeq,if_acmpeq) {
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r0, #opc_iload_0_iload
+	ldrsb	r1, [jpc, #4]
+	rsb	r3, r3, #0
+	ldr	r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	ldrb	ip, [jpc, #5]
+	cmp	r2, r3
+	beq	branch_taken_unsafe_3
+	DISPATCH 6
+}
+
+(iload_0_iload_N,iload_1_iload_N,iload_2_iload_N,iload_3_iload_N)
+(if_icmpeq,if_acmpeq) {
+	rsb	r3, r2, #opc_iload_0
+	ldrsb	r1, [jpc, #3]
+	rsb	r2, r0, #opc_iload_0_iload_N
+	ldr	r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	ldrb	ip, [jpc, #4]
+	cmp	r2, r3
+	beq	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+(iload_iload)
+(if_icmpne,if_acmpne) {
+	ldrb	r3, [jpc, #3]
+	rsb	r2, r2, #0
+	ldrsb	r1, [jpc, #5]
+	rsb	r3, r3, #0
+	ldr	r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	ldrb	ip, [jpc, #6]
+	cmp	r2, r3
+	bne	branch_taken_unsafe_4
+	DISPATCH 7
+}
+
+(iload_iload_N)
+(if_icmpne,if_acmpne) {
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r2, #0
+	ldrsb	r1, [jpc, #4]
+	rsb	r3, r3, #opc_iload_0
+	ldr	r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	ldrb	ip, [jpc, #5]
+	cmp	r2, r3
+	bne	branch_taken_unsafe_3
+	DISPATCH 6
+}
+
+(iload_0_iload,iload_1_iload,iload_2_iload,iload_3_iload)
+(if_icmpne,if_acmpne) {
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r0, #opc_iload_0_iload
+	ldrsb	r1, [jpc, #4]
+	rsb	r3, r3, #0
+	ldr	r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	ldrb	ip, [jpc, #5]
+	cmp	r2, r3
+	bne	branch_taken_unsafe_3
+	DISPATCH 6
+}
+
+(iload_0_iload_N,iload_1_iload_N,iload_2_iload_N,iload_3_iload_N)
+(if_icmpne,if_acmpne) {
+	rsb	r3, r2, #opc_iload_0
+	ldrsb	r1, [jpc, #3]
+	rsb	r2, r0, #opc_iload_0_iload_N
+	ldr	r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	ldrb	ip, [jpc, #4]
+	cmp	r2, r3
+	bne	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+(iload_iload)
+(if_icmplt) {
+	ldrb	r3, [jpc, #3]
+	rsb	r2, r2, #0
+	ldrsb	r1, [jpc, #5]
+	rsb	r3, r3, #0
+	ldr	r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	ldrb	ip, [jpc, #6]
+	cmp	r2, r3
+	blt	branch_taken_unsafe_4
+	DISPATCH 7
+}
+
+(iload_iload_N)
+(if_icmplt) {
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r2, #0
+	ldrsb	r1, [jpc, #4]
+	rsb	r3, r3, #opc_iload_0
+	ldr	r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	ldrb	ip, [jpc, #5]
+	cmp	r2, r3
+	blt	branch_taken_unsafe_3
+	DISPATCH 6
+}
+
+(iload_0_iload,iload_1_iload,iload_2_iload,iload_3_iload)
+(if_icmplt) {
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r0, #opc_iload_0_iload
+	ldrsb	r1, [jpc, #4]
+	rsb	r3, r3, #0
+	ldr	r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	ldrb	ip, [jpc, #5]
+	cmp	r2, r3
+	blt	branch_taken_unsafe_3
+	DISPATCH 6
+}
+
+(iload_0_iload_N,iload_1_iload_N,iload_2_iload_N,iload_3_iload_N)
+(if_icmplt) {
+	rsb	r3, r2, #opc_iload_0
+	ldrsb	r1, [jpc, #3]
+	rsb	r2, r0, #opc_iload_0_iload_N
+	ldr	r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	ldrb	ip, [jpc, #4]
+	cmp	r2, r3
+	blt	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+(iload_iload)
+(if_icmpge) {
+	ldrb	r3, [jpc, #3]
+	rsb	r2, r2, #0
+	ldrsb	r1, [jpc, #5]
+	rsb	r3, r3, #0
+	ldr	r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	ldrb	ip, [jpc, #6]
+	cmp	r2, r3
+	bge	branch_taken_unsafe_4
+	DISPATCH 7
+}
+
+(iload_iload_N)
+(if_icmpge) {
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r2, #0
+	ldrsb	r1, [jpc, #4]
+	rsb	r3, r3, #opc_iload_0
+	ldr	r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	ldrb	ip, [jpc, #5]
+	cmp	r2, r3
+	bge	branch_taken_unsafe_3
+	DISPATCH 6
+}
+
+(iload_0_iload,iload_1_iload,iload_2_iload,iload_3_iload)
+(if_icmpge) {
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r0, #opc_iload_0_iload
+	ldrsb	r1, [jpc, #4]
+	rsb	r3, r3, #0
+	ldr	r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	ldrb	ip, [jpc, #5]
+	cmp	r2, r3
+	bge	branch_taken_unsafe_3
+	DISPATCH 6
+}
+
+(iload_0_iload_N,iload_1_iload_N,iload_2_iload_N,iload_3_iload_N)
+(if_icmpge) {
+	rsb	r3, r2, #opc_iload_0
+	ldrsb	r1, [jpc, #3]
+	rsb	r2, r0, #opc_iload_0_iload_N
+	ldr	r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	ldrb	ip, [jpc, #4]
+	cmp	r2, r3
+	bge	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+(iload_iload)
+(if_icmpgt) {
+	ldrb	r3, [jpc, #3]
+	rsb	r2, r2, #0
+	ldrsb	r1, [jpc, #5]
+	rsb	r3, r3, #0
+	ldr	r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	ldrb	ip, [jpc, #6]
+	cmp	r2, r3
+	bgt	branch_taken_unsafe_4
+	DISPATCH 7
+}
+
+(iload_iload_N)
+(if_icmpgt) {
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r2, #0
+	ldrsb	r1, [jpc, #4]
+	rsb	r3, r3, #opc_iload_0
+	ldr	r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	ldrb	ip, [jpc, #5]
+	cmp	r2, r3
+	bgt	branch_taken_unsafe_3
+	DISPATCH 6
+}
+
+(iload_0_iload,iload_1_iload,iload_2_iload,iload_3_iload)
+(if_icmpgt) {
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r0, #opc_iload_0_iload
+	ldrsb	r1, [jpc, #4]
+	rsb	r3, r3, #0
+	ldr	r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	ldrb	ip, [jpc, #5]
+	cmp	r2, r3
+	bgt	branch_taken_unsafe_3
+	DISPATCH 6
+}
+
+(iload_0_iload_N,iload_1_iload_N,iload_2_iload_N,iload_3_iload_N)
+(if_icmpgt) {
+	rsb	r3, r2, #opc_iload_0
+	ldrsb	r1, [jpc, #3]
+	rsb	r2, r0, #opc_iload_0_iload_N
+	ldr	r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	ldrb	ip, [jpc, #4]
+	cmp	r2, r3
+	bgt	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+(iload_iload)
+(if_icmple) {
+	ldrb	r3, [jpc, #3]
+	rsb	r2, r2, #0
+	ldrsb	r1, [jpc, #5]
+	rsb	r3, r3, #0
+	ldr	r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	ldrb	ip, [jpc, #6]
+	cmp	r2, r3
+	ble	branch_taken_unsafe_4
+	DISPATCH 7
+}
+
+(iload_iload_N)
+(if_icmple) {
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r2, #0
+	ldrsb	r1, [jpc, #4]
+	rsb	r3, r3, #opc_iload_0
+	ldr	r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	ldrb	ip, [jpc, #5]
+	cmp	r2, r3
+	ble	branch_taken_unsafe_3
+	DISPATCH 6
+}
+
+(iload_0_iload,iload_1_iload,iload_2_iload,iload_3_iload)
+(if_icmple) {
+	ldrb	r3, [jpc, #2]
+	rsb	r2, r0, #opc_iload_0_iload
+	ldrsb	r1, [jpc, #4]
+	rsb	r3, r3, #0
+	ldr	r2, [locals, r2, lsl #2]
+	ldr	r3, [locals, r3, lsl #2]
+	ldrb	ip, [jpc, #5]
+	cmp	r2, r3
+	ble	branch_taken_unsafe_3
+	DISPATCH 6
+}
+
+(iload_0_iload_N,iload_1_iload_N,iload_2_iload_N,iload_3_iload_N)
+(if_icmple) {
+	rsb	r3, r2, #opc_iload_0
+	ldrsb	r1, [jpc, #3]
+	rsb	r2, r0, #opc_iload_0_iload_N
+	ldr	r3, [locals, r3, lsl #2]
+	ldr	r2, [locals, r2, lsl #2]
+	ldrb	ip, [jpc, #4]
+	cmp	r2, r3
+	ble	branch_taken_unsafe_2
+	DISPATCH 5
+}
+
+#endif
+
+#endif // FAST_BYTECODES
--- a/src/cpu/zero/vm/bytecodes_zero.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/cpu/zero/vm/bytecodes_zero.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007 Red Hat, Inc.
+ * Copyright 2009 Edward Nevill
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,5 +28,54 @@
 #include "interpreter/bytecodes.hpp"
 
 void Bytecodes::pd_initialize() {
-  // No zero specific initialization
+#ifdef HOTSPOT_ASM
+  // Because iaccess_N can trap, we must say aload_N can trap, otherwise
+  // we get an assertion failure
+  def(_aload_1, "aload_1", "b", NULL, T_OBJECT ,  1, true);
+  def(_aload_2, "aload_2", "b", NULL, T_OBJECT ,  1, true);
+  def(_aload_3, "aload_3", "b", NULL, T_OBJECT ,  1, true);
+
+  def(_iaccess_0, "_iaccess_0", "b_jj", NULL, T_INT,  1, true, _aload_0);
+  def(_iaccess_1, "_iaccess_1", "b_jj", NULL, T_INT,  1, true, _aload_1);
+  def(_iaccess_2, "_iaccess_2", "b_jj", NULL, T_INT,  1, true, _aload_2);
+  def(_iaccess_3, "_iaccess_3", "b_jj", NULL, T_INT,  1, true, _aload_3);
+
+  def(_invokeresolved,   "invokeresolved",   "bjj", NULL, T_ILLEGAL, -1, true, _invokevirtual);
+  def(_invokespecialresolved, "invokespecialresolved", "bjj", NULL, T_ILLEGAL, -1, true, _invokespecial);
+  def(_invokestaticresolved,  "invokestaticresolved",  "bjj", NULL, T_ILLEGAL,  0, true, _invokestatic);
+
+  def(_dmac,            "dmac",      "b_",  NULL, T_DOUBLE, -16, false, _dmul);
+
+  def(_iload_iload,      "iload_iload",      "bi_i",NULL, T_INT, 2, false, _iload);
+  def(_iload_iload_N,    "iload_iload_N",    "bi_", NULL, T_INT, 2, false, _iload);
+
+  def(_iload_0_iconst_N, "iload_0_iconst_N", "b_",  NULL, T_INT, 2, false, _iload_0);
+  def(_iload_1_iconst_N, "iload_1_iconst_N", "b_",  NULL, T_INT, 2, false, _iload_1);
+  def(_iload_2_iconst_N, "iload_2_iconst_N", "b_",  NULL, T_INT, 2, false, _iload_2);
+  def(_iload_3_iconst_N, "iload_3_iconst_N", "b_",  NULL, T_INT, 2, false, _iload_3);
+  def(_iload_iconst_N,   "iload_iconst_N",   "bi_", NULL, T_INT, 2, false, _iload);
+
+  def(_iadd_istore_N,    "iadd_istore_N",    "b_",  NULL, T_VOID, -2, false, _iadd);
+  def(_isub_istore_N,    "isub_istore_N",    "b_",  NULL, T_VOID, -2, false, _isub);
+  def(_iand_istore_N,    "iand_istore_N",    "b_",  NULL, T_VOID, -2, false, _iand);
+  def(_ior_istore_N,     "ior_istore_N",     "b_",  NULL, T_VOID, -2, false, _ior);
+  def(_ixor_istore_N,    "ixor_istore_N",    "b_",  NULL, T_VOID, -2, false, _ixor);
+
+  def(_iadd_u4store,     "iadd_u4store",     "b_i", NULL, T_VOID, -2, false, _iadd);
+  def(_isub_u4store,     "isub_u4store",     "b_i", NULL, T_VOID, -2, false, _isub);
+  def(_iand_u4store,     "iand_u4store",     "b_i", NULL, T_VOID, -2, false, _iand);
+  def(_ior_u4store,      "ior_u4store",      "b_i", NULL, T_VOID, -2, false, _ior);
+  def(_ixor_u4store,     "ixor_u4store",     "b_i", NULL, T_VOID, -2, false, _ixor);
+
+  def(_iload_0_iload,    "iload_0_iload",    "b_i", NULL, T_INT, 2, false, _iload_0);
+  def(_iload_1_iload,    "iload_1_iload",    "b_i", NULL, T_INT, 2, false, _iload_1);
+  def(_iload_2_iload,    "iload_2_iload",    "b_i", NULL, T_INT, 2, false, _iload_2);
+  def(_iload_3_iload,    "iload_3_iload",    "b_i", NULL, T_INT, 2, false, _iload_3);
+
+  def(_iload_0_iload_N,  "iload_0_iload_N",  "b_",  NULL, T_INT, 2, false, _iload_0);
+  def(_iload_1_iload_N,  "iload_1_iload_N",  "b_",  NULL, T_INT, 2, false, _iload_1);
+  def(_iload_2_iload_N,  "iload_2_iload_N",  "b_",  NULL, T_INT, 2, false, _iload_2);
+  def(_iload_3_iload_N,  "iload_3_iload_N",  "b_",  NULL, T_INT, 2, false, _iload_3);
+
+#endif // HOTSPOT_ASM
 }
--- a/src/cpu/zero/vm/bytecodes_zero.hpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/cpu/zero/vm/bytecodes_zero.hpp	Fri Aug 09 12:21:36 2013 +0100
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2009 Red Hat, Inc.
+ * Copyright 2009 Edward Nevill
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -26,6 +27,44 @@
 #ifndef CPU_ZERO_VM_BYTECODES_ZERO_HPP
 #define CPU_ZERO_VM_BYTECODES_ZERO_HPP
 
-// This file is intentionally empty
+#ifdef HOTSPOT_ASM
+#define _iaccess_0      ((Bytecodes::Code)0xdb)
+#define _iaccess_1      ((Bytecodes::Code)0xdc)
+#define _iaccess_2      ((Bytecodes::Code)0xdd)
+#define _iaccess_3      ((Bytecodes::Code)0xde)
+
+#define _invokeresolved         ((Bytecodes::Code)0xdf)
+#define _invokespecialresolved  ((Bytecodes::Code)0xe0)
+#define _invokestaticresolved   ((Bytecodes::Code)0xe1)
+
+#define _iload_iload    ((Bytecodes::Code)0xe3)
+#define _iload_iload_N  ((Bytecodes::Code)0xe4)
+
+#define _dmac           ((Bytecodes::Code)0xe8)
+
+      _iload_0_iconst_N   , // 233 0xe9
+      _iload_1_iconst_N   , // 234 0xea
+      _iload_2_iconst_N   , // 235 0xeb
+      _iload_3_iconst_N   , // 236 0xec
+      _iload_iconst_N     , // 237 0xed
+      _iadd_istore_N      , // 238 0xee
+      _isub_istore_N      , // 239 0xef
+      _iand_istore_N      , // 240 0xf0
+      _ior_istore_N       , // 241 0xf1
+      _ixor_istore_N      , // 242 0xf2
+      _iadd_u4store       , // 243 0xf3
+      _isub_u4store       , // 244 0xf4
+      _iand_u4store       , // 245 0xf5
+      _ior_u4store        , // 246 0xf6
+      _ixor_u4store       , // 247 0xf7
+      _iload_0_iload      , // 248 0xf8
+      _iload_1_iload      , // 249 0xf9
+      _iload_2_iload      , // 250 0xfa
+      _iload_3_iload      , // 251 0xfb
+      _iload_0_iload_N    , // 252 0xfc
+      _iload_1_iload_N    , // 253 0xfd
+      _iload_2_iload_N    , // 254 0xfe
+      _iload_3_iload_N    , // 255 0xff
+#endif // HOTSPOT_ASM
 
 #endif // CPU_ZERO_VM_BYTECODES_ZERO_HPP
--- a/src/cpu/zero/vm/copy_zero.hpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/cpu/zero/vm/copy_zero.hpp	Fri Aug 09 12:21:36 2013 +0100
@@ -169,7 +169,7 @@
 }
 
 static void pd_fill_to_bytes(void* to, size_t count, jubyte value) {
-  memset(to, value, count);
+  if ( count > 0 ) memset(to, value, count);
 }
 
 static void pd_zero_to_words(HeapWord* tohw, size_t count) {
@@ -177,7 +177,7 @@
 }
 
 static void pd_zero_to_bytes(void* to, size_t count) {
-  memset(to, 0, count);
+  if ( count > 0 ) memset(to, 0, count);
 }
 
 #endif // CPU_ZERO_VM_COPY_ZERO_HPP
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/zero/vm/cppInterpreter_arm.S	Fri Aug 09 12:21:36 2013 +0100
@@ -0,0 +1,7367 @@
+#ifdef __arm__
+
+@ Copyright 2009, 2010 Edward Nevill
+@ Copyright 2012, Red Hat
+@ DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+@
+@ This code is free software; you can redistribute it and/or modify it
+@ under the terms of the GNU General Public License version 2 only, as
+@ published by the Free Software Foundation.
+@
+@ This code is distributed in the hope that it will be useful, but WITHOUT
+@ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+@ FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+@ version 2 for more details (a copy is included in the LICENSE file that
+@ accompanied this code).
+@
+@ You should have received a copy of the GNU General Public License version
+@ 2 along with this work; if not, write to the Free Software Foundation,
+@ Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+
+#undef T2JIT
+#if !defined(DISABLE_THUMB2) && defined(HOTSPOT_ASM) && !defined(SHARK)
+#define T2JIT
+#endif
+
+#ifdef HOTSPOT_ASM
+
+#if defined(__ARM_ARCH_4__) || defined(__ARM_ARCH_4T__)
+#define ARMv4
+#endif
+
+#if defined(SHARK) || defined(T2JIT)
+
+#define USE_COMPILER
+
+#endif
+
+#ifdef USE_COMPILER
+
+#ifdef SHARK
+#define MP_COMPILE_THRESHOLD    0x10000         // 65536 - must be a single MOV constant
+#define UP_COMPILE_THRESHOLD    0x30000         // 196608 - must be a single MOV constant
+#else
+#define MP_COMPILE_THRESHOLD    0x1380		// ~ 5000 - must be a single MOV constant
+#define UP_COMPILE_THRESHOLD    0x1380		// ~ 5000 - must be a single MOV constant
+#endif
+
+#define MAX_FG_METHOD_SIZE      500
+
+#ifndef DISABLE_ON_STACK_REPLACEMENT
+#define ON_STACK_REPLACEMENT
+#endif
+#ifndef ENABLE_BG_COMP_ON_NON_MP
+#define DISABLE_BG_COMP_ON_NON_MP
+#endif
+
+#ifdef T2JIT
+#define FREQ_COUNT_OVERFLOW Thumb2_Compile
+#else
+#define FREQ_COUNT_OVERFLOW _ZN18InterpreterRuntime26frequency_counter_overflowEP10JavaThreadPh
+#endif
+
+#endif // USE_COMPILER
+
+#ifndef DISABLE_NOTICE_SAFEPOINTS
+#define NOTICE_SAFEPOINTS
+#endif
+#ifndef DISABLE_HW_NULL_PTR_CHECK
+#define HW_NULL_PTR_CHECK
+#endif
+#ifndef DISABLE_FAST_BYTECODES
+#define FAST_BYTECODES
+#endif
+#ifndef DISABLE_HW_FP
+#define HW_FP
+#endif
+
+#define LEAF_STACK_SIZE	200
+#define STACK_SPARE	40
+
+#define TBIT 1
+	
+#define stack		r4
+#define	jpc		r5
+#define dispatch	r6
+#define locals		r7
+#define istate		r8
+#define constpool	r9
+#define thread		r10
+#define arm_sp		r13
+
+#define tmp_xxx		r7
+#define tmp_yyy		r5
+#define tmp_vvv		r9
+
+#define tmp1		r11
+
+#define regset		r4,r5,r6,r7,r9,r10,r11
+#define fast_regset	r8
+
+#define Rframe	r7
+
+#define FRAME_METHOD		(ISTATE_METHOD-ISTATE_NEXT_FRAME)
+#define FRAME_CONSTANTS		(ISTATE_CONSTANTS-ISTATE_NEXT_FRAME)
+#define FRAME_BCP		(ISTATE_BCP-ISTATE_NEXT_FRAME)
+#define FRAME_STACK_LIMIT	(ISTATE_STACK_LIMIT-ISTATE_NEXT_FRAME)
+#define FRAME_LOCALS		(ISTATE_LOCALS-ISTATE_NEXT_FRAME)
+#define FRAME_STACK		(ISTATE_STACK-ISTATE_NEXT_FRAME)
+
+#include "offsets_arm.s"
+
+#define last_implemented_bytecode 201
+
+	.macro	ALIGN_CODE
+	.align	6
+	.endm
+
+	.macro	ALIGN_DATA
+	.align	6
+	.endm
+
+	.macro	ALIGN_OPCODE
+	.align	6
+	.endm
+
+	.macro	ALIGN_WORD
+	.align	2
+	.endm
+
+#define SLOW_ENTRY_OFFSET 24
+#define FAST_ENTRY_OFFSET 40
+
+	.macro	SLOW_ENTRY
+	ALIGN_CODE
+	.word	0, 0, 0, 0, 0, 0
+	.endm
+
+	.macro	FAST_ENTRY
+	ALIGN_CODE
+	.endm
+
+@------------------------------------------------
+@ Software NULL Pointer check macro.
+@ Usage:
+@	SW_NPC	cmp	obj, #0
+@	SW_NPC	beq	null_ptr_exception
+@------------------------------------------------
+	.macro	SW_NPC	p1, p2, p3, p4
+#ifndef HW_NULL_PTR_CHECK
+  .ifnes "\p4", ""
+	\p1 \p2, \p3, \p4
+  .else
+    .ifnes "\p3", ""
+	\p1 \p2, \p3
+    .else
+	\p1 \p2
+    .endif
+  .endif
+#endif // HW_NULL_PTR_CHECK
+	.endm
+
+	.macro	HW_NPC	p1, p2, p3, p4
+#ifdef HW_NULL_PTR_CHECK
+  .ifnes "\p4", ""
+	\p1 \p2, \p3, \p4
+  .else
+    .ifnes "\p3", ""
+	\p1 \p2, \p3
+    .else
+	\p1 \p2
+    .endif
+  .endif
+#endif // HW_NULL_PTR_CHECK
+	.endm
+
+@------------------------------------------------
+@ Fast Bytecode Macros FBC and NFBC
+@ Use to conditionalise code using fast bytecodes
+@ EG:
+@	FBC	mov	r0, #opc_invokeresolved
+@	FBC	b	rewrite_bytecode
+@	NFBC	code to handle slow case
+@	NFBC	...
+@------------------------------------------------
+	.macro	FBC	p1, p2, p3, p4, p5
+#ifdef FAST_BYTECODES
+  .ifnes "\p5", ""
+	  \p1 \p2, \p3, \p4, \p5
+  .else
+    .ifnes "\p4", ""
+	  \p1 \p2, \p3, \p4
+    .else
+      .ifnes "\p3", ""
+	  \p1 \p2, \p3
+      .else
+	  \p1 \p2
+      .endif
+    .endif
+  .endif
+#endif
+	.endm
+
+	.macro	NFBC	p1, p2, p3, p4
+#ifndef FAST_BYTECODES
+  .ifnes "\p4", ""
+	\p1 \p2, \p3, \p4
+  .else
+    .ifnes "\p3", ""
+	\p1 \p2, \p3
+    .else
+	\p1 \p2
+    .endif
+  .endif
+#endif
+	.endm
+
+@------------------------------------------------
+@ Notice Safepoints macro
+@ Usage:
+@	NSP	<notice safepoint specific code>
+@------------------------------------------------
+	.macro	NSP	p1, p2, p3, p4, p5
+#ifdef NOTICE_SAFEPOINTS
+  .ifnes "\p5", ""
+	  \p1 \p2, \p3, \p4, \p5
+  .else
+    .ifnes "\p4", ""
+	  \p1 \p2, \p3, \p4
+    .else
+      .ifnes "\p3", ""
+	  \p1 \p2, \p3
+      .else
+	  \p1 \p2
+      .endif
+    .endif
+  .endif
+#endif
+	.endm
+
+@------------------------------------------------
+@ Use Compiler macro
+@ Usage:
+@	USEC	<compiler specific code>
+@------------------------------------------------
+	.macro	USEC	p1, p2, p3, p4
+#ifdef USE_COMPILER
+  .ifnes "\p4", ""
+	\p1 \p2, \p3, \p4
+  .else
+    .ifnes "\p3", ""
+	\p1 \p2, \p3
+    .else
+	\p1 \p2
+    .endif
+  .endif
+#endif
+	.endm
+
+@------------------------------------------------
+@ On stack replacement macro
+@ Usage:
+@       OSR     <compiler specific code>
+@------------------------------------------------
+        .macro  OSR     p1, p2, p3, p4
+#ifdef ON_STACK_REPLACEMENT
+  .ifnes "\p4", ""
+        \p1 \p2, \p3, \p4
+  .else
+    .ifnes "\p3", ""
+        \p1 \p2, \p3
+    .else
+        \p1 \p2
+    .endif
+  .endif
+#endif
+        .endm
+@------------------------------------------------
+@ THUMB2 specific code macro
+@ Usage:
+@	T2	<thumb2 specific code>
+@------------------------------------------------
+	.macro	T2	p1, p2, p3, p4
+#ifdef T2JIT
+  .ifnes "\p4", ""
+        \p1 \p2, \p3, \p4
+  .else
+    .ifnes "\p3", ""
+        \p1 \p2, \p3
+    .else
+        \p1 \p2
+    .endif
+  .endif
+#endif
+        .endm
+
+@------------------------------------------------
+@ Rewrite pairs of bytecodes
+@
+@ The fast bytecodes that replace pairs of codes improve performance,
+@ but they cause races between threads and incorrect operation in some
+@ other cases too.  REWRITE_PAIRS disables rewriting bytecode pairs.
+@	
+@ Usage:
+@	REWRITE_PAIRS	<instruction>
+@------------------------------------------------
+	.macro	REWRITE_PAIRS	p1, p2, p3, p4
+        .endm
+
+	.macro	Opcode	label
+	ALIGN_OPCODE
+do_\label:
+	.endm
+
+	.macro	GET_STACK	offset, reg
+	ldr	\reg, [stack, #(\offset+1) * 4]
+	.endm
+
+	.macro	PUT_STACK	offset, reg
+	str	\reg, [stack, #(\offset+1) * 4]
+	.endm
+
+#define PUSH	java_push
+	.macro	PUSH	reg1, reg2, reg3, reg4
+  .ifnes "\reg4", ""
+	stmda	stack!, {\reg1, \reg2, \reg3, \reg4}
+  .else
+    .ifnes "\reg3", ""
+	stmda	stack!, {\reg1, \reg2, \reg3}
+    .else
+      .ifnes "\reg2", ""
+	stmda	stack!, {\reg1, \reg2}
+      .else
+	str	\reg1, [stack], #-4
+      .endif
+    .endif
+  .endif
+	.endm
+
+#define POP	java_pop
+	.macro	POP	reg1, reg2, reg3, reg4
+  .ifnes "\reg4", ""
+	ldmib	stack!, {\reg1, \reg2, \reg3, \reg4}
+  .else
+    .ifnes "\reg3", ""
+	ldmib	stack!, {\reg1, \reg2, \reg3}
+    .else
+      .ifnes "\reg2", ""
+	ldmib	stack!, {\reg1, \reg2}
+      .else
+	ldr	\reg1, [stack, #4]!
+      .endif
+    .endif
+  .endif
+	.endm
+
+	.macro POPF0
+#ifdef __ARM_PCS_VFP
+	flds s0, [stack, #4]
+	add stack, #4
+#else
+	POP r0
+#endif
+	.endm
+	
+	.macro POPF1
+#ifdef __ARM_PCS_VFP
+	flds s1, [stack, #4]
+	add stack, #4
+#else
+	POP r1
+#endif
+	.endm
+	
+	.macro POPD0
+#ifdef __ARM_PCS_VFP
+	flds s0, [stack, #4]
+	flds s1, [stack, #8]
+	add stack, #8
+#else
+	POP r0, r1
+#endif
+	.endm
+	
+	.macro POPD1
+#ifdef __ARM_PCS_VFP
+	flds s2, [stack, #4]
+	flds s3, [stack, #8]
+	add stack, #8
+#else
+	POP r2, r3
+#endif
+	.endm
+	
+	.macro PUSHF0
+#ifdef __ARM_PCS_VFP
+	add stack, #-4
+	fsts s0, [stack, #4]
+#else
+	PUSH r0
+#endif
+	.endm
+	
+	.macro PUSHD0
+#ifdef __ARM_PCS_VFP
+	add stack, #-8
+	fsts s0, [stack, #4]
+	fsts s1, [stack, #8]
+#else
+	PUSH r0, r1
+#endif
+	.endm
+	
+	.macro	LOAD_ISTATE
+	ldr	istate, [thread, #THREAD_TOP_ZERO_FRAME]
+	sub	istate, istate, #ISTATE_NEXT_FRAME
+	.endm
+
+	.macro	CACHE_JPC
+	ldr	jpc, [istate, #ISTATE_BCP]
+	.endm
+
+	.macro	CACHE_LOCALS
+	ldr	locals, [istate, #ISTATE_LOCALS]
+	.endm
+
+	.macro	CACHE_STACK
+	ldr	stack, [istate, #ISTATE_STACK]
+	.endm
+
+	.macro	CACHE_CP
+	ldr	constpool, [istate, #ISTATE_CONSTANTS]
+	.endm
+
+	.macro	DECACHE_STACK_USING_FRAME
+	str	stack, [Rframe, #FRAME_STACK]
+	.endm
+
+	.macro	DECACHE_STACK
+	str	stack, [istate, #ISTATE_STACK]
+	.endm
+
+	.macro	DECACHE_JPC_USING_FRAME
+	str	jpc, [Rframe, #FRAME_BCP]
+	.endm
+
+	.macro	DECACHE_JPC
+	str	jpc, [istate, #ISTATE_BCP]
+	.endm
+
+	.macro	BREAK_DISPATCH
+	ldr	r1, [dispatch, #DispatchBreakPoint-XXX]
+	cmp	r1, jpc
+	bleq	do_dispatch_break
+	.endm
+
+	.set	dispatch_state, 0
+
+	.macro	DISPATCH_STATE	state
+	.set	dispatch_state, \state
+	.endm
+
+	.macro	DISPATCH_START	step=0
+	.set	dispatch_state, 1
+	ldrb	r0, [jpc, #\step]!
+	.endm
+
+	.macro	DISPATCH_START_REG	reg
+	.set	dispatch_state, 1
+	ldrb	r0, [jpc, \reg]!
+	.endm
+
+	.macro	DISPATCH_START_R2_R0
+	.set	dispatch_state, 1
+	mov	r0, r2
+	.endm
+
+	.macro	DISPATCH_START_R2_JPC
+	.set	dispatch_state, 1
+	add	jpc, jpc, #1
+	.endm
+
+	.macro	DISPATCH_START_R2
+	.set	dispatch_state, 1
+	add	jpc, jpc, #1
+	mov	r0, r2
+	.endm
+
+	.macro	DISPATCH_1
+@        ldrb    r1, [jpc, #2]
+	.endm
+
+	.macro	DISPATCH_2
+        ldr     ip, [dispatch, r0, lsl #2]
+	.endm
+
+	.macro	DISPATCH_3
+        ldrb    r2, [jpc, #1]
+	.endm
+
+	.macro	DISPATCH_4
+        ands    lr, ip, #7
+	.endm
+
+	.macro	DISPATCH_NEXT
+    .if dispatch_state == 0
+	.error	"DISPATCH_FINISH without a DISPATCH_START or DISPATCH_STATE"
+    .elseif dispatch_state == 1
+	DISPATCH_1
+    .elseif dispatch_state == 2
+	DISPATCH_2
+    .elseif dispatch_state == 3
+	DISPATCH_3
+    .elseif dispatch_state == 4
+	DISPATCH_4
+    .else
+	.error "Too many DISPATCH_NEXTs"
+    .endif
+	.set	dispatch_state, dispatch_state + 1
+	.endm
+
+	@ This macro calls a user-supplied my_trace routine.  It
+	@ passes the current JPC as argument zero.  It can be safely
+	@ inserted at any point in the interpreter.
+ 	.macro TRACE
+	stmfd	sp!, {r0, r1, r2, r3, r4, lr, ip}
+	mrs	r4, cpsr
+	mov	r0, jpc
+	ldr	r1, [thread, #THREAD_TOP_ZERO_FRAME]
+	sub	r1, r1, #ISTATE_NEXT_FRAME
+	ldr	r2, =my_trace
+	blx	r2
+	msr	cpsr, r4
+	ldmfd	sp!, {r0, r1, r2, r3, r4, lr, ip}	
+	.endm
+	
+	.macro	DISPATCH_FINISH
+    .if dispatch_state == 0
+	.error	"DISPATCH_FINISH without a DISPATCH_START or DISPATCH_STATE"
+    .elseif dispatch_state == 1
+	DISPATCH_1
+	DISPATCH_2
+	DISPATCH_3
+	DISPATCH_4
+    .elseif dispatch_state == 2
+	DISPATCH_2
+	DISPATCH_3
+	DISPATCH_4
+    .elseif dispatch_state == 3
+	DISPATCH_3
+	DISPATCH_4
+    .elseif dispatch_state == 4
+	DISPATCH_4
+    .endif
+        moveq   pc, ip
+	ldrb	r1, [jpc, lr]
+        bic     ip, ip, #7
+        ldr     pc, [ip, r1, lsl #2]
+	.set	dispatch_state, 0
+	.ltorg
+	.endm
+
+	.macro	DISPATCH_BYTECODE
+@        ldrb    r1, [jpc, #2]
+        ldr     ip, [dispatch, r0, lsl #2]
+        ldrb    r2, [jpc, #1]
+        ands    lr, ip, #7
+        moveq   pc, ip
+	ldrb	r1, [jpc, lr]
+        bic     ip, ip, #7
+        ldr     pc, [ip, r1, lsl #2]
+	.endm
+
+	.macro	DISPATCH step=0
+	ldrb	r0, [jpc, #\step]!
+@        ldrb    r1, [jpc, #2]
+        ldr     ip, [dispatch, r0, lsl #2]
+        ldrb    r2, [jpc, #1]
+        ands    lr, ip, #7
+        moveq   pc, ip
+	ldrb	r1, [jpc, lr]
+        bic     ip, ip, #7
+        ldr     pc, [ip, r1, lsl #2]
+	.ltorg
+	.endm
+
+#define FFI_TYPE_VOID		0
+#define FFI_TYPE_FLOAT		2
+#define	FFI_TYPE_DOUBLE		3
+#define FFI_TYPE_BOOL		5
+#define	FFI_TYPE_SINT8		6
+#define FFI_TYPE_UINT16		7
+#define FFI_TYPE_SINT16		8
+#define FFI_TYPE_SINT32		10
+#define FFI_TYPE_SINT64		12
+#define FFI_TYPE_POINTER	14
+
+	.macro	_BLX	reg
+	mov	lr, pc
+	mov	pc, \reg
+	.endm
+
+	.macro	_BX	reg
+	mov	pc, \reg
+	.endm
+
+	.macro	_BXEQ	reg
+	moveq	pc, \reg
+	.endm
+
+	.macro	_BXNE	reg
+	movne	pc, \reg
+	.endm
+
+#ifdef ARMv4
+
+#define blx _BLX
+#define bx _BX
+#define bxeq _BXEQ
+#define bxne _BXNE
+	.arch armv4
+
+#else
+	.arch armv7-a
+#endif
+
+#ifdef HW_FP
+
+#ifdef __ARM_PCS_VFP
+ 	.fpu vfpv3-d16
+	.eabi_attribute Tag_ABI_HardFP_use, 3
+	.eabi_attribute Tag_ABI_VFP_args, 1
+#else // __ARM_PCS_VFP
+	.fpu vfp
+#endif // __ARM_PCS_VFP
+
+#else // HW_FP
+	.fpu softvfp
+#endif // HW_FP
+
+#ifndef	__ARM_ARCH_7A__
+#	define dmb VOLATILE_BARRIER
+#	define dmb_st VOLATILE_BARRIER
+#else
+#	define	dmb_st .inst   0xf57ff05e
+#endif
+
+#define StoreStoreBarrier dmb_st
+#define StoreLoadBarrier dmb
+#define FullBarrier dmb
+	
+	.macro	VOLATILE_BARRIER arg
+	stmfd	sp!, {r2, lr}
+	ldr	r2, =0xffff0fa0 @ kernel_dmb
+	blx	r2
+	ldmfd	sp!, {r2, lr}
+	.endm
+	
+	.macro	GO_IF_VOLATILE reg, cp_cache, label
+	ldr	\reg, [\cp_cache, #CP_OFFSET+CP_CACHE_FLAGS]
+	tst	\reg, #(1<<CP_CACHE_VOLATILE_FIELD_FLAG_BIT)
+	bne	\label
+	.set	dispatch_saved, dispatch_state
+	.endm
+
+	@ We have to save and restore the dispatch_state because
+	@ dispatching is done twice, once each for volatile and
+	@ non-volatile versions.  It's essential that dispatch_state
+	@ be correct at the entry to the volatile version of the
+	@ handler.
+
+	.macro VOLATILE_VERSION
+	.if dispatch_state == 0
+	.set	dispatch_state, dispatch_saved
+	.else
+	.error "VOLATILE_VERSION macro used before non-volatile DISPATCH_FINISH."
+	.endif
+	.endm
+	
+	.eabi_attribute 20, 1 @ Tag_ABI_FP_denormal
+	.eabi_attribute 21, 1 @ Tag_ABI_FP_exceptions
+	.eabi_attribute 23, 3 @ Tag_ABI_FP_number_model
+	.eabi_attribute 24, 1 @ Tag_ABI_align8_needed
+	.eabi_attribute 25, 1 @ Tag_ABI_align8_preserved
+	.eabi_attribute 26, 2 @ Tag_ABI_enum_size
+	.eabi_attribute 30, 2 @ Tag_ABI_optimization_goals
+	.eabi_attribute 18, 4 @ Tag_ABI_PCS_wchar_t
+
+	.text
+
+	.global	cmpxchg_ptr
+	.type cmpxchg_ptr, %function
+cmpxchg_ptr:
+	stmfd	sp!, {r4, r5, r6, r7, r8, lr}
+	mov	r6, #0xffffffc0
+	mov	r4, r2
+	mov	r7, r0
+	mov	r5, r1
+	bic	r6, r6, #0xf000
+	mov	r8, r2
+1:
+	ldr	r3, [r5, #0]
+	mov	r0, r4
+	mov	r1, r7
+	mov	r2, r5
+	cmp	r4, r3
+	bne	2f
+	blx	r6
+	cmp	r0, #0
+	bne	1b
+	mov	r0, r8
+	ldmfd	sp!, {r4, r5, r6, r7, r8, pc}
+2:
+	mov	r8, r3
+	mov	r0, r8
+	ldmfd	sp!, {r4, r5, r6, r7, r8, pc}
+
+build_frame:
+	mov	r3, r0
+	ldr	r0, [r1, #METHOD_ACCESSFLAGS]
+	stmfd	arm_sp!, {r4, r5, r6, r7, r8}
+	ands	r7, r0, #JVM_ACC_SYNCHRONIZED
+	movne	r7, #2
+	tst	r0, #JVM_ACC_NATIVE
+	mov	r4, #0
+	movne	r5, #0
+	ldreqh	r6, [r1, #METHOD_MAXLOCALS]
+	ldrneh	r6, [r1, #METHOD_SIZEOFPARAMETERS]
+	ldreq	r0, [r3, #8]
+	subeq	r6, r6, #1
+	ldrne	r0, [r3, #8]
+	subne	r6, r6, #1
+	ldreqh	r5, [r1, #METHOD_MAXSTACK]
+	addeq	r6, r0, r6, asl #2
+	addne	r6, r0, r6, asl #2
+	sub	ip, r0, #4
+	str	ip, [r3, #8]
+	mov     ip, #INTERPRETER_FRAME
+	str	r4, [r0, #-4]
+	ldr	r0, [r3, #8]
+	sub	r8, r0, #4
+	str	r8, [r3, #8]
+	str	ip, [r0, #-4]
+	ldr	r8, [r3, #8]
+	sub	ip, r8, #68
+	str	ip, [r3, #8]
+	str	r2, [r8, #-68]
+	mov	r8, #0
+	str	r4, [ip, #44]
+	str	r6, [ip, #8]
+	str	r1, [ip, #16]
+	str	ip, [ip, #64]
+	ldr	r2, [r1, #METHOD_ACCESSFLAGS]
+	tst	r2, #JVM_ACC_NATIVE
+	mov	r2, #0
+	ldreq	r4, [r1, #METHOD_CONSTMETHOD]
+	addeq	r4, r4, #CONSTMETHOD_CODEOFFSET
+	str	r4, [ip, #4]
+	ldr	r4, [r1, #METHOD_CONSTANTS]
+	ldr	r4, [r4, #CONSTANTPOOL_CACHE]
+	str	r8, [ip, #28]
+	str	r2, [ip, #32]
+	str	r4, [ip, #12]
+	str	r2, [ip, #48]
+	str	r2, [ip, #20]
+	ldr	r2, [r3, #8]
+	str	r2, [ip, #60]
+	ldr	r2, [r1, #METHOD_ACCESSFLAGS]
+	tst	r2, #JVM_ACC_SYNCHRONIZED
+	beq	.L10
+	ldr	r2, [r3, #8]
+	sub	r7, r2, r7, asl #2
+	str	r7, [r3, #8]
+	ldr	r2, [r1, #METHOD_ACCESSFLAGS]
+	tst	r2, #JVM_ACC_STATIC
+	ldrne	r2, [r1, #METHOD_CONSTANTS]
+	ldreq	r2, [r6, #0]
+	ldrne	r2, [r2, #CONSTANTPOOL_POOL_HOLDER]
+	ldrne	r2, [r2, #KLASS_PART + KLASS_JAVA_MIRROR]
+	str	r2, [r7, #4]
+.L10:
+	ldr	r2, [r3, #8]
+	cmp	r5, #0
+	str	r2, [ip, #52]
+	ldr	r2, [r3, #8]
+	sub	r2, r2, #4
+	str	r2, [ip, #24]
+	ldrne	r2, [r3, #8]
+	ldreq	r5, [r3, #8]
+	subne	r5, r2, r5, asl #2
+	strne	r5, [r3, #8]
+	sub	r5, r5, #4
+	str	r5, [ip, #56]
+	ldmfd	arm_sp!, {r4, r5, r6, r7, r8}
+	bx	lr
+
+	ALIGN_CODE
+	.global	asm_generate_method_entry
+	.type asm_generate_method_entry, %function
+asm_generate_method_entry:
+	mov	r3, r0
+	mov	r0, #0
+#ifdef PRODUCT
+	// These entry points can not be used when PRODUCT is
+	// undefined because the BytecodeInterpreter class is virtual
+	// so it has an extra word (the vtable pointer) at its
+	// beginning.
+	adrl	ip, dispatch_init_adcon
+	ldm	ip, {r1, r2}
+	add	r1, r1, ip
+	add	r1, r1, r2		@ r1->dispatch
+
+	ldr	r2, [r1, #can_post_interpreter_events-XXX]
+	ldrb	r2, [r2]
+	cmp	r2, #0
+	bne	1f
+
+	ldr	r2, [r1, #PrintCommandLineFlags_Address-XXX]
+	ldrb	r2, [r2]
+	cmp	r2, #0
+	bne	1f
+
+	cmp	r3, #((3f-2f)/4) // i.e. sizeof asm_method_table
+	adrcc	ip, asm_method_table
+	ldrcc	r0, [ip, r3, lsl #2]
+#endif // PRODUCT
+1:
+	bx	lr
+
+// This table must be kept in sync with
+// AbstractInterpreter::MethodKind.  Note that every entry must have a
+// corresponding fast entry point at addr + CODE_ALIGN_SIZE.
+asm_method_table:
+2:
+        .word   normal_entry                    // method needs locals initialization
+        .word   normal_entry_synchronized       // method needs locals initialization & is synchronized
+        .word   native_entry                    // native method
+        .word   native_entry_synchronized       // native method & is synchronized
+        .word   empty_entry                     // empty method (code: _return)
+        .word   accessor_entry                  // accessor method (code: _aload_0, _getfield, _(a|i)return)
+        .word   normal_entry                    // abstract method (throws an AbstractMethodException)
+        .word   method_handle_entry             // java.lang.invoke.MethodHandles::invoke
+        .word   normal_entry                    // implementation of java.lang.Math.sin   (x)
+        .word   normal_entry                    // implementation of java.lang.Math.cos   (x)
+        .word   normal_entry                    // implementation of java.lang.Math.tan   (x)
+        .word   normal_entry                    // implementation of java.lang.Math.abs   (x)
+        .word   normal_entry                    // implementation of java.lang.Math.sqrt  (x)
+        .word   normal_entry                    // implementation of java.lang.Math.log   (x)
+        .word   normal_entry                    // implementation of java.lang.Math.log10 (x)
+        .word   accessor_entry                  // implementation of java.lang.ref.Reference.get()
+3:
+	
+	SLOW_ENTRY
+native_entry_synchronized:
+	mov	r2, thread
+	b	_ZN14CppInterpreter12native_entryEP13methodOopDesciP6Thread
+
+	FAST_ENTRY
+fast_native_entry_synchronized:
+	mov	r2, thread
+	b	_ZN14CppInterpreter12native_entryEP13methodOopDesciP6Thread
+
+	SLOW_ENTRY
+empty_entry:
+	ldrh	r3, [r0, #42]
+	ldr	r1, [r2, #THREAD_JAVA_SP]
+	add	r1, r1, r3, lsl #2
+	str	r1, [r2, #THREAD_JAVA_SP]
+	mov	r0, #0	@ deoptimized_frames = 0
+	bx	lr
+
+	FAST_ENTRY
+fast_empty_entry:
+	ldrh	r3, [r0, #42]
+	ldr	r1, [thread, #THREAD_JAVA_SP]
+	add	r1, r1, r3, lsl #2
+	str	r1, [thread, #THREAD_JAVA_SP]
+	bx	lr
+
+@ ---- START execute.s ---------------------------------------------------------------------
+
+	.global	asm_check_null_ptr
+	.type asm_check_null_ptr, %function
+asm_check_null_ptr:
+
+#ifdef HW_NULL_PTR_CHECK
+
+#define uc_mcontext		20
+#define arm_registers_offset	12
+#define arm_cpsr_offset		16*4
+
+	add	r0, r0, #uc_mcontext + arm_registers_offset
+	ldr	r1, [r0, #15*4]
+	adr	ip, abort_table
+abort_loop:
+	ldr	r2, [ip], #8
+	cmp	r2, #0
+	beq	2f
+	cmp	r2, r1
+	bne	abort_loop
+
+	ldr	r3, [ip, #-4]
+	cmp	r3, #8
+	bcs	1f
+
+	ldr	ip, [r0, #5*4]
+	sub	ip, ip, r3
+	str	ip, [r0, #5*4]
+
+	adrl	r3, null_ptr_exception
+1:
+	str	r3, [r0, #15*4]
+do_setcontext:
+	mov	r0, #1
+	bx	lr
+#endif // HW_NULL_PTR_CHECK
+2:
+#ifdef T2JIT
+	b	Thumb2_Check_Null
+#else
+	mov	r0, #0
+	bx	lr
+#endif
+
+#ifdef HW_NULL_PTR_CHECK
+abort_table:
+			.word	.abortentry5, 1
+			.word	.abortentry6, 1
+			.word	.abortentry7, 1
+			.word	.abortentry8, 1
+			.word	.abortentry9, 1
+			.word	.abortentry10, 1
+			.word	.abortentry11, 1
+			.word	.abortentry12, 1
+			.word	.abortentry13, 1
+
+		FBC	.word	.abortentry19, 1
+		FBC	.word	.abortentry20, 1
+		FBC	.word	.abortentry21, 1
+		FBC	.word	.abortentry22, 1
+		FBC	.word	.abortentry23, 1
+		FBC	.word	.abortentry24, 1
+		FBC	.word	.abortentry25, 1
+		FBC	.word	.abortentry26, 1
+		FBC	.word	.abortentry27, 1
+		FBC	.word	.abortentry28, 1
+		FBC	.word	.abortentry29, 1
+		FBC	.word	.abortentry30, 1
+		FBC	.word	.abortentry31, 1
+		FBC	.word	.abortentry32, 1
+
+		FBC	.word	.abortentry38, 2
+		FBC	.word	.abortentry39, 3
+		FBC	.word	.abortentry40, 4
+		FBC	.word	.abortentry41, 3
+		FBC	.word	.abortentry42, 2
+		FBC	.word	.abortentry42_1, 2
+		FBC	.word	.abortentry43, 0
+		FBC	.word	.abortentry44, 1
+		FBC	.word	.abortentry45, 3
+		FBC	.word	.abortentry46, 2
+		FBC	.word	.abortentry47, 0
+		FBC	.word	.abortentry48, 1
+		FBC	.word	.abortentry49, 0
+		FBC	.word	.abortentry50, 1
+		FBC	.word	.abortentry51, 0
+		FBC	.word	.abortentry52, 1
+
+		FBC	.word	.abortentry58, 2
+		FBC	.word	.abortentry59, 2
+		FBC	.word	.abortentry60, 2
+
+		FBC	.word	.abortentry73, 1
+		FBC	.word	.abortentry74, 1
+		FBC	.word	.abortentry75, 1
+		FBC	.word	.abortentry76, 1
+		FBC	.word	.abortentry77, 1
+
+	    FBC		.word	.abortentry78, 3
+	    FBC		.word	.abortentry78_v, 3
+	    FBC		.word	.abortentry79, 3
+	    FBC		.word	.abortentry79_v, 3
+	    FBC		.word	.abortentry80, 3
+	    FBC		.word	.abortentry80_v, 3
+	    FBC		.word	.abortentry81, 3
+	    FBC		.word	.abortentry81_v, 3
+	    FBC		.word	.abortentry82, 3
+	    FBC		.word	.abortentry82_v, 3
+	    FBC		.word	.abortentry83, 3
+	    FBC		.word	.abortentry83_v, 3
+	    FBC		.word	.abortentry84, 3
+	    FBC		.word	.abortentry84_v, 3
+	    FBC		.word	.abortentry85, 3
+	    FBC		.word	.abortentry85_v, 3
+	    FBC		.word	.abortentry86, 3
+	    FBC		.word	.abortentry86_v, 3
+	    FBC		.word	.abortentry87, 3
+	    FBC		.word	.abortentry87_v, 3
+
+	    FBC    	.word	.abortentry88, 3
+	    FBC    	.word	.abortentry88_v, 3
+	    FBC	   	.word	.abortentry89, 5
+	    FBC	     	.word	.abortentry90, 4
+	    FBC	     	.word	.abortentry91, 4
+	    FBC		.word	.abortentry104, 0
+		FBC	.word	.abortentry105, 1
+		FBC	.word	.abortentry106, 1
+		FBC	.word	.abortentry107, 1
+		FBC	.word	.abortentry108, 1
+		FBC	.word	.abortentry109, 1
+			.word	.abortentry110, 0
+
+		FBC	.word	.abortentry111, 3
+		FBC	.word	.abortentry112, 3
+
+		FBC	.word	.abortentry113, 0
+		FBC	.word	.abortentry113_v, 0
+			.word	.abortentry114, 1
+		FBC	.word	.abortentry117, 0
+			.word	.abortentry118, 0
+			.word	.abortentry119, 1
+	.word	0
+
+#endif
+
+
+	SLOW_ENTRY
+native_entry:
+	stmfd	arm_sp!, {regset, lr}
+	mov	thread, r2
+	bl	fast_native_entry	
+	mov	r0, #0	@ deoptimized_frames = 0
+	ldmia	sp!, {regset, pc}
+
+	FAST_ENTRY
+fast_native_entry:
+	adrl	ip, dispatch_init_adcon
+	mov	r11, r0
+	ldm	ip, {dispatch, r7}
+	stmdb	sp!, {fast_regset, lr}
+	add	dispatch, dispatch, ip
+	add	dispatch, dispatch, r7
+	ldrh	r1, [r11, #METHOD_SIZEOFPARAMETERS]
+	ldr	r4, [thread, #THREAD_JAVA_SP]
+	ldr	r3, [thread, #THREAD_TOP_ZERO_FRAME]
+	mov	r0, #0
+	mov	ip, #INTERPRETER_FRAME
+	sub	r9, r4, #FRAME_SIZE
+	str	r9, [thread, #THREAD_JAVA_SP]	@ drop stack
+	sub	r5, r9, #4		@ stack limit = r9 - 4
+	str	r3, [r9, #ISTATE_NEXT_FRAME]
+	str	ip, [r9, #ISTATE_FRAME_TYPE]
+	str	r9, [r9, #ISTATE_MONITOR_BASE]
+	str	r5, [r9, #ISTATE_STACK_LIMIT]
+	str	r9, [r9, #ISTATE_STACK_BASE]
+	str	r0, [r9, #ISTATE_OOP_TEMP]
+
+	str	r0, [r9, #ISTATE_MSG]
+
+	ldr	ip, [r11, #METHOD_CONSTANTS]
+	sub	r7, r4, #4
+	mov	r5, #0
+	add	r7, r7, r1, lsl #2
+
+	ldr	ip, [ip, #CONSTANTPOOL_CACHE]
+
+	str	thread, [r9, #ISTATE_THREAD]
+	str	r5, [r9, #ISTATE_BCP]
+	str	r7, [r9, #ISTATE_LOCALS]
+	str	ip, [r9, #ISTATE_CONSTANTS]
+	str	r11, [r9, #ISTATE_METHOD]
+	str     r9, [r9, #ISTATE_SELF_LINK]
+
+	ldr	r1, [thread, #THREAD_STACK_SIZE]
+	ldr	r3, [thread, #THREAD_STACK_BASE]
+	add	r0, r9, #72
+
+	rsb	r3, r1, r3
+	rsb	r3, r3, arm_sp
+	cmp	r3, #4096
+	str	r0, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r5, [r11, #METHOD_SIGNATUREHANDLER]
+	blt	.fast_native_entry_throw_stack_overflow
+	cmp	r5, #0
+	bne	.fast_native_entry_got_handleraddr
+	str	r5, [thread, #THREAD_LAST_JAVA_SP] @ r5 is zero at this point
+	str	r0, [thread, #THREAD_LAST_JAVA_FP]
+	ldr	r0, [thread, #THREAD_JAVA_SP]
+	str	r0, [thread, #THREAD_LAST_JAVA_SP]
+	mov	r0, thread
+	mov	r1, r11
+	bl	_ZN18InterpreterRuntime19prepare_native_callEP10JavaThreadP13methodOopDesc
+	ldr	r11, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r1, [thread, #THREAD_PENDING_EXC]
+	str	r5, [thread, #THREAD_LAST_JAVA_SP]  @ r5 is zero at this point
+	str	r5, [thread, #THREAD_LAST_JAVA_FP]
+	ldr	r5, [thread, #THREAD_JAVA_SP]
+	str	r5, [thread, #THREAD_LAST_JAVA_SP]
+	ldr	r11, [r11, #-72 + ISTATE_METHOD]
+	cmp	r1, #0
+	bne	.fast_native_entry_exception
+	ldr	r5, [r11, #METHOD_SIGNATUREHANDLER]
+.fast_native_entry_got_handleraddr:
+	ldr	r2, [dispatch, #InterpreterRuntime_slow_signature_handler_Address-XXX]
+	cmp	r5, r2
+	bne	.fast_native_entry_get_handler
+	ldr	r3, [thread, #THREAD_TOP_ZERO_FRAME]
+	stmfd	sp!, {r2}
+	mov	r2, #0
+        str     r2, [thread, #THREAD_LAST_JAVA_SP]
+	ldmfd	sp!, {r2}
+	mov	r0, thread
+	str	r3, [thread, #THREAD_LAST_JAVA_FP]
+	ldr	r3, [thread, #THREAD_JAVA_SP]
+	str	r3, [thread, #THREAD_LAST_JAVA_SP]
+	mov	r3, r2
+	mov	r1, r11
+	bl	_ZN18InterpreterRuntime22slow_signature_handlerEP10JavaThreadP13methodOopDescPiS4_
+	ldr	r11, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r1, [thread, #THREAD_PENDING_EXC]
+	mov	r3, #0
+	ldr	r11, [r11, #-72 + ISTATE_METHOD]
+	cmp	r1, #0
+	str	r3, [thread, #THREAD_LAST_JAVA_SP]
+	str	r3, [thread, #THREAD_LAST_JAVA_FP]
+	mov	r5, r0
+	bne	.fast_native_entry_exception
+.fast_native_entry_get_handler:
+
+	sub	ip, r7, r4
+	add	r3, r4, #ISTATE_OOP_TEMP-76
+
+	mov	ip, ip, asr #2
+	mov	r4, arm_sp
+
+	add	lr, ip, #4
+	sub	arm_sp, arm_sp, #16
+
+	bic	lr, lr, #1
+	add	r1, r5, #SIZEOF_FFI_CIF
+
+	sub	arm_sp, arm_sp, lr, lsl #2
+	add	r2, thread, #THREAD_JNI_ENVIRONMENT
+
+	mov	lr, arm_sp
+	str	r2, [lr], #4
+
+#ifdef __ARM_PCS_VFP
+	mov	thread, #0xff	@ bitmap for floating-point register set
+	orr	thread, #0xff00
+#endif	
+	ldr	r2, [r11, #METHOD_ACCESSFLAGS]
+	add	r1, r1, #4
+	tst	r2, #JVM_ACC_STATIC
+	beq	.do_fast_copy_args
+
+	ldr	r2, [r11, #METHOD_CONSTANTS]
+	ldr	r2, [r2, #CONSTANTPOOL_POOL_HOLDER]
+	str	r3, [lr], #4
+	ldr	r2, [r2, #KLASS_PART + KLASS_JAVA_MIRROR]
+	add	r1, r1, #4
+	str	r2, [r3]
+
+.do_fast_copy_args:
+	cmp	ip, #0
+	blt	.fast_no_args
+
+.fast_copy_args:
+	ldr	r0, [r1], #4
+	ldrh	r3, [r0, #6]
+	cmp	r3, #FFI_TYPE_DOUBLE
+	beq	.fast_copy_double
+	cmp	r3, #FFI_TYPE_FLOAT
+	beq	.fast_copy_float
+	ldr	r2, [r7], #-4
+	cmp	r3, #FFI_TYPE_SINT64
+	beq	.fast_copy_long
+
+	cmp	r3, #FFI_TYPE_POINTER
+	beq	.fast_copy_ptr
+
+	subs	ip, ip, #1
+	str	r2, [lr], #4
+	bge	.fast_copy_args
+	b	.fast_no_args
+
+#ifdef __ARM_PCS_VFP
+	// FIXME: These macros are very inefficient
+	.macro	FIND_LOWEST_BIT	rd, rs
+	mov	\rd, #0
+0:	tst	\rs, #1
+	lsr	\rs, #1
+	addeq	\rd, #1
+	beq	0b
+	lsl	\rs, \rd
+	lsl	\rs, #1
+	.endm
+	
+	.macro	FIND_LOWEST_BIT_PAIR rd, rs
+	stmfd	sp!, {r1}
+	stmfd	sp!, {\rs}
+	mov	\rd, #0
+0:	tst	\rs, #1
+	lsr	\rs, #2
+	addeq	\rd, #2
+	beq	0b
+	ldmfd	sp!, {\rs}
+	mov	r1, #3
+	lsl	r1, \rd
+	bic	\rs, r1
+	ldmfd	sp!, {r1}
+	.endm
+	
+.fast_copy_double:
+	orrs	thread, thread
+	ldreq	r2, [r7], #-4	
+	beq	vm_fatal_error
+	FIND_LOWEST_BIT_PAIR r0, thread
+	adrl	r2, .copy_double_table
+	add	pc, r2, r0, asl#5
+
+.fast_copy_float:
+	orrs	thread, thread
+	ldreq	r2, [r7], #-4	
+	beq	vm_fatal_error
+	FIND_LOWEST_BIT r0, thread
+	adr	r2, .copy_float_table
+	add	pc, r2, r0, asl#6
+#else
+
+.fast_copy_double:
+	ldr	r2, [r7], #-4
+	tst	lr, #4
+	ldr	r3, [r7], #-4
+	addne	lr, lr, #4
+	str	r2, [lr, #4]
+	subs	ip, ip, #2
+	str	r3, [lr], #8
+	bge	.fast_copy_args
+	b	.fast_no_args
+	
+.fast_copy_float:
+	ldr	r2, [r7], #-4
+	subs	ip, ip, #1
+	str	r2, [lr], #4
+	bge	.fast_copy_args
+
+#endif
+
+.fast_copy_long:
+	tst	lr, #4
+	ldr	r3, [r7], #-4
+	addne	lr, lr, #4
+	str	r2, [lr, #4]
+	subs	ip, ip, #2
+	str	r3, [lr], #8
+	bge	.fast_copy_args
+	b	.fast_no_args
+
+.fast_copy_ptr:
+	cmp	r2, #0
+	addne	r2, r7, #4
+	subs	ip, ip, #1
+	str	r2, [lr], #4
+	bge	.fast_copy_args
+
+.fast_no_args:
+	ldr     thread, [r9, #ISTATE_THREAD]
+	ldr	r0, [thread, #THREAD_TOP_ZERO_FRAME]
+	mov	r2, #_thread_in_native
+
+	mov	ip, #0
+	str	ip, [thread, #THREAD_LAST_JAVA_SP]
+
+	str	r0, [thread, #THREAD_LAST_JAVA_FP]
+	str	r2, [thread, #THREAD_STATE]
+
+	ldr	r2, [thread, #THREAD_JAVA_SP]
+	str	r2, [thread, #THREAD_LAST_JAVA_SP]
+
+	ldr	ip, [r11, #METHOD_NATIVEHANDLER]
+	ldrh	r11, [r11, #METHOD_SIZEOFPARAMETERS]
+
+	ldmia	arm_sp!, {r0, r1, r2, r3}
+	blx	ip
+
+	mov	ip, #_thread_in_native_trans
+	mov	arm_sp, r4
+
+	ldr	r3, [dispatch, #SafePointSynchronize_state_Address-XXX]
+	str	ip, [thread, #THREAD_STATE]
+
+	ldr	r3, [r3, #0]
+	cmp	r3, #0
+	ldreq	r3, [thread, #THREAD_SUSPEND_FLAGS]
+	cmpeq	r3, #0
+	bne	.fast_native_entry_do_special
+
+.fast_native_entry_do_return:
+	mov	r3, #_thread_in_Java
+	mov	r2, #0
+
+	str	r3, [thread, #THREAD_STATE]
+	str	r2, [thread, #THREAD_LAST_JAVA_SP]
+	str	r2, [thread, #THREAD_LAST_JAVA_FP]
+
+	add	r2, r5, #SIZEOF_FFI_CIF
+	ldr	r3, [r5, #4]
+
+	ldr	r5, [thread, #THREAD_TOP_ZERO_FRAME]
+
+	ldr	lr, [r5], #4
+
+	add	r5, r5, r11, lsl #2
+
+	ldr	ip, [r2, r3, asl #2]
+	adr	r3, .return_type_table
+
+	ldrh	r2, [ip, #6]
+	ldr	ip, [thread, #THREAD_ACTIVE_HANDLES]
+
+	mov	tmp1, #0
+	ldr	pc, [r3, r2, lsl #2]
+
+.return_type_table:
+	.word	.fast_native_return_void	@ FFI_TYPE_VOID	== 0
+	.word	0
+#ifdef __ARM_PCS_VFP
+	.word	.fast_native_return_float	@ FFI_TYPE_FLOAT == 2
+	.word	.fast_native_return_double	@ FFI_TYPE_DOUBLE == 3
+#else
+	.word	.fast_native_return_w		@ FFI_TYPE_FLOAT == 2
+	.word	.fast_native_return_dw		@ FFI_TYPE_DOUBLE == 3
+#endif
+	.word	0
+	.word	.fast_native_return_bool	@ FFI_TYPE_BOOL == 5
+	.word	.fast_native_return_byte	@ FFI_TYPE_SINT8 == 6
+	.word	.fast_native_return_char	@ FFI_TYPE_UINT16 == 7
+	.word	.fast_native_return_short	@ FFI_TYPE_SINT16 == 8
+	.word	0
+	.word	.fast_native_return_w		@ FFI_TYPE_SINT32 == 10
+	.word	0
+	.word	.fast_native_return_dw		@ FFI_TYPE_SINT64 == 12
+	.word	0
+	.word	.fast_native_return_obj		@ FFI_TYPE_POINTER == 14
+
+#ifdef __ARM_PCS_VFP
+.fast_native_return_double:
+	fsts	s0, [r5, #-8]
+	str	lr, [thread, #THREAD_TOP_ZERO_FRAME]
+	str	tmp1, [ip, #JNIHANDLEBLOCK_TOP]
+	fsts	s1, [r5, #-4]
+	add	r5, #-8
+	str	r5, [thread, #THREAD_JAVA_SP]
+	mov	r0, #0	@ deoptimized_frames = 0
+	ldmfd	arm_sp!, {fast_regset, pc}
+.fast_native_return_float:
+	fsts	s0, [r5, #-4]
+	str	lr, [thread, #THREAD_TOP_ZERO_FRAME]
+	str	tmp1, [ip, #JNIHANDLEBLOCK_TOP]
+	add	r5, #-4
+	str	r5, [thread, #THREAD_JAVA_SP]
+	mov	r0, #0	@ deoptimized_frames = 0
+	ldmfd	arm_sp!, {fast_regset, pc}
+#endif
+.fast_native_return_dw:
+	str	r0, [r5, #-8]!
+	str	lr, [thread, #THREAD_TOP_ZERO_FRAME]
+	str	tmp1, [ip, #JNIHANDLEBLOCK_TOP]
+	str	r1, [r5, #4]
+	str	r5, [thread, #THREAD_JAVA_SP]
+	mov	r0, #0	@ deoptimized_frames = 0
+	ldmfd	arm_sp!, {fast_regset, pc}
+.fast_native_return_byte:
+	mov	r0, r0, lsl #24
+	str	lr, [thread, #THREAD_TOP_ZERO_FRAME]
+	mov	r0, r0, asr #24
+	str	tmp1, [ip, #JNIHANDLEBLOCK_TOP]
+	str	r0, [r5, #-4]!
+	str	r5, [thread, #THREAD_JAVA_SP]
+	mov	r0, #0	@ deoptimized_frames = 0
+	ldmfd	arm_sp!, {fast_regset, pc}
+.fast_native_return_char:
+	mov	r0, r0, lsl #16
+	str	lr, [thread, #THREAD_TOP_ZERO_FRAME]
+	mov	r0, r0, lsr #16
+	str	tmp1, [ip, #JNIHANDLEBLOCK_TOP]
+	str	r0, [r5, #-4]!
+	str	r5, [thread, #THREAD_JAVA_SP]
+	mov	r0, #0	@ deoptimized_frames = 0
+	ldmfd	arm_sp!, {fast_regset, pc}
+.fast_native_return_bool:
+	ands	r0, r0, #255
+	str	lr, [thread, #THREAD_TOP_ZERO_FRAME]
+	movne	r0, #1
+	str	tmp1, [ip, #JNIHANDLEBLOCK_TOP]
+	str	r0, [r5, #-4]!
+	str	r5, [thread, #THREAD_JAVA_SP]
+	mov	r0, #0	@ deoptimized_frames = 0
+	ldmfd	arm_sp!, {fast_regset, pc}
+.fast_native_return_obj:
+	cmp	r0, #0
+	ldrne	r0, [r0]
+	str	r0, [r5, #-4]!
+	str	lr, [thread, #THREAD_TOP_ZERO_FRAME]
+	str	tmp1, [ip, #JNIHANDLEBLOCK_TOP]
+	str	r5, [thread, #THREAD_JAVA_SP]
+	mov	r0, #0	@ deoptimized_frames = 0
+	ldmfd	arm_sp!, {fast_regset, pc}
+.fast_native_return_short:
+	mov	r0, r0, lsl #16
+	mov	r0, r0, asr #16
+.fast_native_return_w:
+	str	r0, [r5, #-4]!
+.fast_native_return_void:
+	str	lr, [thread, #THREAD_TOP_ZERO_FRAME]
+	str	tmp1, [ip, #JNIHANDLEBLOCK_TOP]
+.fast_native_exit:
+	str	r5, [thread, #THREAD_JAVA_SP]
+	mov	r0, #0	@ deoptimized_frames = 0
+	ldmfd	arm_sp!, {fast_regset, pc}
+
+.fast_native_entry_throw_stack_overflow:
+	str	r0, [thread, #THREAD_LAST_JAVA_FP]
+	mov	r0, thread
+	bl	_ZN18InterpreterRuntime24throw_StackOverflowErrorEP10JavaThread
+	mov	r3, #0
+	ldr	r1, [thread, #THREAD_PENDING_EXC]
+	str	r3, [thread, #THREAD_LAST_JAVA_FP]
+	str	r3, [thread, #THREAD_LAST_JAVA_SP]
+.fast_native_entry_exception:
+	ldr	r5, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r3, [r5], #4
+	str	r3, [thread, #THREAD_TOP_ZERO_FRAME]
+
+	ldrh	r3, [r11, #METHOD_SIZEOFPARAMETERS]
+	add	r5, r5, r3, lsl #2
+	b	.fast_native_exit
+
+.fast_native_entry_do_special:
+	stmdb	arm_sp!, {r0, r1}
+	mov	r0, thread
+	bl	_ZN10JavaThread40check_special_condition_for_native_transEPS_
+	ldmia	arm_sp!, {r0, r1}
+	b	.fast_native_entry_do_return
+
+
+	
+#ifdef __ARM_PCS_VFP
+
+	.macro	COPY_FLOAT rs, rd, rcount
+	.align	6
+	flds	\rd, [\rs]
+	add	\rs, #-4
+	subs	\rcount, #1
+	bge	.fast_copy_args
+	b	.fast_no_args
+	.endm
+
+	.align	6
+.copy_float_table:
+	COPY_FLOAT r7, s0, ip
+	COPY_FLOAT r7, s1, ip
+	COPY_FLOAT r7, s2, ip
+	COPY_FLOAT r7, s3, ip
+	COPY_FLOAT r7, s4, ip
+	COPY_FLOAT r7, s5, ip
+	COPY_FLOAT r7, s6, ip
+	COPY_FLOAT r7, s7, ip
+	COPY_FLOAT r7, s8, ip
+	COPY_FLOAT r7, s9, ip
+	COPY_FLOAT r7, s10, ip
+	COPY_FLOAT r7, s11, ip
+	COPY_FLOAT r7, s12, ip
+	COPY_FLOAT r7, s13, ip
+	COPY_FLOAT r7, s14, ip
+	COPY_FLOAT r7, s15, ip
+	COPY_FLOAT r7, s16, ip
+	COPY_FLOAT r7, s17, ip
+	COPY_FLOAT r7, s18, ip
+	COPY_FLOAT r7, s19, ip
+	COPY_FLOAT r7, s20, ip
+	COPY_FLOAT r7, s21, ip
+	COPY_FLOAT r7, s22, ip
+	COPY_FLOAT r7, s23, ip
+	COPY_FLOAT r7, s24, ip
+	COPY_FLOAT r7, s25, ip
+	COPY_FLOAT r7, s26, ip
+	COPY_FLOAT r7, s27, ip
+	COPY_FLOAT r7, s28, ip
+	COPY_FLOAT r7, s29, ip
+	COPY_FLOAT r7, s30, ip
+	COPY_FLOAT r7, s31, ip
+
+	.macro	COPY_DOUBLE rs, rdlo, rdhi, rcount
+	.align	6
+	flds	\rdhi, [\rs]
+	flds	\rdlo, [\rs, #-4]
+	add	\rs, #-8
+	subs	\rcount, #2
+	bge	.fast_copy_args
+	b	.fast_no_args
+	.endm
+
+	.align	6
+.copy_double_table:
+	COPY_DOUBLE r7, s0, s1, ip
+	COPY_DOUBLE r7, s2, s3, ip
+	COPY_DOUBLE r7, s4, s5, ip
+	COPY_DOUBLE r7, s6, s7, ip
+	COPY_DOUBLE r7, s8, s9, ip
+	COPY_DOUBLE r7, s10, s11, ip
+	COPY_DOUBLE r7, s12, s13, ip
+	COPY_DOUBLE r7, s14, s15, ip
+	COPY_DOUBLE r7, s16, s17, ip
+	COPY_DOUBLE r7, s18, s19, ip
+	COPY_DOUBLE r7, s20, s21, ip
+	COPY_DOUBLE r7, s22, s23, ip
+	COPY_DOUBLE r7, s24, s25, ip
+	COPY_DOUBLE r7, s26, s27, ip
+	COPY_DOUBLE r7, s28, s29, ip
+	COPY_DOUBLE r7, s30, s31, ip
+#endif
+
+#include "bytecodes_arm.s"
+
+	Opcode	idiv
+
+	POP	r1
+	POP	r0
+	cmp	r1, #0
+	beq	divide_by_zero_exception
+	bl	__aeabi_idiv
+	PUSH	r0
+	DISPATCH 1
+
+	Opcode	idiv_clz
+
+	POP	r1
+	POP	r0
+	bl	int_div
+idiv_clz_ret:
+	PUSH	r0
+	DISPATCH 1
+
+	Opcode	irem
+
+	POP	r1
+	POP	r0
+	cmp	r1, #0
+	beq	divide_by_zero_exception
+	bl	__aeabi_idivmod
+	PUSH	r1
+	DISPATCH 1
+
+	Opcode	irem_clz
+
+	POP	r1
+	POP	r0
+	bl	int_rem
+irem_clz_ret:
+	PUSH	r0
+	DISPATCH 1
+
+	Opcode	goto
+        ldrsb   r1, [jpc, #1]
+        ldrb    r2, [jpc, #2]
+branch_taken:
+        orr     r2, r2, r1, lsl #8
+        DISPATCH_START_REG	r2
+	cmp	r2, #0
+	ble	do_backedge
+	DISPATCH_FINISH
+
+branch_taken_unsafe:
+	mov	r2, r2, lsl #24
+	orr	r2, r1, r2, asr #16
+        DISPATCH_START_REG	r2
+  USEC	cmp	r2, #0
+  USEC	ble	do_backedge
+	DISPATCH_FINISH
+
+branch_taken_unsafe_1:
+	add	jpc, jpc, #1
+	orr	r2, ip, r1, lsl #8
+        DISPATCH_START_REG	r2
+  USEC	cmp	r2, #0
+  USEC	ble	do_backedge
+	DISPATCH_FINISH
+
+branch_taken_unsafe_2:
+	add	jpc, jpc, #2
+	orr	r2, ip, r1, lsl #8
+        DISPATCH_START_REG	r2
+  USEC	cmp	r2, #0
+  USEC	ble	do_backedge
+	DISPATCH_FINISH
+
+branch_taken_unsafe_3:
+	add	jpc, jpc, #3
+	orr	r2, ip, r1, lsl #8
+        DISPATCH_START_REG	r2
+  USEC	cmp	r2, #0
+  USEC	ble	do_backedge
+	DISPATCH_FINISH
+
+branch_taken_unsafe_4:
+	add	jpc, jpc, #4
+	orr	r2, ip, r1, lsl #8
+        DISPATCH_START_REG	r2
+  USEC	cmp	r2, #0
+  USEC	ble	do_backedge
+	DISPATCH_FINISH
+
+do_backedge:
+  USEC	ldr	tmp1, [istate, #ISTATE_METHOD]
+  OSR	ldr	lr, [dispatch, #InterpreterInvocationLimit_Address-XXX]
+  USEC	ldr	r1, [tmp1, #METHOD_BACKEDGECOUNTER]
+  USEC	ldr	ip, [tmp1, #METHOD_INVOCATIONCOUNTER]
+  USEC	add	r1, r1, #INVOCATIONCOUNTER_COUNTINCREMENT
+  OSR	ldr	lr, [lr]
+  USEC	add	ip, ip, #INVOCATIONCOUNTER_COUNTINCREMENT
+  USEC	str	r1, [tmp1, #METHOD_BACKEDGECOUNTER]
+#ifdef T2JIT
+  OSR	cmp	r1, lr
+#else
+  OSR	cmp	r1, lr, lsl #2
+#endif
+  USEC	str	ip, [tmp1, #METHOD_INVOCATIONCOUNTER]
+  OSR	bcs	do_osr
+
+osr_continue:
+	ldr	ip, [dispatch, #SafePointSynchronize_state_Address-XXX]
+	ldr	r1, [ip]
+	cmp	r1, #1
+	beq	do_synchronize
+	DISPATCH_STATE	1
+	DISPATCH_FINISH
+
+
+do_synchronize:
+	DECACHE_JPC
+	DECACHE_STACK
+	mov	r0, thread
+	bl	Helper_SafePoint
+	CACHE_CP
+	CACHE_JPC
+	cmp	r0, #0
+	bne	handle_exception
+	DISPATCH	0
+
+#ifdef ON_STACK_REPLACEMENT
+
+#ifdef T2JIT
+do_osr:
+	ldr	r3, [tmp1, #METHOD_CONSTMETHOD]
+	DECACHE_JPC
+	DECACHE_STACK
+	mov	r0, thread
+	sub	r1, jpc, r3
+	sub	r1, r1, #CONSTMETHOD_CODEOFFSET
+	bl	FREQ_COUNT_OVERFLOW
+1:
+	cmp	r0, #0
+	bne	call_thumb2
+	CACHE_CP
+	CACHE_JPC
+	DISPATCH_START	0
+	b	osr_continue
+
+#else
+
+do_osr:
+	ldr	ip, [dispatch, #UseOnStackReplacement_Address-XXX]
+	ldrb	ip, [ip]
+	cmp	ip, #0
+	beq	osr_continue
+
+	ldr	r3, [tmp1, #METHOD_CONSTMETHOD]
+	DECACHE_JPC
+	ldrh	r3, [r3, #CONSTMETHOD_CODESIZE]
+	DECACHE_STACK
+	mov	r0, thread
+	sub	r1, jpc, r2
+	cmp	r3, #MAX_FG_METHOD_SIZE
+	bcc	1f
+	ldr	tmp1, [dispatch, #BackgroundCompilation_Address-XXX]
+	mov	r3, #1
+	ldr	r5, [tmp1]
+	str	r3, [tmp1]
+	bl	FREQ_COUNT_OVERFLOW
+	str	r5, [tmp1]
+	b	2f
+1:
+	bl	FREQ_COUNT_OVERFLOW
+2:
+	CACHE_CP
+	ldr	r1, [thread, #THREAD_PENDING_EXC]
+	CACHE_JPC
+	cmp	r1, #0
+	bne	handle_exception
+	cmp	r0, #0
+	beq	1f
+	ldr	r1, [r0, #56]
+	cmn	r1, #2
+	bne	osr_migrate
+1:
+	DISPATCH_START	0
+	b	osr_continue
+
+osr_migrate:
+	ldr	tmp1, [r0, #128]	@ osr_method->osr_entry()
+	mov	r0, thread
+	bl	_ZN13SharedRuntime19OSR_migration_beginEP10JavaThread
+	mov	r1, r0
+	ldr	r0, [istate, #ISTATE_METHOD]
+	ldrh	lr, [r0, #METHOD_MAXLOCALS]
+	ldrh	ip, [r0, #METHOD_SIZEOFPARAMETERS]
+	sub	lr, lr, ip
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	add	ip, r2, #4
+	ldr	r2, [r2]
+	add	ip, ip, lr, lsl #2
+	str	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	str	ip, [thread, #THREAD_JAVA_SP]
+	mov	r2, tmp1
+@ r0 = method
+@ r1 = osr_buf
+@ r2 = osr_entry
+	mov	lr, pc
+	ldr	pc, [tmp1]
+
+	mov	r0, #0	@ deoptimized_frames = 0
+	ldmfd	arm_sp!, {fast_regset, pc}
+
+#endif // T2JIT
+
+#endif // ON_STACK_REPLACEMENT
+
+	Opcode	ifeq
+	Opcode	ifnull
+        POP     r3
+        ldrsb   r1, [jpc, #1]
+        ldrb    r2, [jpc, #2]
+        cmp     r3, #0
+	beq	branch_taken
+	DISPATCH	3
+
+	Opcode	ifne
+	Opcode	ifnonnull
+        POP     r3
+        ldrsb   r1, [jpc, #1]
+        ldrb    r2, [jpc, #2]
+        cmp     r3, #0
+	bne	branch_taken
+	DISPATCH	3
+
+	Opcode	iflt
+        POP     r3
+        ldrsb   r1, [jpc, #1]
+        ldrb    r2, [jpc, #2]
+        cmp     r3, #0
+	blt	branch_taken
+	DISPATCH	3
+
+	Opcode	ifge
+        POP     r3
+        ldrsb   r1, [jpc, #1]
+        ldrb    r2, [jpc, #2]
+        cmp     r3, #0
+	bge	branch_taken
+	DISPATCH	3
+
+	Opcode	ifgt
+        POP     r3
+        ldrsb   r1, [jpc, #1]
+        ldrb    r2, [jpc, #2]
+        cmp     r3, #0
+	bgt	branch_taken
+	DISPATCH	3
+
+	Opcode	ifle
+        POP     r3
+        ldrsb   r1, [jpc, #1]
+        ldrb    r2, [jpc, #2]
+        cmp     r3, #0
+	ble	branch_taken
+	DISPATCH	3
+
+	Opcode	if_icmpeq
+	Opcode	if_acmpeq
+        POP    r2, r3
+        ldrsb   r1, [jpc, #1]
+        ldrb    r2, [jpc, #2]
+        cmp     r3, r2
+	beq	branch_taken
+	DISPATCH	3
+
+	Opcode	if_icmpne
+	Opcode	if_acmpne
+        POP    r2, r3
+        ldrsb   r1, [jpc, #1]
+        ldrb    r2, [jpc, #2]
+        cmp     r3, r2
+	bne	branch_taken
+	DISPATCH	3
+
+	Opcode	if_icmplt
+        POP    r2, r3
+        ldrsb   r1, [jpc, #1]
+        ldrb    r2, [jpc, #2]
+        cmp     r3, r2
+	blt	branch_taken
+	DISPATCH	3
+
+	Opcode	if_icmpge
+        POP    r2, r3
+        ldrsb   r1, [jpc, #1]
+        ldrb    r2, [jpc, #2]
+        cmp     r3, r2
+	bge	branch_taken
+	DISPATCH	3
+
+	Opcode	if_icmpgt
+        POP    r2, r3
+        ldrsb   r1, [jpc, #1]
+        ldrb    r2, [jpc, #2]
+        cmp     r3, r2
+	bgt	branch_taken
+	DISPATCH	3
+
+	Opcode	if_icmple
+        POP    r2, r3
+        ldrsb   r1, [jpc, #1]
+        ldrb    r2, [jpc, #2]
+        cmp     r3, r2
+	ble	branch_taken
+	DISPATCH	3
+
+	Opcode	ireturn
+	Opcode	freturn
+	Opcode	lreturn
+	Opcode	dreturn
+	Opcode	areturn
+	Opcode	return
+	ldr	r3, [dispatch, #SafePointSynchronize_state_Address-XXX]
+	ldr	r1, [r3]
+	cmp	r1, #1
+	bne	handle_return
+	DECACHE_JPC
+	DECACHE_STACK
+	mov	r0, thread
+	bl	Helper_SafePoint
+	CACHE_JPC
+	cmp	r0, #0
+	beq	handle_return
+	b	handle_exception
+
+resolve_get_put:
+	mov	r1, r0
+	mov	tmp1, lr
+	@ stmfd	arm_sp!, {lr}
+	mov	r0, thread
+	DECACHE_JPC
+        DECACHE_STACK
+       	bl      _ZN18InterpreterRuntime15resolve_get_putEP10JavaThreadN9Bytecodes4CodeE
+        ldr     r3, [thread, #THREAD_PENDING_EXC]
+	CACHE_JPC
+	CACHE_CP
+        cmp     r3, #0
+	mov	lr, tmp1
+	@ ldmfd	arm_sp!, {lr}
+        bne     getfield_exception
+@ Now restart the getfield
+        ldrb    r3, [jpc, #1]
+        ldrb    r2, [jpc, #2]
+        orr     r3, r3, r2, lsl #8      @ r3 = index
+	add	tmp1, constpool, r3, lsl #4	@ tmp1 = cache
+	bx	lr
+
+accessor_non_w:
+	bcs	accessor_h
+	beq	accessor_sb
+	tst	r0, #2
+	bne	accessor_dw
+accessor_sh:
+	ldrsh	r0, [r3, r1]
+	str	r0, [ip, #0]
+	mov	r0, #0	@ deoptimized_frames = 0
+	bx	lr
+accessor_h:
+	ldrh	r0, [r3, r1]
+	str	r0, [ip, #0]
+	mov	r0, #0	@ deoptimized_frames = 0
+	bx	lr
+accessor_sb:
+	ldrsb	r0, [r3, r1]
+	str	r0, [ip, #0]
+	mov	r0, #0	@ deoptimized_frames = 0
+	bx	lr
+accessor_dw:
+	add	r0, r3, r1
+	ldm	r0, {r0, r1}
+	sub	ip, ip, #4
+	str	ip, [thread, #THREAD_JAVA_SP]
+	stmia	ip, {r0, r1}
+	mov	r0, #0	@ deoptimized_frames = 0
+	bx	lr
+
+	Opcode	getfield
+	ldrb	r1, [jpc, #2]
+        add     tmp1, constpool, r1, lsl #12
+	add	tmp1, tmp1, r2, lsl #4
+	ldr	r3, [tmp1, #CP_OFFSET]
+        and     r3, r3, #0x00ff0000
+        cmp     r3, #opc_getfield << 16
+        blne    resolve_get_put
+  NFBC	POP	r3
+        ldr     r2, [tmp1, #CP_OFFSET+12]
+  NFBC	cmp	r3, #0
+  NFBC	beq	null_ptr_exception
+  NFBC	ldr	tmp1, [tmp1, #CP_OFFSET+8]
+        movs    r2, r2, lsr #29
+  FBC	movhi	r0, #opc_igetfield
+        bls     getfield_non_w
+  NFBC	ldr	tmp1, [r3, tmp1]
+  NFBC	PUSH	tmp1
+  NFBC	DISPATCH 3
+
+#ifdef FAST_BYTECODES
+rewrite_bytecode:
+	strb	r0, [jpc]
+	DISPATCH_BYTECODE
+#endif
+
+getfield_non_w:
+        bcs     getfield_h              @ C = 1 => R2 = 1
+        beq     getfield_sb             @ Z = 1 => R2 = 0
+        tst     r2, #2
+        bne     getfield_dw
+
+#ifdef FAST_BYTECODES
+getfield_sh:
+	mov	r0, #opc_sgetfield
+	b	rewrite_bytecode
+getfield_h:
+	mov	r0, #opc_cgetfield
+	b	rewrite_bytecode
+getfield_sb:
+	mov	r0, #opc_bgetfield
+	b	rewrite_bytecode
+getfield_dw:
+	mov	r0, #opc_lgetfield
+	b	rewrite_bytecode
+#else
+getfield_sh:
+	ldrsh	tmp1, [r3, tmp1]
+	PUSH	tmp1
+	DISPATCH 3
+getfield_h:
+	ldrh	tmp1, [r3, tmp1]
+	PUSH	tmp1
+	DISPATCH 3
+getfield_sb:
+	ldrsb	tmp1, [r3, tmp1]
+	PUSH	tmp1
+	DISPATCH 3
+getfield_dw:
+	add	r3, r3, tmp1
+	ldm	r3, {r2, tmp1}
+	PUSH	r2, tmp1
+	DISPATCH 3
+#endif
+
+	Opcode	putfield
+	ldrb	r1, [jpc, #2]
+        add     tmp1, constpool, r1, lsl #12
+	add	tmp1, tmp1, r2, lsl #4
+	ldr	r3, [tmp1, #CP_OFFSET]
+        and     r3, r3, #0xff000000
+        cmp     r3, #opc_putfield << 24
+        blne    resolve_get_put
+        ldr     r2, [tmp1, #CP_OFFSET+12]
+  NFBC	ldr	tmp1, [tmp1, #CP_OFFSET+8]
+        movs    r2, r2, lsr #29
+	bls	putfield_non_w
+  FBC   mov	r0, #opc_iputfield
+	cmp	r2, #tos_atos >> 1
+  FBC	moveq	r0, #opc_aputfield
+  FBC	b	rewrite_bytecode
+  NFBC	beq	putfield_a
+  NFBC	POP	r2, r3
+  NFBC	cmp	r3, #0
+  NFBC	beq	null_ptr_exception
+  NFBC	str	r2, [r3, tmp1]
+  NFBC	DISPATCH 3
+
+putfield_non_w:
+	bcs	putfield_h
+	beq	putfield_sb
+	tst	r2, #2
+	bne	putfield_dw
+
+#ifdef FAST_BYTECODES
+putfield_sh:
+putfield_h:
+	mov	r0, #opc_cputfield
+	b	rewrite_bytecode
+putfield_sb:
+	mov	r0, #opc_bputfield
+	b	rewrite_bytecode
+putfield_dw:
+	mov	r0, #opc_lputfield
+	b	rewrite_bytecode
+#else
+putfield_sh:
+putfield_h:
+	POP	r2, r3
+	cmp	r3, #0
+	beq	null_ptr_exception
+	strh	r2, [r3, tmp1]
+	DISPATCH 3
+putfield_sb:
+	POP	r2, r3
+	cmp	r3, #0
+	beq	null_ptr_exception
+	strb	r2, [r3, tmp1]
+	DISPATCH 3
+putfield_dw:
+	POP	r2, r3, lr
+	cmp	lr, #0
+	beq	null_ptr_exception
+	add	tmp1, lr, tmp1
+	stm	tmp1, {r2, r3}
+	DISPATCH 3
+putfield_a:
+	POP	r2, r3
+	cmp	r3, #0
+	beq	null_ptr_exception
+	str	r2, [r3, tmp1]
+	mov	r0, r3
+	bl	Helper_aputfield
+	DISPATCH 3
+#endif
+
+getstatic_sh:
+	DISPATCH_START	3
+	ldrsh	tmp1, [r3, lr]
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+getstatic_h:
+	DISPATCH_START	3
+	ldrh	tmp1, [r3, lr]
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+getstatic_sb:
+	DISPATCH_START	3
+	ldrsb	tmp1, [r3, lr]
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+getstatic_dw:
+	DISPATCH_START	3
+	add	r3, r3, lr
+	ldm	r3, {r2, tmp1}
+	DISPATCH_NEXT
+	PUSH	r2, tmp1
+	DISPATCH_FINISH
+getstatic_w:
+	DISPATCH_START	3
+	ldr	tmp1, [r3, lr]
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+
+putstatic_sh:
+putstatic_h:
+	DISPATCH_START	3
+	POP	tmp1
+	DISPATCH_NEXT
+	strh	tmp1, [r3, r2]
+	DISPATCH_FINISH
+putstatic_w:
+	cmp	lr, #tos_atos >> 1	@ >> 1 due to lsr #29 above
+	beq	putstatic_a
+	DISPATCH_START	3
+	POP	tmp1
+	DISPATCH_NEXT
+	str	tmp1, [r3, r2]
+	DISPATCH_FINISH
+putstatic_sb:
+	DISPATCH_START	3
+	POP	tmp1
+	DISPATCH_NEXT
+	strb	tmp1, [r3, r2]
+	DISPATCH_FINISH
+putstatic_dw:
+	DISPATCH_START	3
+	add	r2, r2, r3
+	POP	r3, tmp1
+	DISPATCH_NEXT
+	stm	r2, {r3, tmp1}
+	DISPATCH_FINISH
+putstatic_a:
+	POP	tmp1
+	str	tmp1, [r3, r2]
+	mov	r0, r3
+	bl	Helper_aputfield
+	DISPATCH 3
+
+getstatic_volatile_sh:
+	DISPATCH_START	3
+	ldrsh	tmp1, [r3, lr]
+	FullBarrier
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+getstatic_volatile_h:
+	DISPATCH_START	3
+	ldrh	tmp1, [r3, lr]
+	FullBarrier
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+getstatic_volatile_sb:
+	DISPATCH_START	3
+	ldrsb	tmp1, [r3, lr]
+	FullBarrier
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+getstatic_volatile_dw:
+	add	r3, r3, lr
+#ifndef	__ARM_ARCH_7A__
+	ldm	r3, {r2, tmp1}
+	FullBarrier
+	PUSH	r2, tmp1
+#else
+	ldrexd	r0, r1, [r3]
+	FullBarrier
+	PUSH	r0, r1
+#endif
+	DISPATCH	3
+getstatic_volatile_w:
+	DISPATCH_START	3
+	ldr	tmp1, [r3, lr]
+	FullBarrier
+	DISPATCH_NEXT
+	PUSH	tmp1
+	DISPATCH_FINISH
+
+putstatic_volatile_sh:
+putstatic_volatile_h:
+	DISPATCH_START	3
+	POP	tmp1
+	DISPATCH_NEXT
+	StoreStoreBarrier
+	strh	tmp1, [r3, r2]
+	StoreLoadBarrier
+	DISPATCH_FINISH
+putstatic_volatile_w:
+	cmp	lr, #tos_atos >> 1	@ >> 1 due to lsr #29 above
+	beq	putstatic_volatile_a
+	DISPATCH_START	3
+	POP	tmp1
+	DISPATCH_NEXT
+	StoreStoreBarrier
+	str	tmp1, [r3, r2]
+	StoreLoadBarrier
+	DISPATCH_FINISH
+putstatic_volatile_sb:
+	DISPATCH_START	3
+	POP	tmp1
+	DISPATCH_NEXT
+	StoreStoreBarrier
+	strb	tmp1, [r3, r2]
+	StoreLoadBarrier
+	DISPATCH_FINISH
+putstatic_volatile_dw:
+	add	ip, r2, r3
+	POP	r0, r1
+	StoreStoreBarrier
+#ifndef	__ARM_ARCH_7A__
+	stm	ip, {r0, r1}
+#else
+	// Data in tmp1 & tmp2, address in ip, r2 & r3 scratch
+0:	ldrexd	r2, r3, [ip]
+	strexd	r2, r0, r1, [ip]
+	teq	r2, #0
+	bne	0b
+#endif
+	DISPATCH_START	3
+	StoreLoadBarrier
+	DISPATCH_FINISH
+putstatic_volatile_a:
+	POP	tmp1
+	StoreStoreBarrier
+	str	tmp1, [r3, r2]
+	mov	r0, r3
+	bl	Helper_aputfield
+	DISPATCH 3
+
+resolve_invokeinterface:
+	mov	r1, #opc_invokeinterface
+	b	resolve_invoke
+resolve_invokevirtual:
+	mov	r1, #opc_invokevirtual
+	b	resolve_invoke
+resolve_invokespecial:
+	mov	r1, #opc_invokespecial
+	b	resolve_invoke
+resolve_invokestatic:
+	mov	r1, #opc_invokestatic
+resolve_invoke:
+	mov	tmp1, lr
+	mov	r0, thread
+	DECACHE_JPC
+	DECACHE_STACK
+	bl	_ZN18InterpreterRuntime14resolve_invokeEP10JavaThreadN9Bytecodes4CodeE
+	CACHE_JPC
+	ldr	r3, [thread, #THREAD_PENDING_EXC]
+	CACHE_CP
+	cmp	r3, #0
+        ldrb    r3, [jpc, #1]
+        ldrb    r2, [jpc, #2]
+	bne	resolve_exception
+        orr     r3, r3, r2, lsl #8      @ r3 = index
+	add	r0, constpool, r3, lsl #4	@ r1 = cache
+	bx	tmp1
+
+# r2 = [jpc, #1]
+# r1 = [jpc, #2]
+	Opcode	new
+	ldrb	r1, [jpc, #2]
+	DECACHE_JPC
+	DECACHE_STACK
+	orr	r1, r1, r2, lsl #8
+	mov	r0, r8
+	bl	Helper_new
+	CACHE_JPC
+	CACHE_CP
+	cmp	r0, #0
+	beq	handle_exception
+	PUSH	r0
+	DISPATCH 3
+
+bytecode_interpreter_str:
+	.ascii  __FILE__
+	.byte 0
+	ALIGN_WORD
+
+	Opcode	newarray
+	ldrb	r1, [jpc, #1]	@ zero_extendqisi2
+	ldr	r2, [stack, #4]
+	mov	r0, thread
+	DECACHE_JPC
+	DECACHE_STACK
+	bl	_ZN18InterpreterRuntime8newarrayEP10JavaThread9BasicTypei
+	ldr	ip, [thread, #THREAD_PENDING_EXC]
+	CACHE_JPC
+	CACHE_CP
+	cmp	ip, #0
+	ldr	r2, [thread, #THREAD_VM_RESULT]
+	bne	handle_exception
+	str	r2, [stack, #4]
+	str	ip, [thread, #THREAD_VM_RESULT]
+	DISPATCH	2
+
+	Opcode	anewarray
+	ldrb	r0, [jpc, #1]	@ zero_extendqisi2
+	ldr	r3, [stack, #4]
+	ldr	lr, [istate, #ISTATE_METHOD]
+	ldrb	r2, [jpc, #2]	@ zero_extendqisi2
+	orr	r2, r2, r0, asl #8
+	DECACHE_JPC
+	DECACHE_STACK
+	ldr	r1, [lr, #METHOD_CONSTANTS]
+	mov	r0, thread
+	bl	_ZN18InterpreterRuntime9anewarrayEP10JavaThreadP19constantPoolOopDescii
+	ldr	ip, [thread, #THREAD_PENDING_EXC]
+	CACHE_JPC
+	CACHE_CP
+	cmp	ip, #0
+	ldr	r2, [thread, #THREAD_VM_RESULT]
+	bne	handle_exception
+	str	r2, [stack, #4]
+	str	ip, [thread, #THREAD_VM_RESULT]
+	DISPATCH	3
+
+	Opcode	arraylength
+	DISPATCH_START	1
+	ldr	r3, [stack, #4]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	SW_NPC	cmp	r3, #0
+	SW_NPC	beq	null_ptr_exception_jpc_1
+.abortentry114:
+	ldr	r3, [r3, #8]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	r3, [stack, #4]
+	DISPATCH_FINISH
+
+	Opcode	athrow
+	ldr	r1, [stack, #4]
+	cmp	r1, #0
+	beq	null_ptr_exception
+	mov	r2, #0
+	mov	r0, thread
+	mov	r3, r2
+	bl	_ZN12ThreadShadow21set_pending_exceptionEP7oopDescPKci
+	b	handle_exception
+
+#define secondary_super_cache_offset_in_bytes	20
+#define tmp_chunk	locals
+#define tmp_hwm		stack
+#define	tmp_max		constpool
+
+# r2 = [jpc, #1]
+# r1 = [jpc, #2]
+	Opcode	checkcast
+	ldrb	r1, [jpc, #2]
+	DECACHE_JPC
+	DECACHE_STACK
+	orr	r1, r1, r2, lsl #8
+	mov	r0, r8
+	GET_STACK	0, r2
+	bl	Helper_checkcast
+	CACHE_JPC
+	CACHE_CP
+	cmp	r0, #0
+	bne	handle_exception
+	DISPATCH 3
+
+# r2 = [jpc, #1]
+# r1 = [jpc, #2]
+	Opcode	instanceof
+	ldrb	r1, [jpc, #2]
+	DECACHE_JPC
+	DECACHE_STACK
+	orr	r1, r1, r2, lsl #8
+	mov	r0, r8
+	POP	r2
+	bl	Helper_instanceof
+	CACHE_JPC
+	CACHE_CP
+	cmp	r0, #-1
+	beq	handle_exception
+	PUSH	r0
+	DISPATCH 3
+
+	Opcode	monitorenter
+	mov	r0, r8
+	POP	r1
+ 	DECACHE_JPC
+ 	DECACHE_STACK
+	bl	Helper_monitorenter
+	CACHE_JPC
+	CACHE_CP
+	CACHE_STACK		@ monitorenter may expand stack!!!	
+	cmp	r0, #0
+	bne	handle_exception
+	DISPATCH 1
+
+	Opcode	monitorexit
+	mov	r0, r8
+	POP	r1
+	DECACHE_JPC
+	DECACHE_STACK
+	bl	Helper_monitorexit
+	CACHE_JPC
+	CACHE_CP
+	cmp	r0, #0
+	bne	handle_exception
+	DISPATCH 1
+
+	ALIGN_CODE
+vm_fatal_error:
+	adr	r0, .fatal_filename
+	mov	r1, #99
+	bl	_Z28report_should_not_reach_herePKci
+	b	breakpoint
+.fatal_filename:
+	.ascii	"[Optimsed Assembler Interpreter Loop]\000"
+
+// This extra entry point for vm_fatal_error (at vm_fatal_error +
+// CODE_ALIGN_SIZE) allows vm_fatal_error to be used as an entry point
+// in the asm_method_table.
+	ALIGN_CODE	
+	b	vm_fatal_error
+
+	ALIGN_WORD
+
+	Opcode	aastore
+	DECACHE_JPC
+	DECACHE_STACK
+	mov	r0, r8
+	POP	r1, r2, r3
+	bl	Helper_aastore
+	CACHE_JPC
+	CACHE_CP
+	cmp	r0, #0
+	bne	handle_exception
+	DISPATCH 1
+
+	Opcode	wide
+	ldrb	r2, [jpc, #1]
+	ldrb	r1, [jpc, #2]	@ zero_extendqisi2
+	ldrb	r3, [jpc, #3]	@ zero_extendqisi2
+
+	sub	lr, r2, #opc_aload+1
+	cmp	lr, #opc_istore - (opc_aload+1)
+	bcc	wide_undef_opc_exception
+
+	sub	lr, r2, #opc_iload
+	cmp	r2, #opc_istore
+	subcs	lr, lr, #opc_istore - (opc_aload+1)
+	cmp	r2, #opc_astore+1
+
+	orr	r1, r3, r1, asl #8
+	adr	r3, wide_case_table
+	ldrcc	pc, [r3, lr, lsl #2]
+	
+	cmp	r2, #opc_ret
+	beq	do_wide_ret
+	cmp	r2, #opc_iinc
+	beq	do_wide_iinc
+wide_undef_opc_exception:
+	mov	r0, #VMSYMBOLS_InternalError
+	adr	r1, undef_opcode_msg
+	b	raise_exception_with_msg
+undef_opcode_msg:
+	.ascii  "undefined opcode\000"
+	ALIGN_WORD
+
+wide_case_table:
+        .word	case_wide_iload
+        .word	case_wide_lload
+        .word	case_wide_fload
+        .word	case_wide_dload
+        .word	case_wide_aload
+        .word	case_wide_istore
+        .word	case_wide_lstore
+        .word	case_wide_fstore
+        .word	case_wide_dstore
+        .word	case_wide_astore
+
+case_wide_iload:
+case_wide_fload:
+case_wide_aload:
+	ldr	r2, [locals, -r1, lsl #2]
+	PUSH	r2
+	DISPATCH	4
+case_wide_istore:
+case_wide_fstore:
+case_wide_astore:
+	POP	r2
+	str	r2, [locals, -r1, lsl #2]
+	DISPATCH	4
+case_wide_dload:
+case_wide_lload:
+	sub	r1, locals, r1, lsl #2
+	ldmda	r1, {r1, r2}
+	PUSH	r1, r2
+	DISPATCH	4
+case_wide_dstore:
+case_wide_lstore:
+	POP	r2, r3
+	sub	r1, locals, r1, lsl #2
+	stmda	r1, {r2, r3}
+	DISPATCH	4
+do_wide_ret:
+	ldr	r2, [istate, #ISTATE_METHOD]
+	ldr	r2, [r2, #METHOD_CONSTMETHOD]
+	ldr	r1, [locals, -r1, lsl #2]
+	add	jpc, r2, r1
+	DISPATCH	CONSTMETHOD_CODEOFFSET
+do_wide_iinc:
+	ldrsb	r2, [jpc, #4]
+	ldrb	r3, [jpc, #5]
+	orr	r2, r3, r2, lsl #8
+	ldr	r3, [locals, -r1, lsl #2]
+	add	r3, r3, r2
+	str	r3, [locals, -r1, lsl #2]
+	DISPATCH	6
+
+	Opcode	multianewarray
+	ldrb	tmp1, [jpc, #3]	@ zero_extendqisi2
+	mov	r0, thread
+	add	r1, stack, tmp1, lsl #2
+	DECACHE_JPC
+	DECACHE_STACK
+	bl	_ZN18InterpreterRuntime14multianewarrayEP10JavaThreadPi
+	CACHE_JPC
+	ldr	r1, [thread, #THREAD_PENDING_EXC]
+	CACHE_CP
+	cmp	r1, #0
+	ldr	r3, [thread, #THREAD_VM_RESULT]
+	bne	handle_exception
+	str	r3, [stack, tmp1, asl #2]!
+	str	r1, [thread, #THREAD_VM_RESULT]
+	sub	stack, stack, #4
+	DISPATCH	4
+
+	Opcode	jsr_w
+	ldr	r3, [istate, #ISTATE_METHOD]
+	ldr	r1, [r3, #METHOD_CONSTMETHOD]
+	rsb	r2, r1, jpc
+	sub	r2, r2, #CONSTMETHOD_CODEOFFSET - 5
+	str	r2, [stack], #-4
+	b	do_goto_w
+
+	Opcode	goto_w
+	add	r2, jpc, #1
+	ldrb	tmp1, [jpc, #1]	@ zero_extendqisi2
+	ldrb	r3, [r2, #3]	@ zero_extendqisi2
+	ldrb	r0, [r2, #1]	@ zero_extendqisi2
+	ldrb	ip, [r2, #2]	@ zero_extendqisi2
+	orr	r3, r3, tmp1, asl #24
+	orr	r3, r3, r0, asl #16
+	orr	r3, r3, ip, asl #8
+	cmp	r3, #0
+	add	jpc, jpc, r3
+	bgt	1f
+
+	ldr	r3, [dispatch, #SafePointSynchronize_state_Address-XXX]
+	ldr	r1, [r3]
+	cmp	r1, #1
+	bne	1f
+	DECACHE_JPC
+	DECACHE_STACK
+	mov	r0, thread
+	bl	Helper_SafePoint
+	CACHE_JPC
+	CACHE_CP
+	cmp	r0, #0
+	bne	handle_exception
+1:
+	DISPATCH	0
+
+	Opcode	breakpoint
+	mov	r2, jpc
+	DECACHE_STACK
+	DECACHE_JPC
+	mov	r0, thread
+	ldr	r1, [istate, #ISTATE_METHOD]
+	bl	_ZN18InterpreterRuntime24get_original_bytecode_atEP10JavaThreadP13methodOopDescPh
+	mov	tmp1, r0
+	mov	r0, thread
+	ldr	r3, [thread, #THREAD_PENDING_EXC]
+	cmp	r3, #0
+	bne	handle_exception
+	ldr	r2, [istate, #ISTATE_BCP]
+	ldr	r1, [istate, #ISTATE_METHOD]
+	bl	_ZN18InterpreterRuntime11_breakpointEP10JavaThreadP13methodOopDescPh
+	CACHE_JPC
+	ldr	r3, [thread, #THREAD_PENDING_EXC]
+	CACHE_CP
+	cmp	r3, #0
+	and	r0, tmp1, #255
+	bne	handle_exception
+	DISPATCH_BYTECODE
+
+@ When the fast-bytecode rewriter is disabled, all of the rewritten/fused
+@ opcodes below share the "undefined" handler so that hitting one is a
+@ hard error rather than silent misbehaviour.
+#ifndef FAST_BYTECODES
+	Opcode	bgetfield
+	Opcode	cgetfield
+	Opcode	igetfield
+	Opcode	lgetfield
+	Opcode	sgetfield
+	Opcode	aputfield
+	Opcode	bputfield
+	Opcode	cputfield
+	Opcode	iputfield
+	Opcode	lputfield
+	Opcode	invokevfinal
+	Opcode	invokeresolved
+	Opcode	invokespecialresolved
+	Opcode	invokestaticresolved
+	Opcode	iaccess_0
+	Opcode	iload_0_iconst_N
+	Opcode	iload_iconst_N
+	Opcode	iadd_istore_N
+	Opcode	isub_istore_N
+	Opcode	iand_istore_N
+	Opcode	ior_istore_N
+	Opcode	ixor_istore_N
+	Opcode	iadd_u4store
+	Opcode	isub_u4store
+	Opcode	iand_u4store
+	Opcode	ior_u4store
+	Opcode	ixor_u4store
+	Opcode	fast_iload_iload
+	Opcode	fast_iload_iload_N
+	Opcode	fast_iload_N_iload
+	Opcode	fast_iload_N_iload_N
+#endif
+	@ -- undefined ----------------------------------------------------------
+	@ Fatal-error path for any bytecode with no handler.  Looks up the
+	@ bytecode's printable name (or "<unknown>" if out of range) and reports
+	@ a fatal error with the opcode number and name.
+	Opcode	undefined
+	// Decache to get better diagnostic info
+	DECACHE_JPC
+	DECACHE_STACK
+	ldr	r2, [dispatch, #Bytecodes_name_Address-XXX]
+	ldrb	r3, [jpc, #0]	@ zero_extendqisi2
+	adrl	r0, bytecode_interpreter_str
+	cmp	r3, #last_implemented_bytecode+1
+	adrcs	ip, unknown_bytecode
+	ldrcc	ip, [r2, r3, asl #2]
+	adr	r2, unimplemented_opcode_msg
+	ldr	r1, =__LINE__
+	str	ip, [arm_sp, #-8]!
+ 	bl	Helper_report_fatal
+	@ NOTE(review): fallthrough to breakpoint is presumably unreachable
+	@ (Helper_report_fatal should not return) — confirm.
+	b	breakpoint
+	.ltorg
+unimplemented_opcode_msg:
+	.ascii  "\011*** Unimplemented opcode: %d = %s\012\000"
+unknown_bytecode:
+	.ascii	"<unknown>\000"
+	ALIGN_WORD
+
+	@ -- return_register_finalizer ------------------------------------------
+	@ Return from a constructor whose class may have a finalizer.  Loads the
+	@ receiver from local 0, checks JVM_ACC_HAS_FINALIZER in its klass
+	@ access flags, and if set registers the object for finalization before
+	@ taking the normal return path.
+	Opcode	return_register_finalizer
+	ldr	r1, [locals, #0]
+	ldr	r3, [r1, #4]
+	ldr	r2, [r3, #KLASS_PART+KLASS_ACCESSFLAGS]
+	tst	r2, #JVM_ACC_HAS_FINALIZER
+	beq	handle_return
+	DECACHE_JPC
+	DECACHE_STACK
+	mov	r0, thread
+	bl	_ZN18InterpreterRuntime18register_finalizerEP10JavaThreadP7oopDesc
+	CACHE_JPC
+	ldr	r3, [thread, #THREAD_PENDING_EXC]
+@ CACHE_LOCALS & CACHE_CP not require for handle_retuen / handle_exception
+	cmp	r3, #0
+	beq	handle_return
+	b	handle_exception
+
+// This code is logically part of normal_entry_synchronized, but it's
+// been moved out because there is only a FAST_ENTRY_OFFSET sized gap
+// here.
+
+.normal_entry_return_synchronized:
+	mov	r0, #0	@ deoptimized_frames = 0
+	ldmfd	arm_sp!, {regset, pc}
+	@ -- normal_entry_synchronized ------------------------------------------
+	@ Slow (C-ABI) entry for a synchronized interpreted method: checks for
+	@ native-stack overflow, then falls into the fast entry with lr pointing
+	@ at the return stub above.
+	SLOW_ENTRY
+normal_entry_synchronized:
+	stmfd	arm_sp!, {regset, lr}
+	mov	thread, r2
+	@ Native stack headroom check: fail if sp is within 32K of stack limit.
+	ldr	r7, [thread, #THREAD_STACK_SIZE]
+	ldr	r3, [thread, #THREAD_STACK_BASE]
+	rsb	r3, r7, r3
+	rsb	r3, r3, arm_sp
+	cmp	r3, #32768
+	blt	stack_overflow_no_frame
+	add	lr, pc, #(.normal_entry_return_synchronized-(.fast_normal_entry1+4))
+.fast_normal_entry1:
+
+	FAST_ENTRY
+fast_normal_entry_synchronized:
+	stmfd	arm_sp!, {fast_regset, lr}
+
+	mov	tmp1, r0
+
+	@ r8 = max_locals - size_of_parameters = number of non-arg locals
+	ldrh	r2, [tmp1, #METHOD_MAXLOCALS]
+	ldrh	r3, [tmp1, #METHOD_SIZEOFPARAMETERS]
+	rsb	r8, r3, r2
+
+	@ Java expression-stack overflow check for the new frame.
+	ldr	r1, [thread, #THREAD_JAVA_SP]
+	ldrh	r0, [tmp1, #METHOD_MAXSTACK]
+	ldr	r3, [thread, #THREAD_JAVA_STACK_BASE]
+
+	sub	r5, r1, r8, lsl #2
+	sub	r5, r5, #FRAME_SIZE+STACK_SPARE+LEAF_STACK_SIZE
+	sub	r5, r5, r0, lsl #2
+	cmp	r3, r5
+	bcs	stack_overflow_before_frame
+
+	cmp	r8, #0
+	ble	.normal_entry_synchronized_no_locals
+
+	@ Zero-initialise the non-parameter locals on the Java stack.
+	mov	r2, #0
+.zero_locals_synchronized:
+	subs	r8, r8, #1
+	str	r2, [r1, #-4]!
+	bgt	.zero_locals_synchronized
+	str	r1, [thread, #THREAD_JAVA_SP]
+
+.normal_entry_synchronized_no_locals:
+	mov	r2, thread
+	mov	r1, tmp1
+	add	r0, thread, #THREAD_JAVA_STACK_BASE
+	bl	build_frame
+	ldr	ip, [thread, #THREAD_TOP_ZERO_FRAME]
+	sub	istate, r0, #ISTATE_NEXT_FRAME
+	mov	r2, #0  @ set SP to zero before setting FP
+	str	r0, [thread, #THREAD_TOP_ZERO_FRAME]
+	str	r2, [thread, #THREAD_LAST_JAVA_SP]
+	str	r0, [thread, #THREAD_LAST_JAVA_FP]
+	ldr	r3, [thread, #THREAD_JAVA_SP]
+	str	r3, [thread, #THREAD_LAST_JAVA_SP]
+	str	ip, [istate, #ISTATE_NEXT_FRAME]
+	@ Recompute the dispatch-table base from the pc-relative adcon pair.
+	adrl	ip, dispatch_init_adcon
+	ldm	ip, {r0, r1}
+	add	r0, r0, ip
+	add	dispatch, r1, r0
+	ldr	r0, [istate, #ISTATE_METHOD]
+	ldr	r3, [r0, #METHOD_ACCESSFLAGS]
+	tst	r3, #JVM_ACC_SYNCHRONIZED
+	beq	1f
+
+@ Do Synchronisation
+	@ Lock object: receiver for instance methods, class mirror for static.
+	CACHE_STACK
+	CACHE_LOCALS
+	tst	r3, #JVM_ACC_STATIC
+	ldrne	r3, [r0, #METHOD_CONSTANTS]
+	ldreq	tmp1, [locals, #0]
+	ldrne	r2, [r3, #CONSTANTPOOL_POOL_HOLDER]
+	ldrne	tmp1, [r2, #KLASS_PART+KLASS_JAVA_MIRROR]
+	@ Build the displaced (unlocked) header word and store it in the
+	@ monitor slot just below the monitor base.
+	ldr	r3, [tmp1, #0]
+	orr	tmp_xxx, r3, #1
+	ldr	ip, [istate, #ISTATE_MONITOR_BASE]
+	str	tmp_xxx, [ip, #-8]
+.normal_do_synchronisation_2:
+	@ Fast path: CAS the lock record into the object header.
+	ldr	tmp_vvv, [tmp1, #0]
+	cmp	tmp_xxx, tmp_vvv
+	bne	.normal_do_synchronisation_3
+	mov	r0, tmp_xxx
+	ldr	r1, [istate, #ISTATE_MONITOR_BASE]
+	sub	r1, r1, #8
+	mov	r2, tmp1
+	@ NOTE(review): r3 = 0xffff0fc0 — the ARM-Linux kernel-helper cmpxchg
+	@ entry point, presumably; confirm against the kuser helper ABI.
+	mov	r3, #0xffffffc0
+	bic	r3, r3, #0xf000
+	blx	r3
+	cmp	r0, #0
+	bne	.normal_do_synchronisation_2
+	b	1f
+.normal_do_synchronisation_3:
+	@ Header didn't match: maybe this thread already owns the lock
+	@ (recursive case) — then the monitor slot stays zeroed.
+	mov	r0, thread
+	bic	r1, tmp_xxx, #3
+	bl	JavaThread_is_lock_owned
+	cmp	r0, #0
+	beq	.normal_do_synchronisation_4
+	ldr	ip, [istate, #ISTATE_MONITOR_BASE]
+	mov	r3, #0
+	str	r3, [ip, #-8]
+	b	1f
+.normal_do_synchronisation_4:
+	@ Contended: fall back to the runtime monitorenter.
+	ldr	r1, [istate, #ISTATE_MONITOR_BASE]
+	sub	r1, r1, #8
+	DECACHE_STACK
+	mov	r0, thread
+	bl	_ZN18InterpreterRuntime12monitorenterEP10JavaThreadP15BasicObjectLock
+	ldr	r3, [thread, #THREAD_PENDING_EXC]
+	cmp	r3, #0
+	mov	r2, r0
+	bne	handle_exception_do_not_unlock
+1:
+	@ Bump the invocation counter (USEC builds) and trip the compile
+	@ threshold check before starting to interpret.
+  USEC ldr	r0, [istate, #ISTATE_METHOD]
+  USEC	ldr	r2, [r0, #METHOD_INVOCATIONCOUNTER]
+  USEC	ldr	lr, [dispatch, #InterpreterInvocationLimit_Address-XXX]
+  USEC	add	r2, r2, #INVOCATIONCOUNTER_COUNTINCREMENT
+  USEC	ldr	lr, [lr]
+  USEC	str	r2, [r0, #METHOD_INVOCATIONCOUNTER]
+  USEC	cmp	r2, lr
+  USEC	bcs	sync_method_entry_freq_count_overflow
+	CACHE_JPC
+	CACHE_LOCALS
+	CACHE_CP
+	DISPATCH	0
+
+#ifdef USE_COMPILER
+@ Invocation-counter overflow for a synchronized method entry: trigger a
+@ compile.  Methods larger than MAX_FG_METHOD_SIZE are forced to compile in
+@ the background (BackgroundCompilation is temporarily set to 1 and then
+@ restored); smaller ones may compile in the foreground.
+sync_method_entry_freq_count_overflow:
+        ldr     r3, [r0, #METHOD_CONSTMETHOD]
+        ldrh    r3, [r3, #CONSTMETHOD_CODESIZE]
+	mov	r1, #0
+	mov	r0, thread
+        cmp     r3, #MAX_FG_METHOD_SIZE
+        bcc     1f
+        ldr     tmp1, [dispatch, #BackgroundCompilation_Address-XXX]
+        mov     r3, #1
+        ldr     r5, [tmp1]
+        str     r3, [tmp1]
+        bl      FREQ_COUNT_OVERFLOW
+        str     r5, [tmp1]
+        b       2f
+1:
+	bl	FREQ_COUNT_OVERFLOW
+2:
+	@ Thumb2 JIT builds: nonzero result means compiled code is ready.
+  T2	cmp	r0, #0
+	CACHE_LOCALS
+  T2	bne	call_thumb2
+	CACHE_JPC
+	CACHE_CP
+	DISPATCH	0
+#endif
+
+# r2 = [jpc, #1]
+# r1 = [jpc, #2]
+	@ -- invokeinterface ----------------------------------------------------
+	@ Resolve the constant-pool cache entry if needed, null-check the
+	@ receiver, then search the receiver klass's itable for the declaring
+	@ interface and index into the matching vtable section for the target
+	@ method.  methodInterface-flagged entries (java.lang.Object methods)
+	@ take the vtable/vfinal shortcut at .invokeinterface_methodInterface.
+	Opcode	invokeinterface
+	ldrb	r1, [jpc, #2]
+	DECACHE_STACK
+        add     r0, constpool, r1, lsl #12
+	add	r0, r0, r2, asl #4
+	DECACHE_JPC
+        ldr     r2, [r0, #CP_OFFSET]
+        and     r2, r2, #0x00ff0000
+        cmp     r2, #opc_invokeinterface << 16
+        blne    resolve_invokeinterface
+
+	@ r3 = cache flags; low byte = receiver's stack slot index.
+	ldr	r3, [r0, #CP_OFFSET+12]
+	and	r2, r3, #255
+	ldr	r2, [stack, r2, lsl #2]
+	SW_NPC	cmp	r2, #0
+	SW_NPC	beq	null_ptr_exception
+.abortentry110:
+	ldr	tmp1, [r2, #4]				@ rcvr->klass()
+	tst	r3, #flag_methodInterface
+	bne	.invokeinterface_methodInterface
+
+	ldr	lr, [r0, #CP_OFFSET+4]			@ lr = iclass
+
+	@ itable starts after the vtable; vtable length is rounded up to an
+	@ even number of words.
+	add	r1, tmp1, #INSTANCEKLASS_VTABLE_OFFSET
+	ldr	r2, [tmp1, #KLASS_PART+INSTANCEKLASS_VTABLE_LEN]
+	ldr	ip, [tmp1, #KLASS_PART+INSTANCEKLASS_ITABLE_LEN]
+	add	r2, r2, #1
+	bic	r2, r2, #1
+
+	add	r1, r1, r2, lsl #2
+
+	@ Linear scan of itable entries (klass, offset pairs) for iclass.
+	mov	r2, #0
+1:
+	cmp	r2, ip
+	beq	incompatibleclass_exception
+	ldr	r3, [r1], #8
+	add	r2, r2, #1
+	cmp	lr, r3
+	bne	1b
+
+	@ Found: fetch the method from the interface's vtable section.
+	ldr	r3, [r0, #CP_OFFSET+8]
+	ldr	r2, [r1, #-4]
+	add	r3, tmp1, r3, lsl #2
+	ldr	tmp1, [r3, r2]
+	cmp	tmp1, #0
+	beq	abstractmethod_exception
+.invokeinterface_invoke:
+	@ tmp1 = target methodOop.  Publish SP/FP state and call through the
+	@ method's from_interpreted entry.
+	ldr	ip, [tmp1, #METHOD_FROM_INTERPRETED]
+	mov	r1, #0
+	str	r1, [thread, #THREAD_LAST_JAVA_FP]
+	str	r1, [thread, #THREAD_LAST_JAVA_SP]
+
+	add	stack, stack, #4
+	str	stack, [thread, #THREAD_JAVA_SP]
+
+	ldr	r3, [ip]
+
+	mov	r0, tmp1
+#ifdef SHARK
+	mov	r2, thread
+#else
+	add	r3, r3, #FAST_ENTRY_OFFSET
+#endif
+	blx	r3
+
+	@ Re-establish dispatch base and interpreter state after the call.
+	adrl	ip, dispatch_init_adcon
+	ldm	ip, {r0, r1}
+	add	r0, r0, ip
+	add	dispatch, r1, r0
+
+	CACHE_LOCALS
+
+	CACHE_JPC
+	ldr	stack, [thread, #THREAD_JAVA_SP]
+	ldr	r2, [istate, #ISTATE_STACK_LIMIT]
+	sub	stack, stack, #4
+
+	@ Restore last_java_sp/fp around resetting THREAD_JAVA_SP to this
+	@ frame's stack limit (+4).
+	ldr	r1, [thread, #THREAD_JAVA_SP]
+	stmfd	arm_sp!, {r1}
+	mov	r1, #0
+	str	r1, [thread, #THREAD_LAST_JAVA_SP]
+	ldr	r1, [thread, #THREAD_TOP_ZERO_FRAME]
+	add	r2, r2, #4
+	str	r2, [thread, #THREAD_JAVA_SP]
+	str	r1, [thread, #THREAD_LAST_JAVA_FP]
+	ldmfd	arm_sp!, {r1}
+	str	r1, [thread, #THREAD_LAST_JAVA_SP]
+	DISPATCH_START	5
+	ldr	r3, [thread, #THREAD_PENDING_EXC]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	cmp	r3, #0
+	DISPATCH_NEXT
+	bne	invokeinterface_exception_fix
+	DISPATCH_NEXT
+	CACHE_CP
+	DISPATCH_FINISH
+
+.invokeinterface_methodInterface:
+	@ Interface call to a java.lang.Object method: either a vfinal target
+	@ (method cached directly) or a plain vtable dispatch.
+	tst	r3, #flag_vfinalMethod
+	ldrne	tmp1, [r0, #CP_OFFSET+8]
+	bne	.invokeinterface_invoke
+	ldr	r1, [r0, #CP_OFFSET+8]
+	add	r3, tmp1, r1, lsl #2
+	ldr	tmp1, [r3, #INSTANCEKLASS_VTABLE_OFFSET]
+	b	.invokeinterface_invoke
+
+# r2 = [jpc, #1]
+	@ -- invokedynamic ------------------------------------------------------
+	@ Assemble the 4-byte (inverted) call-site index, resolve the call site
+	@ on first use, then jump into CppInterpreter::process_method_handle
+	@ with the target method handle.
+	Opcode	invokedynamic
+	DECACHE_STACK
+	// Fetch index bytes from bytecode
+	ldrb	r0, [jpc, #2]
+	ldrb	r1, [jpc, #3]
+	ldrb	r3, [jpc, #4]
+	orr	r2, r2, r0, lsl #8
+	orr	r2, r2, r1, lsl #16
+	orr	r1, r2, r3, lsl #24
+	// The index is inverted, so we invert it back with MVN
+        mvn     r1, r1
+	// The pool entry is in R0
+        add     r0, constpool, r1, lsl #4
+
+	// Save the pool entry
+	stmfd	arm_sp!, {r0}
+
+	DECACHE_JPC
+	ldr     r1, [r0, #CP_OFFSET+4]  // Pointer to call site
+	// Already resolved?
+	cmp     r1, #0 
+	bleq    resolve_invokedynamic
+
+	// Get the offset from a call site to the corresponding target
+	// method handle
+	bl	Helper_target_offset_in_bytes
+	mov	lr, r0
+
+	// Restore the pool entry
+	ldmfd	arm_sp!, {r0}
+
+	ldr	r0, [r0, #CP_OFFSET+4]	// Call site
+.abortentry119:
+	ldr	r0, [r0, lr]		// Method handle
+	mov	r1, thread
+
+	// Call the target method
+	bl	_ZN14CppInterpreter21process_method_handleEP7oopDescP6Thread
+
+	// Load up the interpreter registers.  Probably not necessary
+	adrl	ip, dispatch_init_adcon
+	ldm	ip, {r0, r1}
+	add	r0, r0, ip
+	add	dispatch, r1, r0
+
+	CACHE_LOCALS
+
+	CACHE_JPC
+	ldr	stack, [thread, #THREAD_JAVA_SP]
+	ldr	r2, [istate, #ISTATE_STACK_LIMIT]
+	sub	stack, stack, #4
+
+	// Fix up everything in the thread state to point to the
+	// current frame
+	ldr	r1, [thread, #THREAD_JAVA_SP]
+	stmfd	arm_sp!, {r1}
+	mov	r1, #0
+	str	r1, [thread, #THREAD_LAST_JAVA_SP]
+	ldr	r1, [thread, #THREAD_TOP_ZERO_FRAME]
+	add	r2, r2, #4
+	str	r2, [thread, #THREAD_JAVA_SP]
+	str	r1, [thread, #THREAD_LAST_JAVA_FP]
+	ldmfd	arm_sp!, {r1}
+	str	r1, [thread, #THREAD_LAST_JAVA_SP]
+	DISPATCH_START	5
+	// Test for an exception
+	@ NOTE(review): [thread, #4] is presumably the pending-exception slot
+	@ (elsewhere written as THREAD_PENDING_EXC) — confirm offsets match.
+	ldr	r3, [thread, #4]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	cmp	r3, #0
+	DISPATCH_NEXT
+	bne	invokedynamic_exception_fix
+	DISPATCH_NEXT
+	CACHE_CP
+	DISPATCH_FINISH
+
+@ Out-of-line resolution stub: calls the runtime to resolve the
+@ invokedynamic call site, preserving lr.
+resolve_invokedynamic:
+	stmfd	arm_sp!, {lr}
+	ldr	r0, [istate, #ISTATE_THREAD]
+	bl	_ZN18InterpreterRuntime21resolve_invokedynamicEP10JavaThread
+	ldmfd	arm_sp!, {pc}
+
+	// Handler for java.lang.invoke.MethodHandles::invoke
+	@ Slow entry: establishes the thread register then tail-delegates to the
+	@ fast entry below.
+	ALIGN_CODE
+method_handle_entry:
+	stmfd   arm_sp!, {thread, lr}
+	mov     thread, r2
+	bl      fast_method_handle_entry
+	ldmfd   arm_sp!, {thread, pc}
+	@ Fast entry: all interpreter state is delegated to the C++
+	@ CppInterpreter::method_handle_entry.
+	ALIGN_CODE
+fast_method_handle_entry:
+	stmfd	arm_sp!, {regset, lr}
+	mov	r2, thread
+	bl	_ZN14CppInterpreter19method_handle_entryEP13methodOopDesciP6Thread
+	ldmia	sp!, {regset, pc}
+
+	// Common code for fast_aldc and fast_aldc_w
+# r0 = constpool cache entry
+	@ Macro: resolve (if needed) and push a constant-pool MethodHandle/
+	@ MethodType constant, then dispatch past \seq_len bytecode bytes.
+	.macro	aldc	opc, seq_len
+	// Save the pool entry
+	stmfd	arm_sp!, {r0}
+
+	DECACHE_JPC
+	ldr     r1, [r0, #CP_OFFSET+4]  // Pointer to call site
+	// Already resolved?
+	cmp     r1, #0 
+	mov	r0, thread
+	mov	r1, #\opc
+	bleq    _ZN18InterpreterRuntime11resolve_ldcEP10JavaThreadN9Bytecodes4CodeE
+
+	// Restore the pool entry
+	ldmfd	arm_sp!, {r0}
+
+	ldr     r1, [r0, #CP_OFFSET+4]  // Pointer to MethodHandle
+	PUSH	r1
+
+	// Test for an exception
+	@ NOTE(review): [thread, #4] presumably == THREAD_PENDING_EXC; confirm.
+	ldr	r3, [thread, #4]
+	cmp	r3, #0
+	bne	handle_exception
+	
+	DISPATCH	\seq_len
+	.endm
+
+	// Handler for ldc MethodHandle
+# r2 = [jpc, #1]
+	@ Single-byte constant-pool index.
+	Opcode fast_aldc
+	DECACHE_STACK
+        add     r0, constpool, r2, lsl #4
+	aldc opc_fast_aldc, 2
+
+	// Handler for ldc_w MethodHandle
+# r2 = [jpc, #1]
+# r1 = [jpc, #2]
+	@ Two-byte (little-endian assembled) constant-pool index.
+	Opcode fast_aldc_w
+	// Fetch index bytes from bytecode
+	ldrb	r1, [jpc, #2]
+	DECACHE_STACK
+	orr	r1, r2, r1, lsl #8
+        add     r0, constpool, r1, lsl #4
+	aldc opc_fast_aldc_w, 3
+
+#ifdef FAST_BYTECODES
+# r2 = [jpc, #1]
+# r1 = [jpc, #2]
+	@ -- invokevfinal (rewritten fast bytecode) -----------------------------
+	@ Virtual call to a final method: no vtable lookup needed, the target
+	@ method is cached directly in the cp-cache entry.  Null-checks the
+	@ receiver (hardware null-check builds provoke an abort via a dummy
+	@ load) and joins the common invoke path.
+	Opcode	invokevfinal
+	ldrb	r1, [jpc, #2]
+	DECACHE_STACK
+	add	r0, constpool, r1, lsl #12
+	DECACHE_JPC
+	add	r0, r2, asl #4
+	ldr	r3, [r0, #CP_OFFSET+12]
+	and	r1, r3, #255
+	ldr	r2, [stack, r1, asl #2]
+	mov	r1, #0
+	SW_NPC	cmp	r2, #0
+	SW_NPC	beq	null_ptr_exception
+.abortentry117:
+	HW_NPC	ldr	r3, [r2]		@ Only to provoke abort
+
+	ldr	tmp1, [r0, #CP_OFFSET+8]
+
+	ldr	ip, [tmp1, #METHOD_FROM_INTERPRETED]
+	str	r1, [thread, #THREAD_LAST_JAVA_SP]
+
+	add	stack, stack, #4
+	str	stack, [thread, #THREAD_JAVA_SP]
+
+	ldr	r3, [ip, #0]
+	b	normal_dispatch_and_return
+#endif // FAST_BYTECODES
+
+# r2 = [jpc, #1]
+# r1 = [jpc, #2]
+	@ -- invokevirtual ------------------------------------------------------
+	@ Resolve the cp-cache entry if needed.  With FAST_BYTECODES the
+	@ bytecode is rewritten to invokeresolved/invokevfinal and re-executed;
+	@ otherwise the vtable dispatch is done inline here.
+        Opcode  invokevirtual
+	ldrb	r1, [jpc, #2]
+        add     r0, constpool, r1, lsl #12
+	add	r0, r0, r2, asl #4
+        ldr     r2, [r0, #CP_OFFSET]
+        and     r2, r2, #0xff000000
+        cmp     r2, #opc_invokevirtual << 24
+        blne    resolve_invokevirtual
+        ldr     r3, [r0, #CP_OFFSET+12]
+#ifdef FAST_BYTECODES
+        mov     r0, #opc_invokeresolved
+        tst     r3, #flag_vfinalMethod
+        movne   r0, #opc_invokevfinal
+        b       rewrite_bytecode
+#else
+        DECACHE_STACK
+	DECACHE_JPC
+        and     r1, r3, #255
+        ldr     r2, [stack, r1, asl #2]
+        mov     r1, #0
+        cmp     r2, #0
+        beq     null_ptr_exception
+
+        ldr     tmp1, [r0, #CP_OFFSET+8]
+        tst     r3, #flag_vfinalMethod
+        bne     1f
+
+        @ vtable dispatch: tmp1 is the vtable index, r2 the receiver.
+        ldr     r3, [r2, #4]
+        add     r3, r3, tmp1, lsl #2
+        ldr     tmp1, [r3, #INSTANCEKLASS_VTABLE_OFFSET]
+1:
+#endif // FAST_BYTECODES
+
+#ifdef FAST_BYTECODES
+# r2 = [jpc, #1]
+# r1 = [jpc, #2]
+	@ -- invokeresolved (rewritten fast bytecode) ---------------------------
+	@ invokevirtual with an already-resolved cp-cache entry: null-check the
+	@ receiver, index the vtable, fall through to the shared call path.
+        Opcode  invokeresolved
+	ldrb	r1, [jpc, #2]
+        DECACHE_STACK
+        add     r0, constpool, r1, lsl #12
+	DECACHE_JPC
+        add     r0, r0, r2, asl #4
+        ldr     r3, [r0, #CP_OFFSET+12]
+        and     r1, r3, #255
+        ldr     r2, [stack, r1, asl #2]
+        mov     r1, #0
+        SW_NPC	cmp     r2, #0
+        SW_NPC	beq     null_ptr_exception_jpc_0
+
+        ldr     tmp1, [r0, #CP_OFFSET+8]
+.abortentry104:
+        ldr     r3, [r2, #4]
+        add     r3, r3, tmp1, lsl #2
+        ldr     tmp1, [r3, #INSTANCEKLASS_VTABLE_OFFSET]
+#endif // FAST_BYTECODES
+        ldr     ip, [tmp1, #METHOD_FROM_INTERPRETED]
+        str     r1, [thread, #THREAD_LAST_JAVA_SP]
+
+        add     stack, stack, #4
+        str     stack, [thread, #THREAD_JAVA_SP]
+
+        ldr     r3, [ip, #0]
+
+@ Shared call/return path for 3-byte invoke bytecodes: tmp1 = target
+@ method, r3 = entry point.  Calls the callee, then re-establishes the
+@ interpreter state and dispatches past the invoke instruction.
+normal_dispatch_and_return:
+	mov	r0, tmp1
+#ifdef SHARK
+	mov	r2, thread
+#else
+	add	r3, r3, #FAST_ENTRY_OFFSET
+#endif
+	blx	r3
+
+	adrl	ip, dispatch_init_adcon
+	ldm	ip, {r0, r1}
+	add	r0, r0, ip
+	add	dispatch, r1, r0
+
+	CACHE_LOCALS
+
+	CACHE_JPC
+	ldr	stack, [thread, #THREAD_JAVA_SP]
+	ldr	r2, [istate, #ISTATE_STACK_LIMIT]
+	sub	stack, stack, #4
+
+	ldr	r1, [thread, #THREAD_TOP_ZERO_FRAME]
+	add	r2, r2, #4
+	mov	r3, #0
+	str	r3, [thread, #THREAD_LAST_JAVA_SP]
+	str	r2, [thread, #THREAD_JAVA_SP]
+	str	r1, [thread, #THREAD_LAST_JAVA_FP]
+	str	r2, [thread, #THREAD_LAST_JAVA_SP]
+	DISPATCH_START	3
+	ldr	r3, [thread, #THREAD_PENDING_EXC]
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	cmp	r3, #0
+	DISPATCH_NEXT
+
+	bne	invoke_exception_fix
+	DISPATCH_NEXT
+	CACHE_CP
+	DISPATCH_FINISH
+
+	@ -- invokestatic -------------------------------------------------------
+	@ Resolve the cp-cache entry if needed; FBC builds then rewrite the
+	@ bytecode to invokestaticresolved and re-execute.
+	Opcode	invokestatic
+	ldrb	r1, [jpc, #2]
+        add     r0, constpool, r1, lsl #12
+	add	r0, r0, r2, asl #4
+        ldr     r2, [r0, #CP_OFFSET]
+	and	r2, r2, #0x00ff0000
+	cmp	r2, #opc_invokestatic << 16
+	blne	resolve_invokestatic
+  FBC	mov	r0, #opc_invokestaticresolved
+  FBC	b	rewrite_bytecode
+
+	@ Resolved static call: no receiver, no null check — fetch the method
+	@ from the cache and join the common call path.
+  FBC	Opcode	invokestaticresolved
+  FBC	ldrb	r1, [jpc, #2]
+        DECACHE_STACK
+  FBC   add     r0, constpool, r1, lsl #12
+	DECACHE_JPC
+  FBC	add	r0, r2, asl #4
+
+	ldr	tmp1, [r0, #CP_OFFSET+4]
+	mov	r1, #0
+	ldr	r3, [tmp1, #METHOD_FROM_INTERPRETED]
+	str	r1, [thread, #THREAD_LAST_JAVA_SP]
+	str	r1, [thread, #THREAD_LAST_JAVA_FP]
+
+	add	stack, stack, #4
+	str	stack, [thread, #THREAD_JAVA_SP]
+
+	ldr	r3, [r3, #0]
+	b	normal_dispatch_and_return
+
+
+	@ -- invokespecial ------------------------------------------------------
+	@ Resolve if needed; FBC builds rewrite to invokespecialresolved.
+	Opcode	invokespecial
+     	ldrb	r1, [jpc, #2]
+        add     r0, constpool, r1, lsl #12
+	add	r0, r0, r2, asl #4
+        ldr     r2, [r0, #CP_OFFSET]
+        and     r2, r2, #0x00ff0000
+        cmp     r2, #opc_invokespecial << 16
+        blne     resolve_invokespecial
+  FBC	mov	r0, #opc_invokespecialresolved
+  FBC	b	rewrite_bytecode
+
+	@ Resolved special call: statically-bound target, but the receiver
+	@ must still be null-checked (SW or HW scheme).
+  FBC	Opcode	invokespecialresolved
+  FBC	ldrb	r1, [jpc, #2]
+        DECACHE_STACK
+  FBC   add     r0, constpool, r1, lsl #12
+	DECACHE_JPC
+  FBC	add	r0, r2, asl #4
+
+	ldr	r3, [r0, #CP_OFFSET+12]
+	and	r3, r3, #255
+	ldr	r2, [stack, r3, asl #2]
+	mov	r1, #0
+	SW_NPC	cmp	r2, #0
+	SW_NPC	beq	null_ptr_exception
+.abortentry118:
+	HW_NPC	ldr	r3, [r2]		@ Only to provoke abort
+
+	ldr	tmp1, [r0, #CP_OFFSET+4]
+
+	ldr	ip, [tmp1, #METHOD_FROM_INTERPRETED]
+	str	r1, [thread, #THREAD_LAST_JAVA_SP]
+
+	add	stack, stack, #4
+	str	stack, [thread, #THREAD_JAVA_SP]
+
+	ldr	r3, [ip, #0]
+	b	normal_dispatch_and_return
+
+// This code is logically part of normal_entry, but it's been moved
+// out because there is only a FAST_ENTRY_OFFSET sized gap here.
+
+.normal_entry_return:
+	mov	r0, #0	@ deoptimized_frames = 0
+	ldmfd	arm_sp!, {regset, pc}
+	@ -- normal_entry -------------------------------------------------------
+	@ Slow (C-ABI) entry for an unsynchronized interpreted method: native
+	@ stack check, then fall into the fast entry.
+	SLOW_ENTRY
+normal_entry:
+	stmfd	arm_sp!, {regset, lr}
+	mov	thread, r2
+	ldr	r7, [thread, #THREAD_STACK_SIZE]
+	ldr	r3, [thread, #THREAD_STACK_BASE]
+	rsb	r3, r7, r3
+	rsb	r3, r3, arm_sp
+	cmp	r3, #32768
+	blt	stack_overflow_no_frame
+	add	lr, pc, #(.normal_entry_return-(.normal_entry1+4))
+.normal_entry1:
+
+	@ Fast entry: builds the interpreter frame inline (no build_frame
+	@ call) and starts dispatching, interleaving the frame setup with the
+	@ first DISPATCH_* pipeline steps for speed.
+	FAST_ENTRY
+fast_normal_entry:
+	adrl	ip, dispatch_init_adcon
+	mov	tmp1, r0
+	ldm	ip, {r0, r1}
+	add	r0, r0, ip
+	ldr	stack, [thread, #THREAD_JAVA_SP]
+	add	dispatch, r1, r0
+
+	stmdb	arm_sp!, {fast_regset, lr}
+
+	ldrh	r0, [tmp1, #METHOD_MAXLOCALS]
+	mov	r1, #0
+	ldrh	r3, [tmp1, #METHOD_SIZEOFPARAMETERS]
+        mov     ip, #INTERPRETER_FRAME
+	ldrh	r2, [tmp1, #METHOD_MAXSTACK]
+        sub     r7, r0, r3
+
+	@ Java-stack overflow check (locals + frame + expression stack).
+	ldr	r3, [thread, #THREAD_JAVA_STACK_BASE]
+	sub	r5, stack, r7, lsl #2
+	sub	r5, r5, #FRAME_SIZE+STACK_SPARE+LEAF_STACK_SIZE
+	sub	r5, r5, r2, lsl #2
+	cmp	r3, r5
+	bcs	stack_overflow_before_frame
+
+	@ Zero the r7 non-parameter locals, two words per iteration with an
+	@ odd-count pre-push.
+        subs    r5, r7, #2
+        tst     r7, #1
+        strne   r1, [stack, #-4]!  // stack->push(0);
+        bcc     3f
+1:
+        str     r1, [stack, #-4]
+        str     r1, [stack, #-8]!
+        subs    r5, r5, #2
+        bcs     1b
+3:
+	@ Lay out the interpreterState below the locals and initialise it.
+	ldr	r3, [thread, #THREAD_TOP_ZERO_FRAME]
+	mov	lr, #0
+        sub     istate, stack, #FRAME_SIZE     // stack->push(INTERPRETER_FRAME);
+        sub     r2, istate, r2, lsl #2
+        str     lr, [istate, #ISTATE_MSG]
+	str	r2, [thread, #THREAD_JAVA_SP]
+        sub     r5, r2, #4                      @ stack limit = istate - stackwords - 4
+	str	r3, [istate, #ISTATE_NEXT_FRAME]
+	str	ip, [istate, #ISTATE_FRAME_TYPE]
+	str	istate, [istate, #ISTATE_MONITOR_BASE]
+	str	r5, [istate, #ISTATE_STACK_LIMIT]
+	str	istate, [istate, #ISTATE_STACK_BASE]
+        sub     locals, stack, #4
+	str	r1, [istate, #ISTATE_OOP_TEMP]
+        add     locals, locals, r0, lsl #2
+        sub     stack, istate, #4
+        ldr     jpc, [tmp1, #METHOD_CONSTMETHOD]
+        ldr     constpool, [tmp1, #METHOD_CONSTANTS]
+        add     ip, istate, #ISTATE_NEXT_FRAME
+	@ Begin dispatch of the first bytecode while finishing frame setup;
+	@ USEC lines bump the invocation counter and test the compile limit.
+	DISPATCH_START	CONSTMETHOD_CODEOFFSET
+        ldr     constpool, [constpool, #CONSTANTPOOL_CACHE]
+        str     ip, [thread, #THREAD_TOP_ZERO_FRAME]
+  USEC	ldr	r3, [tmp1, #METHOD_INVOCATIONCOUNTER]
+	mov	r1, #0
+        str     r1, [thread, #THREAD_LAST_JAVA_SP]
+        str     ip, [thread, #THREAD_LAST_JAVA_FP]
+	ldr	ip, [thread, #THREAD_JAVA_SP]
+        str     ip, [thread, #THREAD_LAST_JAVA_SP]
+	DISPATCH_NEXT
+  USEC	ldr	lr, [dispatch, #InterpreterInvocationLimit_Address-XXX]
+  USEC	add	r3, r3, #INVOCATIONCOUNTER_COUNTINCREMENT
+        str     thread, [istate, #ISTATE_THREAD]
+  USEC	ldr	lr, [lr]
+  USEC	str	r3, [tmp1, #METHOD_INVOCATIONCOUNTER]
+	str	locals, [istate, #ISTATE_LOCALS]
+  USEC	cmp	r3, lr
+	str	constpool, [istate, #ISTATE_CONSTANTS]
+  USEC	bcs	method_entry_freq_count_overflow
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	DISPATCH_NEXT
+	str	tmp1, [istate, #ISTATE_METHOD]
+ 	str	istate, [istate, #ISTATE_SELF_LINK]
+@	mov	lr, #0
+@        str     lr, [istate, #ISTATE_PREV_LINK]
+@	str	lr, [istate, #ISTATE_CALLEE]
+	DISPATCH_FINISH
+#ifdef USE_COMPILER
+@ Invocation-counter overflow on normal entry: same big/small-method
+@ background/foreground compile split as the synchronized variant above.
+method_entry_freq_count_overflow:
+        ldr     r3, [tmp1, #METHOD_CONSTMETHOD]
+	DECACHE_JPC
+        ldrh    r3, [r3, #CONSTMETHOD_CODESIZE]
+	str	tmp1, [istate, #ISTATE_METHOD]
+	mov	r1, #0
+	mov	r0, thread
+        cmp     r3, #MAX_FG_METHOD_SIZE
+        bcc     1f
+        ldr     tmp1, [dispatch, #BackgroundCompilation_Address-XXX]
+        mov     r3, #1
+        ldr     r5, [tmp1]
+        str     r3, [tmp1]
+        bl      FREQ_COUNT_OVERFLOW
+        str     r5, [tmp1]
+        b       2f
+1:
+	bl	FREQ_COUNT_OVERFLOW
+2:
+ T2	cmp	r0, #0
+ T2	bne	call_thumb2
+	CACHE_JPC
+	CACHE_CP
+	DISPATCH	0
+
+#ifdef T2JIT
+
+#define JAZ_V1	r5
+#define JAZ_V2	r6
+#define JAZ_V3	r7
+#define JAZ_V4	r8
+#define	JAZ_V5	r9
+#define	JAZ_V6	r11
+
+#define JAZ_REGSET	JAZ_V1,JAZ_V2,JAZ_V3,JAZ_V4,JAZ_V5,JAZ_V6
+#define JAZ_REGSET_LEN	6
+
+@ Transfer into Thumb2-JIT-compiled code: r0 = code entry, r1 points at a
+@ table of local-slot indices (-1 = unused) describing which locals the
+@ compiled code expects preloaded into JAZ_V1..JAZ_V6.
+call_thumb2:
+	str	istate, [istate, #ISTATE_SELF_LINK]
+	stmdb	sp!, {JAZ_REGSET}
+	mov	ip, #0
+3:
+	ldrsh	r3, [r1], #2
+	cmp	r3, #-1
+	ldrne	r3, [locals, -r3, lsl #2]
+	strne	r3, [sp, ip, lsl #2]
+	add	ip, ip, #1
+	cmp	ip, #JAZ_REGSET_LEN
+	bne	3b
+
+	ldmia	sp!, {JAZ_REGSET}
+1:
+	add	stack, stack, #4
+	bx r0
+#endif // T2JIT
+
+#endif // USE_COMPILER
+	@ Install a compiled entry point into a method's from_interpreted slot.
+	.global	Thumb2_Install
+	.type Thumb2_Install, %function
+Thumb2_Install:
+@	ldr	r0, [r0]
+	str	r1, [r0, #METHOD_FROM_INTERPRETED]
+	bx	lr
+
+@ Normal method return: unlock any remaining monitors, pop the zero
+@ frame, copy the return value (1 or 2 words depending on the return
+@ bytecode at jpc) to the caller's stack, and return to the caller.
+handle_return:
+	ldr	r9, [istate, #ISTATE_MONITOR_BASE]	@ r9 = base
+
+	ldr	tmp1, [istate, #ISTATE_STACK_BASE]	@ tmp1 = end
+
+	cmp	tmp1, r9
+	blcc	return_check_monitors
+
+	mov	r3, #0
+	@ lr = the return bytecode itself — selects 0/1/2 result words below.
+	ldrb	lr, [jpc, #0]
+
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	str	r3, [thread, #THREAD_LAST_JAVA_SP]
+	str	r3, [thread, #THREAD_LAST_JAVA_FP]
+	ldr	r0, [istate, #ISTATE_METHOD]
+	ldr	r3, [r2, #0]
+	@ NOTE(review): #40 is presumably METHOD_SIZEOFPARAMETERS — confirm
+	@ against the offset macros used elsewhere in this file.
+	ldrh	r0, [r0, #40]
+	add	r1, r2, #4
+	str	r3, [thread, #THREAD_TOP_ZERO_FRAME]
+
+	@ r1 = caller's Java SP after popping this frame's parameters.
+	add	r1, r1, r0, lsl #2
+
+	@ lreturn/dreturn push two words; ireturn/freturn/areturn push one;
+	@ plain return pushes none.
+	cmp	lr, #opc_lreturn
+	cmpne	lr, #opc_dreturn
+	ldreq	r0, [stack, #8]
+	streq	r0, [r1, #-4]!
+	cmpne	lr, #opc_ireturn
+	cmpne	lr, #opc_freturn
+	cmpne	lr, #opc_areturn
+	ldreq	r0, [stack, #4]
+	streq	r0, [r1, #-4]!
+
+	str	r1, [thread, #THREAD_JAVA_SP]
+
+	mov	r0, #0	@ deoptimized_frames = 0
+	ldmfd	arm_sp!, {fast_regset, pc}
+
+@ ----------------------------------------------------------------------------------------
+@ Native stack overflow before any interpreter frame was built (slow
+@ entry): publish last_java_sp/fp and throw StackOverflowError; restores
+@ the full slow-entry register set.
+stack_overflow_no_frame:
+	mov	r0, thread
+	mov	ip, #0
+	str	ip, [r0, #THREAD_LAST_JAVA_SP]
+	ldr	ip, [r0, #THREAD_TOP_ZERO_FRAME]
+	str	ip, [r0, #THREAD_LAST_JAVA_FP]
+	ldr	ip, [r0, #THREAD_JAVA_SP]
+	str	ip, [r0, #THREAD_LAST_JAVA_SP]
+	bl	_ZN18InterpreterRuntime24throw_StackOverflowErrorEP10JavaThread
+	ldmfd	arm_sp!, {regset, pc}
+
+@ Same, but from a fast entry (restores fast_regset instead).
+stack_overflow_before_frame:
+	mov	r0, thread
+	mov	ip, #0
+	str	ip, [r0, #THREAD_LAST_JAVA_SP]
+	ldr	ip, [r0, #THREAD_TOP_ZERO_FRAME]
+	str	ip, [r0, #THREAD_LAST_JAVA_FP]
+	ldr	ip, [r0, #THREAD_JAVA_SP]
+	str	ip, [r0, #THREAD_LAST_JAVA_SP]
+	bl	_ZN18InterpreterRuntime24throw_StackOverflowErrorEP10JavaThread
+	ldmfd	arm_sp!, {fast_regset, pc}
+
+@ Exception raised while the monitor state is inconsistent: flag the
+@ thread so handle_exception skips the unlock pass.
+handle_exception_do_not_unlock:
+	mov	r3, #1
+	strb	r3, [thread, #THREAD_DO_NOT_UNLOCK]
+	b	handle_exception_with_bcp
+
+abstractmethod_exception:
+	mov	r0, #VMSYMBOLS_AbstractMethodError
+	b	raise_exception
+incompatibleclass_exception:
+	mov	r0, #VMSYMBOLS_IncompatibleClassChangeError
+@ r0 = vmSymbols index of the exception class; r1 = message (defaults to
+@ the empty string below).
+raise_exception:
+	adr	r1, null_str
+raise_exception_with_msg:
+	stmdb	sp!, {r0, r1}
+	bl	load_dispatch
+	ldmia	sp!, {r0, r1}
+	DECACHE_JPC
+        DECACHE_STACK
+	mov	r2, r1
+	ldr	r1, [dispatch, #VmSymbols_symbols_Address-XXX]
+	ldr	r1, [r1, r0, lsl #2]
+	mov	r0, thread
+	bl	Helper_Raise
+        b       handle_exception_with_bcp
+null_str:
+	.byte	0
+	ALIGN_WORD
+
+@ Exception entry points that first rewind jpc to the start of the
+@ faulting bytecode (invokes dispatched past their operands before the
+@ pending-exception check).
+invokedynamic_exception_fix:
+invokeinterface_exception_fix:
+	sub	jpc, jpc, #2
+invoke_exception_fix:
+invokenative_exception:
+return_exception:
+	sub	jpc, jpc, #3
+resolve_exception:
+putfield_exception:
+getfield_exception:
+handle_exception:
+@ jpc = Exception PC
+@ stack = garbage
+@ locals = garbage
+@ constpool = garbage
+	DECACHE_JPC
+handle_exception_with_bcp:
+	bl	load_dispatch
+	CACHE_JPC
+	@ Empty the expression stack before running the handler search.
+	ldr	stack, [istate, #ISTATE_STACK_BASE]
+	sub	stack, stack, #4
+	DECACHE_STACK
+	cmp	jpc, #0
+	beq	1f
+
+	@ Look for a handler in this method; nonzero result = handler bcp,
+	@ resume interpreting there.
+	mov	r0, istate
+	mov	r1, thread
+	bl	Helper_HandleException
+	cmp	r0, #0
+	beq	1f
+
+	mov	jpc, r0
+	CACHE_STACK
+	CACHE_LOCALS
+	CACHE_CP
+	DISPATCH 0
+1:
+	@ No handler here: unlock monitors (unless DO_NOT_UNLOCK is set),
+	@ pop the frame, and propagate to the caller.
+	ldr	r9, [istate, #ISTATE_MONITOR_BASE]	@ r9 = base
+
+	ldr	tmp1, [istate, #ISTATE_STACK_BASE]	@ tmp1 = end
+
+	mov	r3, #0
+	ldrb	r0, [thread, #THREAD_DO_NOT_UNLOCK]
+	strb	r3, [thread, #THREAD_DO_NOT_UNLOCK]
+	cmp	r0, #0
+	bne	2f
+
+	cmp	tmp1, r9
+	blcc	return_check_monitors
+
+2:
+	mov	r3, #0
+
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	str	r3, [thread, #THREAD_LAST_JAVA_SP]
+	ldr	r0, [istate, #ISTATE_METHOD]
+	ldr	r3, [r2, #0]
+	@ NOTE(review): #40 presumably == METHOD_SIZEOFPARAMETERS, as in
+	@ handle_return — confirm.
+	ldrh	r0, [r0, #40]
+	add	r1, r2, #4
+	str	r3, [thread, #THREAD_TOP_ZERO_FRAME]
+
+	add	r1, r1, r0, lsl #2
+
+	str	r1, [thread, #THREAD_JAVA_SP]
+
+	mov	r0, #0	@ deoptimized_frames = 0
+	ldmfd	arm_sp!, {fast_regset, pc}
+
+@ Walk the monitor block on method exit (tmp1 = first monitor, r9 = base).
+@ Any still-locked non-method monitor raises IllegalMonitorStateException;
+@ for a synchronized method (access flag bit 5) the method monitor itself
+@ is unlocked here, fast-path CAS first, runtime helper on contention.
+return_check_monitors:
+	stmdb	arm_sp!, {r4, lr}
+
+	DECACHE_JPC	// FIXME: May not be needed.
+	ldr	r2, [istate, #ISTATE_METHOD]
+	ldr	r4, [r2, #METHOD_ACCESSFLAGS]
+	@ Bit 5 = JVM_ACC_SYNCHRONIZED; skip the method monitor in the scan.
+	tst	r4, #1<<5
+	subne	r9, r9, #8
+	cmp	tmp1, r9
+	bcs	2f
+1:
+	@ Any monitor with a non-NULL obj at this point was never unlocked.
+	ldr	r3, [tmp1, #4]
+	cmp	r3, #0
+	bne	3f
+	add	tmp1, tmp1, #8
+	cmp	tmp1, r9
+	bcc	1b
+
+2:
+	tst	r4, #1<<5
+
+	ldmeqia	arm_sp!, {r4, pc}
+
+	@ Unlock the synchronized-method monitor.
+	ldr	tmp1, [r9, #4]		@ base->obj == NULL
+	cmp	tmp1, #0
+	beq	4f
+
+	ldr	r0, [r9, #0]			@ r0 = header
+	mov	r3, #0
+	cmp	r0, #0
+	str	r3, [r9, #4]			@ base->obj = NULL
+
+	@ Recursive lock (displaced header == 0): nothing more to do.
+	ldmeqia	arm_sp!, {r4, pc}
+
+	mov	r1, tmp1
+	mov	r2, r9
+	bl	cmpxchg_ptr
+	cmp	r9, r0
+
+	ldmeqia	arm_sp!, {r4, pc}
+
+	@ CAS failed (inflated/contended): restore obj and call the helper.
+	str	tmp1, [r9, #4]
+
+	mov	r1, r9
+	mov	r0, thread
+	bl	Helper_synchronized_exit
+
+	@ NOTE(review): conditional return after a bl — flags presumably set
+	@ by the helper's result path; confirm this EQ condition is intended.
+	ldmeqia	arm_sp!, {r4, pc}
+
+3:
+	mov	r0, thread
+	bl	Helper_RaiseIllegalMonitorException
+	b	2b
+
+4:
+	mov	r0, thread
+	bl	Helper_RaiseIllegalMonitorException
+	ldmia	arm_sp!, {r4, pc}
+
+	@ -- accessor_entry -----------------------------------------------------
+	@ Slow entry for a recognised accessor (getter) method: stack check,
+	@ then delegate to the fast entry.
+	SLOW_ENTRY
+accessor_entry:
+	stmfd	arm_sp!, {regset, lr}
+	mov	thread, r2
+	ldr	r7, [thread, #THREAD_STACK_SIZE]
+	ldr	r3, [thread, #THREAD_STACK_BASE]
+	rsb	r3, r7, r3
+	rsb	r3, r3, arm_sp
+	cmp	r3, #32768
+	blt	stack_overflow_no_frame
+	bl	fast_accessor_entry
+	ldmia	sp!, {regset, pc}
+
+	@ Fast accessor: if the method is a resolved getfield on a non-null
+	@ receiver, load the field straight into the caller's stack slot with
+	@ no frame built.  Any condition that fails falls back to
+	@ fast_normal_entry (full interpretation).
+	FAST_ENTRY
+fast_accessor_entry:
+  USEC	adrl	ip, dispatch_init_adcon
+  USEC	ldr	r3, [ip]
+  USEC	add	r3, r3, ip
+  USEC	ldr	ip, [ip, #invocationlimit_adcon-dispatch_init_adcon]
+  USEC	ldr	ip, [r3, ip]
+
+  USEC	ldr	r3, [r0, #METHOD_INVOCATIONCOUNTER]
+  USEC	ldr	ip, [ip, #0]
+  USEC	add	r3, r3, #INVOCATIONCOUNTER_COUNTINCREMENT
+  USEC	str	r3, [r0, #METHOD_INVOCATIONCOUNTER]
+  USEC	cmp	r3, ip
+  USEC	bcs	fast_normal_entry
+
+	@ Read the getfield's 2-byte cp index from the method's bytecode.
+	ldr	r1, [r0, #METHOD_CONSTMETHOD]
+	ldrb	r3, [r1, #CONSTMETHOD_CODEOFFSET+2]
+	ldrb	r1, [r1, #CONSTMETHOD_CODEOFFSET+3]
+	ldr	ip, [r0, #METHOD_CONSTANTS]
+	ldr	ip, [ip, #CONSTANTPOOL_CACHE]
+	orr	r3, r3, r1, lsl #8		@ r3 = index
+
+	add	r1, ip, #CP_OFFSET
+	ldr	r3, [r1, r3, lsl #4]!		@ r1 = cache, r3 = flags
+	ldr	ip, [thread, #THREAD_JAVA_SP]			@ ip == stack
+	and	r3, r3, #0x00ff0000
+	cmp	r3, #opc_getfield << 16
+	ldr	r3, [ip, #0]
+	bne	fast_normal_entry
+
+	@ Null receiver: punt to the normal entry (which will throw).
+	cmp	r3, #0
+	beq	fast_normal_entry
+
+	@ r0 = flags word, r1 = field offset; tos state in flags bits 29-31.
+	ldr	r0, [r1, #12]
+	ldr	r1, [r1, #8]
+	movs	r0, r0, lsr #29
+	bls	accessor_non_w
+
+	@ Word-sized field: load and overwrite the receiver slot in place.
+	ldr	r0, [r3, r1]
+	str	r0, [ip, #0]
+	mov	r0, #0
+	bx	lr
+
+@ Arithmetic exception stubs.  The _jpc_N ladders rewind jpc by N bytes
+@ (one `sub` per rung, falling through) so the reported bcp points at the
+@ faulting bytecode.
+div_zero_jpc_1:
+	sub	jpc, jpc, #1
+.lrem_0:
+.ldiv_0:
+divide_by_zero_exception:
+	mov	r0, #VMSYMBOLS_ArithmeticException
+	adr	r1, div_zero_msg
+	b	raise_exception_with_msg
+div_zero_msg:
+	.ascii  "/ by int zero\000"
+	ALIGN_WORD
+
+@ Array-bounds stubs: variants differ only in which register carries the
+@ offending index (r3, tmp1, or already r2) and how far jpc must rewind.
+array_bound_exception_jpc_4_r3:
+	sub	jpc, jpc, #1
+array_bound_exception_jpc_3_r3:
+	sub	jpc, jpc, #1
+array_bound_exception_jpc_2_r3:
+	sub	jpc, jpc, #1
+array_bound_exception_jpc_1_r3:
+	sub	jpc, jpc, #1
+array_bound_exception_jpc_0_r3:
+	mov	r2, r3
+	b	array_bounds_exception
+array_bound_exception_jpc_1_tmp1:
+	sub	jpc, jpc, #1
+array_bound_exception_jpc_0_tmp1:
+	mov	r2, tmp1
+	b	array_bounds_exception
+array_bound_exception_jpc_3:
+	sub	jpc, jpc, #1
+array_bound_exception_jpc_2:
+	sub	jpc, jpc, #1
+array_bound_exception_jpc_1:
+	sub	jpc, jpc, #1
+array_bound_exception_jpc_0:
+@ r2 = out-of-range index.
+array_bounds_exception:
+	DECACHE_JPC
+        DECACHE_STACK
+	mov	r1, r2
+	mov	r0, thread
+	bl	Helper_RaiseArrayBoundException
+        b       handle_exception_with_bcp
+
+@ Software null-check stubs (only needed when hardware null-pointer
+@ trapping is disabled).
+#ifndef HW_NULL_PTR_CHECK
+null_ptr_exception_jpc_5:
+	sub	jpc, jpc, #1
+null_ptr_exception_jpc_4:
+	sub	jpc, jpc, #1
+null_ptr_exception_jpc_3:
+	sub	jpc, jpc, #1
+null_ptr_exception_jpc_2:
+	sub	jpc, jpc, #1
+null_ptr_exception_jpc_1:
+	sub	jpc, jpc, #1
+null_ptr_exception_jpc_0:
+#endif
+null_ptr_exception:
+	mov	r0, #VMSYMBOLS_NullPointerException
+	b	raise_exception
+
+@ ==== SW FP ==============================================================================
+@ Software floating-point bytecodes implemented via the EABI soft-float
+@ runtime (__aeabi_*).  Binary ops pop operands, call the helper, push
+@ the result; the fcmp/dcmp handlers probe gt, lt, then eq in turn,
+@ reloading the operands from the stack before each helper call (the
+@ helpers clobber r0-r3), and differ only in the NaN result (l: -1, g: +1).
+
+	Opcode	fadd
+	POP	r0, r1
+        bl      __aeabi_fadd
+	PUSH	r0
+	DISPATCH	1
+
+	Opcode	fsub
+	POP	r1
+	POP	r0
+        bl      __aeabi_fsub
+	PUSH	r0
+	DISPATCH	1
+
+	Opcode	fmul
+	POP	r0, r1
+        bl      __aeabi_fmul
+	PUSH	r0
+	DISPATCH	1
+
+	Opcode	fdiv
+	POP	r1
+	POP	r0
+        bl      __aeabi_fdiv
+	PUSH	r0
+	DISPATCH	1
+
+	Opcode	ddiv
+	POP	r2, r3
+	POP	r0, r1
+        bl      __aeabi_ddiv
+	PUSH	r0, r1
+	DISPATCH	1
+
+	@ fcmpl: result -1/0/+1; NaN compares as -1 (moveq path).
+	Opcode	fcmpl
+        ldmib   stack, {r0, r1}
+        bl      __aeabi_fcmpgt
+        cmp     r0, #0
+        movne   r3, #-1
+        bne     3f
+        ldmib   stack, {r0, r1}
+        bl      __aeabi_fcmplt
+        cmp     r0, #0
+        movne   r3, #1
+        bne     3f
+        ldmib   stack, {r0, r1}
+        bl      __aeabi_fcmpeq
+        cmp     r0, #0
+        movne   r3, #0
+        moveq   r3, #-1
+3:
+	DISPATCH_START	1
+	add	stack, stack, #8
+	DISPATCH_NEXT
+	PUSH	r3
+	DISPATCH_FINISH
+
+	@ fcmpg: as fcmpl but NaN compares as +1.
+	Opcode	fcmpg
+        ldmib   stack, {r0, r1}
+        bl      __aeabi_fcmpgt
+        cmp     r0, #0
+        movne   r3, #-1
+        bne     4f
+        ldmib   stack, {r0, r1}
+        bl      __aeabi_fcmplt
+        cmp     r0, #0
+        movne   r3, #1
+        bne     4f
+        ldmib   stack, {r0, r1}
+        bl      __aeabi_fcmpeq
+        cmp     r0, #0
+        movne   r3, #0
+        moveq   r3, #1
+4:
+	DISPATCH_START	1
+	add	stack, stack, #8
+	DISPATCH_NEXT
+	PUSH	r3
+	DISPATCH_FINISH
+
+	@ dcmpl: double compare, NaN -> -1; pops four words.
+	Opcode	dcmpl
+        ldmib   stack, {r0, r1, r2, r3}
+        bl      __aeabi_dcmpgt
+        cmp     r0, #0
+        movne   r3, #-1
+        bne     5f
+        ldmib   stack, {r0, r1, r2, r3}
+        bl      __aeabi_dcmplt
+        cmp     r0, #0
+        movne   r3, #1
+        bne     5f
+        ldmib   stack, {r0, r1, r2, r3}
+        bl      __aeabi_dcmpeq
+        cmp     r0, #0
+        movne   r3, #0
+        moveq   r3, #-1
+5:
+	DISPATCH_START	1
+	add	stack, stack, #16
+	DISPATCH_NEXT
+	PUSH	r3
+	DISPATCH_FINISH
+
+	@ dcmpg: double compare, NaN -> +1; pops four words.
+	Opcode	dcmpg
+        ldmib   stack, {r0, r1, r2, r3}
+        bl      __aeabi_dcmpgt
+        cmp     r0, #0
+        movne   r3, #-1
+        bne     6f
+        ldmib   stack, {r0, r1, r2, r3}
+        bl      __aeabi_dcmplt
+        cmp     r0, #0
+        movne   r3, #1
+        bne     6f
+        ldmib   stack, {r0, r1, r2, r3}
+        bl      __aeabi_dcmpeq
+        cmp     r0, #0
+        movne   r3, #0
+        moveq   r3, #1
+6:
+	DISPATCH_START	1
+	add	stack, stack, #16
+	DISPATCH_NEXT
+	PUSH	r3
+	DISPATCH_FINISH
+
+@ ==== Fast SW FP emulation ===============================================================
+
+@ Register aliases used by the software double add/subtract below:
+@   al:ah  = operand/result a (low:high word of the IEEE-754 double)
+@   bl:bh  = operand b
+@   tmp    = scratch; initially the mask 0xFFE00000 (exponent+sign test)
+@   sh     = alignment shift count
+@   ex_add = saved exponent of the larger operand / scratch
+#define al		r0
+#define ah		r1
+#define bl		r2
+#define bh		r3
+#define tmp		tmp1
+#define sh		r12
+#define ex_add		r14
+
+@ TOS = TOSM1 + TOS
+@ What we actually do is TOS = TOS + TOSM1
+@ --- do_dadd_vtos -------------------------------------------------
+@ Software IEEE-754 double addition.  Inf/NaN operands branch to
+@ .dadd_naninf; operands with differing signs are turned into a
+@ subtraction at ._dsub1.  The magnitudes are swapped so |a| >= |b|,
+@ b's mantissa is aligned right by the exponent difference, added,
+@ then the result is rounded to nearest-even.
+	Opcode	dadd
+	POP	al, ah, bl, bh
+	mov	tmp, #0xff000000
+	orr	tmp, tmp, #0x00e00000           @ tmp = 0xFFE00000
+        bics    ex_add, tmp, ah, LSL #1         @ EQ if a's exponent is all ones (Inf/NaN)
+        bicnes  ex_add, tmp, bh, LSL #1         @ ... or b's
+        beq     .dadd_naninf
+        teq     ah, bh                          @ MI if signs differ:
+        eormi   bh, bh, #1 << 31                @   negate b and
+        bmi     ._dsub1                         @   subtract instead
+._dadd1:
+        subs    ex_add, al, bl                  @ 64-bit compare a - b
+        sbcs    sh, ah, bh
+        bhs     .dadd_swapped                   @ already a >= b
+        adds    bl, bl, ex_add                  @ swap using the difference:
+        adc     bh, bh, sh                      @   b += (a - b)
+        subs    al, al, ex_add                  @   a -= (a - b)
+        sbc     ah, ah, sh
+.dadd_swapped:
+        mov     ex_add, ah, LSR #20             @ sign+exponent of a
+        sub     sh, ex_add, bh, LSR #20         @ alignment shift = exp_a - exp_b
+        tst     tmp, bh, LSL #1                 @ b zero or denormal?
+        beq     .dadd_uncommon
+        bic     ah, ah, ex_add, LSL #20         @ strip sign/exponent from a
+        bic     bh, bh, tmp                     @ strip sign/exponent from b
+        orr     bh, bh, #1 << 20                @ restore b's implicit leading bit
+.dadd_doadd:
+        rsbs    tmp, sh, #32
+        blo     .dadd_bigshift                  @ shift of 33 or more
+.dadd_smallshift:
+        adds    al, al, bl, LSR sh              @ add b, shifted right by sh
+        adc     ah, ah, bh, LSR sh
+        adds    al, al, bh, LSL tmp
+        adcs    ah, ah, #0
+        cmp     ah, #1 << 20                    @ did the mantissa carry out?
+        bhs     .dadd_carry
+        add     ah, ah, ex_add, LSL #20     @ add exponent
+.dadd_nocarry:
+        movs    bl, bl, LSL tmp         @ round and sticky bits
+	bpl	.dadd_exit              @ round bit clear: result exact enough
+        adds    al, al, #1
+        movccs  bl, bl, LSL #1          @ is sticky bit zero?
+	bne	.dadd_exit
+.dadd_roundeven:
+        cmp     al, #0
+        bicne   al, al, #1              @ RTE if carry didn't occur
+        adceq   ah, ah, #0              @ increment high word if it did
+.dadd_check_overflow_inx:
+        mov     bh, ah, LSL #1
+        cmp     bh, #0xFFE00000         @ exponent reached 0x7FF?
+	blo	.dadd_exit
+        subhs   ah, ah, #3<<29          @ bias exponent
+	b	.return_double_Inf
+.dadd_bigshift:
+        cmp     bl, #1                  @ C = (bl != 0): fold low word
+        adc     bl, bh, bh              @   into a single sticky bit
+        sub     sh, sh, #32             @ nonzero
+        rsbs    tmp, sh, #31            @ sets C if within a word
+        movlo   tmp, #0                 @ C clear if sh > 31
+        addhss  al, al, bh, LSR sh
+        adc     ah, ah, ex_add, LSL #20
+        cmp     ex_add, ah, LSR #20
+        beq     .dadd_nocarry
+        sub     ah, ah, ex_add, LSL #20
+.dadd_carry:
+        add     ah, ah, #1 << 20        @ mantissa overflowed: shift right 1
+        movs    ah, ah, LSR #1          @   and bump the exponent
+        add     ah, ah, ex_add, LSL #20
+        movs    al, al, RRX
+        bcc     .dadd_check_overflow_exact
+        adcs    al, al, #0
+        movccs  tmp, bl, LSL tmp    @EQ = round to even
+        bne     .dadd_check_overflow_exact
+        b       .dadd_roundeven
+.dadd_rnearup_carry:
+        adds    al, al, #1
+        movccs  bl, bl, LSL #1          @ is sticky bit zero?
+        bne     .dadd_check_overflow_inx
+        b       .dadd_roundeven
+.dadd_check_overflow_exact:
+        mov     bh, ah, LSL #1
+        cmp     bh, #0xFFE00000
+	blo	.dadd_exit
+        sub     ah, ah, #3<<29          @ bias exponent
+
+@ Build +/-Inf (0x7FF00000:00000000 with the sign in a3) and fall
+@ through to the common push-and-dispatch exit.
+.return_double_Inf:
+	and	a3, ah, #0x80000000
+.return_double_Inf_1:
+	mov	al, #0
+	mov	ah, #0x7f000000
+	orr	ah, ah, #0x00f00000
+	orr	ah,ah,a3
+.dadd_exit:
+	PUSH	al, ah
+	DISPATCH	1
+
+@ Slow paths for dadd: b is zero/denormal, or one operand is Inf/NaN.
+.dadd_uncommon:
+        orrs    tmp, bl, bh, LSL #1     @ Is b zero or denormal?
+        beq     .dadd_bzero
+        movs    tmp, ex_add, LSL #21    @ is a denormal too?
+        bic     ah, ah, ex_add, LSL #20
+        bicne   bh, bh, #1 << 31        @ a normal: clear b's sign copy
+        subne   sh, sh, #1              @ adjust exponent to fake exp_b = 1
+        bne     .dadd_doadd    
+        adds    al, al, bl              @ both denormal: plain 64-bit add
+        adc     ah, ah, bh
+        b       .daddsub_denorm
+.dadd_bzero:
+        movs    tmp, ex_add, LSL #21        @ is a denormal?
+	bne	.dadd_exit              @ a normal: b==0, return a as-is
+        orrs    tmp, al, ah, LSL #1     @ a zero?
+	beq	.dadd_exit
+        b       .daddsub_denorm
+
+@ At least one operand is Inf or NaN.  NaN beats Inf; Inf + -Inf is NaN.
+.dadd_naninf:
+        cmp     al, #1                  @ sets C if al!=0
+        adc     ex_add, ah, ah
+        cmp     bl, #1
+        adc     sh, bh, bh
+        cmp     ex_add, tmp                 @ HI if a is NaN
+        cmpls   sh, tmp                 @ HI if either is NaN
+        bhi     .return_double_NaN
+        cmp     ex_add, sh
+        beq     .dadd_twoinf
+        cmp     ex_add, tmp                 @ EQ if a is Inf
+        movne   ah, bh                  @ a finite: the Inf (b) wins
+        movne   al, bl
+	b	.dadd_exit
+.dadd_twoinf:
+        teq     ah, bh                  @ same-signed Infs: return one of them
+	bpl	.dadd_exit
+	b	.return_double_NaN      @ Inf + -Inf = NaN
+
+@ --- do_dsub_itos -------------------------------------------------
+@ Software IEEE-754 double subtraction.  The sign of a is flipped and,
+@ if the operands then have the same sign, the computation is handed to
+@ the addition path (._dadd1); otherwise b's sign is flipped too and we
+@ fall into the true-subtract path below.  Register aliases as for dadd.
+	Opcode	dsub
+	POP	al, ah, bl, bh
+        mov     tmp, #0xff000000
+        orr     tmp, tmp, #0x00e00000   @ tmp = 0xFFE00000
+        bics    ex_add, tmp, ah, LSL #1 @ EQ if a is Inf/NaN
+        bicnes  ex_add, tmp, bh, LSL #1 @ ... or b
+        beq     .drsb_naninf
+	teq	ah, bh                  @ MI if signs differ
+	eor	ah, ah, #1 << 31        @ negate a
+	bmi	._dadd1                 @ signs differed: now equal -> add
+	eor	bh, bh, #1 << 31        @ negate b as well: true subtract
+._dsub1:
+        subs    ex_add, al, bl          @ 64-bit magnitude compare
+        sbcs    sh, ah, bh
+        bhs     .dsub_swapped
+.dsub_do_swap:
+        eor     sh, sh, #1 << 31        @ negate a and b as a - b == -b - -a
+        adds    bl, bl, ex_add
+        adc     bh, bh, sh
+        subs    al, al, ex_add
+        sbc     ah, ah, sh
+.dsub_swapped:
+        mov     ex_add, ah, LSR #20     @ sign+exponent of a
+        sub     sh, ex_add, bh, LSR #20 @ alignment shift
+        tst     tmp, bh, LSL #1         @ b zero/denormal?
+        beq     .dsub_uncommon
+        bic     ah, ah, ex_add, LSL #20
+        bic     bh, bh, tmp, ASR #1
+        rsbs    bl, bl, #0              @ negate b's mantissa so we can add
+        rsc     bh, bh, tmp, ASR #1     @ 0xffe00000 >> 1 = -(1 << 20)
+.dsub_dosub:
+        rsbs    tmp, sh, #32
+        blo     .dsub_bigshift
+.dsub_smallshift:
+        adds    al, al, bl, LSR sh      @ add the (negated, aligned) b
+        adc     ah, ah, bh, ASR sh
+        adds    al, al, bh, LSL tmp
+        adcs    ah, ah, #0
+        bmi     .dsub_borrow
+.dsub_noborrow:
+        add     ah, ah, ex_add, LSL #20 @ put exponent back on
+        movs    bl, bl, LSL tmp         @ round/sticky bits
+.dsub_dorounding:
+	bpl	.dsub_exit
+        adds    al, al, #1              @ Z flag set if carry to high word
+        cmpne   bl, #0x80000000         @ check we don't have to round to even
+	bne	.dsub_exit
+        cmp     al, #0
+        addeq   ah, ah, #1
+        bicne   al, al, #1              @ round to even
+	b	.dsub_exit
+.dsub_bigshift:
+        cmp     bl, #1                  @ C = (bl != 0): fold low word
+        adc     bl, bh, bh              @   into a sticky bit
+        sub     sh, sh, #32
+        rsbs    tmp, sh, #31
+        blo     .dsub_hugeshift
+        adds    al, al, bh, ASR sh
+        adcs    ah, ah, #-1
+        bpl     .dsub_noborrow
+.dsub_borrow:
+        add     tmp, tmp, #1
+        movs    tmp, bl, LSL tmp
+        adcs    al, al, al              @ shift al,ah left including guard bit
+        adc     ah, ah, ah
+        add     sh, ah, ex_add, LSL #21     @ ah = 0xFFE00000 + fraction. Adding
+        movs    sh, sh, LSR #21         @ C-bit is clear if bit 20 of ah
+        bls     .dsub_renormalize        @   clear, so 2 bits or more
+        add     ah, ah, ex_add, LSL #20
+        adds    al, al, tmp, LSR #31    @ C and Z flag are set if carry over
+        cmpcc   tmp, #0x80000000        @ check that we don't have to round
+	bne	.dsub_exit
+        cmp     al, #0
+        addeq   ah, ah, #1
+        bicne   al, al, #1
+	b	.dsub_exit
+@ Cancellation lost more than one leading bit: shift left until the
+@ implicit bit reappears, tracking the exponent; underflow to denormal
+@ is handed to .__dunder.
+.dsub_renormalize:
+        bcs     .dsub_ex_one
+        adds    ah, ah, #1 << 21
+        cmpeq   al, #0                  @ did everything cancel to zero?
+        beq     .dsub_retzero            @ go and deal with it, if so
+        mov     sh, ex_add, LSR #11     @ save the sign bit
+        bic     ex_add, ex_add, #1 << 11
+        sub     ex_add, ex_add, #2        @ for leading bit
+.dsub_renormloop:                 @ TODO: add loop for 8 bit per cycle renorm
+        adds    al, al, al
+        adc     ah, ah, ah
+        sub     ex_add, ex_add, #1
+        tst     ah, #1 << 20
+        beq     .dsub_renormloop
+        add     ah, ah, sh, LSL #31 
+        add     ah, ah, ex_add, LSL #20
+        cmp     ex_add, #0
+	bgt	.dsub_exit
+        add     ah, ah, #3 << 29        @ bias exponent
+        @ Rounding direction indicator is zero (denormal results are exact)
+	mov	ip, #0
+        b       .__dunder
+.dsub_hugeshift:
+.dsub_return:
+        add     ah, ah, ex_add, LSL #20
+.dsub_return1:
+.dsub_exit:
+	PUSH	al, ah
+	DISPATCH	1
+.dsub_ex_one:    @ underflow when ex = 1 - shift back to denorm
+        movs    ah, ah, ASR #1
+        mov     al, al, RRX
+        add     ah, ah, ex_add, LSL #20
+        b       .dsub_denorm
+.dsub_uncommon:
+        orrs    tmp, bl, bh, LSL #12    @ is b zero or denorm?
+        beq     .dsub_bzero
+        movs    tmp, ex_add, LSL #21    @ is a denormal too?
+        bic     ah, ah, ex_add, LSL #20
+        beq     .dsub_both_denorm
+        bic     bh, bh, #1 << 31
+        sub     sh, sh, #1              @ fake exp_b = 1
+        rsbs    bl, bl,#0               @ negate b's mantissa
+        rsc     bh, bh,#0
+        b       .dsub_dosub    
+.dsub_both_denorm:
+        subs    al, al, bl              @ both denormal: plain 64-bit subtract
+        sbc     ah, ah, bh
+        b       .dsub_denorm
+.dsub_bzero:
+        orrs    tmp, al, ah, LSL #1
+        bne     .dsub_denorm             @ return a, but it might be denormal
+.dsub_retzero:
+        mov     ah, #0                  @ clear sign bit (al is already 0)
+	b	.dsub_exit
+.dsub_denorm:
+.daddsub_denorm:
+        movs    bl, ah, LSL #1          @ discard sign bit
+        tsteq   al, al                  @ do we have a zero?
+        beq     .dsub_retzero            @ yes: go and ensure the right sign
+	b	.dsub_exit
+.drsb_naninf:
+        @ Handle NaNs and infinities in reverse subtraction. We
+        @ just swap the operands and go to dsub_naninf.
+        eor     ah, ah, bh              @ three-XOR swap of a and b
+        eor     al, al, bl
+        eor     bh, bh, ah
+        eor     bl, bl, al
+        eor     ah, ah, bh
+        eor     al, al, bl
+.dsub_naninf:
+        cmp     al, #1                  @ sets C if al!=0
+        adc     ex_add, ah, ah
+        cmp     bl, #1
+        adc     sh, bh, bh
+        cmp     ex_add, tmp                 @ HI if a is NaN
+        cmpls   sh, tmp                 @ HI if either is NaN
+        bhi     .return_double_NaN
+        cmp     ex_add, sh
+        beq     .dsub_twoinf
+        cmp     ex_add, tmp                 @ EQ if a is Inf
+        eorne   ah, bh, #0x80000000     @ a finite: result is -b (the Inf)
+        movne   al, bl
+	b	.dsub_exit
+.dsub_twoinf:
+        teq     ah, bh                  @ opposite-signed Infs: result is a
+	bmi	.dsub_exit
+
+@ Build the canonical quiet NaN (0x7FF80000:00000000 with a's sign).
+.return_double_NaN:
+	and	a3, ah, #0x80000000
+	mov	al, #0
+	mov	ah, #0x7f000000
+	orr	ah, ah, #0x00f80000
+	orr	ah,ah,a3
+	b	.dsub_exit
+
+@ === underflow handler ================================================
+
+@ ip carries a rounding-direction indicator in bits INX_pos..31
+@ (-1 = previously rounded down, +1 = rounded up, 0 = exact) plus a
+@ saved sign flag in bit 16; it is consumed by the re-rounding logic.
+#define	INX_pos	30
+#define INX_bit (1<<30)
+
+#define exp	r2
+#define temp	r3
+
+@ Denormalise a result whose (biased-up) exponent indicates underflow:
+@ shift the mantissa right by the underflow amount, collecting the
+@ shifted-out bits as round/sticky in exp, then re-round to nearest-even
+@ taking the earlier rounding direction into account.  Exits through
+@ .dsub_exit (push result, dispatch).
+.__dunder:
+        tst     ah, ah
+        orrmi   ip, ip, #1<<16          @ remember the sign in ip bit 16
+        mov     temp, #0x600
+        mov     exp, ah, LSR #20      @ find the exponent
+        add     temp, temp, #1          @ temp = 0x601: underflow threshold
+        bic     ah, ah, exp, LSL #20 @ remove exponent from mantissa
+        bic     exp, exp, #0x800        @ lose the sign bit
+        sub     exp, temp, exp          @ exp = right-shift amount needed
+        orr     ah, ah, #1<<20      @ put on mantissa leading bit
+        cmp     exp, #53
+        bhi     .dunder_stickyonly      @ everything shifts out: sticky only
+        beq     .dunder_roundbit        @ exactly the round-bit position
+        cmp     exp, #21
+        blo     .dunder_hiword          @ shift < 21: stays split over 2 words
+        subs    exp, exp, #32
+        bls     .dunder_hiloword        @ 21..32: result fits in low word
+.dunder_loloword:                       @ 33..52: only part of ah survives
+        rsb     temp, exp, #32
+        cmp     al, #0
+        mov     al, ah, LSR exp
+        mov     exp, ah, LSL temp
+        orrne   exp, exp, #1            @ al bits become sticky
+        mov     ah, #0
+        b       .dunder_round
+.dunder_hiloword:
+        rsb     temp, exp, #0
+        add     exp, exp, #32
+        mov     ah, ah, LSL temp
+        orr     ah, ah, al, LSR exp
+        mov     exp, al, LSL temp       @ shifted-out bits -> round word
+        mov     al, ah
+        mov     ah, #0
+        b       .dunder_round
+.dunder_hiword:
+        rsb     temp, exp, #32
+        mov     tmp, al, LSL temp       @ shifted-out bits -> round word
+        mov     temp, ah, LSL temp
+        orr     al, temp, al, LSR exp
+        mov     ah, ah, LSR exp
+        mov     exp, tmp
+        b       .dunder_round
+.dunder_roundbit:
+        orrs    exp, al, ah, LSL #12    @ any bits below the round bit?
+        mov     al, #0
+        mov     ah, #0
+        mov     exp, #0x80000000        @ round bit set ...
+        addne   exp, exp, #1            @ ... plus sticky if any bits below
+        b       .dunder_round
+.dunder_stickyonly:
+        mov     exp, #1                 @ nonzero value entirely shifted out
+        mov     ah, #0
+        mov     al, #0
+.dunder_round:
+        tst     ip, #1<<16              @ restore the sign saved above
+        bic     ip, ip, #1<<16
+        orrne   ah, ah, #0x80000000
+        tst     exp, exp
+	beq	.dsub_exit              @ exact: done
+        movs    exp, exp, LSL #1        @ round bit in C, sticky in ~Z
+        bcc     .dunder_rerounded        @ if no round bit, we're done
+        beq     .dunder_roundeven        @ RTE is tricky due to rerounding
+.dunder_roundup:
+        adds    al, al, #1          @ round up
+        adc     ah, ah, #0
+.dunder_rerounded:
+	b	.dsub_exit
+.dunder_roundeven:
+        movs    exp, ip, ASR #INX_pos   @ get -1, 0, +1 from direction bits
+        bmi     .dunder_roundup          @ if -1, round up unconditionally
+        bne     .dunder_rerounded        @ if +1, round down unconditionally
+        adds    al, al, #1          @ round up ...
+        adc     ah, ah, #0
+        bic     al, al, #1          @ ... and then to even
+        b       .dunder_rerounded
+
+@ === MULTIPLY Double ===================================================
+
+@ Extra register aliases for the multiply (r4/r5 are saved on the ARM
+@ stack around their use):
+@   ex_m  = combined exponent (high half) and sign (low bit) of result
+@   uh:ul = scratch / partial product
+@   th:tl = partial product
+@   rs    = rounding-direction indicator handed to .__dunder
+#define ex_m		r14
+#define uh		r12
+#define ul		r4
+#define rs		r4
+#define th		r5
+#define tl		r11
+
+@ --- do_dmul_itos -------------------------------------------------
+@ Software IEEE-754 double multiply: 53x53-bit mantissa product via
+@ four umulls, then normalise, round to nearest-even, and check for
+@ overflow/underflow.  Inf/NaN go to .dmul_naninf; zeros and denormals
+@ are pre-normalised at .dmul_zerodenorm.
+	Opcode	dmul
+	POP	al, ah, bl, bh
+	stmdb	arm_sp!, {r4, r5}
+	mov	tmp, #0x7f00000
+	orr	tmp, tmp, #0x00f0000            @ tmp = 0x07FF0000
+        bics    ex_m, tmp, ah, LSR #4     @ test for Infs or NaNs
+        bicnes  ex_m, tmp, bh, LSR #4
+        beq     .dmul_naninf
+        and     ex_m, tmp, ah, LSR #4     @ get exponent of a
+        eor     th, ah, bh              @ compute sign of result
+        orr     ex_m, ex_m, th, LSR #31     @   and save it at bottom of ex
+        ands    th, tmp, bh, LSR #4     @ get exponent of b, and test
+        tstne   ah, tmp, LSL #4         @   for zeros/denorms ...
+        beq     .dmul_zerodenorm         @   go and renormalise if we got any
+.dmul_normalised:
+        add     ex_m, ex_m, th              @ calculate exponent of result
+        sub     ex_m, ex_m, #0x3FC0000      @ rebias exponent mostly
+        bic     ah, ah, tmp, LSL #5     @ clear sign and all but lo bit of exp
+        bic     bh, bh, tmp, LSL #5
+        orr     ah, ah, #1<<20          @ set leading bit on mantissa
+        orr     bh, bh, #1<<20          @ set leading bit on mantissa
+.dmul_mantmul:
+        umull   ul, uh, ah, bl          @ cross products
+        umull   tl, th, al, bh
+        adds    tl, tl, ul
+        adcs    th, th, uh              @ carry from here is used below
+        umull   ul, uh, ah, bh          @ uh:ul is top part
+        adc     bh, uh, #0              @ get carry from above addition
+        umull   ah, uh, al, bl          @ uh:ah is bottom part
+        adds    tl, tl, uh
+        adcs    th, th, ul
+        adcs    bh, bh, #0
+        tst     ah, ah
+        orrne   tl, tl, #1              @ don't lose sticky bit
+        mov     bl, #-4
+        mov     uh, #32-12              @ uh will be corresponding right shift
+        cmp     bh, #0x200              @ C set if it's 11 bits
+        adc     uh, uh, #0
+        adc     bl, bl, ex_m, ASR #16     @ increment exponent correspondingly
+        rsb     ul, uh, #32             @ ul is left shift
+        mov     ah, bh, LSL ul
+        mov     al, th, LSL ul
+        orr     ah, ah, th, LSR uh
+        orr     al, al, tl, LSR uh
+        add     ah, ah, bl, LSL #20 @ put exponent back on (may wrap into sign)
+        eor     ah, ah, ex_m, LSL #31 @ put sign back on (EOR, so a wrapped
+                                      @ exponent shows up in the sign test below)
+        movs    rs, tl, LSL ul          @ compute the round word
+        beq     .dmul_exact
+        movs    uh, rs, LSL #1          @ rs is already in place
+        movcc   rs, #-INX_bit           @ direction indicator: rounded down
+        bcc     .dmul_rounded
+        orreq   bh, bh, #1<<31          @ save the round-to-even bit
+        adcs    al, al, #0          @ round up if necessary
+        adc     ah, ah, #0
+        mov     rs, #INX_bit            @ direction indicator: rounded up
+        tst     al, bh, LSR #31       @ does RTE do anything?
+        bic     al, al, bh, LSR #31 @ perform RTE
+        movne   rs, #-INX_bit           @ if RTE had effect, we rounded down
+.dmul_exact:
+.dmul_rounded:
+        teq     ah, ex_m, LSL #31       @ do the signs agree?
+	mov	uh, #0x70000000         @ even if so, need to test exp 0/7FF
+	orr	uh, uh, #0x0ff00000
+        bmi     .dmul_outflow            @ if not, UFL or OFL
+        tst     ah, uh
+        bicnes  uh, uh, ah            @ is exp 0 or 7FF?
+        beq     .dmul_outflow
+.dmul_exit:
+	ldmia	arm_sp!, {r4, r5}
+	PUSH	al, ah
+	DISPATCH	1
+
+@ Directed-rounding helper.
+@ NOTE(review): no branch to .dmul_rdirect is visible in this chunk;
+@ it may be reached from code outside this view, or be dead — confirm.
+.dmul_rdirect:
+        movs    ul, ex_m, LSL #31         @ put sign bit in N
+        tstpl   uh, #2 << 22        @ if +ve: EQ iff round up. Keeps PL
+        tstmi   uh, #1 << 22         @ if -ve: EQ iff round up.
+        moveq   rs, #INX_bit            @ rounded up
+        movne   rs, #-INX_bit           @ rounded down
+        addeqs  al, al, #1          @ may set C
+        adc     ah, ah, #0
+        b       .dmul_rounded
+
+@ Result exponent left the normal range: bias it back toward the
+@ representable range and dispatch to the underflow handler or to
+@ the Inf builder.  r4/r5 must be restored before leaving dmul's frame.
+.dmul_outflow:
+        cmp     ex_m, #0x400<<16          @ Which ballpark are we in?
+        addle   ah, ah, #0x60000000 @ Bias up if underflow
+        subge   ah, ah, #0x60000000 @ Bias down if overflow
+	mov	ip, rs                  @ pass rounding direction to __dunder
+	ldmia	arm_sp!, {r4, r5}
+        ble     .__dunder                @ underflow
+	b	.return_double_Inf
+
+@ At least one operand is zero or denormal: return a signed zero if
+@ either operand is zero, otherwise renormalise the denormal operand(s)
+@ and resume the common multiply path.
+.dmul_zerodenorm:
+        orrs    ul, al, ah, LSL #1      @ is a zero?
+        orrnes  ul, bl, bh, LSL #1      @ or is b zero?
+        beq     .dmul_zero               @ Return zero if so
+
+        tst     th, th                  @ is b denormal?
+        beq     .dmul_renorm_op2
+.dmul_done_op2:
+        bics    ul, ex_m, #1              @ is a denormal? (careful of sign bit)
+        beq     .dmul_renorm_op1
+        b       .dmul_normalised
+.dmul_zero:
+        mov     al, #0                  @ clear low word
+        mov     ah, ex_m, LSL #31         @ get sign of result and clear hi word
+	b	.dmul_exit
+
+@ Renormalise denormal operand a: binary-search the leading set bit
+@ (20/16/8/4/2/1 steps), shift the 64-bit mantissa up, and debit the
+@ shift from the pseudo exponent held in the top half of ex_m.
+@ tmp (clobbered by the search) is rebuilt to 0x07FF0000 before
+@ rejoining .dmul_normalised.
+.dmul_renorm_op1:
+        add     ex_m, ex_m, #1<<16          @ correct the exponent
+        bic     ah, ah, #0x80000000       @ this will get in our way
+        orrs    tl, ah, al, LSR #12    @ is highest set bit in low twelve?
+        moveq   al, al, LSL #20         @ if so, move it up
+        subeq   ex_m, ex_m, #20<<16       @ and adjust exponent
+        tst     ah, ah                  @ is highest set bit in low word?
+        moveq   ah, al, LSR #12         @ if so, move up by 20
+        moveq   al, al, LSL #20
+        subeq   ex_m, ex_m, #20<<16       @ and adjust exponent
+        mov     ul, #0                 @ shift of top word
+        movs    tl, ah, LSR #(21-16)   @ is highest set bit within 16 of top?
+        moveq   ah, ah, LSL #16         @ if not, move up
+        addeq   ul, ul, #16           @ and adjust exponent
+        movs    tl, ah, LSR #(21-8)   @ is highest set bit within 8 of top?
+        moveq   ah, ah, LSL #8         @ if not, move up
+        addeq   ul, ul, #8           @ and adjust exponent
+        movs    tl, ah, LSR #(21-4)   @ is highest set bit within 4 of top?
+        moveq   ah, ah, LSL #4         @ if not, move up
+        addeq   ul, ul, #4           @ and adjust exponent
+        movs    tl, ah, LSR #(21-2)   @ is highest set bit within 2 of top?
+        moveq   ah, ah, LSL #2         @ if not, move up
+        addeq   ul, ul, #2           @ and adjust exponent
+        movs    tl, ah, LSR #(21-1)   @ is highest set bit within 1 of top?
+        moveq   ah, ah, LSL #1         @ if not, move up
+        addeq   ul, ul, #1           @ and adjust exponent
+
+        sub     ex_m, ex_m, ul, LSL #16  @ calculate final pseudo exponent
+        mov     tl, al, LSL ul        @ shift low word up by same amount
+        rsb     ul, ul, #32           @ compute reverse shift for al
+        orr     ah, ah, al, LSR ul     @ put in high end of low word
+        mov     al, tl
+
+	mov	tmp, #0x7f00000         @ rebuild the 0x07FF0000 mask
+	orr	tmp, tmp, #0x00f0000
+
+        b       .dmul_normalised
+
+@ Renormalise denormal operand b; mirror image of .dmul_renorm_op1 with
+@ the exponent adjustment applied to th instead of ex_m.
+.dmul_renorm_op2:
+        add     th, th, #1<<16          @ correct the exponent
+        bic     bh, bh, #0x80000000       @ this will get in our way
+        orrs    tl, bh, bl, LSR #12    @ is highest set bit in low twelve?
+        moveq   bl, bl, LSL #20         @ if so, move it up
+        subeq   th, th, #20<<16       @ and adjust exponent
+        tst     bh, bh                  @ is highest set bit in low word?
+        moveq   bh, bl, LSR #12         @ if so, move up by 20
+        moveq   bl, bl, LSL #20
+        subeq   th, th, #20<<16       @ and adjust exponent
+        mov     ul, #0                 @ shift of top word
+        movs    tl, bh, LSR #(21-16)   @ is highest set bit within 16 of top?
+        moveq   bh, bh, LSL #16         @ if not, move up
+        addeq   ul, ul, #16           @ and adjust exponent
+        movs    tl, bh, LSR #(21-8)   @ is highest set bit within 8 of top?
+        moveq   bh, bh, LSL #8         @ if not, move up
+        addeq   ul, ul, #8           @ and adjust exponent
+        movs    tl, bh, LSR #(21-4)   @ is highest set bit within 4 of top?
+        moveq   bh, bh, LSL #4         @ if not, move up
+        addeq   ul, ul, #4           @ and adjust exponent
+        movs    tl, bh, LSR #(21-2)   @ is highest set bit within 2 of top?
+        moveq   bh, bh, LSL #2         @ if not, move up
+        addeq   ul, ul, #2           @ and adjust exponent
+        movs    tl, bh, LSR #(21-1)   @ is highest set bit within 1 of top?
+        moveq   bh, bh, LSL #1         @ if not, move up
+        addeq   ul, ul, #1           @ and adjust exponent
+
+        sub     th, th, ul, LSL #16  @ calculate final pseudo exponent
+        mov     tl, bl, LSL ul        @ shift low word up by same amount
+        rsb     ul, ul, #32           @ compute reverse shift for bl
+        orr     bh, bh, bl, LSR ul     @ put in high end of low word
+        mov     bl, tl
+
+	mov	tmp, #0x7f00000         @ rebuild the 0x07FF0000 mask
+	orr	tmp, tmp, #0x00f0000
+
+        b       .dmul_done_op2
+
+@ Inf/NaN path for dmul: NaN operand -> NaN; Inf * 0 -> NaN (invalid);
+@ otherwise return an Inf with the product's sign.
+.dmul_naninf:
+        mov     uh, ah, LSL #1          @ discard sign bit on a
+        mov     ul, bh, LSL #1          @ and on b
+        cmp     uh, tmp, LSL #5         @ HI if ah shows a to be NaN
+        cmpeq   al, #0                  @ now HI if a is NaN
+        cmpls   ul, tmp, LSL #5         @ another chance to set HI ...
+        cmpeq   bl, #0                  @  ... if b is NaN
+        bhi     .dmul_ivo
+        orrs    ul, al, ah, LSL #1      @ is a zero?
+        orrnes  ul, bl, bh, LSL #1      @ or is b zero?
+        beq     .dmul_ivo               @ Inf * 0: invalid
+        eor     ah, ah, bh              @ sign of result
+        mov     al, #0
+        and     ah, ah, #0x80000000
+        orr     ah, ah, tmp, LSL #4     @ build Inf: exponent all ones
+	b	.dmul_exit
+
+.dmul_ivo:
+	ldmia	arm_sp!, {r4, r5}       @ restore before leaving dmul's frame
+	b	.return_double_NaN
+
+@ End of the SW FP section: retire the register aliases.
+#undef al
+#undef ah
+#undef bl
+#undef bh
+#undef tmp
+#undef sh
+#undef ex_add
+
+#undef	INX_pos
+#undef INX_bit
+
+#undef exp
+#undef temp
+
+#undef ex_m
+#undef uh
+#undef ul
+#undef rs
+#undef th
+#undef tl
+
+@ --- ignore_safepoints ---------------------------------------------------------------------------
+@ CppInterpreter::ignore_safepoints(): if the notice_safepoints flag is
+@ set, clear it and restore the live dispatch table from
+@ main_dispatch_table (merging in vfp_table's SW entries on CPUs without
+@ VFP), then re-patch the CLZ-optimised idiv/irem handlers if available.
+@ No-op when the flag is already clear or NOTICE_SAFEPOINTS is not built.
+	.global	_ZN14CppInterpreter17ignore_safepointsEv
+	.type	_ZN14CppInterpreter17ignore_safepointsEv, %function
+_ZN14CppInterpreter17ignore_safepointsEv:
+#ifdef NOTICE_SAFEPOINTS
+	adrl	ip, dispatch_init_adcon
+	ldm	ip, {r2, r3}
+	add	r2, r2, ip              @ GOT-relative: ip = dispatch table
+	add	ip, r3, r2
+	ldr	r2, [ip, #AbstractInterpreter_notice_safepoints-XXX]
+	ldrb	r1, [r2, #0]	@ zero_extendqisi2
+	cmp	r1, #0
+	bxeq	lr                      @ flag already clear: nothing to do
+	mov	r3, #0
+	strb	r3, [r2, #0]            @ clear the notice_safepoints flag
+	adrl	r3, main_dispatch_table
+#ifdef HW_FP
+	ldr	r0, [ip, #CPUInfo-XXX]
+	tst	r0, #ARCH_VFP
+	beq	2f                      @ no VFP: merge vfp_table entries
+#endif
+	mov	r2, #256                @ plain copy of all 256 entries
+1:
+	ldr	r1, [r3], #4
+	str	r1, [ip], #4
+	subs	r2, r2, #1
+	bne	1b
+	sub	ip, ip, #4 * 256
+	b	4f
+
+@ No HW FP - must update the table from a combination of
+@ main_dispatch_table and vfp_table. Previously this updated from
+@ main_dispatch_table first, and then overwrote the updated entries with
+@ those from vfp_table. However, this creates
+@ a window where the jump table has vfp entries, so in a multithreaded world we
+@ can get undefined VFP instructions.
+@ The code below updates from both tables simultaneously. Note: this relies on
+@ the entries in vfp_table being in opcode order.
+#ifdef HW_FP
+2:
+	stmdb	arm_sp!, {r4, lr}
+	mov	r2, #0                  @ r2 = current opcode index
+	adrl	r0, vfp_table
+	ldr	r4, [r0], #4            @ r4 = next opcode vfp_table overrides
+3:
+	ldr	r1, [r3], #4
+	cmp	r2, r4
+	ldreq	r1, [r0], #4            @ override: take vfp_table's handler
+	ldreq	r4, [r0], #4            @   and fetch its next opcode
+	str	r1, [ip], #4
+	add	r2, r2, #1
+	cmp	r2, #256
+	bcc	3b
+	sub	ip, ip, #4 * 256
+	ldmia	arm_sp!, {r4, lr}
+#endif // HW_FP
+
+4:
+	ldr	r0, [ip, #CPUInfo-XXX]
+	tst	r0, #ARCH_CLZ
+	beq	5f
+
+	adrl	r0, do_idiv_clz         @ CPU has CLZ: use the fast idiv/irem
+	str	r0, [ip, #opc_idiv * 4]
+	adrl	r0, do_irem_clz
+	str	r0, [ip, #opc_irem * 4]
+
+5:
+#endif // NOTICE_SAFEPOINTS
+
+	bx	lr
+
+@ --- notice_safepoints ---------------------------------------------------------------------------
+@ CppInterpreter::notice_safepoints(): if the flag is clear, set it and
+@ swap the live dispatch table for safe_dispatch_table so every bytecode
+@ goes through a safepoint-checking handler.  No-op when the flag is
+@ already set or NOTICE_SAFEPOINTS is not built.
+	.global	_ZN14CppInterpreter17notice_safepointsEv
+	.type	_ZN14CppInterpreter17notice_safepointsEv, %function
+_ZN14CppInterpreter17notice_safepointsEv:
+#ifdef NOTICE_SAFEPOINTS
+	adrl	ip, dispatch_init_adcon
+	ldm	ip, {r2, r3}
+	add	r2, r2, ip              @ GOT-relative: ip = dispatch table
+	add	ip, r3, r2
+	ldr	r2, [ip, #AbstractInterpreter_notice_safepoints-XXX]
+	ldrb	r1, [r2, #0]	@ zero_extendqisi2
+	cmp	r1, #0
+	bxne	lr                      @ flag already set: nothing to do
+	mov	r3, #1
+	strb	r3, [r2, #0]            @ set the notice_safepoints flag
+	adrl	r3, safe_dispatch_table
+	mov	r2, #256                @ copy all 256 entries
+1:
+	ldr	r1, [r3], #4
+	str	r1, [ip], #4
+	subs	r2, r2, #1
+	bne	1b
+#endif
+	bx	lr
+
+@ --- END execute.s ----------------------------------------------------------------------------
+
+	ALIGN_CODE
+@ One-time interpreter initialisation:
+@   1. resolve the GOT-based adcon_init_table entries into the address
+@      constant pool just below the dispatch table,
+@   2. copy main_dispatch_table into the live dispatch table,
+@   3. probe CPU features (hwcap) and cache them at CPUInfo,
+@   4. (USE_COMPILER) pick compile threshold / background compilation
+@      based on the processor count,
+@   5. patch in SW FP handlers when VFP is absent and CLZ-based
+@      idiv/irem when CLZ is present.
+bci_init:
+        stmfd   sp!, {r4, lr}
+
+	adrl	r3, dispatch_init_adcon
+	ldm	r3, {r0, r1}
+	add	r0, r0, r3              @ r0 = GOT base
+        add     r4, r1, r0              @ r4 = live dispatch table
+	adrl	r2, adcon_init_table
+        mov     r1, r4
+1:
+	ldr	ip, [r2], #4            @ next GOT offset (0 terminates)
+	cmp	ip, #0
+	ldrne	ip, [r0, ip]            @ resolve through the GOT
+	strne	ip, [r1, #-4]!          @ store below the dispatch table
+	bne	1b
+	adrl	r2, main_dispatch_table
+	mov	r1, #256                @ copy all 256 dispatch entries
+        mov     r3, r4
+2:
+	ldr	ip, [r2], #4
+	str	ip, [r3], #4
+	subs	r1, r1, #1
+	bne	2b
+
+	bl	hwcap                   @ query CPU features
+	str	r0, [r4, #CPUInfo-XXX]
+
+#ifdef USE_COMPILER
+
+#define NPROCESSORS_CONF        83
+
+        mov     r0, #NPROCESSORS_CONF   @ sysconf(_SC_NPROCESSORS_CONF)
+        bl      sysconf
+        cmp     r0, #2                  @ CS = multiprocessor
+
+#ifdef DISABLE_BG_COMP_ON_NON_MP
+        movcc   r0, #0                  @ uniprocessor: turn off
+        ldrcc   r1, [r4, #BackgroundCompilation_Address-XXX]
+        strccb  r0, [r1]                @   background compilation
+#endif
+
+        movcs   r0, #MP_COMPILE_THRESHOLD
+        movcc   r0, #UP_COMPILE_THRESHOLD
+        ldr     r1, [r4, #CompileThreshold_Address-XXX]
+        str     r0, [r1]
+
+#endif // USE_COMPILER
+
+#ifdef T2JIT
+	bl	Thumb2_Initialize
+#endif
+
+#ifdef HW_FP
+	ldr	r0, [r4, #CPUInfo-XXX]
+	tst	r0, #ARCH_VFP
+	bne	4f                      @ VFP present: keep HW FP entries
+
+@ No HW FP - replace the HW FP entries with SW entries
+update_vfp_table:
+	adr	r0, vfp_table
+	adrl	ip, dispatch_init_adcon
+	ldm	ip, {r2, r3}
+	add	r2, r2, ip
+	add	ip, r3, r2
+.update_vfp_loop:
+	ldr	r1, [r0], #4            @ opcode (0 terminates the table)
+	cmp	r1, #0
+	ldrne	r2, [r0], #4            @ replacement handler
+	strne	r2, [ip, r1, lsl #2]
+	bne	.update_vfp_loop
+4:
+#endif // HW_FP
+
+	ldr	r0, [r4, #CPUInfo-XXX]
+	tst	r0, #ARCH_CLZ
+	beq	5f
+
+	adrl	r0, do_idiv_clz         @ CPU has CLZ: use the fast idiv/irem
+	str	r0, [r4, #opc_idiv * 4]
+	adrl	r0, do_irem_clz
+	str	r0, [r4, #opc_irem * 4]
+
+5:
+	ldmia	sp!, {r4, pc}
+
+#ifdef HW_FP
+@ (opcode, handler) pairs, terminated by a single 0 word.  Used to
+@ substitute handlers for the FP bytecodes when VFP is absent; consumers
+@ (ignore_safepoints, update_vfp_table) rely on the entries being in
+@ ascending opcode order.
+vfp_table:
+	.word	opc_fadd,	do_fadd
+	.word	opc_dadd,	do_dadd
+	.word	opc_fsub,	do_fsub
+	.word	opc_dsub,	do_dsub
+	.word	opc_fmul,	do_fmul
+	.word	opc_dmul,	do_dmul
+	.word	opc_fdiv,	do_fdiv
+	.word	opc_ddiv,	do_ddiv
+	.word	opc_fcmpl,	do_fcmpl
+	.word	opc_fcmpg,	do_fcmpg
+	.word	opc_dcmpl,	do_dcmpl
+	.word	opc_dcmpg,	do_dcmpg
+	.word	0
+#endif // HW_FP
+
+@ Load the address of the live dispatch table into the `dispatch`
+@ register (PIC: GOT base + table offset).  Clobbers r0, r1, ip.
+load_dispatch:
+	adrl	ip, dispatch_init_adcon
+	ldm	ip, {r0, r1}
+	add	r0, r0, ip
+	add	dispatch, r1, r0
+	mov	pc, lr
+
+	ALIGN_DATA
+@ (GOT offset of the GOT itself, GOT offset of the dispatch table) —
+@ the pair every routine above uses to locate the table position-
+@ independently.
+dispatch_init_adcon:
+	.word	_GLOBAL_OFFSET_TABLE_-dispatch_init_adcon, opclabels_data(GOTOFF)
+@ GOT offsets of VM globals, resolved by bci_init into the address
+@ constant pool stored (in reverse order) just below the dispatch table;
+@ terminated by 0.
+@ NOTE(review): the order here must match the `...-XXX` offsets used to
+@ read the pool (e.g. AbstractInterpreter_notice_safepoints, CPUInfo) —
+@ confirm against the definitions of those offsets before reordering.
+adcon_init_table:
+	.word	_ZN18InterpreterRuntime22slow_signature_handlerEP10JavaThreadP13methodOopDescPiS4_(GOT)
+	.word	_ZN20SafepointSynchronize6_stateE(GOT)
+	.word	_ZN9vmSymbols8_symbolsE(GOT)
+	.word	always_do_update_barrier(GOT)
+	.word	_ZN8Universe14_collectedHeapE(GOT)
+	.word	_ZN9Bytecodes5_nameE(GOT)
+	.word	_ZN19AbstractInterpreter18_notice_safepointsE(GOT)
+	.word	_ZN18ThreadLocalStorage13_thread_indexE(GOT)
+	.word	_ZN7oopDesc3_bsE(GOT)
+	.word	PrintCommandLineFlags(GOT)
+	.word	_ZN11JvmtiExport28_can_post_interpreter_eventsE(GOT)
+	.word	UseCompiler(GOT)
+invocationlimit_adcon:
+	.word	_ZN17InvocationCounter26InterpreterInvocationLimitE(GOT)
+        .word   CompileThreshold(GOT)
+        .word   BackgroundCompilation(GOT)
+        .word   UseOnStackReplacement(GOT)
+	.word	0
+
+	ALIGN_DATA
+@ The normal (non-safepoint) 256-entry bytecode dispatch table,
+@ expanded from the MAIN_DISPATCH_TABLE macro.
+main_dispatch_table:
+	MAIN_DISPATCH_TABLE
+
+#ifdef NOTICE_SAFEPOINTS
+safe_dispatch_table:
+
+/*  WARNING: If you change any of these bytecodes, you must also
+    change the table in bytecodes_arm.def to make it match.  */
+
+	.word	do_nop	@ 0 0x00
+	.word	do_u4const_0	@ 1 0x01
+	.word	do_iconst_N	@ 2 0x02
+	.word	do_iconst_N	@ 3 0x03
+	.word	do_iconst_N	@ 4 0x04
+	.word	do_iconst_N	@ 5 0x05
+	.word	do_iconst_N	@ 6 0x06
+	.word	do_iconst_N	@ 7 0x07
+	.word	do_iconst_N	@ 8 0x08
+	.word	do_u8const_0	@ 9 0x09
+	.word	do_lconst_1	@ 10 0x0a
+	.word	do_u4const_0	@ 11 0x0b
+	.word	do_fconst_1	@ 12 0x0c
+	.word	do_fconst_2	@ 13 0x0d
+	.word	do_u8const_0	@ 14 0x0e
+	.word	do_dconst_1	@ 15 0x0f
+	.word	do_bipush	@ 16 0x10
+	.word	do_sipush	@ 17 0x11
+	.word	do_ldc	@ 18 0x12
+	.word	do_ldc_w	@ 19 0x13
+	.word	do_ldc2_w	@ 20 0x14
+	.word	do_u4load	@ 21 0x15
+	.word	do_u8load	@ 22 0x16
+	.word	do_u4load	@ 23 0x17
+	.word	do_u8load	@ 24 0x18
+	.word	do_u4load	@ 25 0x19
+	.word	do_iload_0	@ 26 0x1a
+	.word	do_iload_0	@ 27 0x1b
+	.word	do_iload_0	@ 28 0x1c
+	.word	do_iload_0	@ 29 0x1d
+	.word	do_u8load_0	@ 30 0x1e
+	.word	do_u8load_1	@ 31 0x1f
+	.word	do_u8load_2	@ 32 0x20
+	.word	do_u8load_3	@ 33 0x21
+	.word	do_fload_0	@ 34 0x22
+	.word	do_fload_0	@ 35 0x23
+	.word	do_fload_0	@ 36 0x24
+	.word	do_fload_0	@ 37 0x25
+	.word	do_u8load_0	@ 38 0x26
+	.word	do_u8load_1	@ 39 0x27
+	.word	do_u8load_2	@ 40 0x28
+	.word	do_u8load_3	@ 41 0x29
+	.word	do_aload_0	@ 42 0x2a
+	.word	do_aload_0	@ 43 0x2b
+	.word	do_aload_0	@ 44 0x2c
+	.word	do_aload_0	@ 45 0x2d
+	.word	do_u4aload	@ 46 0x2e
+	.word	do_u8aload	@ 47 0x2f
+	.word	do_u4aload	@ 48 0x30
+	.word	do_u8aload	@ 49 0x31
+	.word	do_u4aload	@ 50 0x32
+	.word	do_baload	@ 51 0x33
+	.word	do_caload	@ 52 0x34
+	.word	do_saload	@ 53 0x35
+	.word	do_u4store	@ 54 0x36
+	.word	do_u8store	@ 55 0x37
+	.word	do_u4store	@ 56 0x38
+	.word	do_u8store	@ 57 0x39
+	.word	do_u4store	@ 58 0x3a
+	.word	do_u4store_0	@ 59 0x3b
+	.word	do_u4store_1	@ 60 0x3c
+	.word	do_u4store_2	@ 61 0x3d
+	.word	do_u4store_3	@ 62 0x3e
+	.word	do_u8store_0	@ 63 0x3f
+	.word	do_u8store_1	@ 64 0x40
+	.word	do_u8store_2	@ 65 0x41
+	.word	do_u8store_3	@ 66 0x42
+	.word	do_u4store_0	@ 67 0x43
+	.word	do_u4store_1	@ 68 0x44
+	.word	do_u4store_2	@ 69 0x45
+	.word	do_u4store_3	@ 70 0x46
+	.word	do_u8store_0	@ 71 0x47
+	.word	do_u8store_1	@ 72 0x48
+	.word	do_u8store_2	@ 73 0x49
+	.word	do_u8store_3	@ 74 0x4a
+	.word	do_u4store_0	@ 75 0x4b
+	.word	do_u4store_1	@ 76 0x4c
+	.word	do_u4store_2	@ 77 0x4d
+	.word	do_u4store_3	@ 78 0x4e
+	.word	do_u4astore	@ 79 0x4f
+	.word	do_u8astore	@ 80 0x50
+	.word	do_u4astore	@ 81 0x51
+	.word	do_u8astore	@ 82 0x52
+	.word	do_aastore	@ 83 0x53
+	.word	do_bastore	@ 84 0x54
+	.word	do_u2astore	@ 85 0x55
+	.word	do_u2astore	@ 86 0x56
+	.word	do_jpop	@ 87 0x57
+	.word	do_jpop2	@ 88 0x58
+	.word	do_dup	@ 89 0x59
+	.word	do_dup_x1	@ 90 0x5a
+	.word	do_dup_x2	@ 91 0x5b
+	.word	do_dup2	@ 92 0x5c
+	.word	do_dup2_x1	@ 93 0x5d
+	.word	do_dup2_x2	@ 94 0x5e
+	.word	do_swap	@ 95 0x5f
+	.word	do_iadd	@ 96 0x60
+	.word	do_ladd	@ 97 0x61
+	.word	do_fadd	@ 98 0x62
+	.word	do_dadd	@ 99 0x63
+	.word	do_isub	@ 100 0x64
+	.word	do_lsub	@ 101 0x65
+	.word	do_fsub	@ 102 0x66
+	.word	do_dsub	@ 103 0x67
+	.word	do_imul	@ 104 0x68
+	.word	do_lmul	@ 105 0x69
+	.word	do_fmul	@ 106 0x6a
+	.word	do_dmul	@ 107 0x6b
+	.word	do_idiv	@ 108 0x6c
+	.word	do_ldiv	@ 109 0x6d
+	.word	do_fdiv	@ 110 0x6e
+	.word	do_ddiv	@ 111 0x6f
+	.word	do_irem	@ 112 0x70
+	.word	do_lrem	@ 113 0x71
+	.word	do_frem	@ 114 0x72
+	.word	do_drem	@ 115 0x73
+	.word	do_ineg	@ 116 0x74
+	.word	do_lneg	@ 117 0x75
+	.word	do_fneg	@ 118 0x76
+	.word	do_dneg	@ 119 0x77
+	.word	do_ishl	@ 120 0x78
+	.word	do_lshl	@ 121 0x79
+	.word	do_ishr	@ 122 0x7a
+	.word	do_lshr	@ 123 0x7b
+	.word	do_iushr	@ 124 0x7c
+	.word	do_lushr	@ 125 0x7d
+	.word	do_iand	@ 126 0x7e
+	.word	do_land	@ 127 0x7f
+	.word	do_ior	@ 128 0x80
+	.word	do_lor	@ 129 0x81
+	.word	do_ixor	@ 130 0x82
+	.word	do_lxor	@ 131 0x83
+	.word	do_iinc	@ 132 0x84
+	.word	do_i2l	@ 133 0x85
+	.word	do_i2f	@ 134 0x86
+	.word	do_i2d	@ 135 0x87
+	.word	do_l2i	@ 136 0x88
+	.word	do_l2f	@ 137 0x89
+	.word	do_l2d	@ 138 0x8a
+	.word	do_f2i	@ 139 0x8b
+	.word	do_f2l	@ 140 0x8c
+	.word	do_f2d	@ 141 0x8d
+	.word	do_d2i	@ 142 0x8e
+	.word	do_d2l	@ 143 0x8f
+	.word	do_d2f	@ 144 0x90
+	.word	do_i2b	@ 145 0x91
+	.word	do_i2c	@ 146 0x92
+	.word	do_i2s	@ 147 0x93
+	.word	do_lcmp	@ 148 0x94
+	.word	do_fcmpl	@ 149 0x95
+	.word	do_fcmpg	@ 150 0x96
+	.word	do_dcmpl	@ 151 0x97
+	.word	do_dcmpg	@ 152 0x98
+	.word	do_ifeq	@ 153 0x99
+	.word	do_ifne	@ 154 0x9a
+	.word	do_iflt	@ 155 0x9b
+	.word	do_ifge	@ 156 0x9c
+	.word	do_ifgt	@ 157 0x9d
+	.word	do_ifle	@ 158 0x9e
+	.word	do_if_icmpeq	@ 159 0x9f
+	.word	do_if_icmpne	@ 160 0xa0
+	.word	do_if_icmplt	@ 161 0xa1
+	.word	do_if_icmpge	@ 162 0xa2
+	.word	do_if_icmpgt	@ 163 0xa3
+	.word	do_if_icmple	@ 164 0xa4
+	.word	do_if_icmpeq	@ 165 0xa5
+	.word	do_if_icmpne	@ 166 0xa6
+	.word	do_goto	@ 167 0xa7
+	.word	do_jsr	@ 168 0xa8
+	.word	do_ret	@ 169 0xa9
+	.word	do_tableswitch	@ 170 0xaa
+	.word	do_lookupswitch	@ 171 0xab
+	.word	do_ireturn	@ 172 0xac
+	.word	do_lreturn	@ 173 0xad
+	.word	do_ireturn	@ 174 0xae
+	.word	do_lreturn	@ 175 0xaf
+	.word	do_ireturn	@ 176 0xb0
+	.word	do_return	@ 177 0xb1
+	.word	do_getstatic	@ 178 0xb2
+	.word	do_putstatic	@ 179 0xb3
+	.word	do_getfield	@ 180 0xb4
+	.word	do_putfield	@ 181 0xb5
+	.word	do_invokevirtual	@ 182 0xb6
+	.word	do_invokespecial	@ 183 0xb7
+	.word	do_invokestatic	@ 184 0xb8
+	.word	do_invokeinterface	@ 185 0xb9
+	.word	do_invokedynamic	@ 186 0xba
+	.word	do_new	@ 187 0xbb
+	.word	do_newarray	@ 188 0xbc
+	.word	do_anewarray	@ 189 0xbd
+	.word	do_arraylength	@ 190 0xbe
+	.word	do_athrow	@ 191 0xbf
+	.word	do_checkcast	@ 192 0xc0
+	.word	do_instanceof	@ 193 0xc1
+	.word	do_monitorenter	@ 194 0xc2
+	.word	do_monitorexit	@ 195 0xc3
+	.word	do_wide	@ 196 0xc4
+	.word	do_multianewarray	@ 197 0xc5
+	.word	do_ifeq	@ 198 0xc6
+	.word	do_ifne	@ 199 0xc7
+	.word	do_goto_w	@ 200 0xc8
+	.word	do_jsr_w	@ 201 0xc9
+	.word	do_breakpoint	@ 202 0xca
+	.word	do_undefined	@ 203 0xcb
+	.word	do_bgetfield	@ 204 0xcc
+	.word	do_cgetfield	@ 205 0xcd
+	.word	do_undefined	@ 206 0xce
+	.word	do_undefined	@ 207 0xcf
+	.word	do_igetfield	@ 208 0xd0
+	.word	do_lgetfield	@ 209 0xd1
+	.word	do_sgetfield	@ 210 0xd2
+	.word	do_aputfield	@ 211 0xd3
+	.word	do_bputfield	@ 212 0xd4
+	.word	do_cputfield	@ 213 0xd5
+	.word	do_undefined	@ 214 0xd6
+	.word	do_undefined	@ 215 0xd7
+	.word	do_iputfield	@ 216 0xd8
+	.word	do_lputfield	@ 217 0xd9
+	.word	do_undefined	@ 218 0xda
+	.word	do_iaccess_0	@ 219 0xdb
+	.word	do_iaccess_0	@ 220 0xdc
+	.word	do_iaccess_0	@ 221 0xdd
+	.word	do_iaccess_0	@ 222 0xde
+	.word	do_invokeresolved	@ 223 0xdf
+	.word	do_invokespecialresolved	@ 224 0xe0
+	.word	do_invokestaticresolved	@ 225 0xe1
+	.word	do_invokevfinal	@ 226 0xe2
+	.word	do_fast_iload_iload	@ 227 0xe3
+	.word	do_fast_iload_iload_N	@ 228 0xe4
+	.word	do_fast_aldc		@ 229 0xe5
+	.word	do_fast_aldc_w	@ 230 0xe6
+	.word	do_return_register_finalizer	@ 231 0xe7
+	.word	do_undefined	@ 232 0xe8
+	.word	do_iload_0_iconst_N	@ 233 0xe9
+	.word	do_iload_0_iconst_N	@ 234 0xea
+	.word	do_iload_0_iconst_N	@ 235 0xeb
+	.word	do_iload_0_iconst_N	@ 236 0xec
+	.word	do_iload_iconst_N	@ 237 0xed
+	.word	do_iadd_istore_N	@ 238 0xee
+	.word	do_isub_istore_N	@ 239 0xef
+	.word	do_iand_istore_N	@ 240 0xf0
+	.word	do_ior_istore_N	@ 241 0xf1
+	.word	do_ixor_istore_N	@ 242 0xf2
+	.word	do_iadd_u4store	@ 243 0xf3
+	.word	do_isub_u4store	@ 244 0xf4
+	.word	do_iand_u4store	@ 245 0xf5
+	.word	do_ior_u4store	@ 246 0xf6
+	.word	do_ixor_u4store	@ 247 0xf7
+	.word	do_fast_iload_N_iload	@ 248 0xf8
+	.word	do_fast_iload_N_iload	@ 249 0xf9
+	.word	do_fast_iload_N_iload	@ 250 0xfa
+	.word	do_fast_iload_N_iload	@ 251 0xfb
+	.word	do_fast_iload_N_iload_N	@ 252 0xfc
+	.word	do_fast_iload_N_iload_N	@ 253 0xfd
+	.word	do_fast_iload_N_iload_N	@ 254 0xfe
+	.word	do_fast_iload_N_iload_N	@ 255 0xff
+#endif
+
+	SUB_DISPATCH_TABLES
+
+	.arch	armv7-a
+
+	ALIGN_CODE
+	.global	Thumb2_stubs
+	.type Thumb2_stubs, %function
+Thumb2_stubs:
+	.global	Thumb2_idiv_stub
+	.type Thumb2_idiv_stub, %function
+Thumb2_idiv_stub:
+int_div:
+	cmp     r1, #0x21
+	adr	r3, 1f
+	eor     r12, r0, r1
+	ldrcc	pc, [r3, r1, lsl #2]
+	rsblt   r1, r1, #0
+	subs    r2, r1, #1
+	beq     2f
+	movs    r3, r0
+	rsbmi   r3, r0, #0
+	cmp     r3, r1
+	bls     3f
+	tst     r1, r2
+	beq     4f
+	clz     r2, r3
+	clz     r0, r1
+	sub     r2, r0, r2
+	rsbs    r2, r2, #31
+	add     r2, r2, r2, lsl #1
+	mov     r0, #0
+	add     pc, pc, r2, lsl #2
+	mov	r0, #0
+	cmp     r3, r1, lsl #31
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #31
+	cmp     r3, r1, lsl #30
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #30
+	cmp     r3, r1, lsl #29
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #29
+	cmp     r3, r1, lsl #28
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #28
+	cmp     r3, r1, lsl #27
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #27
+	cmp     r3, r1, lsl #26
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #26
+	cmp     r3, r1, lsl #25
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #25
+	cmp     r3, r1, lsl #24
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #24
+	cmp     r3, r1, lsl #23
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #23
+	cmp     r3, r1, lsl #22
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #22
+	cmp     r3, r1, lsl #21
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #21
+	cmp     r3, r1, lsl #20
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #20
+	cmp     r3, r1, lsl #19
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #19
+	cmp     r3, r1, lsl #18
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #18
+	cmp     r3, r1, lsl #17
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #17
+	cmp     r3, r1, lsl #16
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #16
+	cmp     r3, r1, lsl #15
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #15
+	cmp     r3, r1, lsl #14
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #14
+	cmp     r3, r1, lsl #13
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #13
+	cmp     r3, r1, lsl #12
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #12
+	cmp     r3, r1, lsl #11
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #11
+	cmp     r3, r1, lsl #10
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #10
+	cmp     r3, r1, lsl #9
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #9
+	cmp     r3, r1, lsl #8
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #8
+	cmp     r3, r1, lsl #7
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #7
+	cmp     r3, r1, lsl #6
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #6
+	cmp     r3, r1, lsl #5
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #5
+	cmp     r3, r1, lsl #4
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #4
+	cmp     r3, r1, lsl #3
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #3
+	cmp     r3, r1, lsl #2
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #2
+	cmp     r3, r1, lsl #1
+	adc     r0, r0, r0
+	subcs   r3, r3, r1, lsl #1
+	cmp     r3, r1
+	adc     r0, r0, r0
+	subcs   r3, r3, r1
+	cmp     r12, #0
+	rsbmi   r0, r0, #0
+	bx      lr
+2:
+	teq     r12, r0
+	rsbmi   r0, r0, #0
+	bx      lr
+3:
+	movcc   r0, #0
+	asreq   r0, r12, #31
+	orreq   r0, r0, #1
+	bx      lr
+4:
+	clz     r2, r1
+	rsb     r2, r2, #31
+	cmp     r12, #0
+	lsr     r0, r3, r2
+	rsbmi   r0, r0, #0
+	bx      lr
+1:
+	.word	Thumb2_DivZero_Handler
+	.word	jdiv_1
+	.word	jdiv_2
+	.word	jdiv_3
+	.word	jdiv_4
+	.word	jdiv_5
+	.word	jdiv_6
+	.word	jdiv_7
+	.word	jdiv_8
+	.word	jdiv_9
+	.word	jdiv_10
+	.word	jdiv_11
+	.word	jdiv_12
+	.word	jdiv_13
+	.word	jdiv_14
+	.word	jdiv_15
+	.word	jdiv_16
+	.word	jdiv_17
+	.word	jdiv_18
+	.word	jdiv_19
+	.word	jdiv_20
+	.word	jdiv_21
+	.word	jdiv_22
+	.word	jdiv_23
+	.word	jdiv_24
+	.word	jdiv_25
+	.word	jdiv_26
+	.word	jdiv_27
+	.word	jdiv_28
+	.word	jdiv_29
+	.word	jdiv_30
+	.word	jdiv_31
+	.word	jdiv_32
+	ALIGN_CODE
+	.global	Thumb2_irem_stub
+	.type Thumb2_irem_stub, %function
+Thumb2_irem_stub:
+int_rem:
+	cmp     r1, #0x21
+	adr	r3, 1f
+	ldrcc	pc, [r3, r1, lsl #2]
+	rsblt   r1, r1, #0
+	subs    r2, r1, #1
+	beq     2f
+	movs    r12, r0
+	rsbmi   r0, r0, #0
+	cmp     r0, r1
+	bls     3f
+	tst     r1, r2
+	beq     4f
+	clz     r2, r0
+	clz     r3, r1
+	sub     r2, r3, r2
+	rsbs    r2, r2, #31
+	add     pc, pc, r2, lsl #3
+	mov	r3, #0
+	cmp     r0, r1, lsl #31
+	subcs   r0, r0, r1, lsl #31
+	cmp     r0, r1, lsl #30
+	subcs   r0, r0, r1, lsl #30
+	cmp     r0, r1, lsl #29
+	subcs   r0, r0, r1, lsl #29
+	cmp     r0, r1, lsl #28
+	subcs   r0, r0, r1, lsl #28
+	cmp     r0, r1, lsl #27
+	subcs   r0, r0, r1, lsl #27
+	cmp     r0, r1, lsl #26
+	subcs   r0, r0, r1, lsl #26
+	cmp     r0, r1, lsl #25
+	subcs   r0, r0, r1, lsl #25
+	cmp     r0, r1, lsl #24
+	subcs   r0, r0, r1, lsl #24
+	cmp     r0, r1, lsl #23
+	subcs   r0, r0, r1, lsl #23
+	cmp     r0, r1, lsl #22
+	subcs   r0, r0, r1, lsl #22
+	cmp     r0, r1, lsl #21
+	subcs   r0, r0, r1, lsl #21
+	cmp     r0, r1, lsl #20
+	subcs   r0, r0, r1, lsl #20
+	cmp     r0, r1, lsl #19
+	subcs   r0, r0, r1, lsl #19
+	cmp     r0, r1, lsl #18
+	subcs   r0, r0, r1, lsl #18
+	cmp     r0, r1, lsl #17
+	subcs   r0, r0, r1, lsl #17
+	cmp     r0, r1, lsl #16
+	subcs   r0, r0, r1, lsl #16
+	cmp     r0, r1, lsl #15
+	subcs   r0, r0, r1, lsl #15
+	cmp     r0, r1, lsl #14
+	subcs   r0, r0, r1, lsl #14
+	cmp     r0, r1, lsl #13
+	subcs   r0, r0, r1, lsl #13
+	cmp     r0, r1, lsl #12
+	subcs   r0, r0, r1, lsl #12
+	cmp     r0, r1, lsl #11
+	subcs   r0, r0, r1, lsl #11
+	cmp     r0, r1, lsl #10
+	subcs   r0, r0, r1, lsl #10
+	cmp     r0, r1, lsl #9
+	subcs   r0, r0, r1, lsl #9
+	cmp     r0, r1, lsl #8
+	subcs   r0, r0, r1, lsl #8
+	cmp     r0, r1, lsl #7
+	subcs   r0, r0, r1, lsl #7
+	cmp     r0, r1, lsl #6
+	subcs   r0, r0, r1, lsl #6
+	cmp     r0, r1, lsl #5
+	subcs   r0, r0, r1, lsl #5
+	cmp     r0, r1, lsl #4
+	subcs   r0, r0, r1, lsl #4
+	cmp     r0, r1, lsl #3
+	subcs   r0, r0, r1, lsl #3
+	cmp     r0, r1, lsl #2
+	subcs   r0, r0, r1, lsl #2
+	cmp     r0, r1, lsl #1
+	subcs   r0, r0, r1, lsl #1
+	cmp     r0, r1
+	subcs   r0, r0, r1
+	cmp     r12, #0
+	rsbmi   r0, r0, #0
+	bx      lr
+2:
+	mov	r0, #0
+	bx      lr
+3:
+	moveq	r0, #0
+	cmp	r12, #0
+	rsbmi	r0, r0, #0
+	bx	lr
+4:
+	and	r0, r0, r2
+	cmp	r12, #0
+	rsbmi	r0, r0, #0
+	bx      lr
+1:
+	.word	Thumb2_DivZero_Handler
+	.word	jrem_1
+	.word	jrem_2
+	.word	jrem_3
+	.word	jrem_4
+	.word	jrem_5
+	.word	jrem_6
+	.word	jrem_7
+	.word	jrem_8
+	.word	jrem_9
+	.word	jrem_10
+	.word	jrem_11
+	.word	jrem_12
+	.word	jrem_13
+	.word	jrem_14
+	.word	jrem_15
+	.word	jrem_16
+	.word	jrem_17
+	.word	jrem_18
+	.word	jrem_19
+	.word	jrem_20
+	.word	jrem_21
+	.word	jrem_22
+	.word	jrem_23
+	.word	jrem_24
+	.word	jrem_25
+	.word	jrem_26
+	.word	jrem_27
+	.word	jrem_28
+	.word	jrem_29
+	.word	jrem_30
+	.word	jrem_31
+	.word	jrem_32
+
+#ifdef T2JIT
+
+	.macro	LOAD_FRAME
+	ldr	Rframe, [thread, #THREAD_TOP_ZERO_FRAME]
+	.endm
+
+@ R0 = BCI
+@ R1 = index
+
+	.global	Thumb2_invokeinterface_stub
+	.type Thumb2_invokeinterface_stub, %function
+Thumb2_invokeinterface_stub:
+	LOAD_FRAME
+	stmdb	sp!, {ip, lr}
+	ldr	ip, [Rframe, #FRAME_METHOD]
+	sub	stack, stack, #4
+	ldr	r2, [Rframe, #FRAME_CONSTANTS]
+	ldr	ip, [ip, #METHOD_CONSTMETHOD]
+	str	stack, [Rframe, #FRAME_STACK]
+	add	jpc, ip, r0
+        add     r0, r2, r1, lsl #4
+	str	jpc, [Rframe, #FRAME_BCP]
+
+        ldr     r2, [r0, #CP_OFFSET]
+        and     r2, r2, #0x00ff0000
+        cmp     r2, #opc_invokeinterface << 16
+        bne     istub_resolve
+2:
+	ldr	r3, [r0, #CP_OFFSET+12]
+	and	r2, r3, #255
+	ldr	r2, [stack, r2, lsl #2]
+	cmp	r2, #0
+	beq	istub_null_ptr_exception
+	ldr	tmp1, [r2, #4]				@ rcvr->klass()
+	tst	r3, #flag_methodInterface
+	bne	istub_methodInterface
+
+	ldr	lr, [r0, #CP_OFFSET+4]			@ lr = iclass
+
+	add	r1, tmp1, #INSTANCEKLASS_VTABLE_OFFSET
+	ldr	r2, [tmp1, #KLASS_PART+INSTANCEKLASS_VTABLE_LEN]
+	ldr	ip, [tmp1, #KLASS_PART+INSTANCEKLASS_ITABLE_LEN]
+	add	r2, r2, #1
+	bic	r2, r2, #1
+
+	add	r1, r1, r2, lsl #2
+
+	mov	r2, #0
+1:
+	cmp	r2, ip
+	beq	istub_incompatibleclass_exception
+	ldr	r3, [r1], #8
+	add	r2, r2, #1
+	cmp	lr, r3
+	bne	1b
+
+	ldr	r3, [r0, #CP_OFFSET+8]
+	ldr	r2, [r1, #-4]
+	add	r3, tmp1, r3, lsl #2
+	ldr	tmp1, [r3, r2]
+	cmp	tmp1, #0
+	beq	istub_abstractmethod_exception
+istub_invoke:
+	ldr	ip, [tmp1, #METHOD_FROM_INTERPRETED]
+	mov	r1, #0
+	str	r1, [thread, #THREAD_LAST_JAVA_FP]
+
+	add	stack, stack, #4
+	str	stack, [thread, #THREAD_JAVA_SP]
+
+	ldr	r3, [ip]
+
+	mov	r0, tmp1
+#ifdef SHARK
+	mov	r2, thread
+#else
+	add	r3, r3, #FAST_ENTRY_OFFSET
+#endif
+	blx	r3
+
+	LOAD_FRAME
+
+	ldr	stack, [thread, #THREAD_JAVA_SP]
+	ldr	r2, [Rframe, #FRAME_STACK_LIMIT]
+
+	ldr	r1, [thread, #THREAD_TOP_ZERO_FRAME]
+	add	r2, r2, #4
+	mov	r0, #0
+	str	r0, [thread, #THREAD_LAST_JAVA_SP]
+	str	r2, [thread, #THREAD_JAVA_SP]
+	str	r1, [thread, #THREAD_LAST_JAVA_FP]
+	str	r2, [thread, #THREAD_LAST_JAVA_SP]
+	ldr	r3, [thread, #4]
+	cmp	r3, #0
+	bne	istub_exception
+	ldmia	sp!, {ip, pc}
+
+istub_methodInterface:
+	tst	r3, #flag_vfinalMethod
+	ldrne	tmp1, [r0, #CP_OFFSET+8]
+	bne	istub_invoke
+	ldr	r1, [r0, #CP_OFFSET+8]
+	add	r3, tmp1, r1, lsl #2
+	ldr	tmp1, [r3, #INSTANCEKLASS_VTABLE_OFFSET]
+	b	istub_invoke
+
+istub_resolve:
+	mov	tmp1, r1
+	mov	r1, #opc_invokeinterface
+	mov	r0, thread
+	ldr	ip, resolve_invoke_adcon
+	blx	ip
+	ldr	r3, [thread, #4]
+	ldr	r2, [Rframe, #FRAME_CONSTANTS]
+	cmp	r3, #0
+	bne	istub_exception
+	add	r0, r2, tmp1, lsl #4	@ r0 = cache entry
+	b	2b
+
+istub_exception:
+	ldmia	sp!, {ip, lr}
+	ldr	ip, handle_exception_adcon
+	LOAD_ISTATE
+	bx	ip
+
+istub_null_ptr_exception:
+	mov	r0, #VMSYMBOLS_NullPointerException
+	b	3f
+istub_abstractmethod_exception:
+	mov	r0, #VMSYMBOLS_AbstractMethodError
+	b	3f
+istub_incompatibleclass_exception:
+	mov	r0, #VMSYMBOLS_IncompatibleClassChangeError
+3:
+	ldr	jpc, [Rframe, #FRAME_BCP]
+	ldmia	sp!, {ip, lr}
+	ldr	ip, raise_exception_adcon
+	LOAD_ISTATE
+	bx	ip
+
+resolve_invoke_adcon:
+	.word	_ZN18InterpreterRuntime14resolve_invokeEP10JavaThreadN9Bytecodes4CodeE
+resolve_get_put_adcon:
+       	.word   _ZN18InterpreterRuntime15resolve_get_putEP10JavaThreadN9Bytecodes4CodeE
+handle_exception_adcon:
+	.word	handle_exception_with_bcp
+raise_exception_adcon:
+	.word	raise_exception
+helper_aputfield_adcon:
+	.word	Helper_aputfield
+lr_to_bci_adcon:
+	.word	Thumb2_lr_to_bci
+
+@ R0 = BCI
+@ R1 = index
+	.global	Thumb2_invokevirtual_stub
+	.type Thumb2_invokevirtual_stub, %function
+Thumb2_invokevirtual_stub:
+	LOAD_FRAME
+	stmdb	sp!, {ip, lr}
+        ldr     ip, [Rframe, #FRAME_METHOD]
+        sub     stack, stack, #4
+        ldr     r2, [Rframe, #FRAME_CONSTANTS]
+        ldr     ip, [ip, #METHOD_CONSTMETHOD]
+	str	stack, [Rframe, #FRAME_STACK]
+        add     jpc, ip, r0
+        add     r0, r2, r1, lsl #4
+	str	jpc, [Rframe, #FRAME_BCP]
+
+        ldr     r2, [r0, #CP_OFFSET]
+        and     r2, r2, #0xff000000
+        cmp     r2, #opc_invokevirtual << 24
+        bne     ivstub_resolve
+2:
+
+	ldr	r3, [r0, #CP_OFFSET+12]
+        and     r2, r3, #255
+        ldr     r2, [stack, r2, asl #2]
+        cmp     r2, #0
+        beq     istub_null_ptr_exception
+
+        ldr     tmp1, [r0, #CP_OFFSET+8]
+        tst     r3, #flag_vfinalMethod
+        bne     1f
+
+        ldr     r3, [r2, #4]
+        add     r3, r3, tmp1, lsl #2
+        ldr     tmp1, [r3, #INSTANCEKLASS_VTABLE_OFFSET]
+1:
+	mov	r1, #0
+        ldr     ip, [tmp1, #METHOD_FROM_INTERPRETED]
+        str     r1, [thread, #THREAD_LAST_JAVA_SP]
+        str     r1, [thread, #THREAD_LAST_JAVA_FP]
+
+        add     stack, stack, #4
+        str     stack, [thread, #THREAD_JAVA_SP]
+
+        ldr     r3, [ip, #0]
+
+	mov	r0, tmp1
+#ifdef SHARK
+	mov	r2, thread
+#else
+	add	r3, r3, #FAST_ENTRY_OFFSET
+#endif
+	blx	r3
+	LOAD_FRAME
+
+	ldr	stack, [thread, #THREAD_JAVA_SP]
+	ldr	r2, [Rframe, #FRAME_STACK_LIMIT]
+
+	mov	r0, #0
+	str	r0, [thread, #THREAD_LAST_JAVA_SP]
+	ldr	r1, [thread, #THREAD_TOP_ZERO_FRAME]
+	add	r2, r2, #4
+	str	r2, [thread, #THREAD_JAVA_SP]
+	str	r1, [thread, #THREAD_LAST_JAVA_FP]
+	str	r2, [thread, #THREAD_LAST_JAVA_SP]
+	ldr	r3, [thread, #4]
+	cmp	r3, #0
+	bne	istub_exception
+	ldmia	sp!, {ip, pc}
+
+ivstub_resolve:
+	mov	tmp1, r1
+	mov	r1, #opc_invokevirtual
+	mov	r0, thread
+	ldr	ip, resolve_invoke_adcon
+	blx	ip
+	ldr	r3, [thread, #4]
+	ldr	r2, [Rframe, #FRAME_CONSTANTS]
+	cmp	r3, #0
+	bne	istub_exception
+	add	r0, r2, tmp1, lsl #4	@ r0 = cache entry
+	b	2b
+
+@ R0 = BCI
+@ R1 = index
+	.global	Thumb2_invokevfinalresolved_stub
+Thumb2_invokevfinalresolved_stub:
+	LOAD_FRAME
+        stmdb   sp!, {ip, lr}
+        ldr     ip, [Rframe, #FRAME_METHOD]
+        sub     stack, stack, #4
+        ldr     r2, [Rframe, #FRAME_CONSTANTS]
+        ldr     ip, [ip, #METHOD_CONSTMETHOD]
+        DECACHE_STACK_USING_FRAME
+        add     jpc, ip, r0
+
+        add     r0, r2, r1, lsl #4
+        DECACHE_JPC_USING_FRAME
+        ldr     r3, [r0, #CP_OFFSET+12]
+        and     r2, r3, #255
+        ldr     r2, [stack, r2, asl #2]
+        cmp     r2, #0
+        beq     istub_null_ptr_exception
+
+        ldr     tmp1, [r0, #CP_OFFSET+8]
+        mov     r1, #0
+        ldr     ip, [tmp1, #METHOD_FROM_INTERPRETED]
+        str     r1, [thread, #THREAD_LAST_JAVA_SP]
+
+        add     stack, stack, #4
+        str     stack, [thread, #THREAD_JAVA_SP]
+
+        ldr     r3, [ip, #0]
+
+        mov     r0, tmp1
+#ifdef SHARK
+	mov	r2, thread
+#else
+        add     r3, r3, #FAST_ENTRY_OFFSET
+#endif
+        blx     r3
+	LOAD_FRAME
+
+        ldr     stack, [thread, #THREAD_JAVA_SP]
+        ldr     r2, [Rframe, #FRAME_STACK_LIMIT]
+
+        add     r2, r2, #4
+        str     r2, [thread, #THREAD_JAVA_SP]
+        str     Rframe, [thread, #THREAD_LAST_JAVA_SP]
+        ldr     r3, [thread, #4]
+        cmp     r3, #0
+        bne     istub_exception
+        ldmia   sp!, {ip, pc}
+
+@ R0 = BCI
+@ R1 = index
+	.global	Thumb2_invokevirtualresolved_stub
+Thumb2_invokevirtualresolved_stub:
+	LOAD_FRAME
+        stmdb   sp!, {ip, lr}
+        ldr     ip, [Rframe, #FRAME_METHOD]
+        sub     stack, stack, #4
+        ldr     r2, [Rframe, #FRAME_CONSTANTS]
+        ldr     ip, [ip, #METHOD_CONSTMETHOD]
+        DECACHE_STACK_USING_FRAME
+        add     jpc, ip, r0
+
+        add     r0, r2, r1, lsl #4
+        DECACHE_JPC_USING_FRAME
+
+        ldr     r3, [r0, #CP_OFFSET+12]
+        and     r2, r3, #255
+        ldr     r2, [stack, r2, asl #2]
+        cmp     r2, #0
+        beq     istub_null_ptr_exception
+
+        ldr     tmp1, [r0, #CP_OFFSET+8]
+        ldr     r3, [r2, #4]
+        add     r3, r3, tmp1, lsl #2
+        ldr     tmp1, [r3, #INSTANCEKLASS_VTABLE_OFFSET]
+        mov     r1, #0
+        ldr     ip, [tmp1, #METHOD_FROM_INTERPRETED]
+        str     r1, [thread, #THREAD_LAST_JAVA_SP]
+
+        add     stack, stack, #4
+        str     stack, [thread, #THREAD_JAVA_SP]
+
+        ldr     r3, [ip, #0]
+
+        mov     r0, tmp1
+#ifdef SHARK
+	mov	r2, thread
+#else
+        add     r3, r3, #FAST_ENTRY_OFFSET
+#endif
+        blx     r3
+	LOAD_FRAME
+
+        ldr     stack, [thread, #THREAD_JAVA_SP]
+        ldr     r2, [Rframe, #FRAME_STACK_LIMIT]
+
+        add     r2, r2, #4
+        str     r2, [thread, #THREAD_JAVA_SP]
+        str     Rframe, [thread, #THREAD_LAST_JAVA_SP]
+        ldr     r3, [thread, #4]
+        cmp     r3, #0
+        bne     istub_exception
+        ldmia   sp!, {ip, pc}
+
+@ R0 = BCI
+@ R1 = index
+	.global	Thumb2_invokestatic_stub
+	.type Thumb2_invokestatic_stub, %function
+Thumb2_invokestatic_stub:
+	LOAD_FRAME
+        stmdb   sp!, {ip, lr}
+        ldr     ip, [Rframe, #FRAME_METHOD]
+        sub     stack, stack, #4
+        ldr     r2, [Rframe, #FRAME_CONSTANTS]
+        ldr     ip, [ip, #METHOD_CONSTMETHOD]
+        DECACHE_STACK_USING_FRAME
+        add     jpc, ip, r0
+
+        add     r0, r2, r1, lsl #4
+        DECACHE_JPC_USING_FRAME
+
+        ldr     r2, [r0, #CP_OFFSET]
+	and	r2, r2, #0x00ff0000
+	cmp	r2, #opc_invokestatic << 16
+	bne	isstub_resolve
+2:
+	ldr	tmp1, [r0, #CP_OFFSET+4]
+	mov	r1, #0
+	ldr	ip, [tmp1, #METHOD_FROM_INTERPRETED]
+	str	r1, [thread, #THREAD_LAST_JAVA_SP]
+
+	add	stack, stack, #4
+	str	stack, [thread, #THREAD_JAVA_SP]
+
+	ldr	r3, [ip, #0]
+
+        mov     r0, tmp1
+#ifdef SHARK
+	mov	r2, thread
+#else
+        add     r3, r3, #FAST_ENTRY_OFFSET
+#endif
+        blx     r3
+	LOAD_FRAME
+
+        ldr     stack, [thread, #THREAD_JAVA_SP]
+        ldr     r2, [Rframe, #FRAME_STACK_LIMIT]
+
+        ldr     r1, [thread, #THREAD_TOP_ZERO_FRAME]
+        add     r2, r2, #4
+	mov	r3, #0
+	str	r3, [thread, #THREAD_LAST_JAVA_SP]	
+        str     r1, [thread, #THREAD_LAST_JAVA_FP]
+        str     r2, [thread, #THREAD_JAVA_SP]
+        str     Rframe, [thread, #THREAD_LAST_JAVA_SP] // FIXME: Don't understand this
+        ldr     r3, [thread, #4]
+        cmp     r3, #0
+        bne     istub_exception
+	mov	r0, #0
+        ldmia   sp!, {ip, pc}
+
+isstub_resolve:
+        mov     tmp1, r1
+        mov     r1, #opc_invokestatic
+        mov	r0, thread
+        ldr     ip, resolve_invoke_adcon
+        blx     ip
+        ldr     r3, [thread, #4]
+        ldr     r2, [Rframe, #FRAME_CONSTANTS]
+        cmp     r3, #0
+        bne     istub_exception
+        add     r0, r2, tmp1, lsl #4    @ r0 = cache entry
+        b       2b
+
+@ R0 = BCI
+@ R1 = index
+	.global	Thumb2_invokestaticresolved_stub
+Thumb2_invokestaticresolved_stub:
+	LOAD_FRAME
+	stmdb	sp!, {ip, lr}
+        ldr     ip, [Rframe, #FRAME_METHOD]
+        sub     stack, stack, #4
+        ldr     r2, [Rframe, #FRAME_CONSTANTS]
+        ldr     ip, [ip, #METHOD_CONSTMETHOD]
+        DECACHE_STACK_USING_FRAME
+        add     jpc, ip, r0
+        add     r0, r2, r1, lsl #4
+        DECACHE_JPC_USING_FRAME
+        ldr     tmp1, [r0, #CP_OFFSET+4]
+        mov     r1, #0
+        ldr     ip, [tmp1, #METHOD_FROM_INTERPRETED]
+        str     r1, [thread, #THREAD_LAST_JAVA_SP]
+
+        add     stack, stack, #4
+        str     stack, [thread, #THREAD_JAVA_SP]
+        ldr     r3, [ip, #0]
+
+        mov     r0, tmp1
+#ifdef SHARK
+	mov	r2, thread
+#else
+        add     r3, r3, #FAST_ENTRY_OFFSET
+#endif
+        blx     r3
+	LOAD_FRAME
+
+        ldr     stack, [thread, #THREAD_JAVA_SP]
+        ldr     r2, [Rframe, #FRAME_STACK_LIMIT]
+
+        ldr     r1, [thread, #THREAD_TOP_ZERO_FRAME]
+        add     r2, r2, #4
+	mov	r3, #0
+	str	r3, [thread, #THREAD_LAST_JAVA_SP]	
+        str     r1, [thread, #THREAD_LAST_JAVA_FP]
+        str     r2, [thread, #THREAD_JAVA_SP]
+        str     Rframe, [thread, #THREAD_LAST_JAVA_SP] // FIXME: Don't understand this
+        ldr     r3, [thread, #4]
+        cmp     r3, #0
+        bne     istub_exception
+	mov	r0, #0
+        ldmia   sp!, {ip, pc}
+
+@ R0 = BCI
+@ R1 = index
+	.global	Thumb2_invokespecial_stub
+	.type Thumb2_invokespecial_stub, %function
+Thumb2_invokespecial_stub:
+	LOAD_FRAME
+        stmdb   sp!, {ip, lr}
+        ldr     ip, [Rframe, #FRAME_METHOD]
+        sub     stack, stack, #4
+        ldr     r2, [Rframe, #FRAME_CONSTANTS]
+        ldr     ip, [ip, #METHOD_CONSTMETHOD]
+        DECACHE_STACK_USING_FRAME
+        add     jpc, ip, r0
+
+        add     r0, r2, r1, lsl #4
+        DECACHE_JPC_USING_FRAME
+ 
+        ldr     r2, [r0, #CP_OFFSET]
+ 	and	r2, r2, #0x00ff0000
+	cmp	r2, #opc_invokespecial << 16
+	bne	ispstub_resolve
+2:
+        ldr     r3, [r0, #CP_OFFSET+12]
+        and     r3, r3, #255
+        ldr     r2, [stack, r3, asl #2]
+	cmp	r2, #0
+	beq	istub_null_ptr_exception
+
+	ldr	tmp1, [r0, #CP_OFFSET+4]
+	mov	r1, #0
+	ldr	ip, [tmp1, #METHOD_FROM_INTERPRETED]
+	str	r1, [thread, #THREAD_LAST_JAVA_SP]
+
+	add	stack, stack, #4
+	str	stack, [thread, #THREAD_JAVA_SP]
+
+	ldr	r3, [ip, #0]
+
+        mov     r0, tmp1
+#ifdef SHARK
+	mov	r2, thread
+#else
+        add     r3, r3, #FAST_ENTRY_OFFSET
+#endif
+        mov     r2, thread
+        blx     r3
+	LOAD_FRAME
+
+        ldr     stack, [thread, #THREAD_JAVA_SP]
+        ldr     r2, [Rframe, #FRAME_STACK_LIMIT]
+
+        ldr     r1, [thread, #THREAD_TOP_ZERO_FRAME]
+        add     r2, r2, #4
+	mov	r3, #0
+	str	r3, [thread, #THREAD_LAST_JAVA_SP]	
+        str     r1, [thread, #THREAD_LAST_JAVA_FP]
+        str     r2, [thread, #THREAD_JAVA_SP]
+        str     Rframe, [thread, #THREAD_LAST_JAVA_SP] // FIXME: Don't understand this
+        ldr     r3, [thread, #4]
+        cmp     r3, #0
+        bne     istub_exception
+	mov	r0, #0
+        ldmia   sp!, {ip, pc}
+
+ispstub_resolve:
+        mov     tmp1, r1
+        mov     r1, #opc_invokespecial
+        mov	r0, thread
+        ldr     ip, resolve_invoke_adcon
+        blx     ip
+        ldr     r3, [thread, #4]
+        ldr     r2, [Rframe, #FRAME_CONSTANTS]
+        cmp     r3, #0
+        bne     istub_exception
+        add     r0, r2, tmp1, lsl #4    @ r0 = cache entry
+        b       2b
+
+@ R0 = BCI
+@ R1 = index
+	.global	Thumb2_invokespecialresolved_stub
+Thumb2_invokespecialresolved_stub:
+	LOAD_FRAME
+        stmdb   sp!, {ip, lr}
+        ldr     ip, [Rframe, #FRAME_METHOD]
+        sub     stack, stack, #4
+        ldr     r2, [Rframe, #FRAME_CONSTANTS]
+        ldr     ip, [ip, #METHOD_CONSTMETHOD]
+        DECACHE_STACK_USING_FRAME
+        add     jpc, ip, r0
+
+        add     r0, r2, r1, lsl #4
+        DECACHE_JPC_USING_FRAME
+        ldr     r3, [r0, #CP_OFFSET+12]
+        and     r3, r3, #255
+        ldr     r2, [stack, r3, asl #2]
+        cmp     r2, #0
+        beq     istub_null_ptr_exception
+
+        ldr     tmp1, [r0, #CP_OFFSET+4]
+        mov     r1, #0
+        ldr     ip, [tmp1, #METHOD_FROM_INTERPRETED]
+        str     r1, [thread, #THREAD_LAST_JAVA_SP]
+
+        add     stack, stack, #4
+        str     stack, [thread, #THREAD_JAVA_SP]
+
+        ldr     r3, [ip, #0]
+
+        mov     r0, tmp1
+#ifdef SHARK
+	mov	r2, thread
+#else
+        add     r3, r3, #FAST_ENTRY_OFFSET
+#endif
+        blx     r3
+	LOAD_FRAME
+
+        ldr     stack, [thread, #THREAD_JAVA_SP]
+        ldr     r2, [Rframe, #FRAME_STACK_LIMIT]
+
+        ldr     r1, [thread, #THREAD_TOP_ZERO_FRAME]
+        add     r2, r2, #4
+	mov	r3, #0
+	str	r3, [thread, #THREAD_LAST_JAVA_SP]	
+        str     r1, [thread, #THREAD_LAST_JAVA_FP]
+        str     r2, [thread, #THREAD_JAVA_SP]
+        str     Rframe, [thread, #THREAD_LAST_JAVA_SP] // FIXME: Don't understand this
+        ldr     r3, [thread, #4]
+        cmp     r3, #0
+        bne     istub_exception
+	mov	r0, #0
+        ldmia   sp!, {ip, pc}
+
+@ R0 = BCI
+@ R1 = index
+	.global	Thumb2_getfield_word_stub
+	.type Thumb2_getfield_word_stub, %function
+Thumb2_getfield_word_stub:
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r2, [r2, #FRAME_CONSTANTS]
+	add	r2, r2, r1, lsl #4
+	ldr	r3, [r2, #CP_OFFSET]
+	and	r3, r3, #0x00ff0000
+	cmp	r3, #opc_getfield << 16
+	bne	1f
+2:
+	ldr	r3, [stack], #4		@ POP r3
+	ldr	ip, [r2, #CP_OFFSET+8]
+	cmp	r3, #0
+	beq	field_null_ptr_exception
+
+	GO_IF_VOLATILE	r2, r2, 3f
+
+	ldr	r3, [r3, ip]
+	str	r3, [stack, #-4]!	@ PUSH r3
+	bx	lr
+3:
+	
+	ldr	r3, [r3, ip]
+	FullBarrier
+	str	r3, [stack, #-4]!	@ PUSH r3
+	bx	lr
+1:
+	mov	ip, lr
+	bl	getfield_stub_unresolved
+	mov	lr, ip
+	b	2b
+
+@ R0 = BCI
+@ R1 = index
+	.global	Thumb2_getfield_sh_stub
+	.type Thumb2_getfield_sh_stub, %function
+Thumb2_getfield_sh_stub:
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r2, [r2, #FRAME_CONSTANTS]
+	add	r2, r2, r1, lsl #4
+	ldr	r3, [r2, #CP_OFFSET]
+	and	r3, r3, #0x00ff0000
+	cmp	r3, #opc_getfield << 16
+	bne	1f
+2:
+	ldr	r3, [stack], #4		@ POP r3
+	ldr	ip, [r2, #CP_OFFSET+8]
+	cmp	r3, #0
+	beq	field_null_ptr_exception
+
+	GO_IF_VOLATILE	r2, r2, 3f
+
+	ldrsh	r3, [r3, ip]
+	str	r3, [stack, #-4]!	@ PUSH r3
+	bx	lr
+3:
+	ldrsh	r3, [r3, ip]
+	FullBarrier
+	str	r3, [stack, #-4]!	@ PUSH r3
+	bx	lr
+1:
+	mov	ip, lr
+	bl	getfield_stub_unresolved
+	mov	lr, ip
+	b	2b
+
+@ R0 = BCI
+@ R1 = index
+	.global	Thumb2_getfield_h_stub
+	.type Thumb2_getfield_h_stub, %function
+Thumb2_getfield_h_stub:
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r2, [r2, #FRAME_CONSTANTS]
+	add	r2, r2, r1, lsl #4
+	ldr	r3, [r2, #CP_OFFSET]
+	and	r3, r3, #0x00ff0000
+	cmp	r3, #opc_getfield << 16
+	bne	1f
+2:
+	ldr	r3, [stack], #4		@ POP r3
+	ldr	ip, [r2, #CP_OFFSET+8]
+	cmp	r3, #0
+	beq	field_null_ptr_exception
+
+	GO_IF_VOLATILE	r2, r2, 3f
+
+	ldrh	r3, [r3, ip]
+	str	r3, [stack, #-4]!	@ PUSH r3
+	bx	lr
+3:
+	ldrh	r3, [r3, ip]
+	FullBarrier
+	str	r3, [stack, #-4]!	@ PUSH r3
+	bx	lr
+1:
+	mov	ip, lr
+	bl	getfield_stub_unresolved
+	mov	lr, ip
+	b	2b
+
+@ R0 = BCI
+@ R1 = index
+	.global	Thumb2_getfield_sb_stub
+	.type Thumb2_getfield_sb_stub, %function
+Thumb2_getfield_sb_stub:
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r2, [r2, #FRAME_CONSTANTS]
+	add	r2, r2, r1, lsl #4
+	ldr	r3, [r2, #CP_OFFSET]
+	and	r3, r3, #0x00ff0000
+	cmp	r3, #opc_getfield << 16
+	bne	1f
+2:
+	ldr	r3, [stack], #4		@ POP r3
+	ldr	ip, [r2, #CP_OFFSET+8]
+	cmp	r3, #0
+	beq	field_null_ptr_exception
+
+	GO_IF_VOLATILE	r2, r2, 3f
+
+	ldrsb	r3, [r3, ip]
+	str	r3, [stack, #-4]!	@ PUSH r3
+	bx	lr
+3:
+	ldrsb	r3, [r3, ip]
+	FullBarrier
+	str	r3, [stack, #-4]!	@ PUSH r3
+	bx	lr
+1:
+	mov	ip, lr
+	bl	getfield_stub_unresolved
+	mov	lr, ip
+	b	2b
+
+@ R0 = BCI
+@ R1 = index
+	.global	Thumb2_getfield_dw_stub
+	.type Thumb2_getfield_dw_stub, %function
+Thumb2_getfield_dw_stub:
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r2, [r2, #FRAME_CONSTANTS]
+	add	r2, r2, r1, lsl #4
+	ldr	r3, [r2, #CP_OFFSET]
+	and	r3, r3, #0x00ff0000
+	cmp	r3, #opc_getfield << 16
+	bne	1f
+2:
+	ldr	r3, [stack], #4		@ POP r3
+	ldr	ip, [r2, #CP_OFFSET+8]
+	cmp	r3, #0
+	beq	field_null_ptr_exception
+
+	GO_IF_VOLATILE	r2, r2, 3f
+
+	ldrd	r2, r3, [r3, ip]
+	stmdb	stack!, {r2, r3}	@ PUSH r2, r3
+	bx	lr
+3:
+	ldrd	r2, r3, [r3, ip]	// FIXME: Should be ldrexd
+	FullBarrier
+	stmdb	stack!, {r2, r3}	@ PUSH r2, r3
+	bx	lr
+1:
+	mov	ip, lr
+	bl	getfield_stub_unresolved
+	mov	lr, ip
+	b	2b
+	.ltorg
+	
+@ R0 = BCI
+@ R1 = index
+putstatic_stub_unresolved:
+	mov	r2, #opc_putstatic
+	b	field_stub_unresolved
+getstatic_stub_unresolved:
+	mov	r2, #opc_getstatic
+	b	field_stub_unresolved
+putfield_stub_unresolved:
+	mov	r2, #opc_putfield
+	b	field_stub_unresolved
+getfield_stub_unresolved:
+	mov	r2, #opc_getfield
+field_stub_unresolved:
+	stmdb	sp!, {r0, r1, ip, lr}
+	ldr	lr, [thread, #THREAD_TOP_ZERO_FRAME]
+        ldr     ip, [lr, #FRAME_METHOD]
+	sub	r3, stack, #4
+	ldr	ip, [ip, #METHOD_CONSTMETHOD]
+	str	r3, [lr, #FRAME_STACK]	@ DECACHE_STACK
+	add	r3, ip, r0
+	str	r3, [lr, #FRAME_BCP]	@ DECACHE_JPC
+	ldr	ip, resolve_get_put_adcon
+	mov	r1, r2
+	mov	r0, thread
+	blx	ip
+	ldmia	sp!, {r0, r1, ip, lr}
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r3, [thread, #4]
+	ldr	r2, [r2, #FRAME_CONSTANTS]
+	cmp	r3, #0
+	bne	field_exception
+	add	r2, r2, r1, lsl #4
+	bx	lr
+
+field_null_ptr_exception:
+	stmdb	sp!, {JAZ_REGSET}
+	ldr	ip, [thread, #THREAD_TOP_ZERO_FRAME]
+        ldr     r1, [ip, #FRAME_METHOD]
+	ldr	r3, [ip, #FRAME_LOCALS]
+        ldr     ip, [r1, #METHOD_CONSTMETHOD]
+        add     jpc, ip, r0
+ 	mov	r0, #VMSYMBOLS_NullPointerException
+	bic	r0, lr, #TBIT
+	mov	r2, sp
+
+@ We already have BCI, so just call lr_to_bci to save the locals
+@ The result value is ignored
+	ldr	ip, lr_to_bci_adcon
+	blx	ip
+
+	add	sp, sp, #JAZ_REGSET_LEN * 4
+ 	ldr	ip, raise_exception_adcon
+	LOAD_ISTATE
+ 	bx	ip
+ 
+ field_exception:
+ 	ldr	ip, handle_exception_adcon
+	LOAD_ISTATE
+ 	bx	ip
+
+@ R0 = BCI
+@ R1 = index
+	.global	Thumb2_putfield_word_stub
+	.type Thumb2_putfield_word_stub, %function
+Thumb2_putfield_word_stub:
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r2, [r2, #FRAME_CONSTANTS]
+	add	r2, r2, r1, lsl #4
+	ldr	r3, [r2, #CP_OFFSET]
+	and	r3, r3, #0xff000000
+	cmp	r3, #opc_putfield << 24
+	bne	1f
+2:
+	GO_IF_VOLATILE	r3, r2, 3f
+	
+	ldr	ip, [r2, #CP_OFFSET+8]
+	ldmia	stack!, {r2, r3}	@ r2 = value, r3 = obj
+	cmp	r3, #0
+	beq	field_null_ptr_exception
+
+	str	r2, [r3, ip]
+	bx	lr
+3:	
+	ldr	ip, [r2, #CP_OFFSET+8]
+	ldmia	stack!, {r2, r3}	@ r2 = value, r3 = obj
+	cmp	r3, #0
+	beq	field_null_ptr_exception
+
+	StoreStoreBarrier
+	str	r2, [r3, ip]
+	StoreLoadBarrier
+	bx	lr
+1:
+	mov	ip, lr
+	bl	putfield_stub_unresolved
+	mov	lr, ip
+	b	2b
+
+	.global	Thumb2_putfield_h_stub
+	.type Thumb2_putfield_h_stub, %function
+Thumb2_putfield_h_stub:
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r2, [r2, #FRAME_CONSTANTS]
+	add	r2, r2, r1, lsl #4
+	ldr	r3, [r2, #CP_OFFSET]
+	and	r3, r3, #0xff000000
+	cmp	r3, #opc_putfield << 24
+	bne	1f
+2:
+	GO_IF_VOLATILE	r3, r2, 3f
+	
+	ldr	ip, [r2, #CP_OFFSET+8]
+	ldmia	stack!, {r2, r3}	@ r2 = value, r3 = obj
+	cmp	r3, #0
+	beq	field_null_ptr_exception
+
+	strh	r2, [r3, ip]
+	bx	lr
+3:
+	ldr	ip, [r2, #CP_OFFSET+8]
+	ldmia	stack!, {r2, r3}	@ r2 = value, r3 = obj
+	cmp	r3, #0
+	beq	field_null_ptr_exception
+
+	StoreStoreBarrier
+	strh	r2, [r3, ip]
+	StoreLoadBarrier
+	bx	lr
+1:
+	mov	ip, lr
+	bl	putfield_stub_unresolved
+	mov	lr, ip
+	b	2b
+
+	.global	Thumb2_putfield_b_stub
+	.type Thumb2_putfield_b_stub, %function
+Thumb2_putfield_b_stub:
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r2, [r2, #FRAME_CONSTANTS]
+	add	r2, r2, r1, lsl #4
+	ldr	r3, [r2, #CP_OFFSET]
+	and	r3, r3, #0xff000000
+	cmp	r3, #opc_putfield << 24
+	bne	1f
+2:
+	GO_IF_VOLATILE	r3, r2, 3f
+	
+	ldr	ip, [r2, #CP_OFFSET+8]
+	ldmia	stack!, {r2, r3}	@ r2 = value, r3 = obj
+	cmp	r3, #0
+	beq	field_null_ptr_exception
+
+	strb	r2, [r3, ip]
+	bx	lr
+3:
+	ldr	ip, [r2, #CP_OFFSET+8]
+	ldmia	stack!, {r2, r3}	@ r2 = value, r3 = obj
+	cmp	r3, #0
+	beq	field_null_ptr_exception
+
+	StoreStoreBarrier
+	strb	r2, [r3, ip]
+	StoreLoadBarrier
+	bx	lr
+1:
+	mov	ip, lr
+	bl	putfield_stub_unresolved
+	mov	lr, ip
+	b	2b
+
+	.global	Thumb2_putfield_a_stub
+	.type Thumb2_putfield_a_stub, %function
+Thumb2_putfield_a_stub:
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r2, [r2, #FRAME_CONSTANTS]
+	add	r2, r2, r1, lsl #4
+	ldr	r3, [r2, #CP_OFFSET]
+	and	r3, r3, #0xff000000
+	cmp	r3, #opc_putfield << 24
+	bne	1f
+2:
+	GO_IF_VOLATILE	r3, r2, 3f
+	
+	ldr	ip, [r2, #CP_OFFSET+8]
+	ldmia	stack!, {r2, r3}	@ r2 = value, r3 = obj
+	cmp	r3, #0
+	beq	field_null_ptr_exception
+
+	str	r2, [r3, ip]
+	ldr	ip, helper_aputfield_adcon
+	mov	r0, r3
+	bx	ip
+3:
+	ldr	ip, [r2, #CP_OFFSET+8]
+	ldmia	stack!, {r2, r3}	@ r2 = value, r3 = obj
+	cmp	r3, #0
+	beq	field_null_ptr_exception
+
+	StoreStoreBarrier
+	str	r2, [r3, ip]
+	StoreLoadBarrier
+	ldr	ip, helper_aputfield_adcon
+	mov	r0, r3
+	bx	ip
+1:
+	mov	ip, lr
+	bl	putfield_stub_unresolved
+	mov	lr, ip
+	b	2b
+
+	.global	Thumb2_putfield_dw_stub
+	.type Thumb2_putfield_dw_stub, %function
+Thumb2_putfield_dw_stub:
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r2, [r2, #FRAME_CONSTANTS]
+	add	r2, r2, r1, lsl #4
+	ldr	r3, [r2, #CP_OFFSET]
+	and	r3, r3, #0xff000000
+	cmp	r3, #opc_putfield << 24
+	bne	1f
+2:
+	GO_IF_VOLATILE	r3, r2, 3f
+	
+	ldr	r1, [r2, #CP_OFFSET+8]
+	ldmia	stack!, {r2, r3, ip}	@ r2,r3 = value, ip = obj
+	cmp	ip, #0
+	beq	field_null_ptr_exception
+
+	strd	r2,r3, [ip, r1]
+	bx	lr
+3:
+	ldr	r1, [r2, #CP_OFFSET+8]
+	ldmia	stack!, {r2, r3, ip}	@ r2,r3 = value, ip = obj
+	cmp	ip, #0
+	beq	field_null_ptr_exception
+
+	StoreStoreBarrier
+	// FIXME: This should use strexd on an MP system
+	strd	r2,r3, [ip, r1]
+	StoreLoadBarrier
+	bx	lr
+1:
+	mov	ip, lr
+	bl	putfield_stub_unresolved
+	mov	lr, ip
+	b	2b
+
+@ R0 = BCI
+@ R1 = index
+	.global	Thumb2_getstatic_word_stub
+	.type Thumb2_getstatic_word_stub, %function
+Thumb2_getstatic_word_stub:
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r2, [r2, #FRAME_CONSTANTS]
+	add	r2, r2, r1, lsl #4
+	ldr	r3, [r2, #CP_OFFSET]
+	and	r3, r3, #0x00ff0000
+	cmp	r3, #opc_getstatic << 16
+	bne	1f
+2:
+	GO_IF_VOLATILE	r3, r2, 3f
+	
+	ldr	r3, [r2, #CP_OFFSET+4]
+	ldr	ip, [r2, #CP_OFFSET+8]
+
+	ldr	r3, [r3, ip]
+	str	r3, [stack, #-4]!	@ PUSH r3
+	bx	lr
+3:
+	ldr	r3, [r2, #CP_OFFSET+4]
+	ldr	ip, [r2, #CP_OFFSET+8]
+
+	ldr	r3, [r3, ip]
+	FullBarrier
+	str	r3, [stack, #-4]!	@ PUSH r3
+	bx	lr
+1:
+	mov	ip, lr
+	bl	getstatic_stub_unresolved
+	mov	lr, ip
+	b	2b
+
+	.global	Thumb2_getstatic_h_stub
+	.type Thumb2_getstatic_h_stub, %function
+Thumb2_getstatic_h_stub:
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r2, [r2, #FRAME_CONSTANTS]
+	add	r2, r2, r1, lsl #4
+	ldr	r3, [r2, #CP_OFFSET]
+	and	r3, r3, #0x00ff0000
+	cmp	r3, #opc_getstatic << 16
+	bne	1f
+2:
+	GO_IF_VOLATILE	r3, r2, 3f
+	
+	ldr	r3, [r2, #CP_OFFSET+4]
+	ldr	ip, [r2, #CP_OFFSET+8]
+
+	ldrh	r3, [r3, ip]
+	str	r3, [stack, #-4]!	@ PUSH r3
+	bx	lr
+3:
+	ldr	r3, [r2, #CP_OFFSET+4]
+	ldr	ip, [r2, #CP_OFFSET+8]
+
+	ldrh	r3, [r3, ip]
+	FullBarrier
+	str	r3, [stack, #-4]!	@ PUSH r3
+	bx	lr
+1:
+	mov	ip, lr
+	bl	getstatic_stub_unresolved
+	mov	lr, ip
+	b	2b
+
+	.global	Thumb2_getstatic_sh_stub
+	.type Thumb2_getstatic_sh_stub, %function
+Thumb2_getstatic_sh_stub:
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r2, [r2, #FRAME_CONSTANTS]
+	add	r2, r2, r1, lsl #4
+	ldr	r3, [r2, #CP_OFFSET]
+	and	r3, r3, #0x00ff0000
+	cmp	r3, #opc_getstatic << 16
+	bne	1f
+2:
+	GO_IF_VOLATILE	r3, r2, 3f
+	
+	ldr	r3, [r2, #CP_OFFSET+4]
+	ldr	ip, [r2, #CP_OFFSET+8]
+
+	ldrsh	r3, [r3, ip]
+	str	r3, [stack, #-4]!	@ PUSH r3
+	bx	lr
+3:
+	ldr	r3, [r2, #CP_OFFSET+4]
+	ldr	ip, [r2, #CP_OFFSET+8]
+
+	ldrsh	r3, [r3, ip]
+	FullBarrier
+	str	r3, [stack, #-4]!	@ PUSH r3
+	bx	lr
+1:
+	mov	ip, lr
+	bl	getstatic_stub_unresolved
+	mov	lr, ip
+	b	2b
+
+	.global	Thumb2_getstatic_sb_stub
+	.type Thumb2_getstatic_sb_stub, %function
+Thumb2_getstatic_sb_stub:
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r2, [r2, #FRAME_CONSTANTS]
+	add	r2, r2, r1, lsl #4
+	ldr	r3, [r2, #CP_OFFSET]
+	and	r3, r3, #0x00ff0000
+	cmp	r3, #opc_getstatic << 16
+	bne	1f
+2:
+	GO_IF_VOLATILE	r3, r2, 3f
+	
+	ldr	r3, [r2, #CP_OFFSET+4]
+	ldr	ip, [r2, #CP_OFFSET+8]
+
+	ldrsb	r3, [r3, ip]
+	str	r3, [stack, #-4]!	@ PUSH r3
+	bx	lr
+3:
+	ldr	r3, [r2, #CP_OFFSET+4]
+	ldr	ip, [r2, #CP_OFFSET+8]
+
+	ldrsb	r3, [r3, ip]
+	FullBarrier
+	str	r3, [stack, #-4]!	@ PUSH r3
+	bx	lr
+1:
+	mov	ip, lr
+	bl	getstatic_stub_unresolved
+	mov	lr, ip
+	b	2b
+
+	.global	Thumb2_getstatic_dw_stub
+	.type Thumb2_getstatic_dw_stub, %function
+Thumb2_getstatic_dw_stub:
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r2, [r2, #FRAME_CONSTANTS]
+	add	r2, r2, r1, lsl #4
+	ldr	r3, [r2, #CP_OFFSET]
+	and	r3, r3, #0x00ff0000
+	cmp	r3, #opc_getstatic << 16
+	bne	1f
+2:
+	GO_IF_VOLATILE	r3, r2, 3f
+	
+	ldr	r3, [r2, #CP_OFFSET+4]
+	ldr	ip, [r2, #CP_OFFSET+8]
+
+	ldrd	r2, r3, [r3, ip]
+	stmdb	stack!, {r2, r3}	@ PUSH r2, r3
+	bx	lr
+3:
+	ldr	r3, [r2, #CP_OFFSET+4]
+	ldr	ip, [r2, #CP_OFFSET+8]
+
+	ldrd	r2, r3, [r3, ip]
+	FullBarrier
+	// FIXME: This should use ldrexd on an MP system
+	stmdb	stack!, {r2, r3}	@ PUSH r2, r3
+	bx	lr
+1:
+	mov	ip, lr
+	bl	getstatic_stub_unresolved
+	mov	lr, ip
+	b	2b
+
+@ R0 = BCI
+@ R1 = index
+	.global	Thumb2_putstatic_word_stub
+	.type Thumb2_putstatic_word_stub, %function
+Thumb2_putstatic_word_stub:
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r2, [r2, #FRAME_CONSTANTS]
+	add	r2, r2, r1, lsl #4
+	ldr	r3, [r2, #CP_OFFSET]
+	and	r3, r3, #0xff000000
+	cmp	r3, #opc_putstatic << 24
+	bne	1f
+2:
+	GO_IF_VOLATILE	r3, r2, 3f
+	
+	ldr	r3, [r2, #CP_OFFSET+4]
+	ldr	ip, [r2, #CP_OFFSET+8]
+	ldr	r2, [stack], #4		@ POP r2
+
+	str	r2, [r3, ip]
+	bx	lr
+3:
+	ldr	r3, [r2, #CP_OFFSET+4]
+	ldr	ip, [r2, #CP_OFFSET+8]
+	ldr	r2, [stack], #4		@ POP r2
+
+	StoreStoreBarrier
+	str	r2, [r3, ip]
+	StoreLoadBarrier
+	bx	lr
+1:
+	mov	ip, lr
+	bl	putstatic_stub_unresolved
+	mov	lr, ip
+	b	2b
+
+@ R0 = BCI
+@ R1 = index
+	.global	Thumb2_putstatic_h_stub
+	.type Thumb2_putstatic_h_stub, %function
+Thumb2_putstatic_h_stub:
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r2, [r2, #FRAME_CONSTANTS]
+	add	r2, r2, r1, lsl #4
+	ldr	r3, [r2, #CP_OFFSET]
+	and	r3, r3, #0xff000000
+	cmp	r3, #opc_putstatic << 24
+	bne	1f
+2:
+	GO_IF_VOLATILE	r3, r2, 3f
+	
+	ldr	r3, [r2, #CP_OFFSET+4]
+	ldr	ip, [r2, #CP_OFFSET+8]
+	ldr	r2, [stack], #4		@ POP r2
+
+	strh	r2, [r3, ip]
+	bx	lr
+3:
+	ldr	r3, [r2, #CP_OFFSET+4]
+	ldr	ip, [r2, #CP_OFFSET+8]
+	ldr	r2, [stack], #4		@ POP r2
+
+	StoreStoreBarrier
+	strh	r2, [r3, ip]
+	StoreLoadBarrier
+	bx	lr
+1:
+	mov	ip, lr
+	bl	putstatic_stub_unresolved
+	mov	lr, ip
+	b	2b
+
+@ R0 = BCI
+@ R1 = index
+	.global	Thumb2_putstatic_b_stub
+	.type Thumb2_putstatic_b_stub, %function
+Thumb2_putstatic_b_stub:
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r2, [r2, #FRAME_CONSTANTS]
+	add	r2, r2, r1, lsl #4
+	ldr	r3, [r2, #CP_OFFSET]
+	and	r3, r3, #0xff000000
+	cmp	r3, #opc_putstatic << 24
+	bne	1f
+2:
+	GO_IF_VOLATILE	r3, r2, 3f
+	
+	ldr	r3, [r2, #CP_OFFSET+4]
+	ldr	ip, [r2, #CP_OFFSET+8]
+	ldr	r2, [stack], #4		@ POP r2
+
+	strb	r2, [r3, ip]
+	bx	lr
+3:
+	ldr	r3, [r2, #CP_OFFSET+4]
+	ldr	ip, [r2, #CP_OFFSET+8]
+	ldr	r2, [stack], #4		@ POP r2
+
+	StoreStoreBarrier
+	strb	r2, [r3, ip]
+	StoreLoadBarrier
+	bx	lr
+1:
+	mov	ip, lr
+	bl	putstatic_stub_unresolved
+	mov	lr, ip
+	b	2b
+
+@ R0 = BCI
+@ R1 = index
+	.global	Thumb2_putstatic_dw_stub
+	.type Thumb2_putstatic_dw_stub, %function
+Thumb2_putstatic_dw_stub:
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r2, [r2, #FRAME_CONSTANTS]
+	add	r2, r2, r1, lsl #4
+	ldr	r3, [r2, #CP_OFFSET]
+	and	r3, r3, #0xff000000
+	cmp	r3, #opc_putstatic << 24
+	bne	1f
+2:
+	GO_IF_VOLATILE	r3, r2, 3f
+	
+	ldr	r1, [r2, #CP_OFFSET+4]
+	ldr	ip, [r2, #CP_OFFSET+8]
+	ldmia	stack!, {r2, r3}
+
+	strd	r2,r3, [r1, ip]
+	bx	lr
+3:
+	ldr	r1, [r2, #CP_OFFSET+4]
+	ldr	ip, [r2, #CP_OFFSET+8]
+	ldmia	stack!, {r2, r3}
+
+	StoreStoreBarrier
+	strd	r2,r3, [r1, ip]
+	StoreLoadBarrier
+	bx	lr
+1:
+	mov	ip, lr
+	bl	putstatic_stub_unresolved
+	mov	lr, ip
+	b	2b
+
+@ R0 = BCI
+@ R1 = index
+	.global	Thumb2_putstatic_a_stub
+	.type Thumb2_putstatic_a_stub, %function
+Thumb2_putstatic_a_stub:
+	ldr	r2, [thread, #THREAD_TOP_ZERO_FRAME]
+	ldr	r2, [r2, #FRAME_CONSTANTS]
+	add	r2, r2, r1, lsl #4
+	ldr	r3, [r2, #CP_OFFSET]
+	and	r3, r3, #0xff000000
+	cmp	r3, #opc_putstatic << 24
+	bne	1f
+2:
+	GO_IF_VOLATILE	r3, r2, 3f
+	
+	ldr	r3, [r2, #CP_OFFSET+4]
+	ldr	ip, [r2, #CP_OFFSET+8]
+	ldr	r2, [stack], #4		@ POP r2
+
+	str	r2, [r3, ip]
+	ldr	ip, helper_aputfield_adcon
+	mov	r0, r3
+	bx	ip
+3:
+	ldr	r3, [r2, #CP_OFFSET+4]
+	ldr	ip, [r2, #CP_OFFSET+8]
+	ldr	r2, [stack], #4		@ POP r2
+
+	StoreStoreBarrier
+	str	r2, [r3, ip]
+	StoreLoadBarrier
+	ldr	ip, helper_aputfield_adcon
+	mov	r0, r3
+	bx	ip
+1:
+	mov	ip, lr
+	bl	putstatic_stub_unresolved
+	mov	lr, ip
+	b	2b
+
+#endif // T2JIT
+
+	.global	Thumb2_stubs_end
+	.type Thumb2_stubs_end, %function
+Thumb2_stubs_end:
+
+	ALIGN_CODE
+jdiv_1:
+	bx	lr
+jdiv_2:
+        add     r0, r0, r0, lsr #31
+        mov     r0, r0, asr #1
+	bx	lr
+jdiv_24:
+        add     r0, r0, r0, lsr #31
+        mov     r0, r0, asr #1
+jdiv_12:
+        add     r0, r0, r0, lsr #31
+        mov     r0, r0, asr #1
+jdiv_6:
+        add     r0, r0, r0, lsr #31
+        mov     r0, r0, asr #1
+jdiv_3:
+	ldr	r1, dc_3
+        smull	r3, r2, r0, r1
+        sub	r0, r2, r0, asr #31
+	bx	lr
+jdiv_4:
+	mov	r1, r0, asr #31
+	add	r0, r0, r1, lsr #30
+	mov	r0, r0, asr #2
+	bx	lr
+jdiv_20:
+        add     r0, r0, r0, lsr #31
+        mov     r0, r0, asr #1
+jdiv_10:
+        add     r0, r0, r0, lsr #31
+        mov     r0, r0, asr #1
+jdiv_5:
+	ldr	r1, dc_5
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r0, r3, r2, asr #1
+	bx	lr
+jdiv_28:
+        add     r0, r0, r0, lsr #31
+        mov     r0, r0, asr #1
+jdiv_14:
+        add     r0, r0, r0, lsr #31
+        mov     r0, r0, asr #1
+jdiv_7:
+	ldr	r1, dc_7
+        smull	r3, r2, r0, r1
+        mov	r1, r0, asr #31
+        add	r3, r0, r2
+        rsb	r0, r1, r3, asr #2
+	bx	lr
+jdiv_8:
+	mov	r1, r0, asr #31
+	add	r0, r0, r1, lsr #29
+	mov	r0, r0, asr #3
+	bx	lr
+jdiv_18:
+        add     r0, r0, r0, lsr #31
+        mov     r0, r0, asr #1
+jdiv_9:
+	ldr	r1, dc_9
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r0, r3, r2, asr #1
+	bx	lr
+jdiv_22:
+        add     r0, r0, r0, lsr #31
+        mov     r0, r0, asr #1
+jdiv_11:
+	ldr	r1, dc_11
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r0, r3, r2, asr #1
+	bx	lr
+jdiv_26:
+        add     r0, r0, r0, lsr #31
+        mov     r0, r0, asr #1
+jdiv_13:
+	ldr	r1, dc_13
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r0, r3, r2, asr #2
+	bx	lr
+jdiv_30:
+        add     r0, r0, r0, lsr #31
+        mov     r0, r0, asr #1
+jdiv_15:
+	ldr	r1, dc_15
+        smull	r3, r2, r0, r1
+        mov	r1, r0, asr #31
+        add	r3, r0, r2
+        rsb	r0, r1, r3, asr #3
+	bx	lr
+jdiv_16:
+	mov	r1, r0, asr #31
+	add	r0, r0, r1, lsr #28
+	mov	r0, r0, asr #4
+	bx	lr
+jdiv_17:
+	ldr	r1, dc_17
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r0, r3, r2, asr #3
+	bx	lr
+jdiv_19:
+	ldr	r1, dc_19
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r0, r3, r2, asr #3
+	bx	lr
+jdiv_21:
+	ldr	r1, dc_21
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r0, r3, r2, asr #2
+	bx	lr
+jdiv_23:
+	ldr	r1, dc_23
+        smull	r3, r2, r0, r1
+        mov	r1, r0, asr #31
+        add	r3, r0, r2
+        rsb	r0, r1, r3, asr #4
+	bx	lr
+jdiv_25:
+	ldr	r1, dc_25
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r0, r3, r2, asr #3
+	bx	lr
+jdiv_27:
+	ldr	r1, dc_27
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r0, r3, r2, asr #3
+	bx	lr
+jdiv_29:
+	ldr	r1, dc_29
+        smull	r3, r2, r0, r1
+        mov	r1, r0, asr #31
+        add	r3, r0, r2
+        rsb	r0, r1, r3, asr #4
+	bx	lr
+jdiv_31:
+	ldr	r1, dc_31
+        smull	r3, r2, r0, r1
+        mov	r1, r0, asr #31
+        add	r3, r0, r2
+        rsb	r0, r1, r3, asr #4
+	bx	lr
+jdiv_32:
+	mov	r1, r0, asr #31
+	add	r0, r0, r1, lsr #27
+	mov	r0, r0, asr #5
+	bx	lr
+jrem_1:
+	mov	r0, #0
+	bx	lr
+jrem_2:
+	add	r3, r0, r0, lsr #31
+        mov	r1, r3, asr #1
+	sub	r0, r0, r1, lsl #1
+	bx	lr
+jrem_3:
+	ldr	r1, dc_3
+        smull	r3, r2, r0, r1
+        sub	r1, r2, r0, asr #31
+	add	r3, r1, r1, lsl #1
+	sub	r0, r0, r3
+	bx	lr
+jrem_4:
+	movs	r3, r0
+        addmi	r3, r3, #3
+        mov	r1, r3, asr #2
+	sub	r0, r0, r1, lsl #2
+	bx	lr
+jrem_5:
+	ldr	r1, dc_5
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r1, r3, r2, asr #1
+	add	r3, r1, r1, lsl #2
+	sub	r0, r0, r3
+	bx	lr
+jrem_6:
+	ldr	r1, dc_6
+        smull	r3, r2, r0, r1
+        sub	r1, r2, r0, asr #31
+	add	r3, r1, r1, lsl #1
+	sub	r0, r0, r3, lsl #1
+	bx	lr
+jrem_7:
+	ldr	r1, dc_7
+        smull	r3, r2, r0, r1
+        mov	r1, r0, asr #31
+        add	r3, r0, r2
+        rsb	r1, r1, r3, asr #2
+	rsb	r3, r1, r1, lsl #3
+	sub	r0, r0, r3
+	bx	lr
+jrem_8:
+	movs	r3, r0
+        addmi	r3, r3, #7
+        mov	r1, r3, asr #3
+	sub	r0, r0, r1, lsl #3
+	bx	lr
+jrem_9:
+	ldr	r1, dc_9
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r1, r3, r2, asr #1
+	add	r3, r1, r1, lsl #3
+	sub	r0, r0, r3
+	bx	lr
+jrem_10:
+	ldr	r1, dc_10
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r1, r3, r2, asr #2
+	add	r3, r1, r1, lsl #2
+	sub	r0, r0, r3, lsl #1
+	bx	lr
+jrem_11:
+	ldr	r1, dc_11
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r1, r3, r2, asr #1
+	add	r3, r1, r1, lsl #2
+	add	r3, r1, r3, lsl #1
+	sub	r0, r0, r3
+	bx	lr
+jrem_12:
+	ldr	r1, dc_12
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r1, r3, r2, asr #1
+	add	r3, r1, r1, lsl #1
+	sub	r0, r0, r3, lsl #2
+	bx	lr
+jrem_13:
+	ldr	r1, dc_13
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r1, r3, r2, asr #2
+	add	r3, r1, r1, lsl #1
+	add	r3, r1, r3, lsl #2
+	sub	r0, r0, r3
+	bx	lr
+jrem_14:
+	ldr	r1, dc_14
+        smull	r3, r2, r0, r1
+        mov	r1, r0, asr #31
+        add	r3, r0, r2
+        rsb	r1, r1, r3, asr #3
+	rsb	r3, r1, r1, lsl #3
+	sub	r0, r0, r3, lsl #1
+	bx	lr
+jrem_15:
+	ldr	r1, dc_15
+        smull	r3, r2, r0, r1
+        mov	r1, r0, asr #31
+        add	r3, r0, r2
+        rsb	r1, r1, r3, asr #3
+	rsb	r3, r1, r1, lsl #4
+	sub	r0, r0, r3
+	bx	lr
+jrem_16:
+	movs	r3, r0
+        addmi	r3, r3, #15
+        mov	r1, r3, asr #4
+	sub	r0, r0, r1, lsl #4
+	bx	lr
+jrem_17:
+	ldr	r1, dc_17
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r1, r3, r2, asr #3
+	add	r3, r1, r1, lsl #4
+	sub	r0, r0, r3
+	bx	lr
+jrem_18:
+	ldr	r1, dc_18
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r1, r3, r2, asr #2
+	add	r3, r1, r1, lsl #3
+	sub	r0, r0, r3, lsl #1
+	bx	lr
+jrem_19:
+	ldr	r1, dc_19
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r1, r3, r2, asr #3
+	add	r3, r1, r1, lsl #3
+	add	r3, r1, r3, lsl #1
+	sub	r0, r0, r3
+	bx	lr
+jrem_20:
+	ldr	r1, dc_20
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r1, r3, r2, asr #3
+	add	r3, r1, r1, lsl #2
+	sub	r0, r0, r3, lsl #2
+	bx	lr
+jrem_21:
+	ldr	r1, dc_21
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r1, r3, r2, asr #2
+	add	r3, r1, r1, lsl #1
+	rsb	r3, r3, r3, lsl #3
+	sub	r0, r0, r3
+	bx	lr
+jrem_22:
+	ldr	r1, dc_22
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r1, r3, r2, asr #2
+	add	r3, r1, r1, lsl #2
+	add	r3, r1, r3, lsl #1
+	sub	r0, r0, r3, lsl #1
+	bx	lr
+jrem_23:
+	ldr	r1, dc_23
+        smull	r3, r2, r0, r1
+        mov	r1, r0, asr #31
+        add	r3, r0, r2
+        rsb	r1, r1, r3, asr #4
+	add	r3, r1, r1, lsl #1
+	rsb	r3, r1, r3, lsl #3
+	sub	r0, r0, r3
+	bx	lr
+jrem_24:
+	ldr	r1, dc_24
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r1, r3, r2, asr #2
+	add	r3, r1, r1, lsl #1
+	sub	r0, r0, r3, lsl #3
+	bx	lr
+jrem_25:
+	ldr	r1, dc_25
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r1, r3, r2, asr #3
+	add	r3, r1, r1, lsl #2
+	add	r3, r3, r3, lsl #2
+	sub	r0, r0, r3
+	bx	lr
+jrem_26:
+	ldr	r1, dc_26
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r1, r3, r2, asr #3
+	add	r3, r1, r1, lsl #1
+	add	r3, r1, r3, lsl #2
+	sub	r0, r0, r3, lsl #1
+	bx	lr
+jrem_27:
+	ldr	r1, dc_27
+        smull	r3, r2, r0, r1
+        mov	r3, r0, asr #31
+        rsb	r1, r3, r2, asr #3
+	add	r3, r1, r1, lsl #1
+	add	r3, r3, r3, lsl #3
+	sub	r0, r0, r3
+	bx	lr
+jrem_28:
+	ldr	r1, dc_28
+        smull	r3, r2, r0, r1
+        mov	r1, r0, asr #31
+        add	r3, r0, r2
+        rsb	r1, r1, r3, asr #4
+	rsb	r3, r1, r1, lsl #3
+	sub	r0, r0, r3, lsl #2
+	bx	lr
+jrem_29:
+	ldr	r1, dc_29
+        smull	r3, r2, r0, r1
+        mov	r1, r0, asr #31
+        add	r3, r0, r2
+        rsb	r1, r1, r3, asr #4
+	rsb	r3, r1, r1, lsl #3
+	add	r3, r1, r3, lsl #2
+	sub	r0, r0, r3
+	bx	lr
+jrem_30:
+	ldr	r1, dc_30
+        smull	r3, r2, r0, r1
+        mov	r1, r0, asr #31
+        add	r3, r0, r2
+        rsb	r1, r1, r3, asr #4
+	rsb	r3, r1, r1, lsl #4
+	sub	r0, r0, r3, lsl #1
+	bx	lr
+jrem_31:
+	ldr	r1, dc_31
+        smull	r3, r2, r0, r1
+        mov	r1, r0, asr #31
+        add	r3, r0, r2
+        rsb	r1, r1, r3, asr #4
+	rsb	r3, r1, r1, lsl #5
+	sub	r0, r0, r3
+	bx	lr
+jrem_32:
+	movs	r3, r0
+        addmi	r3, r3, #31
+        mov	r1, r3, asr #5
+	sub	r0, r0, r1, lsl #5
+	bx	lr
+	ALIGN_DATA
+dc_7:
+dc_14:
+	.word     0x92492493
+dc_15:
+dc_30:
+	.word     0x88888889
+dc_23:
+	.word     0xb21642c9
+dc_28:
+	.word     0x92492493
+dc_29:
+	.word     0x8d3dcb09
+dc_31:
+	.word     0x84210843
+dc_6:
+dc_12:
+dc_24:
+	.word     0x2aaaaaab
+dc_19:
+	.word     0x6bca1af3
+dc_5:
+dc_10:
+dc_20:
+	.word     0x66666667
+dc_21:
+	.word     0x30c30c31
+dc_11:
+dc_22:
+	.word     0x2e8ba2e9
+dc_26:
+dc_13:
+	.word     0x4ec4ec4f
+dc_25:
+	.word     0x51eb851f
+dc_27:
+	.word     0x4bda12f7
+dc_3:
+	.word     0x55555556
+dc_17:
+	.word     0x78787879
+dc_9:
+dc_18:
+	.word     0x38e38e39
+
+	.global	Thumb2_DivZero_Handler
+	.type Thumb2_DivZero_Handler, %function
+Thumb2_DivZero_Handler:
+#ifdef T2JIT
+
+	adrl	r0, idiv_clz_ret
+	cmp	r0, lr
+	addne	r0, r0, #irem_clz_ret - idiv_clz_ret
+	cmpne	r0, lr
+	beq	divide_by_zero_exception
+	stmdb	sp!, {JAZ_REGSET}
+	LOAD_FRAME
+	bic	r0, lr, #TBIT
+	ldr	r1, [Rframe, #FRAME_METHOD]
+        ldr     jpc, [r1, #METHOD_CONSTMETHOD]
+	add	jpc, jpc, #CONSTMETHOD_CODEOFFSET
+	mov	r2, sp
+	ldr	r3, [Rframe, #FRAME_LOCALS]
+	bl	Thumb2_lr_to_bci
+	add	sp, sp, #JAZ_REGSET_LEN * 4
+	cmp	r0, #-1
+	moveq	jpc, #0
+	addne	jpc, jpc, r0
+	bl	load_dispatch
+	LOAD_ISTATE
+#endif // T2JIT
+	b	divide_by_zero_exception
+
+#ifdef T2JIT
+
+	.global	Thumb2_Handle_Exception
+	.type Thumb2_Handle_Exception, %function
+	.global	Thumb2_Handle_Exception_NoRegs
+	.type Thumb2_Handle_Exception_NoRegs, %function
+	.global Thumb2_ArrayBounds_Handler
+	.type Thumb2_ArrayBounds_Handler, %function
+	.global Thumb2_NullPtr_Handler
+	.type Thumb2_NullPtr_Handler, %function
+	.global Thumb2_Stack_Overflow
+	.type Thumb2_Stack_Overflow, %function
+Thumb2_ArrayBounds_Handler:
+	stmdb	sp!, {JAZ_REGSET}
+	LOAD_FRAME
+	bic	r0, lr, #TBIT
+	ldr	r1, [Rframe, #FRAME_METHOD]
+        ldr     jpc, [r1, #METHOD_CONSTMETHOD]
+	add	jpc, jpc, #CONSTMETHOD_CODEOFFSET
+	mov	r2, sp
+	ldr	r3, [Rframe, #FRAME_LOCALS]
+	bl	Thumb2_lr_to_bci
+	add	sp, sp, #JAZ_REGSET_LEN * 4
+	cmp	r0, #-1
+	moveq	jpc, #0
+	addne	jpc, jpc, r0
+	bl	load_dispatch
+	mov	r0, #VMSYMBOLS_ArrayIndexOutOfBounds
+	LOAD_ISTATE
+	b	raise_exception
+Thumb2_Handle_Exception:
+	stmdb	sp!, {JAZ_REGSET}
+	LOAD_FRAME
+	bic	r0, lr, #TBIT
+	ldr	r1, [Rframe, #FRAME_METHOD]
+        ldr     jpc, [r1, #METHOD_CONSTMETHOD]
+	add	jpc, jpc, #CONSTMETHOD_CODEOFFSET
+	mov	r2, sp
+	ldr	r3, [Rframe, #FRAME_LOCALS]
+	bl	Thumb2_lr_to_bci
+	add	sp, sp, #JAZ_REGSET_LEN * 4
+	cmp	r0, #-1
+	moveq	jpc, #0
+	addne	jpc, jpc, r0
+	bl	load_dispatch
+	LOAD_ISTATE
+	b	handle_exception
+Thumb2_Handle_Exception_NoRegs:
+	LOAD_FRAME
+	ldr	r0, [Rframe, #FRAME_STACK_LIMIT]
+	add	r0, r0, #4
+	str	r0, [thread, #THREAD_JAVA_SP]
+	bic	r0, lr, #TBIT
+	ldr	r1, [Rframe, #FRAME_METHOD]
+        ldr     jpc, [r1, #METHOD_CONSTMETHOD]
+	add	jpc, jpc, #CONSTMETHOD_CODEOFFSET
+	mov	r2, #0
+	bl	Thumb2_lr_to_bci
+	cmp	r0, #-1
+	moveq	jpc, #0
+	addne	jpc, jpc, r0
+	bl	load_dispatch
+	LOAD_ISTATE
+	b	handle_exception
+Thumb2_NullPtr_Handler:
+	stmdb	sp!, {JAZ_REGSET}
+	LOAD_FRAME
+	bic	r0, lr, #TBIT
+	ldr	r1, [Rframe, #FRAME_METHOD]
+        ldr     jpc, [r1, #METHOD_CONSTMETHOD]
+	add	jpc, jpc, #CONSTMETHOD_CODEOFFSET
+	mov	r2, sp
+	ldr	r3, [Rframe, #FRAME_LOCALS]
+	bl	Thumb2_lr_to_bci
+	add	sp, sp, #JAZ_REGSET_LEN * 4
+	cmp	r0, #-1
+	moveq	jpc, #0
+	addne	jpc, jpc, r0
+	bl	load_dispatch
+	LOAD_ISTATE
+	b	null_ptr_exception
+
+Thumb2_Stack_Overflow:
+	mov	r0, thread
+	mov	r2, #0
+	str	r2, [r0, #THREAD_LAST_JAVA_SP]
+	ldr	ip, [r0, #THREAD_TOP_ZERO_FRAME]
+	ldr	r2, [r0, #THREAD_JAVA_SP]
+	str	ip, [r0, #THREAD_LAST_JAVA_FP]
+	str	r2, [r0, #THREAD_LAST_JAVA_SP]
+	bl	_ZN18InterpreterRuntime24throw_StackOverflowErrorEP10JavaThread
+	mov	r0, #0
+	ldmfd	arm_sp!, {fast_regset, pc}
+
+	.global	Thumb2_Exit_To_Interpreter
+	.type Thumb2_Exit_To_Interpreter, %function
+Thumb2_Exit_To_Interpreter:
+	LOAD_ISTATE
+	bl	load_dispatch
+	sub	stack, stack, #4
+	CACHE_CP
+	CACHE_LOCALS
+	DISPATCH	0
+
+	.global	Thumb2_monitorenter
+Thumb2_monitorenter:
+	stmdb	sp!, {ip, lr}
+	sub	stack, stack, #4
+	mov	r0, r8
+	POP	r1
+	DECACHE_JPC
+	DECACHE_STACK
+	bl	Helper_monitorenter
+	CACHE_STACK		@ monitorenter may expand stack!!!
+	ldmia	sp!, {ip, lr}
+	cmp	r0, #0
+	bne	handle_exception
+	add	stack, stack, #4
+	bx	lr
+
+	.global	Thumb2_Clear_Cache
+	.type Thumb2_Clear_Cache, %function
+Thumb2_Clear_Cache:
+	stmdb	sp!, {r7}
+	mov	r2, #0
+	mov	r7, #2
+	orr	r7, r7, #0xf0000
+	svc	0
+	ldmia	sp!, {r7}
+	bx	lr
+
+#endif // T2JIT
+
+	.section	.init_array,"aw",%init_array
+	.word	bci_init(target1)
+
+	.data
+	.global	CPUInfo
+	ALIGN_DATA
+        .word   0, 0, 0, 0, 0, 0, 0, 0
+        .word   0, 0, 0, 0, 0
+DispatchBreakPoint:					.word	0
+CPUInfo:						.word	0
+CodeTrace_Idx:						.word	0
+UseOnStackReplacement_Address:                          .word   0
+BackgroundCompilation_Address:                          .word   0
+CompileThreshold_Address:                               .word   0
+InterpreterInvocationLimit_Address:			.word	0
+UseCompiler_Address:					.word	0
+can_post_interpreter_events:				.word	0 	
+PrintCommandLineFlags_Address:				.word	0
+oopDesc_Address:					.word	0
+ThreadLocalStorage_thread_index:			.word	0
+AbstractInterpreter_notice_safepoints:			.word	0
+Bytecodes_name_Address:					.word	0
+Universe_collectedHeap_Address:				.word	0
+always_do_update_barrier_Address:			.word	0
+VmSymbols_symbols_Address:				.word	0
+SafePointSynchronize_state_Address:			.word	0
+InterpreterRuntime_slow_signature_handler_Address:	.word	0
+XXX:
+opclabels_data:
+	.word	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+	.word	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+	.word	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+	.word	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+	.word	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+	.word	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+	.word	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+	.word	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+	.word	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+	.word	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+	.word	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+	.word	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+	.word	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+	.word	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+	.word	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+	.word	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+#endif
+
+#endif // __arm__
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -36,6 +36,7 @@
 #include "oops/oop.inline.hpp"
 #include "prims/jvmtiExport.hpp"
 #include "prims/jvmtiThreadState.hpp"
+#include "prims/methodHandles.hpp"
 #include "runtime/arguments.hpp"
 #include "runtime/deoptimization.hpp"
 #include "runtime/frame.inline.hpp"
@@ -65,6 +66,13 @@
   CALL_VM_NOCHECK_NOFIX(func)                   \
   fixup_after_potential_safepoint()
 
+
+#ifdef z_CPPDEBUG
+#define CPPINT_DEBUG( Z_code_ ) Z_code_
+#else
+#define CPPINT_DEBUG( Z_code_ )
+#endif
+
 int CppInterpreter::normal_entry(methodOop method, intptr_t UNUSED, TRAPS) {
   JavaThread *thread = (JavaThread *) THREAD;
 
@@ -699,6 +707,9 @@
     method_handle = adapter;
   }
 
+  CPPINT_DEBUG( tty->print_cr( "Process method_handle sp: 0x%x unwind_sp: 0x%x result_slots: %d.", \
+			       stack->sp(), unwind_sp, result_slots ); )
+
   // Start processing
   process_method_handle(method_handle, THREAD);
   if (HAS_PENDING_EXCEPTION)
@@ -718,6 +729,8 @@
   }
 
   // Check
+  CPPINT_DEBUG( tty->print_cr( "Exiting method_handle_entry,  sp: 0x%x unwind_sp: 0x%x result_slots: %d.", \
+			       stack->sp(), unwind_sp, result_slots ); )
   assert(stack->sp() == unwind_sp - result_slots, "should be");
 
   // No deoptimized frames on the stack
@@ -725,6 +738,7 @@
 }
 
 void CppInterpreter::process_method_handle(oop method_handle, TRAPS) {
+
   JavaThread *thread = (JavaThread *) THREAD;
   ZeroStack *stack = thread->zero_stack();
   intptr_t *vmslots = stack->sp();
@@ -739,6 +753,7 @@
     (MethodHandles::EntryKind) (((intptr_t) entry) & 0xffffffff);
 
   methodOop method = NULL;
+  CPPINT_DEBUG( tty->print_cr( "\nEntering %s 0x%x.",MethodHandles::entry_name(entry_kind), (char *)vmslots ); )
   switch (entry_kind) {
   case MethodHandles::_invokestatic_mh:
     direct_to_method = true;
@@ -811,11 +826,15 @@
   case MethodHandles::_bound_int_mh:
   case MethodHandles::_bound_long_mh:
     {
-      BasicType arg_type  = T_ILLEGAL;
-      int       arg_mask  = -1;
-      int       arg_slots = -1;
-      MethodHandles::get_ek_bound_mh_info(
-        entry_kind, arg_type, arg_mask, arg_slots);
+      //     BasicType arg_type  = T_ILLEGAL;
+      //     int       arg_mask  = -1;
+      //     int       arg_slots = -1;
+      //     MethodHandles::get_ek_bound_mh_info(
+      //      entry_kind, arg_type, arg_mask, arg_slots);
+      BasicType arg_type = MethodHandles::ek_bound_mh_arg_type(entry_kind);
+      int arg_mask = 0;
+      int arg_slots = type2size[arg_type];;
+
       int arg_slot =
         java_lang_invoke_BoundMethodHandle::vmargslot(method_handle);
 
@@ -961,10 +980,13 @@
         java_lang_invoke_AdapterMethodHandle::conversion(method_handle);
       int arg2 = MethodHandles::adapter_conversion_vminfo(conv);
 
-      int swap_bytes = 0, rotate = 0;
-      MethodHandles::get_ek_adapter_opt_swap_rot_info(
-        entry_kind, swap_bytes, rotate);
-      int swap_slots = swap_bytes >> LogBytesPerWord;
+      // int swap_bytes = 0, rotate = 0;
+      //     MethodHandles::get_ek_adapter_opt_swap_rot_info(
+      //        entry_kind, swap_bytes, rotate);
+      int swap_slots = MethodHandles::ek_adapter_opt_swap_slots(entry_kind);
+      int rotate = MethodHandles::ek_adapter_opt_swap_mode(entry_kind);
+      int swap_bytes = swap_slots * Interpreter::stackElementSize;
+      swap_slots = swap_bytes >> LogBytesPerWord;
 
       intptr_t tmp;
       switch (rotate) {
@@ -1080,12 +1102,309 @@
     }
     break;
 
-  default:
-    tty->print_cr("unhandled entry_kind %s",
+  case MethodHandles::_adapter_opt_spread_0:
+  case MethodHandles::_adapter_opt_spread_1_ref:
+  case MethodHandles::_adapter_opt_spread_2_ref:
+  case MethodHandles::_adapter_opt_spread_3_ref:
+  case MethodHandles::_adapter_opt_spread_4_ref:
+  case MethodHandles::_adapter_opt_spread_5_ref:
+  case MethodHandles::_adapter_opt_spread_ref:
+  case MethodHandles::_adapter_opt_spread_byte:
+  case MethodHandles::_adapter_opt_spread_char:
+  case MethodHandles::_adapter_opt_spread_short:
+  case MethodHandles::_adapter_opt_spread_int:
+  case MethodHandles::_adapter_opt_spread_long:
+  case MethodHandles::_adapter_opt_spread_float:
+  case MethodHandles::_adapter_opt_spread_double:
+    {
+
+      // spread an array out into a group of arguments
+
+      int arg_slot =
+        java_lang_invoke_AdapterMethodHandle::vmargslot(method_handle);
+      // Fetch the argument, which we will cast to the required array type.
+      oop arg = VMSLOTS_OBJECT(arg_slot);
+
+      BasicType elem_type      =
+        MethodHandles::ek_adapter_opt_spread_type(entry_kind);
+      int       elem_slots     = 
+        type2size[elem_type];  // 1 or 2
+      int       array_slots    = 
+        1;  // array is always a T_OBJECT
+      int       length_offset  = 
+        arrayOopDesc::length_offset_in_bytes();
+      int       elem0_offset   = 
+        arrayOopDesc::base_offset_in_bytes(elem_type);
+      int       length_constant = 
+        MethodHandles::ek_adapter_opt_spread_count(entry_kind);
+      int       array_length = 0;
+      void      *array_elem0 = NULL;       
+
+      CPPINT_DEBUG( tty->print_cr( \
+        "ENTERING _adapter_opt_spread: %s %d %d 0x%x 0x%x", \
+        type2name(elem_type), arg_slot, length_constant, (char *)arg, stack->sp() ); )
+
+      // If the spread count is -1, the length is "variable" ie controlled
+      // by the array length.
+      // See ek_adapter_opt_spread_count in methodHandles.hpp
+      // If array length is 0 or spread count is 0, we will remove the arg slot.
+
+      bool length_can_be_zero = (length_constant == 0);
+      if (length_constant < 0) {
+        // some adapters with variable length must handle the zero case
+        if (!OptimizeMethodHandles ||
+            elem_type != T_OBJECT)
+          length_can_be_zero = true;
+      }
+
+      if (arg == NULL) {
+        CPPINT_DEBUG( tty->print_cr( \
+          "arg NULL implies Array_length == 0, remove slot." ); )
+        // remove arg slot
+        remove_vmslots(arg_slot, 1, THREAD); // doesn't trap
+        vmslots = stack->sp(); // unused, but let the compiler figure that out
+        CPPINT_DEBUG( tty->print_cr( \
+          " >> Would LEAVE _adapter_opt_spread with NPE." ); )
+#ifdef _NOT_DEF_
+	// queue a NullPointerException for the caller
+        stack->set_sp(calculate_unwind_sp(stack, method_handle));
+        CALL_VM_NOCHECK_NOFIX(
+          throw_exception(
+            thread,
+            vmSymbols::java_lang_NullPointerException()));
+        // NB all oops trashed!
+        assert(HAS_PENDING_EXCEPTION, "should do");
+        return;
+#endif
+      } else {    //  (arg != NULL) 
+        klassOop objKlassOop = arg->klass();
+        klassOop klassOf = java_lang_Class::as_klassOop(
+          java_lang_invoke_AdapterMethodHandle::argument(method_handle));
+
+        if (objKlassOop != klassOf &&
+            !objKlassOop->klass_part()->is_subtype_of(klassOf)) {
+          CPPINT_DEBUG( tty->print_cr( \
+            "CLASS CAST ERROR #1 in _adapter_opt_spread." ); )
+          ResourceMark rm(THREAD);
+          const char* objName = Klass::cast(objKlassOop)->external_name();
+          const char* klassName = Klass::cast(klassOf)->external_name();
+          char* message = SharedRuntime::generate_class_cast_message(
+            objName, klassName);
+
+          stack->set_sp(calculate_unwind_sp(stack, method_handle));
+          CALL_VM_NOCHECK_NOFIX(
+            throw_exception(
+              thread,
+              vmSymbols::java_lang_ClassCastException(), message));
+          // NB all oops trashed!
+          assert(HAS_PENDING_EXCEPTION, "should do");
+          return;
+        }
+
+        // Check the array type.
+
+        klassOop array_klass_oop = NULL;
+        BasicType array_type = java_lang_Class::as_BasicType(
+          java_lang_invoke_AdapterMethodHandle::argument(method_handle),
+            &array_klass_oop);
+        arrayKlassHandle array_klass(THREAD, array_klass_oop);
+
+        assert(array_type == T_OBJECT, "");
+        assert(Klass::cast(array_klass_oop)->oop_is_array(), "");
+        if (!(array_type == T_OBJECT) || 
+            !(Klass::cast(array_klass_oop)->oop_is_array())) {
+          CPPINT_DEBUG( tty->print_cr( \
+            "CLASS CAST ERROR #2 not an array in _adapter_opt_spread." ); )
+          ResourceMark rm(THREAD);
+          const char* objName = Klass::cast(objKlassOop)->external_name();
+          const char* klassName = Klass::cast(klassOf)->external_name();
+          char* message = SharedRuntime::generate_class_cast_message(
+            objName, klassName);
+          stack->set_sp(calculate_unwind_sp(stack, method_handle));
+          CALL_VM_NOCHECK_NOFIX(
+            throw_exception(
+              thread,
+              vmSymbols::java_lang_ClassCastException(), message));
+          // NB all oops trashed!
+          assert(HAS_PENDING_EXCEPTION, "should do");
+          return;
+        }
+
+        klassOop element_klass_oop = NULL;
+        BasicType element_type = 
+          java_lang_Class::as_BasicType(array_klass->component_mirror(),
+            &element_klass_oop);
+        KlassHandle element_klass(THREAD, element_klass_oop);
+	if ((elem_type != T_OBJECT) && (elem_type != element_type)) {
+          CPPINT_DEBUG( tty->print_cr( \
+            "CLASS CAST ERROR #3 invalid type %s != %s in _adapter_opt_spread.", \
+            type2name(elem_type), type2name(element_type)  ); )
+          ResourceMark rm(THREAD);
+          const char* objName = Klass::cast(objKlassOop)->external_name();
+          const char* klassName = Klass::cast(klassOf)->external_name();
+          char* message = SharedRuntime::generate_class_cast_message(
+            objName, klassName);
+          stack->set_sp(calculate_unwind_sp(stack, method_handle));
+          CALL_VM_NOCHECK_NOFIX(
+            throw_exception(
+              thread,
+              vmSymbols::java_lang_ClassCastException(), message));
+          // NB all oops trashed!
+          assert(HAS_PENDING_EXCEPTION, "should do");
+          return;
+        }
+
+        array_length = arrayOop(arg)->length();
+
+        // Check the required length.
+        if (length_constant > 0) { // must match ?
+          if ( array_length != length_constant ) {
+            CPPINT_DEBUG( tty->print_cr( \
+              "ARRY INDEX ERROR #4 invalid array length in _adapter_opt_spread." ); )
+            //fixme  ArrayIndexOutOfBoundsException ?
+            ResourceMark rm(THREAD);
+            const char* objName = Klass::cast(objKlassOop)->external_name();
+            const char* klassName = Klass::cast(klassOf)->external_name();
+            char* message = SharedRuntime::generate_class_cast_message(
+              objName, klassName);
+
+            stack->set_sp(calculate_unwind_sp(stack, method_handle));
+            CALL_VM_NOCHECK_NOFIX(
+              throw_exception(
+                thread,
+                vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message));
+            // NB all oops trashed!
+            assert(HAS_PENDING_EXCEPTION, "should do");
+            return;
+          }
+        // use array_length ?
+        } else { // length_constant == [ -1 or 0 ] 
+          if ( (array_length > 0) || length_can_be_zero ) {
+            // use array_length.
+          } else { // array_length 0 and not length_can_be_zero
+            CPPINT_DEBUG( tty->print_cr( \
+              "ARRY INDEX ERROR #5 arry length 0 in _adapter_opt_spread." ); )
+            //fixme   ArrayIndexOutOfBoundsException ?
+            ResourceMark rm(THREAD);
+            const char* objName = Klass::cast(objKlassOop)->external_name();
+            const char* klassName = Klass::cast(klassOf)->external_name();
+            char* message = SharedRuntime::generate_class_cast_message(
+              objName, klassName);
+
+            stack->set_sp(calculate_unwind_sp(stack, method_handle));
+            CALL_VM_NOCHECK_NOFIX(
+              throw_exception(
+                thread,
+                vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message));
+            // NB all oops trashed!
+            assert(HAS_PENDING_EXCEPTION, "should do");
+            return;
+          }
+        }
+
+        // Array length checked out.  Now insert any required arg slots.
+        // array_length - 1 more slots if array_length > 0
+        // otherwise  if array_length == 0 remove arg_slot.
+
+        if ( array_length > 0 ) {
+          int slots = (array_length * elem_slots) - 1;
+          CPPINT_DEBUG( tty->print_cr( \
+            "array_length %d %d slots needed in _adapter_opt_spread.",\
+              array_length, slots); )
+          debug_only(if (elem_slots == 2) \
+            assert ((slots % 2 == 1)," bad slots calc"));
+          if ( slots > 0 ) {
+            intptr_t *unwind_sp = 
+            calculate_unwind_sp(stack, method_handle);
+            insert_vmslots(arg_slot, slots, THREAD);
+            if (HAS_PENDING_EXCEPTION) {
+              // all oops trashed
+              stack->set_sp(unwind_sp);
+              return;
+            }
+          }
+          vmslots = stack->sp();
+          arg_slot += slots;
+
+          array_elem0 = arrayOop(arg)->base(elem_type);
+
+          // Copy from the array to the new arg slots.
+          // [from native : Beware:  Arguments that are shallow 
+          // on the stack are deep in the array,
+          // and vice versa.  So a downward-growing stack (the usual) 
+          // has to be copied elementwise in reverse order 
+          // from the source array.]
+
+          void * array_elem = array_elem0;
+          int top_slot = arg_slot;
+
+          debug_only(if (elem_slots == 2) \
+            assert ((((ulong)(char *)&vmslots[top_slot]) % \
+              (u_int)type2aelembytes(elem_type) == 0), \
+                " bad arg alignment"));
+
+          CPPINT_DEBUG( tty->print_cr( \
+            "BEGIN ARRY LOOP %d %d 0x%x 0x%x _adapter_opt_spread.",\
+              array_length, top_slot, &vmslots[top_slot], array_elem  ); )
+
+          for (int index = 0; index < array_length; index++) {
+            switch (elem_type) {
+            case T_BYTE:
+              SET_VMSLOTS_INT(*(jint*)array_elem, top_slot);
+              break;
+            case T_CHAR:
+              SET_VMSLOTS_INT(*(jint*)array_elem, top_slot);
+              break;
+            case T_SHORT:
+              SET_VMSLOTS_INT(*(jint*)array_elem, top_slot);
+              break;
+            case T_INT:
+              SET_VMSLOTS_INT(*(jint*)array_elem, top_slot);
+              break;
+            case T_FLOAT:
+              SET_VMSLOTS_FLOAT(*(jfloat*)array_elem,top_slot);
+              break;
+            case T_LONG:
+              SET_VMSLOTS_LONG(*(jlong*)array_elem, top_slot);
+              break;
+            case T_DOUBLE:
+              SET_VMSLOTS_DOUBLE(*(jdouble*)array_elem, top_slot);
+              break;
+            case T_OBJECT:
+              SET_VMSLOTS_OBJECT(*(oopDesc**)array_elem, top_slot);
+              break;
+            default:
+              tty->print_cr("unhandled type %s", type2name(elem_type));
+              ShouldNotReachHere();
+            }
+            array_elem = (void*)((char *)array_elem +
+              type2aelembytes(element_type));
+            top_slot -= elem_slots;
+          }
+          arg_slot++;
+        }
+      }
+      if ((array_length == 0) && (arg != NULL)) {
+        CPPINT_DEBUG( tty->print_cr( \
+          "Array_length == 0, will remove slot." ); )
+        // remove arg slot
+        remove_vmslots(arg_slot, 1, THREAD); // doesn't trap
+         // unused, but let the compiler figure that out
+        vmslots = stack->sp();
+        //
+      }
+      CPPINT_DEBUG( tty->print_cr( \
+        "LEAVING _adapter_opt_spread: %s 0x%x 0x%x \n", \
+          type2name(elem_type), (char *)arg, (char *)stack->sp() ); )
+    }
+        break;
+    default:
+      tty->print_cr("unhandled entry_kind %s",
                   MethodHandles::entry_name(entry_kind));
-    ShouldNotReachHere();
+      ShouldNotReachHere();
   }
 
+
   // Continue along the chain
   if (direct_to_method) {
     if (method == NULL) {
@@ -1138,6 +1457,7 @@
     tty->print_cr("dst_rtype = %s", type2name(dst_rtype));
     ShouldNotReachHere();
   }
+  CPPINT_DEBUG( tty->print_cr( "LEAVING %s\n",MethodHandles::entry_name(entry_kind) ); )
 }
 
 // The new slots will be inserted before slot insert_before.
@@ -1353,10 +1673,21 @@
   return generate_entry((address) CppInterpreter::normal_entry);
 }
 
+#ifdef HOTSPOT_ASM
+extern "C" address asm_generate_method_entry(
+  AbstractInterpreter::MethodKind kind);
+#endif // HOTSPOT_ASM
+
 address AbstractInterpreterGenerator::generate_method_entry(
     AbstractInterpreter::MethodKind kind) {
   address entry_point = NULL;
 
+#ifdef HOTSPOT_ASM
+    address asm_entry = asm_generate_method_entry(kind);
+    if (asm_entry)
+      return ((InterpreterGenerator*) this)->generate_entry(asm_entry);
+#endif // HOTSPOT_ASM
+
   switch (kind) {
   case Interpreter::zerolocals:
   case Interpreter::zerolocals_synchronized:
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/zero/vm/deoptimizerFrame_zero.hpp	Fri Aug 09 12:21:36 2013 +0100
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2003-2007 Sun Microsystems, Inc.  All Rights Reserved.
+ * Copyright 2008 Red Hat, Inc.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+// |  ...               |
+// +--------------------+  ------------------
+// | frame_type         |       low addresses
+// | next_frame         |      high addresses
+// +--------------------+  ------------------
+// |  ...               |
+
+class DeoptimizerFrame : public ZeroFrame {
+ private:
+  DeoptimizerFrame() : ZeroFrame() {
+    ShouldNotCallThis();
+  }
+
+ protected:
+  enum Layout {
+    header_words = jf_header_words
+  };
+
+ public:
+  static DeoptimizerFrame *build(ZeroStack* stack);
+
+ public:
+  void identify_word(int   frame_index,
+                     int   offset,
+                     char* fieldbuf,
+                     char* valuebuf,
+                     int   buflen) const;
+};
--- a/src/cpu/zero/vm/frame_zero.inline.hpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/cpu/zero/vm/frame_zero.inline.hpp	Fri Aug 09 12:21:36 2013 +0100
@@ -36,6 +36,8 @@
   _deopt_state = unknown;
 }
 
+inline address  frame::sender_pc()           const { ShouldNotCallThis();  }
+
 inline frame::frame(ZeroFrame* zf, intptr_t* sp) {
   _zeroframe = zf;
   _sp = sp;
--- a/src/cpu/zero/vm/methodHandles_zero.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/cpu/zero/vm/methodHandles_zero.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -28,6 +28,8 @@
 #include "memory/allocation.inline.hpp"
 #include "prims/methodHandles.hpp"
 
+#define __ _masm->
+
 int MethodHandles::adapter_conversion_ops_supported_mask() {
   return ((1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_ONLY)
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_RETYPE_RAW)
@@ -38,12 +40,73 @@
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_ROT_ARGS)
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_DUP_ARGS)
          |(1<<java_lang_invoke_AdapterMethodHandle::OP_DROP_ARGS)
-         //|(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS) //BUG!
+         |(1<<java_lang_invoke_AdapterMethodHandle::OP_SPREAD_ARGS)
          );
-  // FIXME: MethodHandlesTest gets a crash if we enable OP_SPREAD_ARGS.
 }
 
 void MethodHandles::generate_method_handle_stub(MacroAssembler*          masm,
                                                 MethodHandles::EntryKind ek) {
   init_entry(ek, (MethodHandleEntry *) ek);
 }
+void MethodHandles::RicochetFrame::generate_ricochet_blob(MacroAssembler* _masm,
+                                                          // output params:
+                                                          int* bounce_offset,
+                                                          int* exception_offset,
+                                                          int* frame_size_in_words) {
+  (*frame_size_in_words) = 0;
+  address start = __ pc();
+  (*bounce_offset) = __ pc() - start;
+  (*exception_offset) = __ pc() - start;
+}
+
+frame MethodHandles::ricochet_frame_sender(const frame& fr, RegisterMap *map) {
+  //RicochetFrame* f = RicochetFrame::from_frame(fr);
+  // Cf. is_interpreted_frame path of frame::sender
+  //  intptr_t* younger_sp = fr.sp();
+  //  intptr_t* sp         = fr.sender_sp();
+  //  return frame(sp, younger_sp, this_frame_adjusted_stack);
+  ShouldNotCallThis();  
+}
+
+void MethodHandles::ricochet_frame_oops_do(const frame& fr, OopClosure* blk, const RegisterMap* reg_map) {
+  //  ResourceMark rm;
+  //  RicochetFrame* f = RicochetFrame::from_frame(fr);
+
+  // pick up the argument type descriptor:
+  //  Thread* thread = Thread::current();
+  // process fixed part
+  //  blk->do_oop((oop*)f->saved_target_addr());
+  //  blk->do_oop((oop*)f->saved_args_layout_addr());
+
+  // process variable arguments:
+  //  if (cookie.is_null())  return;  // no arguments to describe
+
+  // the cookie is actually the invokeExact method for my target
+  // his argument signature is what I'm interested in
+  //  assert(cookie->is_method(), "");
+  //  methodHandle invoker(thread, methodOop(cookie()));
+  //  assert(invoker->name() == vmSymbols::invokeExact_name(), "must be this kind of method");
+  //  assert(!invoker->is_static(), "must have MH argument");
+  //  int slot_count = invoker->size_of_parameters();
+  //  assert(slot_count >= 1, "must include 'this'");
+  //  intptr_t* base = f->saved_args_base();
+  //  intptr_t* retval = NULL;
+  //  if (f->has_return_value_slot())
+  //    retval = f->return_value_slot_addr();
+  //  int slot_num = slot_count - 1;
+  //  intptr_t* loc = &base[slot_num];
+  //blk->do_oop((oop*) loc);   // original target, which is irrelevant
+  //  int arg_num = 0;
+  //  for (SignatureStream ss(invoker->signature()); !ss.is_done(); ss.next()) {
+  //    if (ss.at_return_type())  continue;
+  //    BasicType ptype = ss.type();
+  //    if (ptype == T_ARRAY)  ptype = T_OBJECT; // fold all refs to T_OBJECT
+  //    assert(ptype >= T_BOOLEAN && ptype <= T_OBJECT, "not array or void");
+  //    slot_num -= type2size[ptype];
+  //    loc = &base[slot_num];
+  //    bool is_oop = (ptype == T_OBJECT && loc != retval);
+  //    if (is_oop)  blk->do_oop((oop*)loc);
+  //    arg_num += 1;
+  //  }
+  //  assert(slot_num == 0, "must have processed all the arguments");
+}
--- a/src/cpu/zero/vm/methodHandles_zero.hpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/cpu/zero/vm/methodHandles_zero.hpp	Fri Aug 09 12:21:36 2013 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2011 Red Hat, Inc.
+ * Copyright 2011, 2012 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,3 +29,26 @@
   adapter_code_size = 0
 };
 
+class RicochetFrame : public ResourceObj {
+  friend class MethodHandles;
+ private:
+  /*
+    RF field            x86                 SPARC
+    sender_pc           *(rsp+0)            I7-0x8
+    sender_link         rbp                 I6+BIAS
+    exact_sender_sp     rsi/r13             I5_savedSP
+    conversion          *(rcx+&amh_conv)    L5_conv
+    saved_args_base     rax                 L4_sab (cf. Gargs = G4)
+    saved_args_layout   #NULL               L3_sal
+    saved_target        *(rcx+&mh_vmtgt)    L2_stgt
+    continuation        #STUB_CON           L1_cont
+   */
+ public:
+
+static void generate_ricochet_blob(MacroAssembler* _masm,
+                                     // output params:
+                                     int* bounce_offset,
+                                     int* exception_offset,
+                                     int* frame_size_in_words);
+
+};
--- a/src/cpu/zero/vm/sharedRuntime_zero.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/cpu/zero/vm/sharedRuntime_zero.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010, 2011, 2012 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -47,6 +47,12 @@
 #endif
 
 
+
+static address zero_null_code_stub() {
+  address start = ShouldNotCallThisStub();
+  return start;
+}
+
 int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
                                            VMRegPair *regs,
                                            int total_args_passed,
@@ -63,9 +69,9 @@
                         AdapterFingerPrint *fingerprint) {
   return AdapterHandlerLibrary::new_entry(
     fingerprint,
-    ShouldNotCallThisStub(),
-    ShouldNotCallThisStub(),
-    ShouldNotCallThisStub());
+    ZeroNullStubEntry( CAST_FROM_FN_PTR(address,zero_null_code_stub) ),
+    ZeroNullStubEntry( CAST_FROM_FN_PTR(address,zero_null_code_stub) ),
+    ZeroNullStubEntry( CAST_FROM_FN_PTR(address,zero_null_code_stub) ));
 }
 
 nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
@@ -96,22 +102,23 @@
   ShouldNotCallThis();
 }
 
+JRT_LEAF(void, zero_stub())
+  ShouldNotCallThis();
+JRT_END
+
+
 static RuntimeStub* generate_empty_runtime_stub(const char* name) {
-  CodeBuffer buffer(name, 0, 0);
-  return RuntimeStub::new_runtime_stub(name, &buffer, 0, 0, NULL, false);
+  return CAST_FROM_FN_PTR(RuntimeStub*,zero_stub);
 }
 
 static SafepointBlob* generate_empty_safepoint_blob() {
-  CodeBuffer buffer("handler_blob", 0, 0);
-  return SafepointBlob::create(&buffer, NULL, 0);
+  return CAST_FROM_FN_PTR(SafepointBlob*,zero_stub);
 }
 
 static DeoptimizationBlob* generate_empty_deopt_blob() {
-  CodeBuffer buffer("handler_blob", 0, 0);
-  return DeoptimizationBlob::create(&buffer, NULL, 0, 0, 0, 0);
+  return CAST_FROM_FN_PTR(DeoptimizationBlob*,zero_stub);
 }
 
-
 void SharedRuntime::generate_deopt_blob() {
   _deopt_blob = generate_empty_deopt_blob();
 }
@@ -124,6 +131,7 @@
   return generate_empty_runtime_stub("resolve_blob");
 }
 
+
 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                          VMRegPair *regs,
                                          int total_args_passed) {
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/cpu/zero/vm/thumb2.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -0,0 +1,7952 @@
+/*
+ * Copyright 2009, 2010 Edward Nevill
+ * Copyright 2012, 2013 Red Hat
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifdef __arm__
+
+#undef T2JIT
+#if !defined(DISABLE_THUMB2) && defined(HOTSPOT_ASM) && !defined(SHARK)
+#define T2JIT
+#endif
+
+#ifdef T2JIT
+
+// setting DISABLE_THUMB2_JVMTI at build time disables notification
+// of JVMTI dynamic_generate and compiled_method_load events
+#undef THUMB2_JVMTI
+#if !defined(DISABLE_THUMB2_JVMTI)
+#define THUMB2_JVMTI
+#endif
+
+#define T2_PRINT_COMPILATION
+#define T2_PRINT_STATISTICS
+#define T2_PRINT_DISASS
+#define T2_PRINT_REGUSAGE
+
+#define T2EE_PRINT_REGUSAGE
+#define CODE_ALIGN 64
+
+#define SLOW_ENTRY_OFFSET 24
+#define FAST_ENTRY_OFFSET 40
+
+#ifdef T2_PRINT_STATISTICS
+static char *t2_print_statistics;
+#endif
+
+#ifdef T2_PRINT_REGUSAGE
+static char *t2_print_regusage;
+#endif
+
+static char *t2_ospace;
+#define OSPACE t2_ospace
+
+#ifdef PRODUCT
+#define THUMB2_CODEBUF_SIZE (8 * 1024 * 1024)
+#else
+#define THUMB2_CODEBUF_SIZE (4 * 1024 * 1024)
+#endif
+#define THUMB2_MAX_BYTECODE_SIZE 10000
+#define THUMB2_MAX_T2CODE_SIZE 65000
+#define THUMB2_MAXLOCALS 1000
+
+#include <sys/mman.h>
+#include <ucontext.h>
+#include "precompiled.hpp"
+#include "interpreter/bytecodes.hpp"
+#include "compiler/compilerOracle.hpp"
+
+#define opc_nop			0x00
+#define opc_aconst_null		0x01
+#define opc_iconst_m1		0x02
+#define opc_iconst_0		0x03
+#define opc_iconst_1		0x04
+#define opc_iconst_2		0x05
+#define opc_iconst_3		0x06
+#define opc_iconst_4		0x07
+#define opc_iconst_5		0x08
+#define opc_lconst_0		0x09
+#define opc_lconst_1		0x0a
+#define opc_fconst_0		0x0b
+#define opc_fconst_1		0x0c
+#define opc_fconst_2		0x0d
+#define opc_dconst_0		0x0e
+#define opc_dconst_1		0x0f
+#define opc_bipush		0x10
+#define opc_sipush		0x11
+#define opc_ldc			0x12
+#define opc_ldc_w		0x13
+#define opc_ldc2_w		0x14
+#define opc_iload		0x15
+#define opc_lload		0x16
+#define opc_fload		0x17
+#define opc_dload		0x18
+#define opc_aload		0x19
+#define opc_iload_0		0x1a
+#define opc_iload_1		0x1b
+#define opc_iload_2		0x1c
+#define opc_iload_3		0x1d
+#define opc_lload_0		0x1e
+#define opc_lload_1		0x1f
+#define opc_lload_2		0x20
+#define opc_lload_3		0x21
+#define opc_fload_0		0x22
+#define opc_fload_1		0x23
+#define opc_fload_2		0x24
+#define opc_fload_3		0x25
+#define opc_dload_0		0x26
+#define opc_dload_1		0x27
+#define opc_dload_2		0x28
+#define opc_dload_3		0x29
+#define opc_aload_0		0x2a
+#define opc_aload_1		0x2b
+#define opc_aload_2		0x2c
+#define opc_aload_3		0x2d
+#define opc_iaload		0x2e
+#define opc_laload		0x2f
+#define opc_faload		0x30
+#define opc_daload		0x31
+#define opc_aaload		0x32
+#define opc_baload		0x33
+#define opc_caload		0x34
+#define opc_saload		0x35
+#define opc_istore		0x36
+#define opc_lstore		0x37
+#define opc_fstore		0x38
+#define opc_dstore		0x39
+#define opc_astore		0x3a
+#define opc_istore_0		0x3b
+#define opc_istore_1		0x3c
+#define opc_istore_2		0x3d
+#define opc_istore_3		0x3e
+#define opc_lstore_0		0x3f
+#define opc_lstore_1		0x40
+#define opc_lstore_2		0x41
+#define opc_lstore_3		0x42
+#define opc_fstore_0		0x43
+#define opc_fstore_1		0x44
+#define opc_fstore_2		0x45
+#define opc_fstore_3		0x46
+#define opc_dstore_0		0x47
+#define opc_dstore_1		0x48
+#define opc_dstore_2		0x49
+#define opc_dstore_3		0x4a
+#define opc_astore_0		0x4b
+#define opc_astore_1		0x4c
+#define opc_astore_2		0x4d
+#define opc_astore_3		0x4e
+#define opc_iastore		0x4f
+#define opc_lastore		0x50
+#define opc_fastore		0x51
+#define opc_dastore		0x52
+#define opc_aastore		0x53
+#define opc_bastore		0x54
+#define opc_castore		0x55
+#define opc_sastore		0x56
+#define opc_pop			0x57
+#define opc_pop2		0x58
+#define opc_dup			0x59
+#define opc_dup_x1		0x5a
+#define opc_dup_x2		0x5b
+#define opc_dup2		0x5c
+#define opc_dup2_x1		0x5d
+#define opc_dup2_x2		0x5e
+#define opc_swap		0x5f
+#define opc_iadd		0x60
+#define opc_ladd		0x61
+#define opc_fadd		0x62
+#define opc_dadd		0x63
+#define opc_isub		0x64
+#define opc_lsub		0x65
+#define opc_fsub		0x66
+#define opc_dsub		0x67
+#define opc_imul		0x68
+#define opc_lmul		0x69
+#define opc_fmul		0x6a
+#define opc_dmul		0x6b
+#define opc_idiv		0x6c
+#define opc_ldiv		0x6d
+#define opc_fdiv		0x6e
+#define opc_ddiv		0x6f
+#define opc_irem		0x70
+#define opc_lrem		0x71
+#define opc_frem		0x72
+#define opc_drem		0x73
+#define opc_ineg		0x74
+#define opc_lneg		0x75
+#define opc_fneg		0x76
+#define opc_dneg		0x77
+#define opc_ishl		0x78
+#define opc_lshl		0x79
+#define opc_ishr		0x7a
+#define opc_lshr		0x7b
+#define opc_iushr		0x7c
+#define opc_lushr		0x7d
+#define opc_iand		0x7e
+#define opc_land		0x7f
+#define opc_ior			0x80
+#define opc_lor			0x81
+#define opc_ixor		0x82
+#define opc_lxor		0x83
+#define opc_iinc		0x84
+#define opc_i2l			0x85
+#define opc_i2f			0x86
+#define opc_i2d			0x87
+#define opc_l2i			0x88
+#define opc_l2f			0x89
+#define opc_l2d			0x8a
+#define opc_f2i			0x8b
+#define opc_f2l			0x8c
+#define opc_f2d			0x8d
+#define opc_d2i			0x8e
+#define opc_d2l			0x8f
+#define opc_d2f			0x90
+#define opc_i2b			0x91
+#define opc_i2c			0x92
+#define opc_i2s			0x93
+#define opc_lcmp		0x94
+#define opc_fcmpl		0x95
+#define opc_fcmpg		0x96
+#define opc_dcmpl		0x97
+#define opc_dcmpg		0x98
+#define opc_ifeq		0x99
+#define opc_ifne		0x9a
+#define opc_iflt		0x9b
+#define opc_ifge		0x9c
+#define opc_ifgt		0x9d
+#define opc_ifle		0x9e
+#define opc_if_icmpeq		0x9f
+#define opc_if_icmpne		0xa0
+#define opc_if_icmplt		0xa1
+#define opc_if_icmpge		0xa2
+#define opc_if_icmpgt		0xa3
+#define opc_if_icmple		0xa4
+#define opc_if_acmpeq		0xa5
+#define opc_if_acmpne		0xa6
+#define opc_goto		0xa7
+#define opc_jsr			0xa8
+#define opc_ret			0xa9
+#define opc_tableswitch		0xaa
+#define opc_lookupswitch	0xab
+#define opc_ireturn		0xac
+#define opc_lreturn		0xad
+#define opc_freturn		0xae
+#define opc_dreturn		0xaf
+#define opc_areturn		0xb0
+#define opc_return		0xb1
+#define opc_getstatic		0xb2
+#define opc_putstatic		0xb3
+#define opc_getfield		0xb4
+#define opc_putfield		0xb5
+#define opc_invokevirtual	0xb6
+#define opc_invokespecial	0xb7
+#define opc_invokestatic	0xb8
+#define opc_invokeinterface	0xb9
+#define opc_invokedynamic	0xba
+#define opc_new			0xbb
+#define opc_newarray		0xbc
+#define opc_anewarray		0xbd
+#define opc_arraylength		0xbe
+#define opc_athrow		0xbf
+#define opc_checkcast		0xc0
+#define opc_instanceof		0xc1
+#define opc_monitorenter	0xc2
+#define opc_monitorexit		0xc3
+#define opc_wide		0xc4
+#define opc_multianewarray	0xc5
+#define opc_ifnull		0xc6
+#define opc_ifnonnull		0xc7
+#define opc_goto_w		0xc8
+#define opc_jsr_w		0xc9
+#define opc_breakpoint		0xca
+
+#define OPC_LAST_JAVA_OP	0xca
+
+#define opc_fast_aldc		Bytecodes::_fast_aldc
+#define opc_fast_aldc_w		Bytecodes::_fast_aldc_w
+
+// VM-internal "fast" bytecodes (all above OPC_LAST_JAVA_OP) produced by
+// bytecode rewriting: type-specialized field accesses, resolved invokes and
+// fused instruction pairs.  The numbering has gaps where other internal
+// bytecodes (e.g. 0xcb, 0xce-0xcf) are defined elsewhere.
+#define opc_bgetfield			0xcc
+#define opc_cgetfield			0xcd
+#define opc_igetfield			0xd0
+#define opc_lgetfield			0xd1
+#define opc_sgetfield			0xd2
+#define opc_aputfield			0xd3
+#define opc_bputfield			0xd4
+#define opc_cputfield			0xd5
+#define opc_iputfield			0xd8
+#define opc_lputfield			0xd9
+#define opc_iaccess_0			0xdb
+#define opc_iaccess_1			0xdc
+#define opc_iaccess_2			0xdd
+#define opc_iaccess_3			0xde
+#define opc_invokeresolved		0xdf
+#define opc_invokespecialresolved	0xe0
+#define opc_invokestaticresolved	0xe1
+#define opc_invokevfinal		0xe2
+#define opc_iload_iload			0xe3
+
+#define opc_return_register_finalizer   0xe7
+#define opc_dmac                        0xe8
+#define opc_iload_0_iconst_N            0xe9
+#define opc_iload_1_iconst_N            0xea
+#define opc_iload_2_iconst_N            0xeb
+#define opc_iload_3_iconst_N            0xec
+#define opc_iload_iconst_N              0xed
+#define opc_iadd_istore_N               0xee
+#define opc_isub_istore_N               0xef
+#define opc_iand_istore_N               0xf0
+#define opc_ior_istore_N                0xf1
+#define opc_ixor_istore_N               0xf2
+#define opc_iadd_u4store                0xf3
+#define opc_isub_u4store                0xf4
+#define opc_iand_u4store                0xf5
+#define opc_ior_u4store                 0xf6
+#define opc_ixor_u4store                0xf7
+#define opc_iload_0_iload               0xf8
+#define opc_iload_1_iload               0xf9
+#define opc_iload_2_iload               0xfa
+#define opc_iload_3_iload               0xfb
+#define opc_iload_0_iload_N             0xfc
+#define opc_iload_1_iload_N             0xfd
+#define opc_iload_2_iload_N             0xfe
+#define opc_iload_3_iload_N             0xff
+
+
+// Indices into the handlers[] table below.  Each slot holds the address of
+// an out-of-line stub that compiled code calls for the slow or complex
+// cases: division, conversions, allocation, field access, invokes,
+// exceptions, monitors, safepoints, ...
+#define H_IREM				0
+#define H_IDIV				1
+#define H_LDIV				2
+#define H_LREM				3
+#define H_FREM				4
+#define H_DREM				5
+#define	H_LDC				6
+// n.b. index 7 is unused — TODO(review) confirm this gap is intentional
+#define H_NEW				8
+#define H_I2F				9
+#define H_I2D				10
+#define H_L2F				11
+#define H_L2D				12
+#define H_F2I				13
+#define H_F2L				14
+#define H_F2D				15
+#define H_D2I				16
+#define H_D2L				17
+#define H_D2F				18
+#define H_NEWARRAY			19
+#define H_ANEWARRAY			20
+#define H_MULTIANEWARRAY		21
+#define H_INSTANCEOF			22
+#define H_CHECKCAST			23
+#define H_AASTORE			24
+#define H_APUTFIELD			25
+#define H_SYNCHRONIZED_ENTER		26
+#define H_SYNCHRONIZED_EXIT		27
+
+#define H_EXIT_TO_INTERPRETER		28
+
+// These bytecodes are not compiled; they simply bail out to the interpreter.
+#define H_RET				H_EXIT_TO_INTERPRETER
+#define H_DEADCODE			H_EXIT_TO_INTERPRETER
+#define H_ATHROW			H_EXIT_TO_INTERPRETER
+
+#define H_HANDLE_EXCEPTION		29
+#define H_ARRAYBOUND			30
+
+#define H_LDC_W				31
+
+#define H_DEBUG_METHODENTRY		32
+#define H_DEBUG_METHODEXIT		33
+#define H_DEBUG_METHODCALL		34
+
+#define H_INVOKEINTERFACE		35
+#define H_INVOKEVIRTUAL			36
+#define H_INVOKESTATIC			37
+#define H_INVOKESPECIAL			38
+
+#define H_GETFIELD_WORD			39
+#define H_GETFIELD_SH			40
+#define H_GETFIELD_H			41
+#define H_GETFIELD_SB			42
+#define H_GETFIELD_DW			43
+
+#define H_PUTFIELD_WORD			44
+#define H_PUTFIELD_H			45
+#define H_PUTFIELD_B			46
+#define H_PUTFIELD_A			47
+#define H_PUTFIELD_DW			48
+
+#define H_GETSTATIC_WORD		49
+#define H_GETSTATIC_SH			50
+#define H_GETSTATIC_H			51
+#define H_GETSTATIC_SB			52
+#define H_GETSTATIC_DW			53
+
+#define H_PUTSTATIC_WORD		54
+#define H_PUTSTATIC_H			55
+#define H_PUTSTATIC_B			56
+#define H_PUTSTATIC_A			57
+#define H_PUTSTATIC_DW			58
+
+#define H_STACK_OVERFLOW		59
+
+#define H_HANDLE_EXCEPTION_NO_REGS	60
+
+#define H_INVOKESTATIC_RESOLVED		61
+#define H_INVOKESPECIAL_RESOLVED	62
+#define H_INVOKEVIRTUAL_RESOLVED	63
+#define H_INVOKEVFINAL			64
+
+#define H_MONITORENTER			65
+#define H_MONITOREXIT			66
+
+#define H_SAFEPOINT              	67
+
+#define H_LAST                          68  // Not used
+
+// Stub entry points, indexed by the H_* values above.
+unsigned handlers[H_LAST];
+
+#define LEAF_STACK_SIZE			200
+#define STACK_SPARE			40
+
+// Values passed to longjmp() to abort a compilation attempt.
+#define COMPILER_RESULT_FAILED	1	// Failed to compile this method
+#define COMPILER_RESULT_FATAL	2	// Fatal - don't try to compile anything ever again
+
+#include <setjmp.h>
+
+// Non-local exit target used to abandon a compilation part-way through.
+static jmp_buf compiler_error_env;
+
+#define J_BogusImplementation() longjmp(compiler_error_env, COMPILER_RESULT_FAILED)
+
+#ifdef PRODUCT
+
+#define JASSERT(cond, msg)	0
+#define J_Unimplemented() longjmp(compiler_error_env, COMPILER_RESULT_FATAL)
+
+#else
+
+#define JASSERT(cond, msg)	do { if (!(cond)) fatal(msg); } while (0)
+#define J_Unimplemented()       { report_unimplemented(__FILE__, __LINE__); BREAKPOINT; }
+
+#endif // PRODUCT
+
+// Accessors for values in native (host) byte order ...
+#define GET_NATIVE_U2(p)	(*(unsigned short *)(p))
+#define GET_NATIVE_U4(p)	(*(unsigned *)(p))
+
+// ... and for the big-endian values found in the Java bytecode stream.
+#define GET_JAVA_S1(p)		(((signed char *)(p))[0])
+#define GET_JAVA_S2(p)  	((((signed char *)(p))[0] << 8) + (p)[1])
+#define GET_JAVA_U2(p)		(((p)[0] << 8) + (p)[1])
+#define GET_JAVA_U4(p)		(((p)[0] << 24) + ((p)[1] << 16) + ((p)[2] << 8) + (p)[3])
+
+#define BYTESEX_REVERSE(v) (((v)<<24) | (((v)<<8) & 0xff0000) | (((v)>>8) & 0xff00) | ((v)>>24))
+#define BYTESEX_REVERSE_U2(v) (((v)<<8) | ((v)>>8))
+
+// n.b. this value is chosen because it is an illegal thumb2 instruction
+#define THUMB2_POLLING_PAGE_MAGIC 0xdead
+
+// Code buffer for generated code.  NOTE(review): sp/hp look like the two
+// ends of a bump allocator within a block of `size` bytes — confirm their
+// exact roles against the allocator code.
+typedef struct Thumb2_CodeBuf {
+  unsigned size;
+  char *sp;
+  char *hp;
+} Thumb2_CodeBuf;
+
+Thumb2_CodeBuf *thumb2_codebuf;
+
+// Per-compilation scratch arrays.  NOTE(review): these globals appear to be
+// protected only by the compiler lock mentioned elsewhere — confirm.
+unsigned bc_stackinfo[THUMB2_MAX_BYTECODE_SIZE];
+unsigned locals_info[1000];
+unsigned stack[1000];
+unsigned r_local[1000];
+
+#ifdef THUMB2_JVMTI
+// jvmti needs to map start address of generated code for a bytecode
+// to corresponding bytecode index so agents can correlate code address
+// ranges with bci and thence line number
+static jvmtiAddrLocationMap *address_bci_map = NULL;
+static jint address_bci_map_length = 0;
+
+// bounds of the shared stub code emitted by Thumb2_Initialize, cached so the
+// corresponding dynamic_code_generated event can be posted lazily (below)
+static void *stub_gen_code_start = 0;
+static void *stub_gen_code_end = 0;
+// function used to lazily initialize the address to bci translation map
+// the first time a compiled method is generated.
+// Also posts the deferred dynamic_code_generated event for the stub block
+// cached in stub_gen_code_start/end.
+static void address_bci_map_init(JavaThread *thread)
+{
+  // the dynamic_code_generated event posted to notify generation of
+  // the stub code has to be posted lazily because generation happens
+  // in Thumb2_Initialize under bci_init and the latter is called as a
+  // side-effect of loading libjvm.o. we don't have a Java thread at
+  // that point nor, indeed, any agents to catch the notify. so the
+  // info cached by Thumb2_Initialize needs to be posted when the
+  // first compiled method load event is notified, at which point we
+  // will indeed have a current thread.
+
+  {
+    // a thread transition from in Java to in VM is required before
+    // calling into Jvmti
+
+    ThreadInVMfromJava transition(thread);
+
+    JvmtiExport::post_dynamic_code_generated("thumb2_dynamic_stubs_block",
+					   stub_gen_code_start,
+					   stub_gen_code_end);
+
+    // n.b. exiting this block reverts the thread state to in Java
+  }
+  
+
+  // the map is lazily allocated so we don't use the space unless we
+  // are actually using the JIT
+
+  // at worst we need a start address for every bytecode so
+  // the map size is limited by the compiler's bytecode limit
+  address_bci_map = new jvmtiAddrLocationMap[THUMB2_MAX_BYTECODE_SIZE];
+}
+
+// clear the address to bci translation map
+// (allocates it on first use via address_bci_map_init)
+static void address_bci_map_reset(JavaThread *thread)
+{
+  // this only gets called after obtaining the compiler lock so there
+  // is no need to worry about races
+  
+  if (address_bci_map == NULL) {
+    address_bci_map_init(thread);
+  }
+
+  // this effectively clears the previous map
+
+  address_bci_map_length = 0;
+}
+
+// add an entry to the address to bci translation map
+// this will never exceed the available space
+// NOTE(review): there is no bounds check here — the claim relies on the
+// compiler emitting at most one entry per bytecode, since the map has
+// THUMB2_MAX_BYTECODE_SIZE slots.  Confirm callers uphold this.
+static void address_bci_map_add(void *addr, unsigned bci)
+{
+    address_bci_map[address_bci_map_length].start_address = addr;
+    address_bci_map[address_bci_map_length].location = bci;
+    address_bci_map_length++;
+}
+#endif // THUMB2_JVMTI
+
+#ifdef T2_PRINT_DISASS
+// For the disassembler: per generated halfword, the range of bytecode
+// indices it was compiled from (-1 where no bytecode starts there).
+short start_bci[THUMB2_MAX_T2CODE_SIZE];
+short end_bci[THUMB2_MAX_T2CODE_SIZE];
+#endif
+
+bool DebugSwitch = false;
+
+// XXX hardwired constants!
+#define ENTRY_FRAME             1
+#define INTERPRETER_FRAME       2
+#define SHARK_FRAME             3
+#define FAKE_STUB_FRAME         4
+
+#include "offsets_arm.s"
+
+// Per-bytecode flags kept in the top nibble of bc_stackinfo[].
+#define BC_FLAGS_MASK		0xf0000000
+#define BC_VISITED		0x80000000
+#define BC_BRANCH_TARGET	0x40000000
+#define BC_COMPILED		0x20000000
+#define BC_BACK_TARGET		0x10000000
+
+#define IS_DEAD(x)	(((x) & BC_VISITED) == 0)
+
+// Bit positions of the per-local flags in locals_info[].
+#define LOCAL_MODIFIED		31
+#define LOCAL_REF		30
+#define LOCAL_DOUBLE		29
+#define LOCAL_FLOAT		28
+#define LOCAL_LONG		27
+#define LOCAL_INT		26
+#define LOCAL_ALLOCATED		25
+
+// Saturating read/write counters packed into the low bits of locals_info[].
+#define LOCAL_COUNT_BITS	10
+#define LOCAL_READ_POS		0
+#define LOCAL_WRITE_POS		LOCAL_COUNT_BITS
+
+#define LOCAL_READS(x)		(((x) >> LOCAL_READ_POS) & ((1<<LOCAL_COUNT_BITS)-1))
+#define LOCAL_WRITES(x)		(((x) >> LOCAL_WRITE_POS) & ((1<<LOCAL_COUNT_BITS)-1))
+#define LOCAL_SET_COUNTS(r, w)	(((r) << LOCAL_READ_POS) | (((w) << LOCAL_WRITE_POS)))
+#define LOCAL_INC_COUNT(c)	((c) < ((1<<LOCAL_COUNT_BITS)-1) ? (c)+1 : (c))
+
+#define STACK_REGS	4
+#define FP_STACK_REGS	4
+
+typedef unsigned	u32;
+typedef unsigned	Reg;
+
+// Register numbering: 0-15 are the ARM core registers, 16/17 are the CPSR
+// and fault-address slots of a sigcontext.
+#define	ARM_R0		0
+#define ARM_R1		1
+#define ARM_R2		2
+#define ARM_R3		3
+#define ARM_R4		4
+#define ARM_R5		5
+#define ARM_R6		6
+#define ARM_R7		7
+#define ARM_R8		8
+#define ARM_R9		9
+#define ARM_R10		10
+#define ARM_R11		11
+#define ARM_IP		12
+#define ARM_SP		13
+#define ARM_LR		14
+#define ARM_PC		15
+#define ARM_CPSR	16	// CPSR in sigcontext
+#define ARM_FAULT	17	// fault address in sigcontext
+
+#define CPSR_THUMB_BIT	(1<<5)
+
+// VFP registers continue the numbering: singles from 32, doubles from 64.
+#define VFP_S0		32
+#define VFP_S1		33
+#define VFP_S2		34
+#define VFP_S3		35
+#define VFP_S4		36
+#define VFP_S5		37
+#define VFP_S6		38
+#define VFP_S7		39
+
+#define VFP_D0		64
+#define VFP_D1		65
+#define VFP_D2		66
+#define VFP_D3		67
+#define VFP_D4		68
+#define VFP_D5		69
+#define VFP_D6		70
+#define VFP_D7		71
+
+#define PREGS	6
+
+// The six ARM registers available for caching Java locals (JAZ_V1..JAZ_V6).
+#define JAZ_V1	ARM_R5
+#define JAZ_V2	ARM_R6
+#define JAZ_V3	ARM_R7
+#define JAZ_V4	ARM_R8
+#define JAZ_V5	ARM_R9
+#define JAZ_V6	ARM_R11
+
+// Fixed register roles shared with the interpreter.
+#define Rstack		ARM_R4
+#define Rlocals		ARM_R7
+#define Ristate		ARM_R8
+#define Rthread		ARM_R10
+
+#define Rint_jpc	ARM_R5
+
+#define IS_ARM_INT_REG(r) ((r) <= ARM_PC)
+#define IS_ARM_FP_REG(r) (!IS_ARM_INT_REG(r))
+
+#define I_REGSET	((1<<ARM_R4) | (1<<ARM_R5) | (1<<ARM_R6) | (1<<ARM_R7) | \
+			 (1<<ARM_R9) | (1<<ARM_R10) | (1<<ARM_R11))
+#define C_REGSET	(1<<ARM_R8)
+
+#define LOG2(n) binary_log2(n)
+
+// Return the number of trailing zero bits in n, i.e. the index of the
+// lowest set bit.  For a power of two this is log2(n).  n == 0 yields 31
+// (every test sees zero), so callers must pass a non-zero value for a
+// meaningful result.
+unsigned binary_log2(unsigned n)
+{
+  unsigned r = 0;
+  if ((n & 0xffff) == 0) r = 16, n >>= 16;
+  if ((n & 0xff) == 0) r += 8, n >>= 8;
+  if ((n & 0xf) == 0) r += 4, n >>= 4;
+  if ((n & 3) == 0) r += 2, n >>= 2;
+  if ((n & 1) == 0) r += 1;
+  return r;
+}
+
+// Layout of a compiled method in the code buffer.  The comments sketch the
+// machine code that surrounds/follows this header.
+typedef struct Compiled_Method {
+    // All entry points aligned on a cache line boundary
+    //		.align	CODE_ALIGN
+    // slow_entry:				@ callee save interface
+    // 		push	{r4, r5, r6, r7, r9, r10, r11, lr}
+    // 		mov	Rthread, r2
+    // 		bl	fast_entry
+    // 		pop	{r4, r5, r6, r7, r9, r10, r11, pc}
+    unsigned slow_entry[4];
+    unsigned *osr_table;			// pointer to the osr table
+    unsigned *exception_table;
+    Compiled_Method *next;
+    // The next 6 halfwords give the register mapping for JAZ_V1 to JAZ_V6
+    // This is used when recovering from an exception so we can push
+    // the register back into the local variables pool.
+    short regusage[6];
+    unsigned header_end[1];
+    // fast_entry:
+    // 		push	{r8, lr}
+    // 		...	@ The compiled code
+    // 		pop	{r8, pc}
+    // 		.align	WORD_ALIGN
+    // code_handle:				@ from interpreted entry
+    // 		.word	slow_entry		@ bottom bit must be set!
+    // osr_table:
+    // 		.word	<no. of entries>
+    // @@@ For bytecode 0 and for each backwards branch target
+    // 		.short	<bytecode index>
+    // 		.short	<code offset>		@ offset in halfwords from slow_entry
+} Compiled_Method;
+
+// Singly-linked list of all compiled methods, appended to at the tail.
+Compiled_Method *compiled_method_list = 0;
+Compiled_Method **compiled_method_list_tail_ptr = &compiled_method_list;
+
+// Result of looking up/creating a compiled entry point.
+typedef struct Thumb2_Entrypoint {
+  unsigned compiled_entrypoint;
+  short *regusage;
+} Thumb2_Entrypoint;
+
+// Output buffer of 16-bit Thumb-2 code units: next free index and capacity.
+typedef struct CodeBuf {
+    unsigned short *codebuf;
+    unsigned idx;
+    unsigned limit;
+} CodeBuf;
+
+// Compile-time model of the Java expression stack (register assignments
+// per slot, current depth).
+typedef struct Thumb2_Stack {
+    unsigned *stack;
+    unsigned depth;
+} Thumb2_Stack;
+
+#define IS_SREG(r) ((r) < STACK_REGS)
+
+// Mapping of Java locals to ARM registers.
+typedef struct Thumb2_Registers {
+    unsigned *r_local;
+    unsigned npregs;
+    unsigned pregs[PREGS];
+    int mapping[PREGS];
+} Thumb2_Registers;
+
+// All per-compilation state handed around the compiler.
+typedef struct Thumb2_Info {
+    JavaThread *thread;
+    methodOop method;
+    unsigned *bc_stackinfo;
+    unsigned *locals_info;
+    jubyte *code_base;
+    unsigned code_size;
+    CodeBuf *codebuf;
+    Thumb2_Stack *jstack;
+    Thumb2_Registers *jregs;
+    unsigned compiled_return;
+    unsigned compiled_word_return[12];  // R0 .. R11
+    unsigned is_leaf;
+    unsigned use_istate;
+} Thumb2_Info;
+
+// Signature-character classification: types that occupy one 32-bit slot.
+#define IS_INT_SIZE_BASE_TYPE(c) (c=='B' || c=='C' || c=='F' || c=='I' || c=='S' || c=='Z')
+#define IS_INT_SIZE_TYPE(c) (IS_INT_SIZE_BASE_TYPE(c) || c == 'L' || c == '[')
+
+void Thumb2_save_local_refs(Thumb2_Info *jinfo, unsigned stackdepth);
+void Thumb2_restore_local_refs(Thumb2_Info *jinfo, unsigned stackdepth);
+void Thumb2_Exit(Thumb2_Info *jinfo, unsigned handler, unsigned bci, unsigned stackdepth);
+
+// Return the net change in 32-bit operand-stack slots caused by invoking a
+// method with the given signature "(args)ret": each argument pops one slot
+// (two for J/D), the return value pushes one slot (two for J/D, none for V).
+// The receiver, if any, is not accounted for here.
+static int method_stackchange(const jbyte *base)
+{
+  jbyte c;
+  int stackchange = 0;
+
+  c = *base++;
+  JASSERT(c == '(', "Invalid signature, missing '('");
+  while ((c = *base++) != ')') {
+    stackchange -= 1;
+    if (c == 'J' || c == 'D') {
+      stackchange -= 1;
+    } else if (c == '[') {
+      // skip array dimensions, then the element type (possibly a class name)
+      do { c = *base++; } while (c == '[');
+      if (c == 'L')
+	do { c = *base++; } while (c != ';');
+    } else if (c == 'L') {
+      // skip the class name up to the terminating ';'
+      do { c = *base++; } while (c != ';');
+    } else {
+      JASSERT(IS_INT_SIZE_BASE_TYPE(c), "Invalid signature, bad arg type");
+    }
+  }
+  JASSERT(c == ')', "Invalid signature, missing ')'");
+  c = *base++;
+  if (c == 'J' || c == 'D') stackchange += 2;
+  else if (c != 'V') {
+    stackchange += 1;
+    JASSERT(IS_INT_SIZE_TYPE(c), "Invalid signature, bad ret type");
+  }
+  return stackchange;
+}
+
+// Seed locals_info[] for the incoming arguments from the method signature:
+// each argument slot is tagged with its type (ref/int/long/float/double).
+// Slot 0 is the receiver for non-static methods.
+// NOTE(review): long/double arguments occupy two JVM local slots but only
+// one locals_info entry is written here (arg is incremented once) — confirm
+// the consumers of locals_info compensate for the second slot.
+static void Thumb2_local_info_from_sig(Thumb2_Info *jinfo, methodOop method,
+				       const jbyte *base)
+{
+  jbyte c;
+  unsigned arg = 0;
+  unsigned *locals_info = jinfo->locals_info;
+  unsigned local_info;
+
+  if (!method->is_static()) locals_info[arg++] = 1 << LOCAL_REF;
+  c = *base++;
+  JASSERT(c == '(', "Invalid signature, missing '('");
+  while ((c = *base++) != ')') {
+    local_info = 1 << LOCAL_INT;
+    if (c == 'J') local_info = 1 << LOCAL_LONG;
+    else if (c == 'D') local_info = 1 << LOCAL_DOUBLE;
+    else if (c == '[') {
+      // arrays are references; skip dimensions and element type
+      local_info = 1 << LOCAL_REF;
+      do { c = *base++; } while (c == '[');
+      if (c == 'L')
+	do { c = *base++; } while (c != ';');
+    } else if (c == 'L') {
+      local_info = 1 << LOCAL_REF;
+      do { c = *base++; } while (c != ';');
+    } else {
+      JASSERT(IS_INT_SIZE_BASE_TYPE(c), "Invalid signature, bad arg type");
+    }
+    locals_info[arg++] = local_info;
+  }
+}
+
+// Canonical Thumb-2 undefined-instruction encodings, emitted as temporary
+// filler for branches that get patched later (the disassembler flags any
+// that survive as "UNPATCHED BRANCH???").
+#define T_UNDEFINED_32	0xf7f0a000
+#define T_UNDEFINED_16	0xde00
+
+static const char *local_types[] = { "int", "long", "float", "double", "ref" };
+
+#ifdef T2_PRINT_DISASS
+
+// Lazy loader for the hsdis-arm.so disassembler plugin.  Fixes over the
+// previous version: `lib` is now always initialized (it was left
+// indeterminate when PrintAssembly was off), and the assignment used as a
+// condition is explicitly parenthesized and compared against NULL.
+class Hsdis {
+public:
+
+  typedef void* (*decode_instructions_event_callback_ftype)  (void*, const char*, void*);
+
+  typedef void* (*decode_instructions_ftype)
+    (void* start, void* end,
+     decode_instructions_event_callback_ftype event_callback,
+     void* event_stream,
+     void* printf_callback,
+     void* printf_stream,
+     const char* options);
+
+  // Resolved "decode_instructions" entry point; NULL until loaded.
+  decode_instructions_ftype decode_instructions;
+
+  // dlopen handle for hsdis-arm.so; NULL if not (yet) loaded.
+  void *lib;
+
+  // Load hsdis-arm.so lazily.  If PrintAssembly is set but a usable
+  // plugin cannot be found, print a diagnostic and abort: the user
+  // explicitly asked for disassembly.
+  Hsdis()
+  {
+    decode_instructions = NULL;
+    lib = NULL;
+
+    if (PrintAssembly) {
+      if ((lib = dlopen("hsdis-arm.so", RTLD_NOW)) != NULL) {
+	decode_instructions
+	  = (typeof decode_instructions)dlsym(lib, "decode_instructions");
+      }
+
+      if (! (decode_instructions)) {
+	fprintf (stderr, "PrintAssembly (or T2_PRINT_DISASS) is set, but\n"
+		 "hsdis-arm.so has not been found or is invalid.  If you want to\n"
+		 "see a disassembly, please ensure that a valid copy of\n"
+		 "hsdis-arm.so is present somewhere in your library load path.\n");
+	abort();
+      }
+    }
+  }
+};
+
+static void *print_address(void *stream, const char *tag, void *data);
+
+// Disassemble the compiled code in jinfo->codebuf, interleaving the Java
+// bytecodes (from jinfo->code_base) with the generated Thumb-2 machine code
+// via hsdis-arm.so.  Debug-only (T2_PRINT_DISASS); output goes to stderr.
+// Fixes over the previous version: the reset of last_bci was written as a
+// no-effect comparison (`last_bci != -1;`), which left the "disass not
+// advancing" assertion checking against a stale value; also removed a stray
+// double semicolon and two unused locals (locals_info, nlocals).
+void Thumb2_disass(Thumb2_Info *jinfo)
+{
+  unsigned code_size = jinfo->code_size;
+  jubyte *code_base = jinfo->code_base;
+  unsigned *bc_stackinfo = jinfo->bc_stackinfo;
+  int bci = 0;
+  int last_bci = -1;
+  int start_b, end_b;
+  unsigned nodisass;
+
+  unsigned short *codebuf = jinfo->codebuf->codebuf;
+  unsigned idx, compiled_len;
+
+  static Hsdis hsdis;
+
+  fflush(stdout);
+  fflush(stderr);
+
+  compiled_len = jinfo->codebuf->idx * 2;
+  for (idx = 0; idx < compiled_len; ) {
+    nodisass = 0;
+    start_b = start_bci[idx/2];
+    end_b = end_bci[idx/2];
+    if (start_b != -1) {
+      last_bci = -1;	// was a no-effect `!=`; reset before scanning this region
+      for (bci = start_b; bci < end_b; ) {
+	unsigned stackinfo = bc_stackinfo[bci];
+	unsigned opcode;
+	int len;
+
+	if (stackinfo & BC_BRANCH_TARGET)
+	  fprintf(stderr, "----- Basic Block -----\n");
+	JASSERT(bci > last_bci, "disass not advancing");
+	last_bci = bci;
+	fprintf(stderr, "%c%4d : ", (stackinfo & BC_VISITED) ? ' ' : '?', bci);
+	opcode = code_base[bci];
+	if (opcode > OPC_LAST_JAVA_OP) {
+	  // map VM-internal fast bytecodes back to their Java equivalent
+	  if (Bytecodes::is_defined((Bytecodes::Code)opcode))
+	    opcode = (unsigned)Bytecodes::java_code((Bytecodes::Code)opcode);
+	}
+	len = Bytecodes::length_for((Bytecodes::Code)opcode);
+	if (len <= 0) {
+	  // variable-length bytecode (tableswitch/lookupswitch/wide)
+	  Bytecodes::Code code = Bytecodes::code_at(NULL, (address)(code_base+bci));
+	  len = (Bytecodes::special_length_at
+		 (code,
+		  (address)(code_base+bci), (address)(code_base+code_size)));
+	}
+	switch (opcode) {
+	  case opc_tableswitch: {
+	    int nbci = (bci & ~3) + 4;	// operands start at the next 4-byte boundary
+	    int low, high;
+	    unsigned w;
+	    unsigned *table;
+	    int def;
+	    unsigned n, i;
+
+	    fprintf(stderr, "%02x ", opcode);
+	    for (int i = 1; i < 5; i++)
+	      fprintf(stderr, "   ");
+	    fprintf(stderr, "%s\n", Bytecodes::name((Bytecodes::Code)opcode));
+	    fprintf(stderr, "\t%d bytes padding\n", nbci - (bci+1));
+	    w = *(unsigned int *)(code_base + nbci + 4);
+	    low = (int)BYTESEX_REVERSE(w);
+	    w = *(unsigned int *)(code_base + nbci + 8);
+	    high = (int)BYTESEX_REVERSE(w);
+	    w = *(unsigned int *)(code_base + nbci + 0);
+	    def = (int)BYTESEX_REVERSE(w);
+	    table = (unsigned int *)(code_base + nbci + 12);
+	    fprintf(stderr, "\tdefault:\t0x%08x\n", def);
+	    fprintf(stderr, "\tlow:\t\t0x%08x\n", low);
+	    fprintf(stderr, "\thigh:\t\t0x%08x\n", high);
+	    n = high - low + 1;
+	    while (low <= high) {
+	      int off;
+
+	      w = *table++;
+	      off = (int)BYTESEX_REVERSE(w);
+	      fprintf(stderr, "\toffset %d:\t0x%08x\n", low, off);
+	      low++;
+	    }
+	    bci += len;
+	    {
+	      // The insn sequence generated by tableswitch is 14
+	      // bytes long.
+	      const int tableswitch_code_len = 14;
+	      fprintf(stderr, "0x%08x:\t", (int)codebuf+idx);
+		unsigned short *p = codebuf + idx/2;
+		// NOTE(review): this call writes to stdout while the rest of
+		// the disassembly goes to stderr — confirm stdout is intended.
+		hsdis.decode_instructions((char*)p,
+					  (char *)p + tableswitch_code_len,
+					  print_address, NULL, NULL, stdout,
+					  "force-thumb");
+		idx += tableswitch_code_len;
+	    }
+	    for (i = 0; i < n; i++) {
+	      fprintf(stderr, "0x%08x:\t.short\t0x%04x\n", (int)codebuf+idx, *(short *)((int)codebuf + idx));
+	      idx += 2;
+	    }
+	    nodisass = 1;
+	    break;
+	  }
+	  case opc_lookupswitch: {
+	    unsigned w;
+	    unsigned nbci = (bci & ~3) + 4;	// operands start at the next 4-byte boundary
+	    int def;
+	    int npairs;	// The Java spec says signed but must be >= 0??
+	    unsigned *table;
+
+	    fprintf(stderr, "%02x ", opcode);
+	    for (int i = 1; i < 5; i++)
+	      fprintf(stderr, "   ");
+	    fprintf(stderr, "%s\n", Bytecodes::name((Bytecodes::Code)opcode));
+	    fprintf(stderr, "\t%d bytes padding\n", nbci - (bci+1));
+
+	    w = *(unsigned int *)(code_base + nbci + 0);
+	    def = (int)BYTESEX_REVERSE(w);
+	    w = *(unsigned int *)(code_base + nbci + 4);
+	    npairs = (int)BYTESEX_REVERSE(w);
+	    table = (unsigned int *)(code_base + nbci + 8);
+	    fprintf(stderr, "\tdefault:\t0x%08x\n", def);
+	    fprintf(stderr, "\tnpairs:\t\t0x%08x\n", npairs);
+	    for (int i = 0; i < npairs; i++) {
+	      unsigned match, off;
+	      w = table[0];
+	      match = BYTESEX_REVERSE(w);
+	      w = table[1];
+	      table += 2;
+	      off = BYTESEX_REVERSE(w);
+	      fprintf(stderr, "\t  match: 0x%08x, offset: 0x%08x\n", match, off);
+	    }
+	    break;
+	  }
+
+	  default:
+	    for (int i = 0; i < 5; i++) {
+	      if (i < len)
+		fprintf(stderr, "%02x ", code_base[bci+i]);
+	      else
+		fprintf(stderr, "   ");
+	    }
+	    fprintf(stderr, "%s\n", Bytecodes::name((Bytecodes::Code)code_base[bci]));
+	    break;
+	}
+	bci += len;
+      }
+    }
+    if (!nodisass) {
+      {
+	int len;
+	unsigned s1, s2;
+
+	// flag the undefined-instruction fillers used for unpatched branches
+	s1 = *(unsigned short *)((int)codebuf + idx);
+	s2 = *(unsigned short *)((int)codebuf + idx + 2);
+	if (s1 == T_UNDEFINED_16 || ((s1 << 16) + s2) == T_UNDEFINED_32) {
+	  if (s1 == T_UNDEFINED_16) {
+	    fprintf(stderr, "undefined (0xde00) - UNPATCHED BRANCH???");
+	    len = 2;
+	  } else {
+	    fprintf(stderr, "undefined (0xf7f0a000) - UNPATCHED BRANCH???");
+	    len = 4;
+	  }
+	} else {
+	  char *p = (char*)codebuf + idx;
+	  len = 2;
+	  while (len + idx < compiled_len
+		 && start_bci[(len + idx)/2] == -1)
+	    len += 2;
+	  hsdis.decode_instructions((char*)p, (char*)p + len,
+				      print_address, NULL, NULL, stderr,
+				      "force-thumb");
+	}
+	idx += len;
+      }
+    }
+  }
+  fflush(stderr);
+}
+// hsdis event callback: print the address of each instruction (tag "insn")
+// as a line prefix on stderr; all other events are ignored.
+static void *print_address(void *, const char *tag, void *data) {
+  if (strcmp(tag, "insn") == 0)
+    fprintf(stderr, "0x%08x:\t", (unsigned int)data);
+  return NULL;
+}
+#endif // T2_PRINT_DISASS
+
+// Per-bytecode info word packed by BCI(): instruction length (bits 0-2),
+// operand-stack pops (bits 3-5) and pushes (bits 6-8), the implicit local
+// number for the _N forms (bits 9-10), the local's type (bits 11-13), and
+// flag bits in the top nibble (isstore/islocal_n/islocal/special).
+#define BCI(len, pop, push, special, islocal, islocal_n, isstore, local_n, local_type) \
+	((len) | ((pop)<<3) | ((push)<<6) | (unsigned)((special) << 31) | ((islocal) << 30) | ((islocal_n) << 29) | ((isstore) << 28) | ((local_n) << 9) | ((local_type) << 11))
+
+#define BCI_LEN(x) 	((x) & 7)
+#define BCI_POP(x) 	(((x)>>3) & 7)
+#define BCI_PUSH(x) 	(((x)>>6) & 7)
+#define BCI_LOCAL_N(x)	(((x)>>9) & 3)
+#define BCI_LOCAL_TYPE(x) (((x) >> 11) & 7)
+
+#define BCI_TYPE_INT	0
+#define BCI_TYPE_LONG	1
+#define BCI_TYPE_FLOAT	2
+#define BCI_TYPE_DOUBLE	3
+#define BCI_TYPE_REF	4
+
+#define BCI_SPECIAL(x) 	((x) & 0x80000000)
+#define BCI_ISLOCAL(x)	((x) & 0x40000000)
+#define BCI_ISLOCAL_N(x) ((x) & 0x20000000)
+#define BCI_ISSTORE(x)	((x) & 0x10000000)
+
+static const unsigned bcinfo[256] = {
+	BCI(1, 0, 0, 0, 0, 0, 0, 0, 0),	// nop
+	BCI(1, 0, 1, 0, 0, 0, 0, 0, 0),	// aconst_null
+	BCI(1, 0, 1, 0, 0, 0, 0, 0, 0),	// iconst_m1
+	BCI(1, 0, 1, 0, 0, 0, 0, 0, 0),	// iconst_0
+	BCI(1, 0, 1, 0, 0, 0, 0, 0, 0),	// iconst_1
+	BCI(1, 0, 1, 0, 0, 0, 0, 0, 0),	// iconst_2
+	BCI(1, 0, 1, 0, 0, 0, 0, 0, 0),	// iconst_3
+	BCI(1, 0, 1, 0, 0, 0, 0, 0, 0),	// iconst_4
+	BCI(1, 0, 1, 0, 0, 0, 0, 0, 0),	// iconst_5
+	BCI(1, 0, 2, 0, 0, 0, 0, 0, 0),	// lconst_0
+	BCI(1, 0, 2, 0, 0, 0, 0, 0, 0),	// lconst_1
+	BCI(1, 0, 1, 0, 0, 0, 0, 0, 0),	// fconst_0
+	BCI(1, 0, 1, 0, 0, 0, 0, 0, 0),	// fconst_1
+	BCI(1, 0, 1, 0, 0, 0, 0, 0, 0),	// fconst_2
+	BCI(1, 0, 2, 0, 0, 0, 0, 0, 0),	// dconst_0
+	BCI(1, 0, 2, 0, 0, 0, 0, 0, 0),	// dconst_1
+	BCI(2, 0, 1, 0, 0, 0, 0, 0, 0),	// bipush
+	BCI(3, 0, 1, 0, 0, 0, 0, 0, 0),	// bipush
+	BCI(2, 0, 1, 0, 0, 0, 0, 0, 0),	// ldc
+	BCI(3, 0, 1, 0, 0, 0, 0, 0, 0),	// ldc_w
+	BCI(3, 0, 2, 0, 0, 0, 0, 0, 0),	// ldc2_w
+	BCI(2, 0, 1, 0, 1, 0, 0, 0, BCI_TYPE_INT),	// iload
+	BCI(2, 0, 2, 0, 1, 0, 0, 0, BCI_TYPE_LONG),	// lload
+	BCI(2, 0, 1, 0, 1, 0, 0, 0, BCI_TYPE_FLOAT),	// fload
+	BCI(2, 0, 2, 0, 1, 0, 0, 0, BCI_TYPE_DOUBLE),	// dload
+	BCI(2, 0, 1, 0, 1, 0, 0, 0, BCI_TYPE_REF),	// aload
+	BCI(1, 0, 1, 0, 1, 1, 0, 0, BCI_TYPE_INT),	// iload_0
+	BCI(1, 0, 1, 0, 1, 1, 0, 1, BCI_TYPE_INT),	// iload_1
+	BCI(1, 0, 1, 0, 1, 1, 0, 2, BCI_TYPE_INT),	// iload_2
+	BCI(1, 0, 1, 0, 1, 1, 0, 3, BCI_TYPE_INT),	// iload_3
+	BCI(1, 0, 2, 0, 1, 1, 0, 0, BCI_TYPE_LONG),	// lload_0
+	BCI(1, 0, 2, 0, 1, 1, 0, 1, BCI_TYPE_LONG),	// lload_1
+	BCI(1, 0, 2, 0, 1, 1, 0, 2, BCI_TYPE_LONG),	// lload_2
+	BCI(1, 0, 2, 0, 1, 1, 0, 3, BCI_TYPE_LONG),	// lload_3
+	BCI(1, 0, 1, 0, 1, 1, 0, 0, BCI_TYPE_FLOAT),	// fload_0
+	BCI(1, 0, 1, 0, 1, 1, 0, 1, BCI_TYPE_FLOAT),	// fload_1
+	BCI(1, 0, 1, 0, 1, 1, 0, 2, BCI_TYPE_FLOAT),	// fload_2
+	BCI(1, 0, 1, 0, 1, 1, 0, 3, BCI_TYPE_FLOAT),	// fload_3
+	BCI(1, 0, 2, 0, 1, 1, 0, 0, BCI_TYPE_DOUBLE),	// dload_0
+	BCI(1, 0, 2, 0, 1, 1, 0, 1, BCI_TYPE_DOUBLE),	// dload_1
+	BCI(1, 0, 2, 0, 1, 1, 0, 2, BCI_TYPE_DOUBLE),	// dload_2
+	BCI(1, 0, 2, 0, 1, 1, 0, 3, BCI_TYPE_DOUBLE),	// dload_3
+	BCI(1, 0, 1, 0, 1, 1, 0, 0, BCI_TYPE_REF),	// aload_0
+	BCI(1, 0, 1, 0, 1, 1, 0, 1, BCI_TYPE_REF),	// aload_1
+	BCI(1, 0, 1, 0, 1, 1, 0, 2, BCI_TYPE_REF),	// aload_2
+	BCI(1, 0, 1, 0, 1, 1, 0, 3, BCI_TYPE_REF),	// aload_3
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// iaload
+	BCI(1, 2, 2, 0, 0, 0, 0, 0, 0),	// laload
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// faload
+	BCI(1, 2, 2, 0, 0, 0, 0, 0, 0),	// daload
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// aaload
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// baload
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// caload
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// saload
+	BCI(2, 1, 0, 0, 1, 0, 1, 0, BCI_TYPE_INT),	// istore
+	BCI(2, 2, 0, 0, 1, 0, 1, 0, BCI_TYPE_LONG),	// lstore
+	BCI(2, 1, 0, 0, 1, 0, 1, 0, BCI_TYPE_FLOAT),	// fstore
+	BCI(2, 2, 0, 0, 1, 0, 1, 0, BCI_TYPE_DOUBLE),	// dstore
+	BCI(2, 1, 0, 0, 1, 0, 1, 0, BCI_TYPE_REF),	// astore
+	BCI(1, 1, 0, 0, 1, 1, 1, 0, BCI_TYPE_INT),	// istore_0
+	BCI(1, 1, 0, 0, 1, 1, 1, 1, BCI_TYPE_INT),	// istore_1
+	BCI(1, 1, 0, 0, 1, 1, 1, 2, BCI_TYPE_INT),	// istore_2
+	BCI(1, 1, 0, 0, 1, 1, 1, 3, BCI_TYPE_INT),	// istore_3
+	BCI(1, 2, 0, 0, 1, 1, 1, 0, BCI_TYPE_LONG),	// lstore_0
+	BCI(1, 2, 0, 0, 1, 1, 1, 1, BCI_TYPE_LONG),	// lstore_1
+	BCI(1, 2, 0, 0, 1, 1, 1, 2, BCI_TYPE_LONG),	// lstore_2
+	BCI(1, 2, 0, 0, 1, 1, 1, 3, BCI_TYPE_LONG),	// lstore_3
+	BCI(1, 1, 0, 0, 1, 1, 1, 0, BCI_TYPE_FLOAT),	// fstore_0
+	BCI(1, 1, 0, 0, 1, 1, 1, 1, BCI_TYPE_FLOAT),	// fstore_1
+	BCI(1, 1, 0, 0, 1, 1, 1, 2, BCI_TYPE_FLOAT),	// fstore_2
+	BCI(1, 1, 0, 0, 1, 1, 1, 3, BCI_TYPE_FLOAT),	// fstore_3
+	BCI(1, 2, 0, 0, 1, 1, 1, 0, BCI_TYPE_DOUBLE),	// dstore_0
+	BCI(1, 2, 0, 0, 1, 1, 1, 1, BCI_TYPE_DOUBLE),	// dstore_1
+	BCI(1, 2, 0, 0, 1, 1, 1, 2, BCI_TYPE_DOUBLE),	// dstore_2
+	BCI(1, 2, 0, 0, 1, 1, 1, 3, BCI_TYPE_DOUBLE),	// dstore_3
+	BCI(1, 1, 0, 0, 1, 1, 1, 0, BCI_TYPE_REF),	// astore_0
+	BCI(1, 1, 0, 0, 1, 1, 1, 1, BCI_TYPE_REF),	// astore_1
+	BCI(1, 1, 0, 0, 1, 1, 1, 2, BCI_TYPE_REF),	// astore_2
+	BCI(1, 1, 0, 0, 1, 1, 1, 3, BCI_TYPE_REF),	// astore_3
+	BCI(1, 3, 0, 0, 0, 0, 0, 0, 0),	// iastore
+	BCI(1, 4, 0, 0, 0, 0, 0, 0, 0),	// dastore
+	BCI(1, 3, 0, 0, 0, 0, 0, 0, 0),	// fastore
+	BCI(1, 4, 0, 0, 0, 0, 0, 0, 0),	// lastore
+	BCI(1, 3, 0, 0, 0, 0, 0, 0, 0),	// aastore
+	BCI(1, 3, 0, 0, 0, 0, 0, 0, 0),	// bastore
+	BCI(1, 3, 0, 0, 0, 0, 0, 0, 0),	// castore
+	BCI(1, 3, 0, 0, 0, 0, 0, 0, 0),	// sastore
+	BCI(1, 1, 0, 0, 0, 0, 0, 0, 0),	// pop
+	BCI(1, 2, 0, 0, 0, 0, 0, 0, 0),	// pop2
+	BCI(1, 1, 2, 0, 0, 0, 0, 0, 0),	// dup
+	BCI(1, 2, 3, 0, 0, 0, 0, 0, 0),	// dup_x1
+	BCI(1, 3, 4, 0, 0, 0, 0, 0, 0),	// dup_x2
+	BCI(1, 2, 4, 0, 0, 0, 0, 0, 0),	// dup2
+	BCI(1, 3, 5, 0, 0, 0, 0, 0, 0),	// dup2_x1
+	BCI(1, 4, 6, 0, 0, 0, 0, 0, 0),	// dup2_x2
+	BCI(1, 1, 1, 0, 0, 0, 0, 0, 0),	// swap
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// iadd
+	BCI(1, 4, 2, 0, 0, 0, 0, 0, 0),	// ladd
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// fadd
+	BCI(1, 4, 2, 0, 0, 0, 0, 0, 0),	// dadd
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// isub
+	BCI(1, 4, 2, 0, 0, 0, 0, 0, 0),	// lsub
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// fsub
+	BCI(1, 4, 2, 0, 0, 0, 0, 0, 0),	// dsub
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// imul
+	BCI(1, 4, 2, 0, 0, 0, 0, 0, 0),	// lmul
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// fmul
+	BCI(1, 4, 2, 0, 0, 0, 0, 0, 0),	// dmul
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// idiv
+	BCI(1, 4, 2, 0, 0, 0, 0, 0, 0),	// ldiv
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// fdiv
+	BCI(1, 4, 2, 0, 0, 0, 0, 0, 0),	// ddiv
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// irem
+	BCI(1, 4, 2, 0, 0, 0, 0, 0, 0),	// lrem
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// frem
+	BCI(1, 4, 2, 0, 0, 0, 0, 0, 0),	// drem
+	BCI(1, 1, 1, 0, 0, 0, 0, 0, 0),	// ineg
+	BCI(1, 2, 2, 0, 0, 0, 0, 0, 0),	// lneg
+	BCI(1, 1, 1, 0, 0, 0, 0, 0, 0),	// fneg
+	BCI(1, 2, 2, 0, 0, 0, 0, 0, 0),	// dneg
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// ishl
+	BCI(1, 3, 2, 0, 0, 0, 0, 0, 0),	// lshl
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// ishr
+	BCI(1, 3, 2, 0, 0, 0, 0, 0, 0),	// lshr
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// iushr
+	BCI(1, 3, 2, 0, 0, 0, 0, 0, 0),	// lushr
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// iand
+	BCI(1, 4, 2, 0, 0, 0, 0, 0, 0),	// land
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// ior
+	BCI(1, 4, 2, 0, 0, 0, 0, 0, 0),	// lor
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// ixor
+	BCI(1, 4, 2, 0, 0, 0, 0, 0, 0),	// lxor
+	BCI(3, 0, 0, 0, 1, 0, 1, 0, BCI_TYPE_INT),	// iinc
+	BCI(1, 1, 2, 0, 0, 0, 0, 0, 0),	// i2l
+	BCI(1, 1, 1, 0, 0, 0, 0, 0, 0),	// i2f
+	BCI(1, 1, 2, 0, 0, 0, 0, 0, 0),	// i2d
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// l2i
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// l2f
+	BCI(1, 2, 2, 0, 0, 0, 0, 0, 0),	// l2d
+	BCI(1, 1, 1, 0, 0, 0, 0, 0, 0),	// f2i
+	BCI(1, 1, 2, 0, 0, 0, 0, 0, 0),	// f2l
+	BCI(1, 1, 2, 0, 0, 0, 0, 0, 0),	// f2d
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// d2i
+	BCI(1, 2, 2, 0, 0, 0, 0, 0, 0),	// d2l
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// d2f
+	BCI(1, 1, 1, 0, 0, 0, 0, 0, 0),	// i2b
+	BCI(1, 1, 1, 0, 0, 0, 0, 0, 0),	// i2c
+	BCI(1, 1, 1, 0, 0, 0, 0, 0, 0),	// i2s
+	BCI(1, 4, 1, 0, 0, 0, 0, 0, 0),	// lcmp
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// fcmpl
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// fcmpg
+	BCI(1, 4, 1, 0, 0, 0, 0, 0, 0),	// dcmpl
+	BCI(1, 4, 1, 0, 0, 0, 0, 0, 0),	// dcmpg
+	BCI(3, 1, 0, 1, 0, 0, 0, 0, 0),	// ifeq
+	BCI(3, 1, 0, 1, 0, 0, 0, 0, 0),	// ifne
+	BCI(3, 1, 0, 1, 0, 0, 0, 0, 0),	// iflt
+	BCI(3, 1, 0, 1, 0, 0, 0, 0, 0),	// ifge
+	BCI(3, 1, 0, 1, 0, 0, 0, 0, 0),	// ifgt
+	BCI(3, 1, 0, 1, 0, 0, 0, 0, 0),	// ifle
+	BCI(3, 2, 0, 1, 0, 0, 0, 0, 0),	// if_icmpeq
+	BCI(3, 2, 0, 1, 0, 0, 0, 0, 0),	// if_icmpne
+	BCI(3, 2, 0, 1, 0, 0, 0, 0, 0),	// if_icmplt
+	BCI(3, 2, 0, 1, 0, 0, 0, 0, 0),	// if_icmpge
+	BCI(3, 2, 0, 1, 0, 0, 0, 0, 0),	// if_icmpgt
+	BCI(3, 2, 0, 1, 0, 0, 0, 0, 0),	// if_icmple
+	BCI(3, 2, 0, 1, 0, 0, 0, 0, 0),	// if_acmpeq
+	BCI(3, 2, 0, 1, 0, 0, 0, 0, 0),	// if_acmpne
+	BCI(3, 0, 0, 1, 0, 0, 0, 0, 0),	// goto
+	BCI(3, 0, 1, 1, 0, 0, 0, 0, 0),	// jsr
+	BCI(2, 0, 0, 1, 0, 0, 0, 0, 0),	// ret
+	BCI(0, 1, 0, 1, 0, 0, 0, 0, 0),	// tableswitch
+	BCI(0, 1, 0, 1, 0, 0, 0, 0, 0),	// lookupswitch
+	BCI(1, 1, 0, 1, 0, 0, 0, 0, 0),	// ireturn
+	BCI(1, 2, 0, 1, 0, 0, 0, 0, 0),	// lreturn
+	BCI(1, 1, 0, 1, 0, 0, 0, 0, 0),	// freturn
+	BCI(1, 2, 0, 1, 0, 0, 0, 0, 0),	// dreturn
+	BCI(1, 1, 0, 1, 0, 0, 0, 0, 0),	// areturn
+	BCI(1, 0, 0, 1, 0, 0, 0, 0, 0),	// return
+	BCI(3, 0, 0, 1, 0, 0, 0, 0, 0),	// getstatic
+	BCI(3, 0, 0, 1, 0, 0, 0, 0, 0),	// putstatic
+	BCI(3, 0, 0, 1, 0, 0, 0, 0, 0),	// getfield
+	BCI(3, 0, 0, 1, 0, 0, 0, 0, 0),	// putfield
+	BCI(3, 0, 0, 1, 0, 0, 0, 0, 0),	// invokevirtual
+	BCI(3, 0, 0, 1, 0, 0, 0, 0, 0),	// invokespecial
+	BCI(3, 0, 0, 1, 0, 0, 0, 0, 0),	// invokestatic
+	BCI(3, 0, 0, 1, 0, 0, 0, 0, 0),	// invokeinterface
+	BCI(5, 0, 0, 1, 0, 0, 0, 0, 0),	// invokedynamic
+	BCI(3, 0, 1, 0, 0, 0, 0, 0, 0),	// new
+	BCI(2, 1, 1, 0, 0, 0, 0, 0, 0),	// newarray
+	BCI(3, 1, 1, 0, 0, 0, 0, 0, 0),	// anewarray
+	BCI(1, 1, 1, 0, 0, 0, 0, 0, 0),	// arraylength
+	BCI(1, 1, 1, 1, 0, 0, 0, 0, 0),	// athrow
+	BCI(3, 1, 1, 0, 0, 0, 0, 0, 0),	// checkcast
+	BCI(3, 1, 1, 0, 0, 0, 0, 0, 0),	// instanceof
+	BCI(1, 1, 0, 0, 0, 0, 0, 0, 0),	// monitorenter
+	BCI(1, 1, 0, 0, 0, 0, 0, 0, 0),	// monitorexit
+	BCI(0, 0, 0, 1, 0, 0, 0, 0, 0),	// wide
+	BCI(4, 0, 0, 1, 0, 0, 0, 0, 0),	// multianewarray
+	BCI(3, 1, 0, 1, 0, 0, 0, 0, 0),	// ifnull
+	BCI(3, 1, 0, 1, 0, 0, 0, 0, 0),	// ifnonnull
+	BCI(5, 0, 0, 1, 0, 0, 0, 0, 0),	// goto_w
+	BCI(5, 0, 0, 1, 0, 0, 0, 0, 0),	// jsr_w
+	BCI(1, 0, 0, 1, 0, 0, 0, 0, 0),	// breakpoint
+	BCI(0, 0, 0, 1, 0, 0, 0, 0, 0),	// unused 0xcb
+	BCI(3, 1, 1, 0, 0, 0, 0, 0, 0),	// bgetfield
+	BCI(3, 1, 1, 0, 0, 0, 0, 0, 0),	// cgetfield
+	BCI(0, 0, 0, 1, 0, 0, 0, 0, 0),	// unused 0xce
+	BCI(0, 0, 0, 1, 0, 0, 0, 0, 0),	// unused 0xcf
+	BCI(3, 1, 1, 0, 0, 0, 0, 0, 0),	// igetfield
+	BCI(3, 1, 2, 0, 0, 0, 0, 0, 0),	// lgetfield
+	BCI(3, 1, 1, 0, 0, 0, 0, 0, 0),	// sgetfield
+	BCI(3, 2, 0, 0, 0, 0, 0, 0, 0),	// aputfield
+	BCI(3, 2, 0, 0, 0, 0, 0, 0, 0),	// bputfield
+	BCI(3, 2, 0, 0, 0, 0, 0, 0, 0),	// cputfield
+	BCI(0, 0, 0, 1, 0, 0, 0, 0, 0),	// unused 0xd6
+	BCI(0, 0, 0, 1, 0, 0, 0, 0, 0),	// unused 0xd7
+	BCI(3, 2, 0, 0, 0, 0, 0, 0, 0),	// iputfield
+	BCI(3, 3, 0, 0, 0, 0, 0, 0, 0),	// lputfield
+	BCI(0, 0, 0, 1, 0, 0, 0, 0, 0),	// unused 0xda
+	BCI(1, 0, 1, 0, 1, 1, 0, 0, BCI_TYPE_REF),	// iaccess_0
+	BCI(1, 0, 1, 0, 1, 1, 0, 1, BCI_TYPE_REF),	// iaccess_1
+	BCI(1, 0, 1, 0, 1, 1, 0, 2, BCI_TYPE_REF),	// iaccess_2
+	BCI(1, 0, 1, 0, 1, 1, 0, 3, BCI_TYPE_REF),	// iaccess_3
+	BCI(3, 0, 0, 1, 0, 0, 0, 0, 0),	// invokeresolved
+	BCI(3, 0, 0, 1, 0, 0, 0, 0, 0),	// invokespecialresolved
+	BCI(3, 0, 0, 1, 0, 0, 0, 0, 0),	// invokestaticresolved
+	BCI(3, 0, 0, 1, 0, 0, 0, 0, 0),	// invokevfinal
+	BCI(2, 0, 1, 0, 1, 0, 0, 0, BCI_TYPE_INT),	// iload_iload
+	BCI(2, 0, 1, 0, 1, 0, 0, 0, BCI_TYPE_INT),	// iload_iload_N
+	BCI(2, 0, 1, 0, 0, 0, 0, 0, 0),	// fast_aldc
+	BCI(3, 0, 1, 0, 0, 0, 0, 0, 0),	// fast_aldc_w
+	BCI(1, 0, 0, 1, 0, 0, 0, 0, 0),	// return_register_finalizer
+	BCI(1, 4, 2, 0, 0, 0, 0, 0, 0),	// dmac
+	BCI(1, 0, 1, 0, 1, 1, 0, 0, BCI_TYPE_INT),	// iload_0_iconst_N
+	BCI(1, 0, 1, 0, 1, 1, 0, 1, BCI_TYPE_INT),	// iload_1_iconst_N
+	BCI(1, 0, 1, 0, 1, 1, 0, 2, BCI_TYPE_INT),	// iload_2_iconst_N
+	BCI(1, 0, 1, 0, 1, 1, 0, 3, BCI_TYPE_INT),	// iload_3_iconst_N
+	BCI(2, 0, 1, 0, 1, 0, 0, 0, BCI_TYPE_INT),	// iload_iconst_N
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// iadd_istore_N
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// isub_istore_N
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// iand_istore_N
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// ior_istore_N
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// ixor_istore_N
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// iadd_u4store
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// isub_u4store
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// iand_u4store
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// ior_u4store
+	BCI(1, 2, 1, 0, 0, 0, 0, 0, 0),	// ixor_u4store
+	BCI(1, 0, 1, 0, 1, 1, 0, 0, BCI_TYPE_INT),	// iload_0_iload
+	BCI(1, 0, 1, 0, 1, 1, 0, 1, BCI_TYPE_INT),	// iload_1_iload
+	BCI(1, 0, 1, 0, 1, 1, 0, 2, BCI_TYPE_INT),	// iload_2_iload
+	BCI(1, 0, 1, 0, 1, 1, 0, 3, BCI_TYPE_INT),	// iload_3_iload
+	BCI(1, 0, 1, 0, 1, 1, 0, 0, BCI_TYPE_INT),	// iload_0_iload_N
+	BCI(1, 0, 1, 0, 1, 1, 0, 1, BCI_TYPE_INT),	// iload_1_iload_N
+	BCI(1, 0, 1, 0, 1, 1, 0, 2, BCI_TYPE_INT),	// iload_2_iload_N
+	BCI(1, 0, 1, 0, 1, 1, 0, 3, BCI_TYPE_INT),	// iload_3_iload_N
+};
+
+// Pass 1 of the Thumb2 JIT compiler.
+//
+// Abstract-interprets the Java bytecode reachable from 'bci', tracking only
+// the simulated operand-stack depth.  For every bytecode visited the depth is
+// recorded in jinfo->bc_stackinfo[], ORed with BC_VISITED and any
+// branch-target / back-edge flags.  Per-local read/write counts and type bits
+// are accumulated into jinfo->locals_info[] for the register allocator
+// (Thumb2_RegAlloc).  Taken branches and switch arms are followed by
+// recursion; the fall-through path is followed by the enclosing loop.  The
+// walk terminates at an already-visited bytecode or a terminating opcode
+// (the returns, athrow, ret).
+void Thumb2_pass1(Thumb2_Info *jinfo, unsigned stackdepth, unsigned bci)
+{
+  unsigned code_size = jinfo->code_size;
+  jubyte *code_base = jinfo->code_base;
+  unsigned *bc_stackinfo = jinfo->bc_stackinfo;
+  unsigned *locals_info = jinfo->locals_info;
+  //constantPoolCacheOop cp = jinfo->method->constants()->cache();
+
+  // Every entry point of this pass is, by construction, a branch target.
+  bc_stackinfo[bci] |= BC_BRANCH_TARGET;
+  while (bci < code_size) {
+    unsigned stackinfo = bc_stackinfo[bci];
+    unsigned bytecodeinfo;
+    unsigned opcode;
+
+    if (stackinfo & BC_VISITED) break;
+    JASSERT((int)stackdepth >= 0, "stackdepth < 0!!");
+    bc_stackinfo[bci] = (stackinfo & BC_FLAGS_MASK) | stackdepth | BC_VISITED;
+    opcode = code_base[bci];
+//	printf("bci = 0x%04x, opcode = 0x%02x (%s)", bci, opcode,  Bytecodes::name((Bytecodes::Code)opcode));
+    bytecodeinfo = bcinfo[opcode];
+    // Common case: fixed-length bytecode whose length and stack effect come
+    // straight from the bcinfo[] table.
+    if (!BCI_SPECIAL(bytecodeinfo)) {
+      if (BCI_ISLOCAL(bytecodeinfo)) {
+	unsigned local = BCI_LOCAL_N(bytecodeinfo);
+	unsigned local_type = BCI_LOCAL_TYPE(bytecodeinfo) + LOCAL_INT;
+	unsigned local_modified = 0;
+	unsigned linfo;
+	unsigned read_count, write_count;
+
+	// Non-"_N" forms carry the local index in the following operand byte.
+	if (!BCI_ISLOCAL_N(bytecodeinfo)) local = code_base[bci+1];
+	if (BCI_ISSTORE(bytecodeinfo)) local_modified = 1U << LOCAL_MODIFIED;
+	linfo = locals_info[local];
+	read_count = LOCAL_READS(linfo);
+	write_count = LOCAL_WRITES(linfo);
+	if (local_modified)
+	  write_count = LOCAL_INC_COUNT(write_count);
+	else
+	  read_count = LOCAL_INC_COUNT(read_count);
+	
+	locals_info[local] |= (1 << local_type) | LOCAL_SET_COUNTS(read_count, write_count) | local_modified;
+	// Two-slot types also mark the adjacent slot.
+	if (local_type == LOCAL_LONG || local_type == LOCAL_DOUBLE) {
+	  locals_info[local+1] |= (1 << local_type) | LOCAL_SET_COUNTS(read_count, write_count) | local_modified;
+	}
+      }
+      bci += BCI_LEN(bytecodeinfo);
+      stackdepth += BCI_PUSH(bytecodeinfo) - BCI_POP(bytecodeinfo);
+      JASSERT(stackdepth <= (unsigned)jinfo->method->max_stack(), "stack over/under flow?");
+      continue;
+    }
+
+    // Variable-length or control-transfer bytecodes, handled individually.
+    switch (opcode) {
+
+      case opc_goto: {
+	int off = GET_JAVA_S2(code_base+bci+1);
+	bci += off;
+	bc_stackinfo[bci] |= BC_BRANCH_TARGET;
+	if (off < 0) bc_stackinfo[bci] |= BC_BACK_TARGET;
+	break;
+      }
+      case opc_goto_w: {
+	int off = GET_JAVA_U4(code_base+bci+1);
+	bci += off;
+	bc_stackinfo[bci] |= BC_BRANCH_TARGET;
+	if (off < 0) bc_stackinfo[bci] |= BC_BACK_TARGET;
+	break;
+      }
+
+      case opc_ifeq:
+      case opc_ifne:
+      case opc_iflt:
+      case opc_ifge:
+      case opc_ifgt:
+      case opc_ifle:
+      case opc_ifnull:
+      case opc_ifnonnull: {
+	int off = GET_JAVA_S2(code_base+bci+1);
+	if (off < 0) bc_stackinfo[bci+off] |= BC_BACK_TARGET;
+	stackdepth -= 1;
+	// Recurse down the taken edge; fall through via the loop.
+        Thumb2_pass1(jinfo, stackdepth, bci + off);
+	bci += 3;
+	break;
+      }
+
+      case opc_if_icmpeq:
+      case opc_if_icmpne:
+      case opc_if_icmplt:
+      case opc_if_icmpge:
+      case opc_if_icmpgt:
+      case opc_if_icmple:
+      case opc_if_acmpeq:
+      case opc_if_acmpne: {
+	int off = GET_JAVA_S2(code_base+bci+1);
+	if (off < 0) bc_stackinfo[bci+off] |= BC_BACK_TARGET;
+	stackdepth -= 2;
+        Thumb2_pass1(jinfo, stackdepth, bci + off);
+	bci += 3;
+	break;
+      }
+
+      case opc_jsr: {
+	int off = GET_JAVA_S2(code_base+bci+1);
+	if (off < 0) bc_stackinfo[bci+off] |= BC_BACK_TARGET;
+	// The subroutine sees the return address pushed (+1).
+        Thumb2_pass1(jinfo, stackdepth+1, bci + off);
+	bci += 3;
+	// NOTE(review): resetting stackdepth to 0 on the fall-through path
+	// looks suspicious -- opc_jsr_w below leaves it unchanged; confirm
+	// against how ret re-enters the fall-through code.
+	stackdepth = 0;
+	break;
+      }
+      case opc_jsr_w: {
+	int off = GET_JAVA_U4(code_base+bci+1);
+	if (off < 0) bc_stackinfo[bci+off] |= BC_BACK_TARGET;
+        Thumb2_pass1(jinfo, stackdepth+1, bci + off);
+	bci += 5;
+	break;
+      }
+
+      case opc_ireturn:
+      case opc_lreturn:
+      case opc_freturn:
+      case opc_dreturn:
+      case opc_areturn:
+      case opc_return:
+      case opc_return_register_finalizer:
+      case opc_ret:
+      case opc_athrow:
+	// The test for BC_VISITED above will break out of the loop!!!
+	break;
+
+      case opc_tableswitch: {
+	int low, high;
+	unsigned w;
+	unsigned *table;
+	unsigned nbci;
+	int def;
+
+	stackdepth -= 1;
+	// Operands are padded to a 4-byte boundary: for every bci % 4 the
+	// default offset lands at (bci & ~3) + 4, low at +8, high at +12.
+	nbci = bci & ~3;
+	w = *(unsigned int *)(code_base + nbci + 8);
+	low = (int)BYTESEX_REVERSE(w);
+	w = *(unsigned int *)(code_base + nbci + 12);
+	high = (int)BYTESEX_REVERSE(w);
+	w = *(unsigned int *)(code_base + nbci + 4);
+	def = (int)BYTESEX_REVERSE(w);
+	table = (unsigned int *)(code_base + nbci + 16);
+
+	// One branch target per entry in [low, high].
+	while (low <= high) {
+	  int off;
+	  w = *table++;
+	  off = (int)BYTESEX_REVERSE(w);
+	  if (off < 0) bc_stackinfo[bci+off] |= BC_BACK_TARGET;
+	  Thumb2_pass1(jinfo, stackdepth, bci + off);
+	  low++;
+	}
+
+	// Continue scanning at the default target.
+	bci += def;
+	bc_stackinfo[bci] |= BC_BRANCH_TARGET;
+	if (def < 0) bc_stackinfo[bci] |= BC_BACK_TARGET;
+	break;
+      }
+
+      case opc_lookupswitch: {
+	unsigned w;
+	unsigned nbci;
+	int def;
+	int npairs;	// The Java spec says signed but must be >= 0??
+	unsigned *table;
+
+	stackdepth -= 1;
+	nbci = bci & ~3;
+	w = *(unsigned int *)(code_base + nbci + 4);
+	def = (int)BYTESEX_REVERSE(w);
+	w = *(unsigned int *)(code_base + nbci + 8);
+	npairs = (int)BYTESEX_REVERSE(w);
+	// Entries are (match, offset) pairs starting at +12; beginning at
+	// +16 and stepping two words reads each pair's offset field only.
+	table = (unsigned int *)(code_base + nbci + 16);
+
+	for (int i = 0; i < npairs; i++) {
+	  int off;
+	  w = *table;
+	  table += 2;
+	  off = (int)BYTESEX_REVERSE(w);
+	  if (off < 0) bc_stackinfo[bci+off] |= BC_BACK_TARGET;
+	  Thumb2_pass1(jinfo, stackdepth, bci + off);
+	}
+
+	bci += def;
+	bc_stackinfo[bci] |= BC_BRANCH_TARGET;
+	if (def < 0) bc_stackinfo[bci] |= BC_BACK_TARGET;
+	break;
+      }
+
+      case opc_getstatic:
+      case opc_putstatic:
+      case opc_getfield:
+      case opc_putfield: {
+	int index = GET_NATIVE_U2(code_base+bci+1);
+	constantPoolOop pool = jinfo->method->constants();
+	Symbol *sig = pool->signature_ref_at(index);
+	const jbyte *base = sig->base();
+	jbyte c = *base;
+	int stackchange;
+
+	opcode = code_base[bci];
+	// Instance field ops also pop the object reference.
+	if (opcode == opc_getfield || opcode == opc_putfield)
+	  stackdepth -= 1;
+	// Field width from the first signature char: J/D occupy two slots.
+	stackchange = 1;
+	if (c == 'J' || c == 'D') stackchange = 2;
+	if (opcode == opc_getfield || opcode == opc_getstatic)
+	  stackdepth += stackchange;
+	else
+	  stackdepth -= stackchange;
+	bci += 3;
+	break;
+      }
+
+      case opc_invokedynamic: {
+	int site_index = GET_NATIVE_U4(code_base+bci+1);
+	constantPoolOop pool = jinfo->method->constants();
+	int main_index = pool->cache()->secondary_entry_at(site_index)->main_entry_index();
+	// int pool_index = pool->cache()->entry_at(main_index)->constant_pool_index();
+	Symbol *sig = pool->signature_ref_at(main_index);
+	const jbyte *base = sig->base();
+
+	//tty->print("%d: %s: %s\n", opcode, name->as_C_string(), sig->as_C_string());
+	// Net stack effect computed from the method signature.
+	stackdepth += method_stackchange(base);
+	opcode = code_base[bci];
+	bci += 5;
+	break;
+      }
+
+      case opc_invokeresolved:
+      case opc_invokespecialresolved:
+      case opc_invokestaticresolved:
+      case opc_invokevfinal:
+      case opc_invokeinterface:
+      case opc_invokevirtual:
+      case opc_invokespecial:
+      case opc_invokestatic: {
+	int index = GET_NATIVE_U2(code_base+bci+1);
+	constantPoolOop pool = jinfo->method->constants();
+	Symbol *sig = pool->signature_ref_at(index);
+	const jbyte *base = sig->base();
+
+	// Any call site means the method is no longer a leaf.
+	jinfo->is_leaf = 0;
+	//tty->print("%d: %s: %s\n", opcode, name->as_C_string(), sig->as_C_string());
+	stackdepth += method_stackchange(base);
+	opcode = code_base[bci];
+	bci += 3;
+	// invokeinterface carries two extra operand bytes (count + zero).
+	if (opcode == opc_invokeinterface) bci += 2;
+	// Non-static invokes also pop the receiver.
+	if (opcode != opc_invokestatic && opcode != opc_invokestaticresolved)
+	  stackdepth -= 1;
+	break;
+      }
+
+      case opc_multianewarray:
+	// Pops one size per dimension (operand byte 3), pushes the array ref.
+	stackdepth = (stackdepth - code_base[bci+3]) + 1;
+	bci += 4;
+	break;
+
+      case opc_wide: {
+	opcode = code_base[bci+1];
+	if (opcode == opc_iinc) {
+	  bci += 6;
+	} else {
+	  bci += 4;
+	  if (opcode == opc_iload ||
+	  	opcode == opc_fload || opcode == opc_aload)
+	    stackdepth += 1;
+	  else if (opcode == opc_lload || opcode == opc_dload)
+	    stackdepth += 2;
+	  else if (opcode == opc_istore ||
+	  	opcode == opc_fstore || opcode == opc_astore)
+	    stackdepth -= 1;
+	  else if (opcode == opc_lstore || opcode == opc_dstore)
+	    stackdepth -= 2;
+	  else if (opcode != opc_ret)
+	    fatal(err_msg("Undefined wide opcode %d\n", opcode));
+	}
+	break;
+      }
+
+      default:
+	opcode = code_base[bci];
+	fatal(err_msg("Undefined opcode %d\n", opcode));
+	break;
+    }
+  }
+}
+
+// Greedy register allocator for Java locals.
+//
+// For each available physical register in jinfo->jregs->pregs[], pick the
+// not-yet-allocated local with the highest usage score (reads + writes from
+// the pass-1 counts, scaled down for modified locals, refs and longs) and
+// bind it to that register.  Allocation stops when the best remaining score
+// falls below a payoff threshold (8 when optimizing for space, else 2).
+void Thumb2_RegAlloc(Thumb2_Info *jinfo)
+{
+  unsigned *locals_info = jinfo->locals_info;
+  unsigned i, j;
+  unsigned linfo;
+  unsigned score, max_score;
+  unsigned local;
+  unsigned nlocals = jinfo->method->max_locals();
+  unsigned *pregs = jinfo->jregs->pregs;
+  unsigned npregs = jinfo->jregs->npregs;
+
+  for (i = 0; i < npregs; i++) jinfo->jregs->mapping[i] = -1;
+  for (i = 0; i < npregs; i++) {
+    // Skip the register reserved for interpreter state when it is live.
+    if (jinfo->use_istate && pregs[i] == Ristate) continue;
+    max_score = 0;
+    for (j = 0; j < nlocals; j++) {
+      linfo = locals_info[j];
+
+      // Already allocated, or a double (kept in VFP regs): not a candidate.
+      if (linfo & ((1<<LOCAL_ALLOCATED)|(1<<LOCAL_DOUBLE))) continue;
+      score = LOCAL_READS(linfo) + LOCAL_WRITES(linfo);
+      if (linfo & (1<<LOCAL_MODIFIED)) score = (score+1) >> 2;
+      if (linfo & (1<<LOCAL_REF)) score = score - (score >> 2);
+      if (linfo & (1<<LOCAL_LONG)) score = (score+1) >> 2;
+      if (score > max_score) max_score = score, local = j;
+    }
+    // 'local' is only meaningful when max_score > 0, so this break guards
+    // both the low-payoff case and the uninitialized-'local' case.
+    if (max_score < (OSPACE ? 8 : 2)) break;
+    locals_info[local] |= 1<<LOCAL_ALLOCATED;
+    jinfo->jregs->r_local[local] = pregs[i];
+    jinfo->jregs->mapping[i] = local;
+  }
+#ifdef T2_PRINT_REGUSAGE
+  if (t2_print_regusage) {
+    fprintf(stderr, "Regalloc: %d physical registers allocated as follows\n", npregs);
+    for (j = 0; j < nlocals; j++) {
+      unsigned r = jinfo->jregs->r_local[j];
+      if (r) {
+	unsigned typ = (locals_info[j] >> LOCAL_INT) & 0x1f;
+	fprintf(stderr, "  ARM Reg R%d -> local %d (type = %s)\n", r, j, local_types[LOG2(typ)]);
+      }
+    }
+  }
+#endif
+}
+
+//-------------------------------------------------------------------------------------
+
+// Target ISA selection: generate Thumb-2, not ThumbEE.
+#define Thumb2		1
+#define ThumbEE		0
+
+// Multiple-transfer addressing modes (Decrement/Increment After/Before).
+#define	DA	0
+#define	IA	1
+#define DB	2
+#define IB	3
+
+// Stack push variants (Empty/Full, Descending/Ascending).
+#define	PUSH_ED	0
+#define PUSH_EA	1
+#define	PUSH_FD	2
+#define	PUSH_FA	3
+
+// Matching stack pop variants.
+#define	POP_FA	0
+#define	POP_FD	1
+#define	POP_EA	2
+#define	POP_ED	3
+
+// 32-bit rotate right / left.  NOTE(review): sh == 0 would shift by 32,
+// which is undefined in C; callers appear to pass 1..31 only -- confirm.
+#define ROR(imm, sh) (((imm) >> (sh)) | ((imm) << (32 - (sh))))
+#define ROL(imm, sh) (((imm) << (sh)) | ((imm) >> (32 - (sh))))
+
+// Magnitude and U (add/subtract) bit of a signed immediate offset.
+// NOTE: this abs() shadows the library function and evaluates twice.
+#define abs(i) ((i) < 0 ? -(i) : (i))
+#define U(i) ((i) < 0 ? 0 : 1)
+
+// Load/store operation selectors, indexing t_ls_ops[] below.
+#define LS_STR		0
+#define	LS_STRB		1
+#define	LS_STRH		2
+#define LS_LDRSB	3
+#define	LS_LDR		4
+#define LS_LDRB		5
+#define	LS_LDRH		6
+#define LS_LDRSH	7
+
+// Classify an LS_* op by direction and transfer width.
+#define LS_IS_LDR(op)	((op) >= LS_LDRSB)
+#define LS_IS_WORD(op)	(((op) & 3) == LS_STR)
+#define LS_IS_BYTE(op)	(((op) & 3) == LS_STRB || (op) == LS_LDRSB)
+#define LS_IS_HW(op)	(((op) & 3) == LS_STRH || (op) == LS_LDRSH)
+
+// Per-LS_* pair of base encodings: 16-bit Thumb opcode, 32-bit Thumb-2
+// opcode (see T1_LS_OP / T2_LS_OP accessors further down).
+static const unsigned t_ls_ops[16] = {
+	0x5000,		0xf8400000,
+	0x5400,		0xf8000000,
+	0x5200,		0xf8200000,
+	0x5600,		0xf9100000,
+	0x5800,		0xf8500000,
+	0x5c00,		0xf8100000,
+	0x5a00,		0xf8300000,
+	0x5e00,		0xf9300000,
+};
+
+// Data-processing operation selectors, indexing n_ops[] and t_dop_ops[].
+#define DP_ADC	0
+#define DP_ADD	1
+#define DP_AND	2
+#define DP_ASR	3
+#define DP_BIC	4
+#define DP_CMN	5
+#define DP_CMP	6
+#define DP_EOR	7
+#define DP_LSL	8
+#define DP_LSR	9
+#define DP_MOV	10
+#define DP_MVN	11
+#define DP_ORN	12
+#define DP_ORR	13
+#define DP_ROR	14
+#define DP_RSB	15
+#define DP_SBC	16
+#define DP_SUB	17
+#define DP_TEQ	18
+#define DP_TST	19
+#define DP_MUL	20
+
+// For each DP_* op, the op that computes the same result when the immediate
+// is negated or complemented ((unsigned)-1 = no such equivalent).  Lets the
+// encoder fall back to the dual op when an immediate will not encode.
+static const unsigned n_ops[] = {
+	DP_SBC,		// ADC	x, y == SBC x, ~y
+	DP_SUB,		// ADD	x, y == SUB x, -y
+	DP_BIC,		// AND	x, y == BIC x, ~y
+	(unsigned)-1,	// ASR
+	DP_AND,		// BIC	x, y == AND x, ~y
+	DP_CMP,		// CMN	x, y == CMP x, -y
+	DP_CMN,		// CMP	x, y == CMN x, -y
+	(unsigned)-1,	// EOR
+	(unsigned)-1,	// LSL
+	(unsigned)-1,	// LSR
+	DP_MVN,		// MOV	x, y == MVN x, ~y
+	DP_MOV,		// MVN	x, y == MOV x, ~y
+	DP_ORR,		// ORN	x, y == ORR x, ~y
+	DP_ORN,		// ORR	x, y == ORN x, ~y
+	(unsigned)-1,	// ROR
+	(unsigned)-1,	// RSB
+	DP_ADC,		// SBC	x, y == ADC x, ~y
+	DP_ADD,		// SUB	x, y == ADD x, -y
+	(unsigned)-1,	// TEQ
+	(unsigned)-1,	// TST
+	(unsigned)-1,	// MUL
+};
+
+#define N_OP(op)	n_ops[(op)]
+
+// For each DP_* op, the pair of 32-bit Thumb-2 base encodings:
+// immediate form (Rd, Rn, #imm) and register form (Rd, Rn, Rm).
+// MUL has no immediate form.
+static const unsigned t_dop_ops[] = {
+//	Rd, Rm, #N	Rd, Rn, Rm
+	0xf1400000,	0xeb400000,	// ADC
+	0xf1000000,	0xeb000000,	// ADD
+	0xf0000000,	0xea000000,	// AND
+	0xea4f0020,	0xfa40f000,	// ASR
+	0xf0200000,	0xea200000,	// BIC
+	0xf1100f00,	0xeb100f00,	// CMN
+	0xf1b00f00,	0xebb00f00,	// CMP
+	0xf0800000,	0xea800000,	// EOR
+	0xea4f0000,	0xfa00f000,	// LSL
+	0xea4f0010,	0xfa20f000,	// LSR
+	0xf04f0000,	0xea4f0000,	// MOV
+	0xf06f0000,	0xea6f0000,	// MVN
+	0xf0600000,	0xea600000,	// ORN
+	0xf0400000,	0xea400000,	// ORR
+	0xea4f0030,	0xfa6f0000,	// ROR
+	0xf1c00000,	0xebc00000,	// RSB
+	0xf1600000,	0xeb600000,	// SBC
+	0xf1a00000,	0xeba00000,	// SUB
+	0xf0900f00,	0xea900f00,	// TEQ
+	0xf0100f00,	0xea100f00,	// TST
+	(unsigned)-1,	0xfb00f000,	// MUL
+};
+
+// Accessors for the two columns of t_dop_ops[].
+#define DP_IMM(op)	t_dop_ops[(op)*2]
+#define DP_REG(op)	t_dop_ops[(op)*2+1]
+
+// VFP arithmetic operation selectors, indexing t_vop_ops[].
+#define VP_ADD	0
+#define VP_SUB	1
+#define VP_MUL	2
+#define VP_DIV	3
+#define VP_SQRT 4
+
+// Single-precision base encodings; T_VOP_REG_D sets bit 8 for doubles.
+static const unsigned t_vop_ops[] = {
+	0xee300a00,			// VADD
+	0xee300a40,			// VSUB
+	0xee200a00,			// VMUL
+	0xee800a00,			// VDIV
+	0xeeb10bc0			// VSQRT
+};
+
+#define VP_REG(op)	t_vop_ops[op]
+
+// Accessors for the two columns of t_ls_ops[].
+#define T1_LS_OP(op)	t_ls_ops[(op)*2]
+#define T2_LS_OP(op)	t_ls_ops[(op)*2+1]
+
+// Shift-type field values for register-shifted operands.
+#define SHIFT_LSL	0
+#define SHIFT_LSR	1
+#define SHIFT_ASR	2
+#define SHIFT_ROR	3
+#define SHIFT_RRX	3
+
+//------------------------------------------------------------------------------------
+
+// Bit 0 of a branch target: stay in Thumb state.
+#define TBIT 1
+
+// ThumbEE frame load/store encodings.
+#define E_STR_IMM6(src, imm6)		(0xce00 | ((imm6)<<3) | (src))
+#define E_LDR_IMM6(dst, imm6)		(0xcc00 | ((imm6)<<3) | (dst))
+#define E_LDR_IMM5(dst, imm5)		(0xcb00 | ((imm5)<<3) | (dst))
+#define E_LDR_IMM3(dst, base, imm3)	(0xc800 | ((imm3)<<6) | ((base) << 3) | (dst))
+
+// Immediate-to-register moves: 16-bit MOV, byte-lane ("typ" selects the
+// replication pattern), rotated-constant, and MOVW/MOVT 16-bit halves.
+#define T_MOV_IMM8(r, imm8)		(0x2000 | ((r)<<8) | (imm8))
+#define T_MOV_BYTELANE(r, typ, b)	(0xf04f0000 | ((typ) << 12) | ((r) << 8) | (b))
+#define T_MOV_ROT_IMM(r, ror, imm)	\
+		(0xf04f0000 | (((ror) & 0x10) << (26-4)) | (((ror) & 0xe) << (12-1)) |	\
+		(((ror) & 1) << 7) | ((r) << 8) | ((imm) & 0x7f))
+#define T_MOVW_IMM16(r, imm)		\
+		(0xf2400000 | (((imm) & 0xf000) << (16-12)) | (((imm) & 0x800) << (26-11)) | \
+		(((imm) & 0x700) << (12-8)) | ((imm) & 0xff) | ((r) << 8))
+#define T_MOVT_IMM16(r, imm)		\
+		(0xf2c00000 | (((imm) & 0xf000) << (16-12)) | (((imm) & 0x800) << (26-11)) | \
+		(((imm) & 0x700) << (12-8)) | ((imm) & 0xff) | ((r) << 8))
+#define T_MVN_BYTELANE(r, typ, b)	(0xf06f0000 | ((typ) << 12) | ((r) << 8) | (b))
+#define T_MVN_ROT_IMM(r, ror, imm)	(0xf06f0000 | (((ror) & 0x10) << (26-4)) |	\
+		(((ror) & 0xe) << (12-1)) | (((ror) & 1) << 7) | ((r) << 8) | ((imm) & 0x7f))
+
+// ORR/ORN with a rotated-constant immediate.
+#define T_ORR_ROT_IMM(dst, src, ror, imm)	(0xf0400000 | (((ror) & 0x10) << (26-4)) | \
+		(((ror) & 0xe) << (12-1)) | (((ror) & 1) << 7) | ((src) << 16) |	\
+		((dst) << 8) | ((imm) & 0x7f))
+#define T_ORN_ROT_IMM(dst, src, ror, imm)	(0xf0600000 | (((ror) & 0x10) << (26-4)) | \
+		(((ror) & 0xe) << (12-1)) | (((ror) & 1) << 7) | ((src) << 16) |	\
+		((dst) << 8) | ((imm) & 0x7f))
+
+// Word stores: 16-bit imm5 (word-scaled), SP-relative, 32-bit imm12, and
+// the imm8 form with pre/post-index and writeback control bits.
+#define T_STR_IMM5(src, base, imm5)	(0x6000 | ((imm5) << 6) | ((base) << 3) | (src))
+#define T_STR_SP_IMM8(src, imm8)	(0x9000 | ((src) << 8) | (imm8))
+#define T_STR_IMM12(src, base, imm12)	(0xf8c00000 | ((src)<<12) | ((base)<<16) | (imm12))
+#define T_STR_IMM8(src, base, imm8, pre, wb)	(0xf8400800 | ((src)<<12) | 		\
+		((base)<<16) | ((pre)<<10) | (U(imm8)<<9) | ((wb)<<8) | abs(imm8))
+
+// Word loads, mirroring the T_STR_* forms above.
+#define T_LDR_IMM5(dst, base, imm5)	(0x6800 | ((imm5) << 6) | ((base) << 3) | (dst))
+// Fix: the parameter was declared 'src' while the expansion referenced
+// 'dst', so any use site would expand to an undefined identifier (an LDR
+// transfers into a destination register).  Same fix for T_LDR_IMM8 below.
+#define T_LDR_SP_IMM8(dst, imm8)	(0x9800 | ((dst) << 8) | (imm8))
+#define T_LDR_IMM12(dst, base, imm12)	(0xf8d00000 | ((dst)<<12) | ((base)<<16) | (imm12))
+#define T_LDR_IMM8(dst, base, imm8, pre, wb)	(0xf8500800 | ((dst)<<12) | 		\
+		((base)<<16) | ((pre)<<10) | (U(imm8)<<9) | ((wb)<<8) | abs(imm8))
+
+// Byte stores.
+#define T_STRB_IMM5(src, base, imm5)	(0x7000 | ((imm5) << 6) | ((base) << 3) | (src))
+#define T_STRB_IMM12(src, base, imm12)	(0xf8800000 | ((src)<<12) | ((base)<<16) | (imm12))
+#define T_STRB_IMM8(src, base, imm8, pre, wb)	(0xf8000800 | ((src)<<12) | 		\
+		((base)<<16) | ((pre)<<10) | (U(imm8)<<9) | ((wb)<<8) | abs(imm8))
+
+// Byte loads (zero-extending).
+#define T_LDRB_IMM5(dst, base, imm5)	(0x7800 | ((imm5) << 6) | ((base) << 3) | (dst))
+#define T_LDRB_IMM12(dst, base, imm12)	(0xf8900000 | ((dst)<<12) | ((base)<<16) | (imm12))
+#define T_LDRB_IMM8(dst, base, imm8, pre, wb)	(0xf8100800 | ((dst)<<12) | 		\
+		((base)<<16) | ((pre)<<10) | (U(imm8)<<9) | ((wb)<<8) | abs(imm8))
+
+// Halfword stores.  NOTE(review): the first parameter is the register being
+// stored; the name 'dst' is misleading (compare T_STRB_* above).
+#define T_STRH_IMM5(dst, base, imm5)	(0x8000 | ((imm5) << 6) | ((base) << 3) | (dst))
+#define T_STRH_IMM12(dst, base, imm12)	(0xf8a00000 | ((dst)<<12) | ((base)<<16) | (imm12))
+#define T_STRH_IMM8(dst, base, imm8, pre, wb)	(0xf8200800 | ((dst)<<12) | 		\
+		((base)<<16) | ((pre)<<10) | (U(imm8)<<9) | ((wb)<<8) | abs(imm8))
+
+// Halfword loads (zero-extending).
+#define T_LDRH_IMM5(dst, base, imm5)	(0x8800 | ((imm5) << 6) | ((base) << 3) | (dst))
+#define T_LDRH_IMM12(dst, base, imm12)	(0xf8b00000 | ((dst)<<12) | ((base)<<16) | (imm12))
+#define T_LDRH_IMM8(dst, base, imm8, pre, wb)	(0xf8300800 | ((dst)<<12) | 		\
+		((base)<<16) | ((pre)<<10) | (U(imm8)<<9) | ((wb)<<8) | abs(imm8))
+
+// Sign-extending halfword / byte loads.
+#define T_LDRSH_IMM12(dst, base, imm12)	(0xf9b00000 | ((dst)<<12) | ((base)<<16) | (imm12))
+#define T_LDRSH_IMM8(dst, base, imm8, pre, wb)	(0xf9300800 | ((dst)<<12) | 		\
+		((base)<<16) | ((pre)<<10) | (U(imm8)<<9) | ((wb)<<8) | abs(imm8))
+
+#define T_LDRSB_IMM12(dst, base, imm12)	(0xf9900000 | ((dst)<<12) | ((base)<<16) | (imm12))
+#define T_LDRSB_IMM8(dst, base, imm8, pre, wb)	(0xf9100800 | ((dst)<<12) | 		\
+		((base)<<16) | ((pre)<<10) | (U(imm8)<<9) | ((wb)<<8) | abs(imm8))
+
+// Doubleword (register-pair) load/store with pre/post-index and writeback.
+#define T_LDRD_IMM(lo, hi, base, imm8, pre, wb)	(0xe8500000 | ((base)<<16) |		\
+		((lo) << 12) | ((hi)<<8) | ((pre)<<24) | (U(imm8)<<23) | ((wb)<<21) | abs(imm8))
+#define T_STRD_IMM(lo, hi, base, imm8, pre, wb)	(0xe8400000 | ((base)<<16) |		\
+		((lo) << 12) | ((hi)<<8) | ((pre)<<24) | (U(imm8)<<23) | ((wb)<<21) | abs(imm8))
+
+// Exclusive load/store; 'off' is encoded word-scaled (>> 2).
+#define T_LDREX(dst, base, off) (0xe8500f00 | ((base) << 16) | ((dst) << 12) | ((off) >> 2))
+#define T_STREX(dst, src, base, off) (0xe8400000 | ((base) << 16) | \
+		((src) << 12) | ((dst) << 8) | ((off >> 2)))
+
+#define T_LDREXD(dst1, dst2, base) (0xe8d0007f | ((base) << 16) | ((dst1) << 12) | (dst2 << 8))
+#define T_STREXD(dst, src1, src2, base) (0xe8c00070 | ((base) << 16) | ((src1) << 12) | (src2 << 8) | dst)
+
+// Store/load multiple: 16-bit STM/LDM (low regs) and 32-bit forms with
+// addressing-mode ('st') and writeback bits; PUSH/POP map LR/PC to bit 8.
+#define T_STM8(base, regset)		(0xc000 | ((base) << 8) | (regset))
+#define T_STM16(base, regset, st, wb)	(0xe8000000 | ((st) << 23) | ((wb) << 21) |	\
+		((base) << 16) | (regset))
+
+#define T_LDM8(base, regset)		(0xc800 | ((base) << 8) | (regset))
+#define	T_LDM16(base, regset, st, wb)	(0xe8100000 | ((st) << 23) | ((wb) << 21) |	\
+		((base) << 16) | (regset))
+#define T_POP(regset)	(0xbc00 | (((regset & (1<<ARM_PC)) >> ARM_PC) << 8) | (regset & 0xff))
+#define T_PUSH(regset)	(0xb400 | (((regset & (1<<ARM_LR)) >> ARM_LR) << 8) | (regset & 0xff))
+
+// Register-offset load/store; 'op' is a T1_LS_OP/T2_LS_OP base encoding.
+#define	T1_LDR_STR_REG(op, xfer, base, off) 	((op) | ((off) << 6) | ((base) << 3) | (xfer))
+#define T2_LDR_STR_REG(op, xfer, base, off, sh)	((op) | ((base) << 16) | ((xfer) << 12) | \
+		((sh)<<4) | (off))
+
+// ThumbEE check-array and handler-branch encodings; misc control (DMB etc).
+#define T_CHKA(size, idx)		(0xca00 | (((size) & 8) << (7-3)) | ((idx) << 3) | ((size) & 7))
+#define T_HBL(handler)			(0xc300 | (handler))
+#define T_MISC_CONTROL(op, option)	(0xf3bf8f00 | ((op)<<4) | option)
+#define T_ENTER_LEAVE(enter)		(T_MISC_CONTROL(enter, 0xf))
+#define T_DMB(option)			(T_MISC_CONTROL(5, option))
+
+// ADD with immediate: 16-bit imm3/imm8, 32-bit byte-lane, rotated-constant,
+// and the plain 12-bit (T4) encodings.
+#define T1_ADD_IMM(dst, src, imm3)	(0x1c00 | ((imm3) << 6) | ((src) << 3) | (dst))
+#define T2_ADD_IMM(r, imm8)		(0x3000 | ((r) << 8) | (imm8))
+#define T3_ADD_BYTELANE(dst, src, typ, b) (0xf1000000 | ((src) << 16) | ((typ) << 12) | \
+		((dst) << 8) | (b))
+#define T3_ADD_ROT_IMM(dst, src, ror, imm) (0xf1000000 | ((src) << 16) | ((dst) << 8) | \
+		(((ror) & 0x10) << (26-4)) | (((ror) & 0x0e) << (12-1)) | (((ror) & 1) << 7) | \
+		((imm) & 0x7f))
+#define T4_ADD_IMM(dst, src, imm)	(0xf2000000 | ((src) << 16) | ((dst) << 8) | \
+		(((imm) & 0x800) << (26-11)) | (((imm) & 0x700) << (12-8)) | ((imm) & 0xff))
+
+// SUB with immediate, mirroring the ADD forms.
+#define T1_SUB_IMM(dst, src, imm3)	(0x1e00 | ((imm3) << 6) | ((src) << 3) | (dst))
+#define T2_SUB_IMM(r, imm8)		(0x3800 | ((r) << 8) | (imm8))
+#define T3_SUB_BYTELANE(dst, src, typ, b) (0xf1a00000 | ((src) << 16) | ((typ) << 12) | \
+		((dst) << 8) | (b))
+#define T3_SUB_ROT_IMM(dst, src, ror, imm) (0xf1a00000 | ((src) << 16) | ((dst) << 8) | \
+		(((ror) & 0x10) << (26-4)) | (((ror) & 0x0e) << (12-1)) | (((ror) & 1) << 7) | \
+		((imm) & 0x7f))
+#define T4_SUB_IMM(dst, src, imm)	(0xf2a00000 | ((src) << 16) | ((dst) << 8) | \
+		(((imm) & 0x800) << (26-11)) | (((imm) & 0x700) << (12-8)) | ((imm) & 0xff))
+
+// Generic data-processing operand packers used with DP_IMM/DP_REG bases.
+// NOTE: this T3_ADD_BYTELANE is a byte-identical redefinition of the one
+// above (harmless, but redundant).
+#define T_DOP_BYTELANE(op, dst, src, typ, b)	((op) | ((dst) << 8) | ((src) << 16) | \
+		((typ) << 12) | (b))
+#define T_DOP_ROT_IMM(op, dst, src, ror, imm)	((op) | ((dst) << 8) | ((src) << 16) | \
+		(((ror) & 0x10) << (26-4)) | (((ror) & 0x0e) << (12-1)) | (((ror) & 1) << 7) | \
+		((imm) & 0x7f))
+#define T_SHIFT_IMM(op, dst, src, imm)	((op) | ((dst) << 8) | (src) | \
+		(((imm) & 3) << 6) | (((imm) & 0x1c) << (12-2)))
+#define T_DOP_REG(op, dst, lho, rho, st, sh)	((op) | ((dst) << 8) | ((lho) << 16) | (rho) | \
+		((st) << 4) | (((sh) & 0x1c) << (12-2)) | (((sh) & 3) << 6))
+#define T3_ADD_BYTELANE(dst, src, typ, b) (0xf1000000 | ((src) << 16) | ((typ) << 12) | \
+		((dst) << 8) | (b))
+
+// Compare, negate, complement, and high/low register move (16-bit forms).
+#define T_CMP_IMM(src, imm)		(0x2800 | ((src) << 8) | (imm))
+#define T_CMP_REG(lho, rho)		(0x4280 | ((rho) << 3) | (lho))
+
+#define T_NEG(dst, src)		(0x4240 | (dst) | ((src) << 3))
+#define T_MVN(dst, src)		(0x43c0 | (dst) | ((src) << 3))
+#define T_MOV(dst, src)		(0x4600 | (((dst) & 8) << (7-3)) | ((src) << 3) | ((dst) & 7))
+
+// VFP <-> ARM register moves, single and double precision.
+#define T_VMOVS_TOARM(dst, src)	\
+	(0xee100a10 | ((dst) << 12) | (((src) & 1) << 7) | (((src) & 0x1e)<<(16-1)))
+#define T_VMOVS_TOVFP(dst, src) \
+	(0xee000a10 | ((src) << 12) | (((dst) & 1) << 7) | (((dst) & 0x1e)<<(16-1)))
+
+#define T_VMOVD_TOARM(dst_lo, dst_hi, src) \
+  (0xec500b10 | ((dst_lo) << 12) | ((dst_hi) << 16) | (((src) & 0x10)<<(5-4)) | ((src) & 0x0f))
+#define T_VMOVD_TOVFP(dst, src_lo, src_hi) \
+  (0xec400b10 | ((src_lo) << 12) | ((src_hi) << 16) | (((dst) & 0x10)<<(5-4)) | ((dst) & 0x0f))
+
+// VFP reg to VFP reg move.
+#define T_VMOVD_VFP_TOVFP(dst, src) (0xeeb00b40 | (((dst) & 0x0f) << 12) | ((src) & 0x0f))
+
+// VFP arithmetic: _S packs single-precision register fields, _D sets bit 8
+// for double precision and packs the D-register fields.
+#define T_VOP_REG_S(op, dst, lho, rho)	((op) |				\
+		(((dst) & 1) << 22) | (((dst) & 0x1e) << (12-1)) | 	\
+		(((lho) & 1) << 7) | (((lho) & 0x1e) << (16-1))	 |	\
+		(((rho) & 1) << 5) | (((rho) & 0x1e) >> 1))
+#define T_VOP_REG_D(op, dst, lho, rho)	((op) |	(1 << 8) |		\
+		(((dst) & 0x10) << (22-4)) | (((dst) & 0xf) << 12) | 	\
+		(((lho) & 0x10) << (7-4)) | (((lho) & 0xf) << 16)   |	\
+		(((rho) & 0x10) << (5-4)) | ((rho) & 0xf))
+
+// VFP compare ('e' selects the exception-raising variant) and VMRS.
+#define T_VCMP_S(lho, rho, e)		(0xeeb40a40 | ((e) << 7) |	\
+		(((lho) & 1) << 22) | (((lho) & 0x1e) << (12-1)) |	\
+		(((rho) & 1) << 5) | (((rho) & 0x1e) >>1))
+#define T_VCMP_D(lho, rho, e)		(0xeeb40b40 | ((e) << 7) |	\
+		(((lho) & 0x10) << (22-4)) | (((lho) & 0x0f) << 12) |	\
+		(((rho) & 0x10) << (5-4)) | ((rho) & 0x0f))
+#define T_VMRS(dst)	(0xeef10a10 | ((dst) << 12))
+
+// Multiply-accumulate and 64-bit unsigned multiply.
+#define T_MLA(res, lho, rho, a) \
+		(0xfb000000 | ((res) << 8) | ((lho) << 16) | (rho) | ((a) << 12))
+#define T_UMULL(res_lo, res_hi, lho, rho) \
+		(0xfba00000 | ((res_lo) << 12) | ((res_hi) << 8) | ((lho) << 16) | (rho))
+
+// Branch-exchange and table-branch (halfword) dispatch.
+#define T_BX(src)		(0x4700 | ((src) << 3))
+#define T_TBH(base, idx)	(0xe8d0f010 | ((base) << 16) | (idx))
+
+// Sign/zero extension, 16-bit and 32-bit encodings.
+#define T_SXTB(dst, src)	(0xb240 | ((src) << 3) | (dst))
+#define T_SXTH(dst, src)	(0xb200 | ((src) << 3) | (dst))
+#define T2_SXTB(dst, src)	(0xfa4ff080 | ((dst) << 8) | (src))
+#define T2_SXTH(dst, src)	(0xfa0ff080 | ((dst) << 8) | (src))
+#define T_UXTH(dst, src)	(0xb280 | ((src) << 3) | (dst))
+#define T2_UXTH(dst, src)	(0xfa1ff080 | ((dst) << 8) | (src))
+
+// Emit one 16-bit halfword into the code buffer.  On overflow the whole
+// compilation is abandoned via longjmp (non-local fatal exit).  Returns 0.
+int out_16(CodeBuf *codebuf, u32 s)
+{
+  if (codebuf->idx >= codebuf->limit)
+	longjmp(compiler_error_env, COMPILER_RESULT_FATAL);
+  codebuf->codebuf[codebuf->idx++] = s;
+  return 0;
+}
+
+// Emit a 32-bit Thumb-2 instruction as two halfwords, most significant
+// halfword first (Thumb-2 instruction stream order).
+int out_16x2(CodeBuf *codebuf, u32 sx2)
+{
+  unsigned s1 = sx2 >> 16;
+  unsigned s2 = sx2 & 0xffff;
+
+  out_16(codebuf, s1);
+  return out_16(codebuf, s2);
+}
+
+// Emit a raw 32-bit word (data, not instruction halfword order) into the
+// buffer; idx counts halfwords, hence the +2.  NOTE(review): the u32 store
+// through the halfword buffer assumes the output position is word-aligned
+// and alignment-tolerant hardware -- confirm callers align first.
+int out_32(CodeBuf *codebuf, u32 w)
+{
+  if (codebuf->idx + 2 > codebuf->limit)
+	longjmp(compiler_error_env, COMPILER_RESULT_FATAL);
+  *(u32 *)&(codebuf->codebuf[codebuf->idx]) = w;
+  codebuf->idx += 2;
+  return 0;
+}
+
+// Current output position as an absolute address.
+u32 out_pos(CodeBuf *codebuf)
+{
+  return (u32)&(codebuf->codebuf[codebuf->idx]);
+}
+
+// Current output position as a byte offset from the buffer start
+// (idx counts halfwords, so *2).
+u32 out_loc(CodeBuf *codebuf)
+{
+  return codebuf->idx * 2;
+}
+
+// Pad with zero halfwords until the output address is 'align'-aligned
+// (align must be a power of two); returns the aligned address.
+u32 out_align(CodeBuf *codebuf, unsigned align)
+{
+  while ((out_pos(codebuf) & (align-1)) != 0) out_16(codebuf, 0);
+  return out_pos(codebuf);
+}
+
+// Pad with zero halfwords until (address mod align) == offset; returns the
+// resulting address.
+u32 out_align_offset(CodeBuf *codebuf, unsigned align, unsigned offset)
+{
+  while ((out_pos(codebuf) & (align-1)) != offset) out_16(codebuf, 0);
+  return out_pos(codebuf);
+}
+
+// If 'imm' is expressible as a Thumb-2 rotated constant -- an 8-bit value
+// with its top bit set, rotated right -- return the rotate-right amount
+// (always >= 8), else -1.  Used with T_MOV_ROT_IMM / T_ORR_ROT_IMM, which
+// encode only the low 7 bits (the 8th bit is implied by the encoding).
+int thumb_single_shift(unsigned imm)
+{
+  unsigned lsl;
+
+  if (!imm) return -1;
+  // Normalize so the most significant set bit reaches bit 31.
+  lsl = 0;
+  while (!(imm & 0x80000000)) {
+    imm <<= 1;
+    lsl++;
+  }
+  if (lsl >= 24) return -1;
+  // Encodable iff all set bits now fit in the top byte.
+  if ((imm & 0xff000000) == imm) return lsl+8;
+  return -1;
+}
+
+// Classify 'imm' for the Thumb-2 byte-lane modified-immediate encodings:
+//   0 = 0x000000XY, 1 = 0x00XY00XY, 2 = 0xXY00XY00, 3 = 0xXYXYXYXY;
+// returns -1 if none apply.  Later tests deliberately override earlier
+// ones so the cheapest matching type wins (type 0 last).
+int thumb_bytelane(u32 imm)
+{
+    unsigned b1 = imm & 0xff;
+    unsigned b2 = (imm >> 8) & 0xff;
+    unsigned b3 = (imm >> 16) & 0xff;
+    unsigned b4 = imm >> 24;
+    int mov_type = -1;
+
+    if (b1 == b3 && b2 == 0 && b4 == 0) mov_type = 1;
+    if (b1 == b2 && b1 == b3 && b1 == b4) mov_type = 3;
+    if (b2 == b4 && b1 == 0 && b3 == 0) mov_type = 2;
+    if (imm < 256) mov_type = 0;
+    return mov_type;
+}
+
+// Load an arbitrary 32-bit constant into register 'r', choosing the
+// shortest available encoding in order of preference: 16-bit MOV imm8,
+// byte-lane MOV/MVN, rotated-constant MOV/MVN, MOVW, MOV8+ORR, and
+// finally the two-instruction MOVW/MOVT pair.
+int mov_imm(CodeBuf *codebuf, Reg r, u32 imm)
+{
+  int mov_type, rol;
+
+  if (Thumb2) {
+    if (r < ARM_R8 && imm < 256)
+      return out_16(codebuf, T_MOV_IMM8(r, imm));
+    mov_type = thumb_bytelane(imm);
+    if (mov_type >= 0) {
+      // Type 2 replicates into the odd byte lanes; shift the byte down.
+      if (mov_type == 2) imm >>= 8;
+      return out_16x2(codebuf, T_MOV_BYTELANE(r, mov_type, (imm & 0xff)));
+    }
+    mov_type = thumb_bytelane(~imm);
+    if (mov_type >= 0) {
+      imm = ~imm;
+      if (mov_type == 2) imm >>= 8;
+      return out_16x2(codebuf, T_MVN_BYTELANE(r, mov_type, (imm & 0xff)));
+    }
+    rol = thumb_single_shift(imm);
+    if (rol >= 0)
+      return out_16x2(codebuf, T_MOV_ROT_IMM(r, rol, ROL(imm, rol)));
+    rol = thumb_single_shift(~imm);
+    if (rol >= 0)
+      return out_16x2(codebuf, T_MVN_ROT_IMM(r, rol, ROL(~imm, rol)));
+    if ((imm & ~0xffff) == 0)
+      return out_16x2(codebuf, T_MOVW_IMM16(r, imm & 0xffff));
+    if (r < ARM_R8) {
+      // Low byte via 16-bit MOV, remaining bits ORed in as a rotated const.
+      rol = thumb_single_shift(imm & ~0xff);
+      if (rol >= 0) {
+	out_16(codebuf, T_MOV_IMM8(r, imm & 0xff));
+	return out_16x2(codebuf, T_ORR_ROT_IMM(r, r, rol, ROL(imm & ~0xff, rol)));
+      }
+    }
+    // General case: 16 bits at a time.
+    out_16x2(codebuf, T_MOVW_IMM16(r, imm & 0xffff));
+    return out_16x2(codebuf, T_MOVT_IMM16(r, imm >> 16));
+  }
+  J_Unimplemented();
+}
+
+// Emit a register-offset load/store ('op' selects an LS_* operation) with
+// the offset optionally left-shifted by 'shift'.  Prefers the 16-bit
+// encoding when all registers are low and the shift allows it; only
+// pre-indexed, no writeback.  Unsupported forms abort via J_Unimplemented.
+int load_store_reg_no_wb(CodeBuf *codebuf, u32 op, Reg xfer, Reg base, Reg offset,
+							  u32 shift, int pre)
+{
+  if (pre) {
+    if (xfer < ARM_R8 && base < ARM_R8 && offset < ARM_R8) {
+      if (ThumbEE) {
+	// ThumbEE's 16-bit form implies a shift equal to the access size.
+	if ((shift == 0 && LS_IS_BYTE(op)) || (shift == 1 && LS_IS_HW(op)) ||
+							(shift == 2 && LS_IS_WORD(op)))
+	  return out_16(codebuf, T1_LDR_STR_REG(T1_LS_OP(op), xfer, base, offset));
+      } else if (shift == 0)
+	return out_16(codebuf, T1_LDR_STR_REG(T1_LS_OP(op), xfer, base, offset));
+    }
+    if (shift < 4)
+      return out_16x2(codebuf, T2_LDR_STR_REG(T2_LS_OP(op), xfer, base, offset, shift));
+  }
+  J_Unimplemented();
+}
+
+static int add_reg(CodeBuf *codebuf, u32 dst, u32 lho, u32 rho);
+
+// Register-offset load/store with optional base writeback.  Writeback is
+// synthesized as a separate ADD of the (unshifted) offset after the access;
+// for the pre-indexed case this matches ARM writeback semantics since the
+// accessed address equals the updated base.
+int load_store_reg(CodeBuf *codebuf, u32 op, Reg xfer, Reg base, Reg offset,
+							  u32 shift, int pre, int wb)
+{
+  int rc = load_store_reg_no_wb(codebuf, op, xfer, base, offset, shift, pre);
+  if (wb) {
+    return add_reg(codebuf, base, base, offset);
+  }
+  return rc;
+}
+
+// Thin convenience wrappers around load_store_reg, one per LS_* operation.
+int str_reg(CodeBuf *codebuf, Reg src, Reg base, Reg offset, u32 shift, int pre, int wb)
+{
+  return load_store_reg(codebuf, LS_STR, src, base, offset, shift, pre, wb);
+}
+
+int ldr_reg(CodeBuf *codebuf, Reg dst, Reg base, Reg offset, u32 shift, int pre, int wb)
+{
+  return load_store_reg(codebuf, LS_LDR, dst, base, offset, shift, pre, wb);
+}
+
+int strb_reg(CodeBuf *codebuf, Reg src, Reg base, Reg offset, u32 shift, int pre, int wb)
+{
+  return load_store_reg(codebuf, LS_STRB, src, base, offset, shift, pre, wb);
+}
+
+int ldrb_reg(CodeBuf *codebuf, Reg dst, Reg base, Reg offset, u32 shift, int pre, int wb)
+{
+  return load_store_reg(codebuf, LS_LDRB, dst, base, offset, shift, pre, wb);
+}
+
+int strh_reg(CodeBuf *codebuf, Reg src, Reg base, Reg offset, u32 shift, int pre, int wb)
+{
+  return load_store_reg(codebuf, LS_STRH, src, base, offset, shift, pre, wb);
+}
+
+int ldrh_reg(CodeBuf *codebuf, Reg dst, Reg base, Reg offset, u32 shift, int pre, int wb)
+{
+  return load_store_reg(codebuf, LS_LDRH, dst, base, offset, shift, pre, wb);
+}
+
+int ldrsh_reg(CodeBuf *codebuf, Reg dst, Reg base, Reg offset, u32 shift, int pre, int wb)
+{
+  return load_store_reg(codebuf, LS_LDRSH, dst, base, offset, shift, pre, wb);
+}
+
+int ldrsb_reg(CodeBuf *codebuf, Reg dst, Reg base, Reg offset, u32 shift, int pre, int wb)
+{
+  return load_store_reg(codebuf, LS_LDRSB, dst, base, offset, shift, pre, wb);
+}
+
+// LDREX dst, [base, #offset].  The byte offset must be word-aligned and
+// below 1024 (the instruction encodes offset/4 in 8 bits).
+int ldrex_imm(CodeBuf *codebuf, Reg dst, Reg base, unsigned offset)
+{
+  int encodable = ((offset & 3) == 0) && (offset < 256 * 4);
+  if (Thumb2 && encodable)
+    return out_16x2(codebuf, T_LDREX(dst, base, offset));
+  J_Unimplemented();
+}
+
+// STREX dst, src, [base, #offset] -- dst receives the 0/1 success result.
+// The byte offset must be word-aligned and below 1024.
+int strex_imm(CodeBuf *codebuf, Reg dst, Reg src, Reg base, unsigned offset)
+{
+  int encodable = ((offset & 3) == 0) && (offset < 256 * 4);
+  if (Thumb2 && encodable)
+    return out_16x2(codebuf, T_STREX(dst, src, base, offset));
+  J_Unimplemented();
+}
+
+// Doubleword exclusive load: LDREXD dst0, dst1, [base].
+int ldrexd(CodeBuf *codebuf, Reg dst0, Reg dst1, Reg base)
+{
+  if (Thumb2) {
+    return out_16x2(codebuf, T_LDREXD(dst0, dst1, base));
+  }
+  J_Unimplemented();
+}
+
+// Doubleword exclusive store: STREXD dst, src0, src1, [base];
+// dst receives the 0/1 success result.
+int strexd(CodeBuf *codebuf, Reg dst, Reg src0, Reg src1, Reg base)
+{
+  if (Thumb2) {
+    return out_16x2(codebuf, T_STREXD(dst, src0, src1, base));
+  }
+  J_Unimplemented();
+}
+
+// Store a word at an immediate offset, picking the narrowest encoding:
+// 16-bit imm5 form (low regs, word-aligned, < 128), 16-bit SP-relative
+// imm8 form, ThumbEE R9-relative imm6 form, 32-bit imm12 form, then the
+// 32-bit signed imm8 form for negative/indexed accesses.  Offsets that fit
+// none of these are materialised into IP and a register-offset STR is used.
+// !pre && !wb is canonicalised to a plain [base] access with offset 0.
+int str_imm(CodeBuf *codebuf, Reg src, Reg base, int offset, int pre, int wb)
+{
+  unsigned uoff;
+
+  if (!pre && !wb) pre = 1, offset = 0;
+  uoff = (unsigned)offset;
+  if (Thumb2) {
+    if (pre && !wb && offset >= 0) {
+      if (base < ARM_R8 && src < ARM_R8 && uoff < 128 && (uoff & 3) == 0)
+	return out_16(codebuf, T_STR_IMM5(src, base, uoff>>2));
+      if (base == ARM_SP && src < ARM_R8 && uoff < 1024 && (uoff &3) ==0)
+	return out_16(codebuf, T_STR_SP_IMM8(src, uoff>>2));
+      if (ThumbEE && base == ARM_R9 && src < ARM_R8 && uoff < 256 && (uoff & 3) == 0)
+	return out_16(codebuf, E_STR_IMM6(src, uoff>>2));
+      if (uoff < (1 << 12))
+	return out_16x2(codebuf, T_STR_IMM12(src, base, uoff));
+    } else if (offset < 256 && offset > -256)
+	return out_16x2(codebuf, T_STR_IMM8(src, base, offset, pre, wb));
+    JASSERT(base != ARM_IP && src != ARM_IP, "src or base == IP in str_imm");
+    mov_imm(codebuf, ARM_IP, offset);
+    return str_reg(codebuf, src, base, ARM_IP, 0, pre, wb);
+  }
+  J_Unimplemented();
+}
+
+// Load a word at an immediate offset, picking the narrowest encoding
+// (16-bit imm5, SP-relative imm8, ThumbEE R9/R10 forms, 32-bit imm12,
+// 32-bit signed imm8 for negative/indexed, else materialise in IP).
+// !pre && !wb is canonicalised to a plain [base] access with offset 0.
+int ldr_imm(CodeBuf *codebuf, Reg dst, Reg base, int offset, int pre, int wb)
+{
+  unsigned uoff;
+
+  if (!pre && !wb) pre = 1, offset = 0;
+  uoff = (unsigned)offset;
+  if (Thumb2) {
+    if (pre && !wb && offset >= 0) {
+      if (base < ARM_R8 && dst < ARM_R8 && uoff < 128 && (uoff & 3) == 0)
+	return out_16(codebuf, T_LDR_IMM5(dst, base, uoff>>2));
+      // Fix: the range and alignment tests were joined with bitwise '&'
+      // instead of '&&' (equivalent here by operator precedence, but a typo
+      // and inconsistent with every other test in this family).
+      if (base == ARM_SP && dst < ARM_R8 && uoff < 1024 && (uoff & 3) == 0)
+	return out_16(codebuf, T_LDR_SP_IMM8(dst, uoff>>2));
+      if (ThumbEE && base == ARM_R9 && dst < ARM_R8 && uoff < 256 && (uoff & 3) == 0)
+	return out_16(codebuf, E_LDR_IMM6(dst, uoff>>2));
+      if (ThumbEE && base == ARM_R10 && dst < ARM_R8 && uoff < 128 && (uoff & 3) == 0)
+	return out_16(codebuf, E_LDR_IMM5(dst, uoff>>2));
+      if (uoff < (1 << 12))
+	return out_16x2(codebuf, T_LDR_IMM12(dst, base, uoff));
+    } else {
+      if (ThumbEE && pre && !wb && offset <= 0 && offset > -32 && (uoff & 3) == 0 &&
+							base < ARM_R8 && dst < ARM_R8)
+	return out_16(codebuf, E_LDR_IMM3(dst, base, -offset >> 2));
+      if (offset < 256 && offset > -256)
+	return out_16x2(codebuf, T_LDR_IMM8(dst, base, offset, pre, wb));
+    }
+    JASSERT(base != ARM_IP, "base == IP in ldr_imm");
+    mov_imm(codebuf, ARM_IP, offset);
+    return ldr_reg(codebuf, dst, base, ARM_IP, 0, pre, wb);
+  }
+  J_Unimplemented();
+}
+
+// Store a byte at an immediate offset: 16-bit imm5 form for low regs with
+// uoff < 32, 32-bit imm12, 32-bit signed imm8, else materialise in IP.
+// NOTE(review): the assertion message says "str_imm" (copy-paste).
+int strb_imm(CodeBuf *codebuf, Reg src, Reg base, int offset, int pre, int wb)
+{
+  unsigned uoff;
+
+  if (!pre && !wb) pre = 1, offset = 0;
+  uoff = (unsigned)offset;
+  if (Thumb2) {
+    if (pre && !wb && offset >= 0) {
+      if (base < ARM_R8 && src < ARM_R8 && uoff < 32)
+	return out_16(codebuf, T_STRB_IMM5(src, base, uoff));
+      if (uoff < (1 << 12))
+	return out_16x2(codebuf, T_STRB_IMM12(src, base, uoff));
+    } else if (offset < 256 && offset > -256)
+	return out_16x2(codebuf, T_STRB_IMM8(src, base, offset, pre, wb));
+    JASSERT(base != ARM_IP && src != ARM_IP, "src or base == IP in str_imm");
+    mov_imm(codebuf, ARM_IP, offset);
+    return strb_reg(codebuf, src, base, ARM_IP, 0, pre, wb);
+  }
+  J_Unimplemented();
+}
+
+// Load a byte (zero-extended) at an immediate offset; same encoding
+// cascade as strb_imm().
+int ldrb_imm(CodeBuf *codebuf, Reg dst, Reg base, int offset, int pre, int wb)
+{
+  unsigned uoff;
+
+  if (!pre && !wb) pre = 1, offset = 0;
+  uoff = (unsigned)offset;
+  if (Thumb2) {
+    if (pre && !wb && offset >= 0) {
+      if (base < ARM_R8 && dst < ARM_R8 && uoff < 32)
+	return out_16(codebuf, T_LDRB_IMM5(dst, base, uoff));
+      if (uoff < (1 << 12))
+	return out_16x2(codebuf, T_LDRB_IMM12(dst, base, uoff));
+    } else if (offset < 256 && offset > -256)
+	return out_16x2(codebuf, T_LDRB_IMM8(dst, base, offset, pre, wb));
+    JASSERT(base != ARM_IP, "base == IP in ldr_imm");
+    mov_imm(codebuf, ARM_IP, offset);
+    return ldrb_reg(codebuf, dst, base, ARM_IP, 0, pre, wb);
+  }
+  J_Unimplemented();
+}
+
+// Store a halfword at an immediate offset: 16-bit imm5 form requires low
+// regs, halfword alignment and uoff < 64; then 32-bit imm12 / signed imm8,
+// else materialise the offset in IP.
+// NOTE(review): the assertion message says "str_imm" (copy-paste).
+int strh_imm(CodeBuf *codebuf, Reg src, Reg base, int offset, int pre, int wb)
+{
+  unsigned uoff;
+
+  if (!pre && !wb) pre = 1, offset = 0;
+  uoff = (unsigned)offset;
+  if (Thumb2) {
+    if (pre && !wb && offset >= 0) {
+      if (base < ARM_R8 && src < ARM_R8 && uoff < 64 && (uoff & 1) == 0)
+	return out_16(codebuf, T_STRH_IMM5(src, base, uoff>>1));
+      if (uoff < (1 << 12))
+	return out_16x2(codebuf, T_STRH_IMM12(src, base, uoff));
+    } else if (offset < 256 && offset > -256)
+	return out_16x2(codebuf, T_STRH_IMM8(src, base, offset, pre, wb));
+    JASSERT(base != ARM_IP && src != ARM_IP, "src or base == IP in str_imm");
+    mov_imm(codebuf, ARM_IP, offset);
+    return strh_reg(codebuf, src, base, ARM_IP, 0, pre, wb);
+  }
+  J_Unimplemented();
+}
+
+// Load a halfword (zero-extended) at an immediate offset; same encoding
+// cascade as strh_imm().
+int ldrh_imm(CodeBuf *codebuf, Reg dst, Reg base, int offset, int pre, int wb)
+{
+  unsigned uoff;
+
+  if (!pre && !wb) pre = 1, offset = 0;
+  uoff = (unsigned)offset;
+  if (Thumb2) {
+    if (pre && !wb && offset >= 0) {
+      if (base < ARM_R8 && dst < ARM_R8 && uoff < 64 && (uoff & 1) == 0)
+	return out_16(codebuf, T_LDRH_IMM5(dst, base, uoff>>1));
+      if (uoff < (1 << 12))
+	return out_16x2(codebuf, T_LDRH_IMM12(dst, base, uoff));
+    } else if (offset < 256 && offset > -256)
+	return out_16x2(codebuf, T_LDRH_IMM8(dst, base, offset, pre, wb));
+    JASSERT(base != ARM_IP, "base == IP in ldr_imm");
+    mov_imm(codebuf, ARM_IP, offset);
+    return ldrh_reg(codebuf, dst, base, ARM_IP, 0, pre, wb);
+  }
+  J_Unimplemented();
+}
+
+// Load a halfword sign-extended.  Sign-extending loads have no 16-bit
+// immediate form, so only the 32-bit imm12 / signed imm8 encodings apply
+// before falling back to an IP-materialised offset.
+int ldrsh_imm(CodeBuf *codebuf, Reg dst, Reg base, int offset, int pre, int wb)
+{
+  unsigned uoff;
+
+  if (!pre && !wb) pre = 1, offset = 0;
+  uoff = (unsigned)offset;
+  if (Thumb2) {
+    if (pre && !wb && offset >= 0) {
+      if (uoff < (1 << 12))
+	return out_16x2(codebuf, T_LDRSH_IMM12(dst, base, uoff));
+    } else if (offset < 256 && offset > -256)
+	return out_16x2(codebuf, T_LDRSH_IMM8(dst, base, offset, pre, wb));
+    JASSERT(base != ARM_IP, "base == IP in ldr_imm");
+    mov_imm(codebuf, ARM_IP, offset);
+    return ldrsh_reg(codebuf, dst, base, ARM_IP, 0, pre, wb);
+  }
+  J_Unimplemented();
+}
+
+// Load a byte sign-extended; same shape as ldrsh_imm() (32-bit encodings
+// only, then IP fallback).
+int ldrsb_imm(CodeBuf *codebuf, Reg dst, Reg base, int offset, int pre, int wb)
+{
+  unsigned uoff;
+
+  if (!pre && !wb) pre = 1, offset = 0;
+  uoff = (unsigned)offset;
+  if (Thumb2) {
+    if (pre && !wb && offset >= 0) {
+      if (uoff < (1 << 12))
+	return out_16x2(codebuf, T_LDRSB_IMM12(dst, base, uoff));
+    } else if (offset < 256 && offset > -256)
+	return out_16x2(codebuf, T_LDRSB_IMM8(dst, base, offset, pre, wb));
+    JASSERT(base != ARM_IP, "base == IP in ldr_imm");
+    mov_imm(codebuf, ARM_IP, offset);
+    return ldrsb_reg(codebuf, dst, base, ARM_IP, 0, pre, wb);
+  }
+  J_Unimplemented();
+}
+
+int add_imm(CodeBuf *codebuf, u32 dst, u32 src, u32 imm);
+
+// Load a register pair: LDRD dst_lo, dst_hi, [base, #offset].  The offset
+// must be word-aligned; the encoding holds offset/4 in a signed 8-bit field.
+// Out-of-range offsets are handled (pre-indexed, no write-back only) by
+// computing base+offset into IP first.
+// Removed the unused local 'uoff' that was declared and assigned but never
+// read.
+int ldrd_imm(CodeBuf *codebuf, Reg dst_lo, Reg dst_hi, Reg base, int offset, int pre, int wb)
+{
+  if (!pre && !wb) pre = 1, offset = 0;
+  if (Thumb2) {
+    if (offset < 256 * 4 && offset > -256 * 4 && (offset & 3) == 0)
+      return out_16x2(codebuf, T_LDRD_IMM(dst_lo, dst_hi, base, offset>>2, pre, wb));
+    if (pre && !wb) {
+      add_imm(codebuf, ARM_IP, base, offset);
+      return out_16x2(codebuf, T_LDRD_IMM(dst_lo, dst_hi, ARM_IP, 0, 1, 0));
+    }
+  }
+  J_Unimplemented();
+}
+
+// Store a register pair: STRD src_lo, src_hi, [base, #offset]; mirror of
+// ldrd_imm().  Removed the unused local 'uoff' that was declared and
+// assigned but never read.
+int strd_imm(CodeBuf *codebuf, Reg src_lo, Reg src_hi, Reg base, int offset, int pre, int wb)
+{
+  if (!pre && !wb) pre = 1, offset = 0;
+  if (Thumb2) {
+    if (offset < 256 * 4 && offset > -256 * 4 && (offset & 3) == 0)
+      return out_16x2(codebuf, T_STRD_IMM(src_lo, src_hi, base, offset>>2, pre, wb));
+    if (pre && !wb) {
+      add_imm(codebuf, ARM_IP, base, offset);
+      return out_16x2(codebuf, T_STRD_IMM(src_lo, src_hi, ARM_IP, 0, 1, 0));
+    }
+  }
+  J_Unimplemented();
+}
+
+// Store multiple.  Picks the narrowest encoding: 16-bit STM for low regs
+// (IA with write-back), 16-bit PUSH for SP-relative descending stores,
+// a plain STR when the set holds a single register, otherwise the 32-bit
+// STM.
+int stm(CodeBuf *codebuf, u32 regset, u32 base, u32 st, u32 wb)
+{
+  JASSERT(regset != 0, "regset != 0 in stm");
+  if (Thumb2) {
+    if (!ThumbEE && base < ARM_R8 && (regset & ~0xff) == 0 && st == IA && wb)
+      return out_16(codebuf, T_STM8(base, regset));
+    if (base == ARM_SP && (regset & ~0x40ff) == 0 && st == DB && wb)
+      return out_16(codebuf, T_PUSH(regset));
+    if ((regset & -regset) == regset)	// exactly one bit set => single STR
+      return str_imm(codebuf, LOG2(regset), base, (st & 1) ? 4 : -4, (st & 2) >> 1, wb);
+    // The original tested (st == PUSH_EA || st == PUSH_FD) and then fell
+    // through to an identical unconditional emit, so the test was dead code;
+    // emit the 32-bit STM unconditionally.
+    return out_16x2(codebuf, T_STM16(base, regset, st, wb));
+  }
+  J_Unimplemented();
+}
+
+// Load multiple; mirror of stm().  16-bit LDM for low regs (IA with
+// write-back), 16-bit POP for SP-relative ascending loads, a plain LDR
+// for a single-register set.  Unlike stm(), only POP_EA/POP_FD orderings
+// take the 32-bit LDM; any other ordering falls through to
+// J_Unimplemented().
+int ldm(CodeBuf *codebuf, u32 regset, u32 base, u32 st, u32 wb)
+{
+  JASSERT(regset != 0, "regset != 0 in ldm");	// fix: message said "stm"
+  if (Thumb2) {
+    if (!ThumbEE && base < ARM_R8 && (regset & ~0xff) == 0 && st == IA && wb)
+      return out_16(codebuf, T_LDM8(base, regset));
+    if (base == ARM_SP) {
+      if ((regset & ~0x80ff) == 0 && st == IA && wb)
+	return out_16(codebuf, T_POP(regset));
+    }
+    if ((regset & -regset) == regset)
+      return ldr_imm(codebuf, LOG2(regset), base, (st & 1) ? 4 : -4, (st & 2) >> 1, wb);
+    if (st == POP_EA || st == POP_FD)
+      return out_16x2(codebuf, T_LDM16(base, regset, st, wb));
+  }
+  J_Unimplemented();
+}
+
+// Data-processing op with a (possibly shifted) register operand.
+// Sets the S bit (flag-setting form) for everything except MUL.
+int dop_reg(CodeBuf *codebuf, u32 op, u32 dst, u32 lho, u32 rho, u32 sh_typ, u32 shift)
+{
+  unsigned s = 0;
+  if (op != DP_MUL) s = 1 << 20;
+//  JASSERT(dst != ARM_PC, "Terrible things happen if dst == PC && S bit set");
+  return out_16x2(codebuf, T_DOP_REG(DP_REG(op)|s, dst, lho, rho, sh_typ, shift));
+}
+
+// Like dop_reg() but never sets the S bit, so condition flags are preserved.
+int dop_reg_preserve(CodeBuf *codebuf, u32 op, u32 dst, u32 lho, u32 rho, u32 sh_typ, u32 shift)
+{
+  return out_16x2(codebuf, T_DOP_REG(DP_REG(op), dst, lho, rho, sh_typ, shift));
+}
+
+// Sign-extend byte to word.  The 16-bit encoding needs both regs low.
+int sxtb(CodeBuf *codebuf, u32 dst, u32 src)
+{
+  int narrow = (dst < ARM_R8) && (src < ARM_R8);
+  return narrow ? out_16(codebuf, T_SXTB(dst, src))
+		: out_16x2(codebuf, T2_SXTB(dst, src));
+}
+
+// Sign-extend halfword to word.  The 16-bit encoding needs both regs low.
+int sxth(CodeBuf *codebuf, u32 dst, u32 src)
+{
+  int narrow = (dst < ARM_R8) && (src < ARM_R8);
+  return narrow ? out_16(codebuf, T_SXTH(dst, src))
+		: out_16x2(codebuf, T2_SXTH(dst, src));
+}
+
+// Zero-extend halfword to word.  The 16-bit encoding needs both regs low.
+int uxth(CodeBuf *codebuf, u32 dst, u32 src)
+{
+  int narrow = (dst < ARM_R8) && (src < ARM_R8);
+  return narrow ? out_16(codebuf, T_UXTH(dst, src))
+		: out_16x2(codebuf, T2_UXTH(dst, src));
+}
+
+// Register move.  dst == src emits nothing (returns 0); a move to PC is
+// emitted as BX so the target's interworking state is honoured.
+int mov_reg(CodeBuf *codebuf, u32 dst, u32 src)
+{
+  if (dst == src) return 0;
+  if (dst == ARM_PC) return out_16(codebuf, T_BX(src));
+  return out_16(codebuf, T_MOV(dst, src));
+//  return dop_reg(codebuf, DP_MOV, dst, 0, src, SHIFT_LSL, 0);
+}
+
+// 16-bit no-op: MOV r0, r0.
+int nop_16(CodeBuf *codebuf)
+{
+  return out_16(codebuf, T_MOV(ARM_R0, ARM_R0));
+}
+
+// 32-bit no-op: MOV r8, r8 via the wide data-processing encoding
+// (high register forces the 32-bit form).
+int nop_32(CodeBuf *codebuf)
+{
+  return dop_reg(codebuf, DP_MOV, ARM_R8, 0, ARM_R8, SHIFT_LSL, 0);
+}
+
+// Bitwise NOT: dst = ~src.  Uses the 16-bit MVN when both regs are low,
+// otherwise the 32-bit flag-setting data-processing form.
+int mvn_reg(CodeBuf *codebuf, u32 dst, u32 src)
+{
+  int narrow = (dst < ARM_R8) && (src < ARM_R8);
+  if (narrow)
+    return out_16(codebuf, T_MVN(dst, src));
+  return dop_reg(codebuf, DP_MVN, dst, 0, src, SHIFT_LSL, 0);
+}
+
+// VFP transfer and arithmetic wrappers.  Naming: _s = single precision,
+// _d = double precision; toVFP/toARM give the transfer direction; a double
+// moves as a core-register pair (lo, hi).
+int vmov_reg_s_toVFP(CodeBuf *codebuf, u32 dst, u32 src)
+{
+  return out_16x2(codebuf, T_VMOVS_TOVFP(dst, src));
+}
+
+int vmov_reg_s_toARM(CodeBuf *codebuf, u32 dst, u32 src)
+{
+  return out_16x2(codebuf, T_VMOVS_TOARM(dst, src));
+}
+
+int vmov_reg_d_toVFP(CodeBuf *codebuf, u32 dst, u32 src_lo, u32 src_hi)
+{
+  return out_16x2(codebuf, T_VMOVD_TOVFP(dst, src_lo, src_hi));
+}
+
+int vmov_reg_d_VFP_to_VFP(CodeBuf *codebuf, u32 dst, u32 src)
+{
+  return out_16x2(codebuf, T_VMOVD_VFP_TOVFP(dst, src));
+}
+
+int vmov_reg_d_toARM(CodeBuf *codebuf, u32 dst_lo, u32 dst_hi, u32 src)
+{
+  return out_16x2(codebuf, T_VMOVD_TOARM(dst_lo, dst_hi, src));
+}
+
+// Three-operand VFP arithmetic (op selects add/sub/mul/... via VP_REG).
+int vop_reg_s(CodeBuf *codebuf, u32 op, u32 dst, u32 lho, u32 rho)
+{
+  return out_16x2(codebuf, T_VOP_REG_S(VP_REG(op), dst, lho, rho));
+}
+
+int vop_reg_d(CodeBuf *codebuf, u32 op, u32 dst, u32 lho, u32 rho)
+{
+  return out_16x2(codebuf, T_VOP_REG_D(VP_REG(op), dst, lho, rho));
+}
+
+// VFP compares; 'e' selects the exception-raising (VCMPE) variant.
+int vcmp_reg_s(CodeBuf *codebuf, u32 lho, u32 rho, unsigned e)
+{
+  return out_16x2(codebuf, T_VCMP_S(lho, rho, e));
+}
+
+int vcmp_reg_d(CodeBuf *codebuf, u32 lho, u32 rho, unsigned e)
+{
+  return out_16x2(codebuf, T_VCMP_D(lho, rho, e));
+}
+
+// Read the VFP status register (FPSCR) into a core register.
+int vmrs(CodeBuf *codebuf, u32 dst)
+{
+  return out_16x2(codebuf, T_VMRS(dst));
+}
+
+// dst = lho + rho (flag-setting, no shift).
+int add_reg(CodeBuf *codebuf, u32 dst, u32 lho, u32 rho)
+{
+  return dop_reg(codebuf, DP_ADD, dst, lho, rho, SHIFT_LSL, 0);
+}
+
+// Compare two registers (flags only; 0x0f is the "no destination" field).
+int cmp_reg(CodeBuf *codebuf, Reg lho, Reg rho)
+{
+  int narrow = (lho < ARM_R8) && (rho < ARM_R8);
+  return narrow ? out_16(codebuf, T_CMP_REG(lho, rho))
+	        : dop_reg(codebuf, DP_CMP, 0x0f, lho, rho, SHIFT_LSL, 0);
+}
+
+// dst = lho + (rho shifted by sh_typ/shift).
+int add_reg_shift(CodeBuf *codebuf, u32 dst, u32 lho, u32 rho, u2 sh_typ, u32 shift)
+{
+  return dop_reg(codebuf, DP_ADD, dst, lho, rho, sh_typ, shift);
+}
+
+// dst = src + imm, choosing the shortest encoding.  Tries, in order:
+// plain register move for imm == 0; 16-bit ADD/SUB imm3 for low regs;
+// 16-bit ADD/SUB imm8 when src == dst; 32-bit byte-lane and rotated-
+// immediate forms of ADD and (for negated imm) SUB; 12-bit plain
+// ADDW/SUBW; finally materialise imm into IP and add registers.
+// Negation is exploited throughout: e.g. add of -3 becomes SUB #3.
+int add_imm(CodeBuf *codebuf, u32 dst, u32 src, u32 imm)
+{
+  int imm_type, rol;
+
+  if (imm == 0) return mov_reg(codebuf, dst, src);
+  if (Thumb2) {
+    if (dst < ARM_R8 && src < ARM_R8) {
+      if (imm < 8)
+	return out_16(codebuf, T1_ADD_IMM(dst, src, imm));
+      if (-imm < 8)
+	return out_16(codebuf, T1_SUB_IMM(dst, src, -imm));
+      if (src == dst) {
+	if (imm < 256)
+	  return out_16(codebuf, T2_ADD_IMM(src, imm));
+	if (-imm < 256)
+	  return out_16(codebuf, T2_SUB_IMM(src, -imm));
+      }
+    }
+    // 32-bit "modified immediate" forms: a byte replicated across lanes...
+    imm_type = thumb_bytelane(imm);
+    if (imm_type >= 0) {
+      if (imm_type == 2) imm >>= 8;
+      return out_16x2(codebuf, T3_ADD_BYTELANE(dst, src, imm_type, (imm & 0xff)));
+    }
+    imm_type = thumb_bytelane(-imm);
+    if (imm_type >= 0) {
+      imm = -imm;
+      if (imm_type == 2) imm >>= 8;
+      return out_16x2(codebuf, T3_SUB_BYTELANE(dst, src, imm_type, (imm & 0xff)));
+    }
+    // ...or a single byte rotated into position.
+    rol = thumb_single_shift(imm);
+    if (rol >= 0)
+      return out_16x2(codebuf, T3_ADD_ROT_IMM(dst, src, rol, ROL(imm, rol)));
+    rol = thumb_single_shift(-imm);
+    if (rol >= 0)
+      return out_16x2(codebuf, T3_SUB_ROT_IMM(dst, src, rol, ROL(-imm, rol)));
+    if (imm < (1 << 12))
+      return out_16x2(codebuf, T4_ADD_IMM(dst, src, imm));
+    if (-imm < (1 << 12))
+      return out_16x2(codebuf, T4_SUB_IMM(dst, src, -imm));
+    mov_imm(codebuf, ARM_IP, imm);
+    return add_reg(codebuf, dst, src, ARM_IP);
+  }
+  J_Unimplemented();
+}
+
+// dst = src - imm, delegated to add_imm() with the immediate negated
+// (add_imm already picks SUB encodings for negative values).
+int sub_imm(CodeBuf *codebuf, u32 dst, u32 src, u32 imm)
+{
+  return add_imm(codebuf, dst, src, -imm);
+}
+
+// Generic data-processing op with an immediate.  's' is the S bit
+// (1<<20 to set flags, 0 to preserve them).  Tries the byte-lane and
+// rotated-immediate encodings of op itself, then of the complementary op
+// (N_OP: e.g. AND<->BIC, ADD<->SUB) with the immediate inverted or
+// negated as appropriate, and finally materialises the immediate in IP
+// and emits the register form.
+int dop_imm_s(CodeBuf *codebuf, u32 op, u32 dst, u32 src, u32 imm, unsigned s)
+{
+    int imm_type, rol;
+    unsigned n_op, n_imm;
+
+    JASSERT(op == DP_ADC || op == DP_ADD || op == DP_AND || op == DP_BIC || op == DP_CMN ||
+		op == DP_CMP || op == DP_EOR || op == DP_MOV || op == DP_MVN ||
+		op == DP_ORN || op == DP_ORR || op == DP_RSB || op == DP_SBC ||
+		op == DP_SUB || op == DP_TEQ || op == DP_TST, "bad op");
+    // Compare/test ops have no destination; move ops have no first source.
+    if (op == DP_CMP || op == DP_CMN || op == DP_TEQ || op == DP_TST) dst = 0x0f;
+    if (op == DP_MOV || op == DP_MVN) src = 0x0f;
+    imm_type = thumb_bytelane(imm);
+    if (imm_type >= 0) {
+      if (imm_type == 2) imm >>= 8;
+      return out_16x2(codebuf, T_DOP_BYTELANE(DP_IMM(op)|s, dst, src, imm_type, (imm & 0xff)));
+    }
+    rol = thumb_single_shift(imm);
+    if (rol >= 0)
+      return out_16x2(codebuf, T_DOP_ROT_IMM(DP_IMM(op)|s, dst, src, rol, ROL(imm, rol)));
+    n_op = N_OP(op);
+    if (n_op != (unsigned)-1) {
+      // Arithmetic ops pair under negation, logical ops under complement.
+      n_imm = ~imm;
+      if (op == DP_ADD || op == DP_SUB || op == DP_CMP || op == DP_CMN) n_imm = -imm;
+      imm_type = thumb_bytelane(n_imm);
+      if (imm_type >= 0) {
+	if (imm_type == 2) n_imm >>= 8;
+	return out_16x2(codebuf, T_DOP_BYTELANE(DP_IMM(n_op)|s, dst, src, imm_type, (n_imm & 0xff)));
+      }
+      rol = thumb_single_shift(n_imm);
+      if (rol >= 0)
+	return out_16x2(codebuf, T_DOP_ROT_IMM(DP_IMM(n_op)|s, dst, src, rol, ROL(n_imm, rol)));
+    }
+    mov_imm(codebuf, ARM_IP, imm);
+    return out_16x2(codebuf, T_DOP_REG(DP_REG(op)|s, dst, src, ARM_IP, SHIFT_LSL, 0));
+}
+
+// Flag-setting immediate data-processing op.
+int dop_imm(CodeBuf *codebuf, u32 op, u32 dst, u32 src, u32 imm)
+{
+    return dop_imm_s(codebuf, op, dst, src, imm, 1<<20);
+}
+
+// Flag-preserving immediate data-processing op.
+int dop_imm_preserve(CodeBuf *codebuf, u32 op, u32 dst, u32 src, u32 imm)
+{
+    return dop_imm_s(codebuf, op, dst, src, imm, 0);
+}
+
+// Shift by an immediate, masked to 0..31 (matching Java shift semantics).
+// A masked count of 0 degenerates to a plain register move -- note this
+// deliberately avoids the ARM encoding where imm 0 means "shift by 32"
+// for LSR/ASR.
+int shift_imm(CodeBuf *codebuf, u32 op, u32 dst, u32 src, u32 imm)
+{
+    imm &= 31;
+    if (imm == 0)
+      return mov_reg(codebuf, dst, src);
+    else
+      return out_16x2(codebuf, T_SHIFT_IMM(DP_IMM(op), dst, src, imm));
+}
+
+// dst = imm - src (reverse subtract).  Negating a low register (imm == 0)
+// gets the 16-bit NEG encoding.
+int rsb_imm(CodeBuf *codebuf, u32 dst, u32 src, u32 imm)
+{
+  if (imm == 0 && dst < ARM_R8 && src < ARM_R8)
+    return out_16(codebuf, T_NEG(dst, src));
+  return dop_imm(codebuf, DP_RSB, dst, src, imm);
+}
+
+// Convenience wrappers over dop_imm() / shift_imm() for common ops.
+int adc_imm(CodeBuf *codebuf, u32 dst, u32 src, u32 imm)
+{
+  return dop_imm(codebuf, DP_ADC, dst, src, imm);
+}
+
+int asr_imm(CodeBuf *codebuf, u32 dst, u32 src, u32 imm)
+{
+  return shift_imm(codebuf, DP_ASR, dst, src, imm);
+}
+
+int eor_imm(CodeBuf *codebuf, u32 dst, u32 src, u32 imm)
+{
+  return dop_imm(codebuf, DP_EOR, dst, src, imm);
+}
+
+int and_imm(CodeBuf *codebuf, u32 dst, u32 src, u32 imm)
+{
+  return dop_imm(codebuf, DP_AND, dst, src, imm);
+}
+
+int orr_imm(CodeBuf *codebuf, u32 dst, u32 src, u32 imm)
+{
+  return dop_imm(codebuf, DP_ORR, dst, src, imm);
+}
+
+// Compare against an immediate; 16-bit form for a low reg and imm < 256.
+int cmp_imm(CodeBuf *codebuf, Reg src, u32 imm)
+{
+  if (src < ARM_R8 && imm < 256) return out_16(codebuf, T_CMP_IMM(src, imm));
+  return dop_imm(codebuf, DP_CMP, 0x0f, src, imm);
+}
+
+// Test bits against an immediate (flags only).
+int tst_imm(CodeBuf *codebuf, Reg src, u32 imm)
+{
+  return dop_imm(codebuf, DP_TST, 0x0f, src, imm);
+}
+
+// Memory barriers, elided entirely on uniprocessor systems.
+// DMB option 0xf is the full-system barrier (SY).
+void fullBarrier(CodeBuf *codebuf)
+{
+  if (os::is_MP())
+    out_16x2(codebuf, T_DMB(0xf));
+}
+
+// DMB option 0xe is the store-only barrier (ST).
+void storeBarrier(CodeBuf *codebuf)
+{
+  if (os::is_MP())
+    out_16x2(codebuf, T_DMB(0xe));
+}
+
+// Emit TBH (table branch halfword): branch forward by twice the halfword
+// loaded from [base, idx LSL #1].
+// Fix: the function was declared int but had no return statement --
+// undefined behaviour if a caller uses the result.
+int tbh(CodeBuf *codebuf, Reg base, Reg idx)
+{
+  return out_16x2(codebuf, T_TBH(base, idx));
+}
+
+// Unsigned 32x32 -> 64 multiply: {res_hi,res_lo} = lho * rho.
+int umull(CodeBuf *codebuf, u32 res_lo, u32 res_hi, u32 lho, u32 rho)
+{
+  return out_16x2(codebuf, T_UMULL(res_lo, res_hi, lho, rho));
+}
+
+// Multiply-accumulate: res = lho * rho + a.
+int mla(CodeBuf *codebuf, u32 res, u32 lho, u32 rho, u32 a)
+{
+  return out_16x2(codebuf, T_MLA(res, lho, rho, a));
+}
+
+// Compiler-internal condition codes.  Codes are paired so that flipping
+// the low bit (see NEG_COND) yields the inverse condition.
+#define COND_EQ 0
+#define COND_NE 1
+#define COND_LT	2
+#define COND_GE 3
+#define COND_GT 4
+#define COND_LE 5
+#define COND_CS 6
+#define COND_CC 7
+#define COND_MI 8
+#define COND_PL 9
+
+// Map a COND_* index to the architectural 4-bit condition field
+// (EQ=0, NE=1, LT=0xb, GE=0xa, GT=0xc, LE=0xd, CS=2, CC=3, MI=4, PL=5).
+static unsigned conds[] = {
+	0x0,
+	0x1,
+	0xb,
+	0xa,
+	0xc,
+	0xd,
+	0x2,
+	0x3,
+	0x4,
+	0x5,
+};
+
+#define NEG_COND(cond)	((cond) ^ 1)
+
+// Thumb branch encodings.  uoff is the branch offset in halfwords; the
+// 32-bit B/BL/BLX forms fold the sign bit and its complements into the
+// S/J1/J2 bits as the Thumb-2 encoding requires.
+#define T_B(uoff)	(0xe000 | ((uoff) & 0x7ff))
+#define T_BW(uoff)	(0xf0009000 | \
+			  (((uoff) & (1<<23)) << (26-23)) | \
+			  (((~(uoff) & (1<<22)) >> 22) ^ (((uoff) & (1<<23)) >> 23)) << 13 | \
+			  (((~(uoff) & (1<<21)) >> 21) ^ (((uoff) & (1<<23)) >> 23)) << 11 | \
+			  (((uoff) & 0x1ff800) << (16-11)) | \
+			  ((uoff) & 0x7ff))
+#define T_BL(uoff)	(0xf000d000 | \
+			  (((uoff) & (1<<23)) << (26-23)) | \
+			  (((~(uoff) & (1<<22)) >> 22) ^ (((uoff) & (1<<23)) >> 23)) << 13 | \
+			  (((~(uoff) & (1<<21)) >> 21) ^ (((uoff) & (1<<23)) >> 23)) << 11 | \
+			  (((uoff) & 0x1ff800) << (16-11)) | \
+			  ((uoff) & 0x7ff))
+#define T_BLX(uoff)	(0xf000c000 | \
+			  (((uoff) & (1<<23)) << (26-23)) | \
+			  (((~(uoff) & (1<<22)) >> 22) ^ (((uoff) & (1<<23)) >> 23)) << 13 | \
+			  (((~(uoff) & (1<<21)) >> 21) ^ (((uoff) & (1<<23)) >> 23)) << 11 | \
+			  (((uoff) & 0x1ff800) << (16-11)) | \
+			  ((uoff) & 0x7ff))
+// Conditional branches (16- and 32-bit) and compare-and-branch-on-zero.
+#define T_BCC(cond, uoff) (0xd000 | (conds[cond] << 8) | ((uoff) & 0xff))
+#define T_BCCW(cond, uoff) (0xf0008000 | \
+			     (conds[cond] << 22) | \
+			     (((uoff) & (1<<19)) << (26-19)) | \
+			     (((uoff) & (1<<18)) >> (18-11)) | \
+			     (((uoff) & (1<<17)) >> (17-13)) | \
+			     (((uoff) & 0x1f800) << (16-11)) | \
+			     ((uoff) & 0x7ff))
+#define T_BLX_REG(r)	(0x4780 | ((r) << 3))
+#define T_CBZ(r, uoff)	(0xb100 | (((uoff) & 0x1f) << 3) | (((uoff) & 0x20) << (8-5)) | ((r) & 7))
+#define T_CBNZ(r, uoff)	(0xb900 | (((uoff) & 0x1f) << 3) | (((uoff) & 0x20) << (8-5)) | ((r) & 7))
+
+// IT (If-Then) instruction: condition plus a 4-bit then/else mask.
+#define T_IT(cond, mask) (0xbf00 | (conds[cond] << 4) | (mask))
+
+#define IT_MASK_T	8
+#define IT_MASK_TEE	0x0e
+
+// PATCH(loc) ... HCTAP temporarily rewinds the output index to byte
+// position 'loc' so already-emitted code can be rewritten, then restores
+// the saved index ("HCTAP" is "PATCH" reversed).
+#define PATCH(loc)	do {						\
+	  unsigned oldidx = codebuf->idx;				\
+	  codebuf->idx = (loc) >> 1;					\
+
+#define HCTAP								\
+	  codebuf->idx = oldidx;					\
+    	} while (0)
+
+// Reserve a 16-bit slot to be patched later, returning its byte location.
+// An UNDEFINED encoding is emitted so an unpatched slot traps at run time.
+int forward_16(CodeBuf *codebuf)
+{
+  int loc = out_loc(codebuf);
+  out_16(codebuf, T_UNDEFINED_16);
+  return loc;
+}
+
+// As forward_16() but reserves a 32-bit slot.
+int forward_32(CodeBuf *codebuf)
+{
+  int loc = out_loc(codebuf);
+  out_32(codebuf, T_UNDEFINED_32);
+  return loc;
+}
+
+// Emit an IT (If-Then) block header.  The caller supplies the mask as if
+// the condition were the "true" sense; because the architectural mask
+// encodes then/else relative to the condition's low bit, a negated
+// condition (odd COND_* code) requires flipping the mask bits above its
+// least significant set bit.
+int it(CodeBuf *codebuf, unsigned cond, unsigned mask)
+{
+  if (cond & 1) {
+    // If this is a negated condition, flip all the bits above the
+    // least significant bit that is 1.  Note that at least one bit is
+    // always 1 in mask
+    switch (mask & (-mask)) {
+    case 8:
+      break;
+    case 4:
+      mask ^= 8;
+      break;
+    case 2:
+      mask ^= 0x0c;
+      break;
+    case 1:
+      mask ^= 0x0e;
+      break;
+    default:
+      // Impossible unless someone specified an incorrect mask
+      longjmp(compiler_error_env, COMPILER_RESULT_FAILED);
+    }
+  }
+
+  return out_16(codebuf, T_IT(cond, mask));
+}
+
+// Workarounds for a Thumb-2 branch erratum: both functions insert a 16-bit
+// nop (MOV r0, r0) when the current emit position is the last halfword of a
+// 4KB page, so a 32-bit instruction does not straddle the page boundary.
+// NOTE(review): presumably this is a specific core erratum for backward
+// branches crossing a 4KB boundary -- confirm which erratum before relying
+// on the exact conditions.
+void t2_bug_align(CodeBuf *codebuf)
+{
+  unsigned pc = (unsigned)&codebuf->codebuf[codebuf->idx];
+  if ((pc & 0xffe) != 0xffe) return;
+  mov_reg(codebuf, ARM_R0, ARM_R0);
+}
+
+// Only applies the fix for short backward branches (0 > offset >= -(4096+4),
+// in halfword units).
+void t2_bug_fix(CodeBuf *codebuf, int offset)
+{
+  unsigned pc = (unsigned)&codebuf->codebuf[codebuf->idx];
+  if ((pc & 0xffe) != 0xffe) return;
+  if (offset >= 0 || offset < -(4096+4)) return;
+  mov_reg(codebuf, ARM_R0, ARM_R0);
+}
+
+// Unconditional branch to byte address 'dest' (relative to the current emit
+// position + 4, the Thumb PC bias).  Offsets are computed in halfwords:
+// 16-bit B for +/-2KB, else 32-bit B.W for +/-16MB (after the erratum fix).
+int branch_uncond(CodeBuf *codebuf, unsigned dest)
+{
+  unsigned loc = (codebuf->idx * 2) + 4;
+  int offset;
+  unsigned uoff;
+
+  JASSERT((dest & 1) == 0 && (loc & 1) == 0, "unaligned code");
+  dest >>= 1;
+  loc >>= 1;
+  offset = dest - loc;
+  if (offset >= -(1<<10) && offset < (1<<10)) {
+    uoff = offset;
+    return out_16(codebuf, T_B(uoff));
+  }
+  t2_bug_fix(codebuf, offset);
+  if (offset >= -(1<<23) && offset < (1<<23)) {
+    uoff = offset;
+    return out_16x2(codebuf, T_BW(uoff));
+  }
+  J_Unimplemented();
+}
+
+// Patch an unconditional 32-bit branch at byte location 'loc' to target
+// 'dest': rewinds the output index, re-emits B.W, and restores the index.
+int branch_uncond_patch(CodeBuf *codebuf, unsigned loc, unsigned dest)
+{
+  int offset;
+  unsigned uoff;
+  unsigned oldidx;
+  int rc;
+
+  oldidx = codebuf->idx;
+  codebuf->idx = loc >> 1;
+  loc += 4;	// PC bias: branches are relative to instruction + 4
+  JASSERT((dest & 1) == 0 && (loc & 1) == 0, "unaligned code");
+  dest >>= 1;
+  loc >>= 1;
+  offset = dest - loc;
+  t2_bug_fix(codebuf, offset);
+  if (offset >= -(1<<23) && offset < (1<<23)) {
+    uoff = offset & ((1<<24)-1);
+    rc = out_16x2(codebuf, T_BW(uoff));
+    codebuf->idx = oldidx;
+    return rc;
+  }
+  J_Unimplemented();
+}
+
+// Patch a previously reserved 16-bit slot at 'loc' with a narrow B whose
+// target is the current emit position.
+int branch_narrow_patch(CodeBuf *codebuf, unsigned loc)
+{
+  int offset;
+  unsigned uoff;
+  unsigned oldidx;
+  unsigned dest;
+  int rc;
+
+  dest = codebuf->idx * 2;
+  oldidx = codebuf->idx;
+  codebuf->idx = loc >> 1;
+  loc += 4;	// PC bias
+  JASSERT((dest & 1) == 0 && (loc & 1) == 0, "unaligned code");
+  dest >>= 1;
+  loc >>= 1;
+  offset = dest - loc;
+  if (offset >= -(1<<10) && offset < (1<<10)) {
+    uoff = offset & ((1<<11)-1);
+    rc = out_16(codebuf, T_B(uoff));
+    codebuf->idx = oldidx;
+    return rc;
+  }
+  J_Unimplemented();
+}
+
+// Conditional branch to byte address 'dest': 16-bit Bcc for +/-256 bytes,
+// else 32-bit Bcc.W for +/-1MB (after the erratum fix).
+int branch(CodeBuf *codebuf, unsigned cond, unsigned dest)
+{
+  unsigned loc = (codebuf->idx * 2) + 4;
+  int offset;
+  unsigned uoff;
+
+  JASSERT((dest & 1) == 0 && (loc & 1) == 0, "unaligned code");
+  dest >>= 1;
+  loc >>= 1;
+  offset = dest - loc;
+  if (offset >= -(1<<7) && offset < (1<<7)) {
+    uoff = offset;
+    return out_16(codebuf, T_BCC(cond, uoff));
+  }
+  t2_bug_fix(codebuf, offset);
+  if (offset >= -(1<<19) && offset < (1<<19)) {
+    uoff = offset;
+    return out_16x2(codebuf, T_BCCW(cond, uoff));
+  }
+  J_Unimplemented();
+}
+
+// Patch a reserved 16-bit slot at 'loc' with a narrow conditional branch
+// targeting the current emit position.
+int bcc_patch(CodeBuf *codebuf, unsigned cond, unsigned loc)
+{
+  int offset;
+  unsigned uoff;
+  unsigned oldidx;
+  unsigned dest;
+  int rc;
+
+  dest = codebuf->idx * 2;
+  oldidx = codebuf->idx;
+  codebuf->idx = loc >> 1;
+  loc += 4;	// PC bias
+  JASSERT((dest & 1) == 0 && (loc & 1) == 0, "unaligned code");
+  dest >>= 1;
+  loc >>= 1;
+  offset = dest-loc;
+  if (offset >= -(1<<7) && offset < (1<<7)) {
+    uoff = offset;
+    rc = out_16(codebuf, T_BCC(cond, uoff));
+    codebuf->idx = oldidx;
+    return rc;
+  }
+  J_Unimplemented();
+}
+
+// Branch-and-link to absolute byte address 'dest' (+/-16MB range).  Unlike
+// branch_uncond(), the offset is computed from the actual buffer address,
+// so 'dest' may lie outside this code buffer (e.g. a runtime handler).
+int bl(CodeBuf *codebuf, unsigned dest)
+{
+  unsigned loc = (unsigned)&codebuf->codebuf[codebuf->idx] + 4;
+  int offset;
+  unsigned uoff;
+
+  JASSERT((dest & 1) == 0 && (loc & 1) == 0, "unaligned code");
+  dest >>= 1;
+  loc >>= 1;
+  offset = dest - loc;
+  t2_bug_fix(codebuf, offset);
+  if (offset >= -(1<<23) && offset < (1<<23)) {
+    uoff = offset;
+    return out_16x2(codebuf, T_BL(uoff));
+  }
+  J_Unimplemented();
+}
+
+// Branch-with-link-and-exchange to an ARM-state target: dest must be
+// word-aligned, and the source alignment is rounded down to a word
+// (loc &= ~1 after halving) as the BLX encoding requires.
+int blx(CodeBuf *codebuf, unsigned dest)
+{
+  unsigned loc = (unsigned)&codebuf->codebuf[codebuf->idx] + 4;
+  int offset;
+  unsigned uoff;
+
+  JASSERT((dest & 3) == 0 && (loc & 1) == 0, "unaligned code");
+  dest >>= 1;
+  loc >>= 1;
+  loc &= ~1;
+  offset = dest - loc;
+  t2_bug_fix(codebuf, offset);
+  if (offset >= -(1<<23) && offset < (1<<23)) {
+    uoff = offset;
+    return out_16x2(codebuf, T_BLX(uoff));
+  }
+  J_Unimplemented();
+}
+
+// Patch a 32-bit conditional branch at 'loc' to target 'dest'
+// (+/-1MB range).
+int branch_patch(CodeBuf *codebuf, unsigned cond, unsigned loc, unsigned dest)
+{
+  int offset;
+  unsigned uoff;
+  unsigned oldidx;
+  int rc;
+
+  oldidx = codebuf->idx;
+  codebuf->idx = loc >> 1;
+  loc += 4;	// PC bias
+  JASSERT((dest & 1) == 0 && (loc & 1) == 0, "unaligned code");
+  dest >>= 1;
+  loc >>= 1;
+  offset = dest - loc;
+  t2_bug_fix(codebuf, offset);
+  if (offset >= -(1<<19) && offset < (1<<19)) {
+    uoff = offset & ((1<<20)-1);
+    rc = out_16x2(codebuf, T_BCCW(cond, uoff));
+    codebuf->idx = oldidx;
+    return rc;
+  }
+  J_Unimplemented();
+}
+
+// Indirect call: BLX through a register.
+int blx_reg(CodeBuf *codebuf, Reg r)
+{
+  return out_16(codebuf, T_BLX_REG(r));
+}
+
+// Patch a reserved 16-bit slot at 'loc' with CBZ targeting the current
+// emit position.  CBZ is forward-only: the (unsigned) halfword offset must
+// be < 64 and the register low.
+int cbz_patch(CodeBuf *codebuf, Reg r, unsigned loc)
+{
+  unsigned offset;
+  unsigned oldidx;
+  unsigned dest;
+  int rc;
+
+  dest = codebuf->idx * 2;
+  oldidx = codebuf->idx;
+  codebuf->idx = loc >> 1;
+  loc += 4;	// PC bias
+  JASSERT((dest & 1) == 0 && (loc & 1) == 0, "unaligned code");
+  dest >>= 1;
+  loc >>= 1;
+  offset = dest-loc;
+  if (r < ARM_R8 && offset < 64) {
+    rc = out_16(codebuf, T_CBZ(r, offset));
+    codebuf->idx = oldidx;
+    return rc;
+  }
+  J_Unimplemented();
+}
+
+// As cbz_patch() but emits CBNZ (compare-and-branch-on-non-zero).
+int cbnz_patch(CodeBuf *codebuf, Reg r, unsigned loc)
+{
+  unsigned offset;
+  unsigned oldidx;
+  unsigned dest;
+  int rc;
+
+  dest = codebuf->idx * 2;
+  oldidx = codebuf->idx;
+  codebuf->idx = loc >> 1;
+  loc += 4;	// PC bias
+  JASSERT((dest & 1) == 0 && (loc & 1) == 0, "unaligned code");
+  dest >>= 1;
+  loc >>= 1;
+  offset = dest-loc;
+  if (r < ARM_R8 && offset < 64) {
+    rc = out_16(codebuf, T_CBNZ(r, offset));
+    codebuf->idx = oldidx;
+    return rc;
+  }
+  J_Unimplemented();
+}
+
+// Array bounds check: compare idx against size and, when idx >= size
+// (unsigned, condition CS), conditionally call the array-bounds handler
+// under an IT block.
+// Fix: the function was declared int but fell off the end without a
+// return (undefined behaviour if the caller uses the result); propagate
+// the final emit result like the other emitters do.
+int chka(CodeBuf *codebuf, u32 size, u32 idx)
+{
+  cmp_reg(codebuf, idx, size);
+  it(codebuf, COND_CS, IT_MASK_T);
+  return bl(codebuf, handlers[H_ARRAYBOUND]);
+}
+
+//-----------------------------------------------------------------------------------
+
+// An example of some debugging logic that you can use to trigger a
+// breakpoint when a particular method is executing.
+// EQ: prefix-match S1 against literal S2 (guards against NULL S1).
+#define EQ(S1, S2) (S1 && (strncmp(S1, S2, strlen(S2)) == 0))
+// Debug hook: set a breakpoint on the asm("nop") below and edit the method
+// signature (and optionally the bci test) to stop while a particular
+// method is being compiled/executed.
+extern "C" void Debug(interpreterState istate)
+{
+  char valuebuf[8192];
+  istate->method()->name_and_sig_as_C_string(valuebuf, sizeof valuebuf);
+  if (EQ(valuebuf, "java.util.Hashtable.get(Ljava/lang/Object;)")
+      // && istate->method()->bci_from(istate->bcp()) == 45
+      ) {
+    asm("nop");
+  }
+}
+#undef EQ
+
+// Push regs[0..nregs-1] onto the Java expression stack (Rstack, full
+// descending).  STM stores registers in ascending numeric order, so
+// whenever the next register's bit would not be below all bits already
+// collected, the accumulated set is flushed with its own STM and a new
+// set is started -- preserving the order given in regs[].
+void Thumb2_Push_Multiple(CodeBuf *codebuf, Reg *regs, unsigned nregs)
+{
+  unsigned regset = 0;
+  unsigned regmask;
+  unsigned i;
+  Reg r;
+
+  JASSERT(nregs > 0, "nregs must be > 0");
+  if (nregs == 1) {
+    // Single register: a pre-decrement STR is cheaper than STM.
+    str_imm(codebuf, regs[0], Rstack, -4, 1, 1);
+    return;
+  }
+  for (i = 0; i < nregs; i++) {
+    r = regs[i];
+    if (!IS_ARM_INT_REG(r)) J_Unimplemented();
+    regmask = 1<<r;
+    if (regset != 0 && regmask >= (regset & -regset)) {
+      stm(codebuf, regset, Rstack, PUSH_FD, 1);
+      regset = 0;
+    }
+    regset |= regmask;
+  }
+  stm(codebuf, regset, Rstack, PUSH_FD, 1);
+}
+
+// Pop nregs registers from the Java expression stack into regs[], walking
+// regs[] backwards and splitting into multiple LDMs wherever ascending-
+// register ordering would be violated (mirror of Thumb2_Push_Multiple).
+// nregs == 0 is a no-op.
+void Thumb2_Pop_Multiple(CodeBuf *codebuf, Reg *regs, unsigned nregs)
+{
+  unsigned regset = 0;
+  unsigned regmask;
+  unsigned i;
+  Reg r;
+
+  if (nregs == 0)
+    return;
+  JASSERT(nregs > 0, "nregs must be > 0");
+  if (nregs == 1) {
+    // Single register: a post-increment LDR is cheaper than LDM.
+    ldr_imm(codebuf, regs[0], Rstack, 4, 0, 1);
+    return;
+  }
+  i = nregs;
+  do {
+    i--;
+    r = regs[i];
+    if (!IS_ARM_INT_REG(r)) J_Unimplemented();
+    regmask = 1<<r;
+    if (regmask <= (regset & -regset)) {
+      ldm(codebuf, regset, Rstack, POP_FD, 1);
+      regset = 0;
+    }
+    regset |= regmask;
+  } while (i > 0);
+  ldm(codebuf, regset, Rstack, POP_FD, 1);
+}
+
+// Parallel register move: dst[i] <- src[i] for all i, as if simultaneous.
+// Repeatedly finds a destination that is not also a pending source and
+// moves into it; when every destination is also a source (a cycle), the
+// cycle is broken by staging one value in IP.  dst[]/src[] are used as
+// scratch and are compacted in place.  IP must not appear in either list.
+int mov_multiple(CodeBuf *codebuf, Reg *dst, Reg *src, unsigned nregs)
+{
+  unsigned u, n, p;
+  unsigned smask = 0;
+  unsigned dmask = 0;
+  unsigned free_mask, free_reg;
+
+  // Drop the no-op moves up front.
+  for (u = 0, n = 0; u < nregs; u++) {
+    JASSERT(dst[u] != ARM_IP, "mov_multiple cannot be used for ARM_IP");
+    JASSERT(src[u] != ARM_IP, "mov_multiple cannot be used for ARM_IP");
+    if (dst[u] != src[u]) {
+      dst[n] = dst[u];
+      src[n++] = src[u];
+    }
+  }
+  while (n) {
+    // Find a reg which is in the dst reg set but not the src reg set
+    smask = 0;
+    dmask = 0;
+    for (u = 0; u < n; u++) {
+      smask |= (1 << src[u]);
+      dmask |= (1 << dst[u]);
+    }
+    free_mask = dmask & ~smask;
+    if (!free_mask) {
+      // No such reg => must use IP
+      Reg r = dst[0];
+      mov_reg(codebuf, ARM_IP, r);
+      for (u = 0; u < n; u++) {
+	if (src[u] == r) src[u] = ARM_IP;
+      }
+      smask ^= (1<<r) | (1<<ARM_IP);
+      free_mask = dmask & ~smask;
+      JASSERT(free_mask, "still no free reg after using ARM_IP?");
+    }
+    free_reg = LOG2(free_mask);
+    // Emit the move into the free destination; keep all other pairs.
+    for (u = 0, p = 0; u < n; u++) {
+      if (dst[u] == free_reg) {
+	mov_reg(codebuf, dst[u], src[u]);
+      } else {
+	dst[p] = dst[u];
+	src[p++] = src[u];
+      }
+    }
+    n--;
+  }
+  return 0;
+}
+
+// Accessors for the simulated Java expression stack: TOS is the top
+// element, TOSM1..TOSM3 the ones below it.
+#define TOS(jstack)	((jstack)->stack[(jstack)->depth-1])
+#define TOSM1(jstack)	((jstack)->stack[(jstack)->depth-2])
+#define TOSM2(jstack)	((jstack)->stack[(jstack)->depth-3])
+#define TOSM3(jstack)	((jstack)->stack[(jstack)->depth-4])
+
+// Exchange the top two stack entries (bookkeeping only; emits no code).
+#define SWAP(jstack) do { \
+		      Reg r = (jstack)->stack[(jstack)->depth-1]; \
+		      (jstack)->stack[(jstack)->depth-1] = (jstack)->stack[(jstack)->depth-2]; \
+		      (jstack)->stack[(jstack)->depth-2] = r; \
+		    } while (0)
+
+// Allocate a free stack register (optionally preferring a given set).
+#define JSTACK_REG(jstack)		jstack_reg(jstack)
+#define JSTACK_PREFER(jstack, prefer)	jstack_prefer(jstack, prefer)
+
+// Push 'reg' onto the simulated Java stack (bookkeeping only, no code
+// emitted).  Returns the register pushed, for convenient chaining.
+int PUSH(Thumb2_Stack *jstack, unsigned reg) {
+  jstack->stack[jstack->depth] = reg;
+  jstack->depth++;
+  return reg;
+}
+
+// Pop the top register off the simulated Java stack and return it
+// (bookkeeping only, no code emitted).
+int POP(Thumb2_Stack *jstack) {
+  jstack->depth--;
+  return jstack->stack[jstack->depth];
+}
+
+// Lookup table: index is a 4-bit in-use mask, value is the highest bit
+// position that is clear in the mask - i.e. the highest-numbered free
+// stack register.  Entry 0b1111 is a don't-care (no registers free).
+static const unsigned last_clear_bit[] = {
+	3,	//	0000
+	3,	//	0001
+	3,	//	0010
+	3,	//	0011
+	3,	//	0100
+	3,	//	0101
+	3,	//	0110
+	3,	//	0111
+	2,	//	1000
+	2,	//	1001
+	2,	//	1010
+	2,	//	1011
+	1,	//	1100
+	1,	//	1101
+	0,	//	1110
+	0,	//	1111 // No registers available...
+};
+
+#define LAST_CLEAR_BIT(mask) last_clear_bit[mask]
+
+// Diagnostic counter: bumped whenever register allocation fails and we
+// fall back to J_BogusImplementation().
+unsigned long thumb2_register_allocation_failures = 0;
+
+// Return a stack register not currently used by any cached stack entry.
+// The caller must have ensured (via Thumb2_Spill) that one is free.
+unsigned jstack_reg(Thumb2_Stack *jstack)
+{
+  unsigned *stack = jstack->stack;
+  unsigned depth = jstack->depth;
+  unsigned mask = 0;
+  unsigned r;
+  unsigned i;
+
+  // Build the in-use mask, restricted to the STACK_REGS allocatable regs.
+  for (i = 0; i < depth; i++) mask |= 1 << stack[i];
+  mask &= (1 << STACK_REGS) - 1;
+  if (mask >= (1 << STACK_REGS) - 1)  { // No free registers
+    thumb2_register_allocation_failures++;
+    J_BogusImplementation();
+  }
+  r = LAST_CLEAR_BIT(mask);
+  return r;
+}
+
+// Like jstack_reg(), but 'prefer' is a bitmask of registers the caller
+// would like the result drawn from.  If any preferred register is free,
+// all non-preferred registers are marked busy so the preference wins;
+// otherwise this degrades to a plain jstack_reg() allocation.
+unsigned jstack_prefer(Thumb2_Stack *jstack, Reg prefer)
+{
+  unsigned *stack = jstack->stack;
+  unsigned depth = jstack->depth;
+  unsigned mask = 0;
+  unsigned r;
+  unsigned i;
+
+  for (i = 0; i < depth; i++) mask |= 1 << stack[i];
+  mask &= (1 << STACK_REGS) - 1;
+  // If a preferred reg is free, exclude the non-preferred ones.
+  if ((prefer & ~mask) & 0x0f) mask |= (~prefer & ((1 << STACK_REGS) - 1));
+  if (mask >= (1 << STACK_REGS) - 1)  { // No free registers
+    thumb2_register_allocation_failures++;
+    J_BogusImplementation();
+  }
+  r = LAST_CLEAR_BIT(mask);
+  return r;
+}
+
+// Ensure at least 'required' Java stack entries are cached in registers.
+// Existing cached entries are shifted up, free registers are allocated
+// for the shortfall, and the missing values are popped from the memory
+// stack into them with a single multiple-load.
+void Thumb2_Fill(Thumb2_Info *jinfo, unsigned required)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned *stack = jstack->stack;
+  unsigned depth = jstack->depth;
+  unsigned mask = 0;
+  unsigned tofill;
+  unsigned r, i;
+
+  if (depth >= required) return;
+  tofill = required - depth;
+  // Shift the existing cached entries up by 'tofill' slots, collecting
+  // their in-use mask as we go.
+  for (i = depth; i > 0;) {
+    i--;
+    mask |= 1 << stack[i];
+    stack[i+tofill] = stack[i];
+  }
+  mask &= (1 << STACK_REGS) - 1;
+  // Allocate a free register for each slot we are about to fill.
+  for (i = 0; i < tofill; i++) {
+    JASSERT(mask != (1 << STACK_REGS) - 1, "Fill failed!!!");
+    r = LAST_CLEAR_BIT(mask);
+    mask |= (1 << r);
+    stack[i] = r;
+  }
+  jstack->depth = depth + tofill;
+  Thumb2_Pop_Multiple(jinfo->codebuf, stack, tofill);
+}
+
+// Popcount lookup table for 4-bit register masks (STACK_REGS <= 4).
+static const unsigned bitcount[] = {
+	0,	// 0000
+	1,	// 0001
+	1,	// 0010
+	2,	// 0011
+	1,	// 0100
+	2,	// 0101
+	2,	// 0110
+	3,	// 0111
+	1,	// 1000
+	2,	// 1001
+	2,	// 1010
+	3,	// 1011
+	2,	// 1100
+	3,	// 1101
+	3,	// 1110
+	4,	// 1111
+};
+
+#define BITCOUNT(mask) bitcount[mask]
+
+// Thumb2_Spill:-
+// 	required - ensure that at least this many registers are available
+// 	exclude - bitmask, do not count these registers as available
+//
+// 	The no. of available regs (STACK_REGS) less the no. of registers in
+// 	exclude must be >= the number required, otherwise this function loops!
+//
+// 	Typical usage is
+//
+// 	Thumb2_Spill(jinfo, 2, 0);	// get 2 free regs
+// 	r_res_lo = PUSH(jinfo->jstack, JSTACK_REG(jinfo->jstack));
+// 	r_res_hi = PUSH(jinfo->jstack, JSTACK_REG(jinfo->jstack));
+//
+//	Use the exclude mask when you do not want a subsequent call to
+//	JSTACK_REG to return a particular register or registers. This can
+//	be useful, for example, with long (64) bit operations. Eg. In the
+//	following we use it to ensure that the hi inputs are not clobbered
+//	by the lo result as part of the intermediate calculation.
+//
+//	Thumb2_Fill(jinfo, 4);
+//	exclude = (1<<rho_hi)|(1<<lho_hi);
+//	rho_lo = POP(jstack);
+//	rho_hi = POP(jstack);
+//	lho_lo = POP(jstack);
+//	lho_hi = POP(jstack);
+//	Thumb2_Spill(jinfo, 2, exclude);
+//	res_hi = PUSH(jstack, JSTACK_PREFER(jstack, ~exclude));	// != rho_hi or lho_hi
+//	res_lo = PUSH(jstack, JSTACK_PREFER(jstack, ~exclude));	// != rho_hi or lho_hi
+//	dop_reg(jinfo->codebuf, DP_ADD, res_lo, lho_lo, rho_lo, SHIFT_LSL, 0); 
+//	dop_reg(jinfo->codebuf, DP_ADC, res_hi, lho_hi, rho_hi, SHIFT_LSL, 0);
+//	
+void Thumb2_Spill(Thumb2_Info *jinfo, unsigned required, unsigned exclude)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned *stack = jstack->stack;
+  unsigned depth = jstack->depth;
+  unsigned mask;
+  unsigned i;
+  unsigned tospill = 0;
+
+  exclude &= (1 << STACK_REGS) - 1;
+  // Fast path: enough free regs already and nothing excluded.
+  if (depth <= (STACK_REGS - required) && exclude == 0) return;
+  // Find the minimum number of (deepest) cached entries to push to the
+  // memory stack so that 'required' regs become free.
+  while (1) {
+    mask = 0;
+    for (i = tospill; i < depth; i++) mask |= 1 << stack[i];
+    mask &= ((1 << STACK_REGS) - 1);
+    mask |= exclude;
+    if (STACK_REGS - BITCOUNT(mask) >= required) break;
+    tospill++;
+  }
+  if (tospill == 0) return;
+  Thumb2_Push_Multiple(jinfo->codebuf, stack, tospill);
+  // Slide the surviving cached entries down over the spilled slots.
+  for (i = tospill; i < depth; i++)
+    stack[i-tospill] = stack[i];
+  jstack->depth = depth - tospill;
+  JASSERT((int)jstack->depth >= 0, "Stack underflow");
+}
+
+// Thumb2_Tmp:-
+// 	Allocate a temp reg for use in local code generation.
+// 	exclude is a bit mask of regs not to use.
+// 	A max of 2 regs can be guaranteed (ARM_IP & ARM_LR)
+// 	If allocating 2 regs you must include the reg you got the
+// 	first time in the exclude list. Otherwise you just get
+// 	the same reg again.
+// 	NOTE(review): if every candidate is excluded/in use the JASSERT
+// 	fires and control falls off the end without a return value.
+Reg Thumb2_Tmp(Thumb2_Info *jinfo, unsigned exclude)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned *stack = jstack->stack;
+  unsigned depth = jstack->depth;
+  unsigned mask;
+  unsigned i;
+
+  mask = 0;
+  for (i = 0; i < depth; i++) mask |= 1 << stack[i];
+  mask |= exclude;
+  // Prefer a free stack register; fall back to IP then LR.
+  for (i = 0; i < STACK_REGS; i++)
+    if ((mask & (1<<i)) == 0) return i;
+  if ((mask & (1<<ARM_IP)) == 0) return ARM_IP;
+  if ((mask & (1<<ARM_LR)) == 0) return ARM_LR;
+  JASSERT(0, "failed to allocate a tmp reg");
+}
+
+// Flush all register-cached Java stack entries to the memory stack and
+// empty the register cache.
+void Thumb2_Flush(Thumb2_Info *jinfo)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+
+  if (jstack->depth > 0)
+    Thumb2_Push_Multiple(jinfo->codebuf, jstack->stack, jstack->depth);
+  jstack->depth = 0;
+}
+
+// SAVE_STACK and RESTORE_STACK save the stack state so that it's
+// possible to do a stack flush to memory and restore that stack state
+// to the same registers.
+// Note: SAVE_STACK declares locals (incl. a VLA), so the pair must be
+// used within one scope, SAVE before RESTORE, at most once per scope.
+#define SAVE_STACK(JSTACK)					\
+  unsigned saved_stack_elements[JSTACK->depth];			\
+  unsigned saved_stack_depth;					\
+  memcpy(saved_stack_elements, JSTACK->stack,			\
+	 JSTACK->depth * sizeof saved_stack_elements[0]);	\
+  saved_stack_depth = JSTACK->depth;
+#define RESTORE_STACK(JSTACK, CODEBUF)					\
+  Thumb2_Pop_Multiple(CODEBUF, saved_stack_elements, saved_stack_depth); \
+  memcpy(JSTACK->stack, saved_stack_elements,				\
+	 JSTACK->depth * sizeof saved_stack_elements[0]);		\
+  JSTACK->depth = saved_stack_depth;
+
+// Call this when we are about to corrupt a local
+// The local may already be on the stack
+// For example
+// 	iload	0
+// 	iconst	2
+// 	istore	0
+// 	istore	1
+// Without this check the code generated would be (r4 is local 0, r5 is local 1)
+// 	mov	r4, #2
+//	mov	r5, r4
+// With this check the code should be
+// 	mov	r3, r4
+// 	mov	r4, #2
+// 	mov	r5, r3
+// This is not ideal, but is better than the previous:-)
+//
+// r      - the (local-holding) register about to be overwritten
+// ignore - number of top-of-stack entries exempt from the check
+void Thumb2_Corrupt(Thumb2_Info *jinfo, unsigned r, unsigned ignore)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned *stack = jstack->stack;
+  unsigned depth = jstack->depth;
+  unsigned r_new, mask;
+  unsigned i;
+
+  if (ignore >= depth) return;
+//  JASSERT(depth >= ignore, "Cant ignore more than the whole stack!!");
+  if (IS_SREG(r)) return;
+  depth -= ignore;
+  // If r appears on the cached stack, copy its value into a fresh
+  // register and rename every stack occurrence of r to that register.
+  for (i = 0; i < depth; i++) {
+    if (r == stack[i]) {
+      Thumb2_Spill(jinfo, 1, 0);
+      depth = jstack->depth - ignore;
+      r_new = JSTACK_REG(jstack);
+      mov_reg(jinfo->codebuf, r_new, r);
+      for (i = 0; i < depth; i++) if (r == stack[i]) stack[i] = r_new;
+      break;
+    }
+  }
+}
+
+// If the bytecode at 'bci' is an istore/fstore/astore (and not a branch
+// target), return the register caching that local so the result can be
+// written straight to it; return 0 if there is no such register.
+unsigned Thumb2_ResultLocal(Thumb2_Info *jinfo, unsigned bci)
+{
+  unsigned opc = jinfo->code_base[bci];
+  if (jinfo->bc_stackinfo[bci] & BC_BRANCH_TARGET) return 0;
+  if (opc < opc_istore || opc > opc_astore_3) return 0;
+  if (opc == opc_istore || opc == opc_fstore || opc == opc_astore)
+    return jinfo->jregs->r_local[jinfo->code_base[bci+1]];
+  if ((opc >= opc_istore_0 && opc <= opc_istore_3) ||
+	(opc >= opc_fstore_0 && opc <= opc_fstore_3) ||
+	(opc >= opc_astore_0 && opc <= opc_astore_3))
+    return jinfo->jregs->r_local[(opc-opc_istore_0)&3];
+  return 0;
+}
+
+// Map from Java arithmetic bytecodes (indexed relative to opc_iadd) to
+// ARM data-processing / VFP opcodes; 0 marks combinations handled by
+// dedicated code paths rather than this table.
+static const unsigned char dOps[] = {
+	DP_ADD, DP_ADC, VP_ADD, VP_ADD,
+	DP_SUB, DP_SBC, VP_SUB, VP_SUB,
+	DP_MUL, 0, VP_MUL, VP_MUL,
+	0, 0, VP_DIV, VP_DIV,
+	0, 0, 0, 0,
+	0, 0, 0, 0,
+	DP_LSL, 0,
+	DP_ASR, 0,
+	DP_LSR, 0,
+	DP_AND, DP_AND, DP_ORR, DP_ORR, DP_EOR, DP_EOR,
+};
+
+// Push the constant 'imm' onto the Java stack, peephole-combining it
+// with the following bytecode where profitable (store to a register-
+// allocated local, immediate-operand ALU op, or power-of-two idiv).
+// Returns the number of extra bytecodes consumed by the combination
+// (0 if the constant was just materialised on the stack).
+unsigned Thumb2_Imm(Thumb2_Info *jinfo, unsigned imm, unsigned next_bci)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned r;
+  unsigned next_op;
+
+  // Only combine when the next bytecode cannot be reached by a branch.
+  if (!(jinfo->bc_stackinfo[next_bci] & BC_BRANCH_TARGET)) {
+    next_op = jinfo->code_base[next_bci];
+    if (next_op > OPC_LAST_JAVA_OP) {
+      // Normalise a rewritten/fast opcode back to its Java form.
+      if (Bytecodes::is_defined((Bytecodes::Code)next_op))
+	next_op = (unsigned)Bytecodes::java_code((Bytecodes::Code)next_op);
+    }
+    switch (next_op) {
+      case opc_istore:
+      case opc_fstore:
+      case opc_astore: {
+	// Constant stored straight into a register-allocated local.
+	unsigned local = jinfo->code_base[next_bci+1];
+	r = jinfo->jregs->r_local[local];
+	if (r) {
+	  Thumb2_Corrupt(jinfo, r, 0);
+	  mov_imm(jinfo->codebuf, r, imm);
+	  return 2;
+	}
+	break;
+      }
+      case opc_istore_0:
+      case opc_istore_1:
+      case opc_istore_2:
+      case opc_istore_3:
+      case opc_fstore_0:
+      case opc_fstore_1:
+      case opc_fstore_2:
+      case opc_fstore_3:
+      case opc_astore_0:
+      case opc_astore_1:
+      case opc_astore_2:
+      case opc_astore_3: {
+	unsigned local = (jinfo->code_base[next_bci]-opc_istore_0) & 3;
+	r = jinfo->jregs->r_local[local];
+	if (r) {
+	  Thumb2_Corrupt(jinfo, r, 0);
+	  mov_imm(jinfo->codebuf, r, imm);
+	  return 1;
+	}
+	break;
+      }
+      case opc_iadd:
+      case opc_isub:
+      case opc_ishl:
+      case opc_ishr:
+      case opc_iushr:
+      case opc_iand:
+      case opc_ior:
+      case opc_ixor: {
+	// Fold the constant in as the immediate operand of the ALU op,
+	// possibly also writing the result directly to a local.
+	unsigned len = 0;
+	unsigned r_lho;
+
+	Thumb2_Fill(jinfo, 1);
+	r_lho = POP(jstack);
+
+	r = Thumb2_ResultLocal(jinfo, next_bci+1);
+	if (r) {
+	  Thumb2_Corrupt(jinfo, r, 0);
+	  len = Bytecodes::length_for((Bytecodes::Code)jinfo->code_base[next_bci+1]);
+	} else {
+	  Thumb2_Spill(jinfo, 1, 0);
+	  r = JSTACK_REG(jstack);
+	  PUSH(jstack, r);
+	}
+	if (next_op == opc_ishl || next_op == opc_ishr || next_op == opc_iushr)
+	  shift_imm(jinfo->codebuf, dOps[next_op-opc_iadd], r, r_lho, imm);
+	else
+	  dop_imm(jinfo->codebuf, dOps[next_op-opc_iadd], r, r_lho, imm);
+	return 1+len;
+      }
+
+      case opc_idiv: {
+	unsigned len = 0;
+	unsigned r_lho;
+	unsigned abs_imm = abs((int)imm);
+
+	// Power-of-two divisor: strength-reduce to a rounding shift.
+	if ((imm & -imm) == abs_imm) {
+	  unsigned l2_imm = LOG2(abs_imm);
+	  unsigned r_lho;
+
+	  if (imm == 0) break;
+	  if (imm == 1) return 1;
+
+	  Thumb2_Fill(jinfo, 1);
+	  r_lho = POP(jstack);
+
+	  r = Thumb2_ResultLocal(jinfo, next_bci+1);
+	  if (r) {
+	    Thumb2_Corrupt(jinfo, r, 0);
+	    len = Bytecodes::length_for((Bytecodes::Code)jinfo->code_base[next_bci+1]);
+	  } else {
+	    Thumb2_Spill(jinfo, 1, 0);
+	    r = JSTACK_REG(jstack);
+	    PUSH(jstack, r);
+	  }
+
+	  if (abs_imm != 1) {
+	    // Add (divisor-1) to negative dividends so the arithmetic
+	    // shift rounds towards zero as Java idiv requires.
+	    unsigned r_tmp = r_lho;
+	    if (abs_imm != 2) {
+	      r_tmp = Thumb2_Tmp(jinfo, (1<<r_lho));
+	      asr_imm(jinfo->codebuf, r_tmp, r_lho, 31);
+	    }
+	    add_reg_shift(jinfo->codebuf, r, r_lho, r_tmp, SHIFT_LSR, 32-l2_imm);
+	    asr_imm(jinfo->codebuf, r, r, l2_imm);
+	  }
+	  if ((int)imm < 0)
+	    rsb_imm(jinfo->codebuf, r, r, 0);
+	  return 1+len;
+	}
+	break;
+      }
+    }
+  }
+  // Default: materialise the constant in a fresh stack register.
+  Thumb2_Spill(jinfo, 1, 0);
+  r = JSTACK_REG(jstack);
+  PUSH(jstack, r);
+  mov_imm(jinfo->codebuf, r, imm);
+  return 0;
+}
+
+// Push a 64-bit constant (lo/hi word pair) onto the Java stack in two
+// freshly allocated registers.
+void Thumb2_ImmX2(Thumb2_Info *jinfo, unsigned lo, unsigned hi)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned r_lo, r_hi;
+
+  Thumb2_Spill(jinfo, 2, 0);
+  r_hi = PUSH(jstack, JSTACK_REG(jstack));
+  r_lo = PUSH(jstack, JSTACK_REG(jstack));
+  mov_imm(jinfo->codebuf, r_lo, lo);
+  mov_imm(jinfo->codebuf, r_hi, hi);
+}
+
+// Frame-layout helpers: byte offset of a local from Rstack (locals are
+// laid out in reverse order above the frame), and the base register /
+// offset used to address istate fields depending on whether a dedicated
+// Ristate register is in use.
+#define LOCAL_OFFSET(local, stackdepth, nlocals) ((stackdepth)*4 + FRAME_SIZE + ((nlocals)-1-(local))*4)
+#define ISTATE_REG(jinfo)	  ((jinfo)->use_istate ? Ristate : Rstack)
+#define ISTATE(jinfo, stackdepth) ((jinfo)->use_istate ? 0 : (((stackdepth)-(jinfo)->jstack->depth)*4))
+#define ISTATE_OFFSET(jinfo, stackdepth, offset) (ISTATE(jinfo, stackdepth) + (offset))
+
+// Load Java local 'local' from the frame into register r.  'stackdepth'
+// is only relevant when addressing relative to Rstack (no Ristate reg).
+void load_local(Thumb2_Info *jinfo, Reg r, unsigned local, unsigned stackdepth)
+{
+  int nlocals = jinfo->method->max_locals();
+  if (jinfo->use_istate)
+    ldr_imm(jinfo->codebuf, r, Ristate, FRAME_SIZE + (nlocals-1-local) * 4, 1, 0);
+  else
+    ldr_imm(jinfo->codebuf, r, Rstack, LOCAL_OFFSET(local, stackdepth, nlocals), 1, 0);
+}
+
+// Store register r into Java local 'local' in the frame (counterpart of
+// load_local).
+void store_local(Thumb2_Info *jinfo, Reg r, unsigned local, unsigned stackdepth)
+{
+  int nlocals = jinfo->method->max_locals();
+  if (jinfo->use_istate)
+    str_imm(jinfo->codebuf, r, Ristate, FRAME_SIZE + (nlocals-1-local) * 4, 1, 0);
+  else
+    str_imm(jinfo->codebuf, r, Rstack, LOCAL_OFFSET(local, stackdepth, nlocals), 1, 0);
+}
+
+// Load an interpreter-state field (at 'istate_offset') into register r,
+// addressing via Ristate when available, else relative to Rstack.
+void load_istate(Thumb2_Info *jinfo, Reg r, unsigned istate_offset, unsigned stackdepth)
+{
+  if (jinfo->use_istate)
+    ldr_imm(jinfo->codebuf, r, Ristate, istate_offset, 1, 0);
+  else
+    ldr_imm(jinfo->codebuf, r, Rstack, ISTATE_OFFSET(jinfo, stackdepth, istate_offset), 1, 0);
+}
+
+// Store register r into an interpreter-state field (counterpart of
+// load_istate).
+void store_istate(Thumb2_Info *jinfo, Reg r, unsigned istate_offset, unsigned stackdepth)
+{
+  if (jinfo->use_istate)
+    str_imm(jinfo->codebuf, r, Ristate, istate_offset, 1, 0);
+  else
+    str_imm(jinfo->codebuf, r, Rstack, ISTATE_OFFSET(jinfo, stackdepth, istate_offset), 1, 0);
+}
+
+// Compile a 32-bit load of local 'local' onto the Java stack.  If the
+// local lives in a register it is pushed directly; otherwise a register
+// is allocated and the value is loaded from the frame.
+void Thumb2_Load(Thumb2_Info *jinfo, int local, unsigned stackdepth)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned r;
+
+  r = jinfo->jregs->r_local[local];
+  if (r) {
+    PUSH(jstack, r);
+  } else {
+    int nlocals = jinfo->method->max_locals();
+
+    Thumb2_Spill(jinfo, 1, 0);
+    JASSERT(stackdepth >= jstack->depth, "negative stack offset?");
+    // Convert to the depth of the in-memory portion of the stack.
+    stackdepth -= jstack->depth;
+    r = JSTACK_REG(jstack);
+    PUSH(jstack, r);
+    load_local(jinfo, r, local, stackdepth);
+  }
+}
+
+// Compile a 64-bit load of local pair (local, local+1) onto the Java
+// stack; each half may independently be register-allocated or in memory.
+void Thumb2_LoadX2(Thumb2_Info *jinfo, int local, unsigned stackdepth)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned r_lo, r_hi;
+  int nlocals = jinfo->method->max_locals();
+
+  r_hi = jinfo->jregs->r_local[local];
+  if (r_hi) {
+    r_lo = jinfo->jregs->r_local[local+1];
+    if (r_lo) {
+      // Both halves in registers - pure bookkeeping.
+      PUSH(jstack, r_hi);
+      PUSH(jstack, r_lo);
+    } else {
+      // hi in a register, lo loaded from the frame.
+      Thumb2_Spill(jinfo, 1, 0);
+      stackdepth -= jstack->depth;
+      PUSH(jstack, r_hi);
+      r_lo = PUSH(jstack, JSTACK_REG(jstack));
+      load_local(jinfo, r_lo, local+1, stackdepth);
+    }
+  } else {
+    r_lo = jinfo->jregs->r_local[local+1];
+    if (r_lo) {
+      // lo in a register, hi loaded from the frame.
+      Thumb2_Spill(jinfo, 1, 0);
+      stackdepth -= jstack->depth;
+      r_hi = PUSH(jstack, JSTACK_REG(jstack));
+      load_local(jinfo, r_hi, local, stackdepth);
+      PUSH(jstack, r_lo);
+    } else {
+      // Both halves loaded from the frame.
+      Thumb2_Spill(jinfo, 2, 0);
+      stackdepth -= jstack->depth;
+      r_hi = PUSH(jstack, JSTACK_REG(jstack));
+      r_lo = PUSH(jstack, JSTACK_REG(jstack));
+      load_local(jinfo, r_hi, local, stackdepth);
+      load_local(jinfo, r_lo, local+1, stackdepth);
+    }
+  }
+}
+
+// Compile a 32-bit store of the stack top into local 'local', either to
+// its allocated register (guarding live stack copies via
+// Thumb2_Corrupt) or to its frame slot.
+void Thumb2_Store(Thumb2_Info *jinfo, int local, unsigned stackdepth)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned r, r_local;
+  int nlocals = jinfo->method->max_locals();
+
+  Thumb2_Fill(jinfo, 1);
+  stackdepth -= jstack->depth;
+  r = POP(jstack);
+  r_local = jinfo->jregs->r_local[local];
+  if (r_local) {
+    Thumb2_Corrupt(jinfo, r_local, 0);
+    mov_reg(jinfo->codebuf, r_local, r);
+  } else {
+    store_local(jinfo, r, local, stackdepth);
+  }
+}
+
+// Compile a 64-bit store of the two top stack words into the local pair
+// (local = hi, local+1 = lo); each half goes to its register or slot.
+void Thumb2_StoreX2(Thumb2_Info *jinfo, int local, unsigned stackdepth)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned r_lo, r_hi;
+  unsigned r_local_lo, r_local_hi;
+  int nlocals = jinfo->method->max_locals();
+
+  Thumb2_Fill(jinfo, 2);
+  r_lo = POP(jstack);
+  r_hi = POP(jstack);
+  // Account for the two words just popped off the simulated stack.
+  stackdepth -= 2;
+
+  r_local_hi = jinfo->jregs->r_local[local];
+  if (r_local_hi) {
+    Thumb2_Corrupt(jinfo, r_local_hi, 0);
+    mov_reg(jinfo->codebuf, r_local_hi, r_hi);
+  } else {
+    store_local(jinfo, r_hi, local, stackdepth-jstack->depth);
+  }
+
+  r_local_lo = jinfo->jregs->r_local[local+1];
+  if (r_local_lo) {
+    Thumb2_Corrupt(jinfo, r_local_lo, 0);
+    mov_reg(jinfo->codebuf, r_local_lo, r_lo);
+  } else {
+    store_local(jinfo, r_lo, local+1, stackdepth-jstack->depth);
+  }
+}
+
+// Compile a 32-bit-or-smaller array load (iaload/faload/aaload/baload/
+// caload/saload): pops index and arrayref, bounds-checks via chka, then
+// loads with the element-size-appropriate scaled addressing.
+// Offsets 8 (length) and 12 (element base) are presumably the arrayOop
+// layout constants for this port - confirm against the header files.
+void Thumb2_Xaload(Thumb2_Info *jinfo, u32 opc)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned r_index, r_array, r_value;
+  unsigned op = opc - (unsigned)opc_iaload;
+  unsigned r_tmp;
+
+  Thumb2_Fill(jinfo, 2);
+  r_index = POP(jstack);
+  r_array = POP(jstack);
+  Thumb2_Spill(jinfo, 1, 0);
+  r_tmp = Thumb2_Tmp(jinfo, (1<<r_array)|(1<<r_index));
+  r_value = JSTACK_REG(jstack);
+  PUSH(jstack, r_value);
+  ldr_imm(jinfo->codebuf, r_tmp, r_array, 8, 1, 0);
+  chka(jinfo->codebuf, r_tmp, r_index);
+  if (opc == opc_baload) {
+    add_reg(jinfo->codebuf, r_tmp, r_array, r_index);
+    ldrsb_imm(jinfo->codebuf, r_value, r_tmp, 12, 1, 0);
+  } else if (opc == opc_caload) {
+    add_reg_shift(jinfo->codebuf, r_tmp, r_array, r_index, SHIFT_LSL, 1);
+    ldrh_imm(jinfo->codebuf, r_value, r_tmp, 12, 1, 0);
+  } else if (opc == opc_saload) {
+    add_reg_shift(jinfo->codebuf, r_tmp, r_array, r_index, SHIFT_LSL, 1);
+    ldrsh_imm(jinfo->codebuf, r_value, r_tmp, 12, 1, 0);
+  } else {
+    add_reg_shift(jinfo->codebuf, r_tmp, r_array, r_index, SHIFT_LSL, 2);
+    ldr_imm(jinfo->codebuf, r_value, r_tmp, 12, 1, 0);
+  }
+}
+
+// Compile a 64-bit array load (laload/daload): bounds-check, then a
+// doubleword load at 8-byte scaled index (element base offset 16).
+void Thumb2_X2aload(Thumb2_Info *jinfo)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned r_index, r_array, r_lo, r_hi;
+  unsigned r_tmp;
+
+  Thumb2_Fill(jinfo, 2);
+  r_index = POP(jstack);
+  r_array = POP(jstack);
+  Thumb2_Spill(jinfo, 2, 0);
+  r_tmp = Thumb2_Tmp(jinfo, (1<<r_array)|(1<<r_index));
+  r_hi = PUSH(jstack, JSTACK_REG(jstack));
+  r_lo = PUSH(jstack, JSTACK_REG(jstack));
+  ldr_imm(jinfo->codebuf, r_tmp, r_array, 8, 1, 0);
+  chka(jinfo->codebuf, r_tmp, r_index);
+  add_reg_shift(jinfo->codebuf, r_tmp, r_array, r_index, SHIFT_LSL, 3);
+  ldrd_imm(jinfo->codebuf, r_lo, r_hi, r_tmp, 16, 1, 0);
+}
+
+// Compile a 32-bit-or-smaller array store (iastore/fastore/bastore/
+// castore/sastore): pops value, index, arrayref, bounds-checks, then
+// stores with element-size-appropriate scaled addressing.
+void Thumb2_Xastore(Thumb2_Info *jinfo, u32 opc)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned r_value, r_index, r_array;
+  unsigned op = opc - (unsigned)opc_iastore;
+  unsigned r_tmp;
+
+  Thumb2_Fill(jinfo, 3);
+  r_value = POP(jstack);
+  r_index = POP(jstack);
+  r_array = POP(jstack);
+  r_tmp = Thumb2_Tmp(jinfo, (1<<r_array)|(1<<r_index)|(1<<r_value));
+  ldr_imm(jinfo->codebuf, r_tmp, r_array, 8, 1, 0);
+  chka(jinfo->codebuf, r_tmp, r_index);
+  if (opc == opc_bastore) {
+    add_reg(jinfo->codebuf, r_tmp, r_array, r_index);
+    strb_imm(jinfo->codebuf, r_value, r_tmp, 12, 1, 0);
+  } else if (opc == opc_castore || opc == opc_sastore) {
+    add_reg_shift(jinfo->codebuf, r_tmp, r_array, r_index, SHIFT_LSL, 1);
+    strh_imm(jinfo->codebuf, r_value, r_tmp, 12, 1, 0);
+  } else {
+    add_reg_shift(jinfo->codebuf, r_tmp, r_array, r_index, SHIFT_LSL, 2);
+    str_imm(jinfo->codebuf, r_value, r_tmp, 12, 1, 0);
+  }
+}
+
+// Compile a 64-bit array store (lastore/dastore): bounds-check, then a
+// doubleword store at 8-byte scaled index (element base offset 16).
+void Thumb2_X2astore(Thumb2_Info *jinfo)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned r_lo, r_hi, r_index, r_array;
+  unsigned r_tmp;
+
+  Thumb2_Fill(jinfo, 4);
+  r_lo = POP(jstack);
+  r_hi = POP(jstack);
+  r_index = POP(jstack);
+  r_array = POP(jstack);
+  r_tmp = Thumb2_Tmp(jinfo, (1<<r_array)|(1<<r_index)|(1<<r_lo)|(1<<r_hi));
+  ldr_imm(jinfo->codebuf, r_tmp, r_array, 8, 1, 0);
+  chka(jinfo->codebuf, r_tmp, r_index);
+  add_reg_shift(jinfo->codebuf, r_tmp, r_array, r_index, SHIFT_LSL, 3);
+  strd_imm(jinfo->codebuf, r_lo, r_hi, r_tmp, 16, 1, 0);
+}
+
+// Discard n stack words: register-cached entries are dropped for free;
+// any remainder is discarded by bumping the memory stack pointer.
+void Thumb2_Pop(Thumb2_Info *jinfo, unsigned n)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+
+  while (n > 0 && jstack->depth > 0) {
+    POP(jstack);
+    n--;
+  }
+  if (n > 0) add_imm(jinfo->codebuf, Rstack, Rstack, n * 4);
+}
+
+// Compile dup/dup_x1/dup_x2 (n = 0, 1, 2 respectively): duplicate the
+// top stack word, inserting the copy n slots down.  Pure register
+// renaming - no code is emitted; stack[depth] is used as scratch.
+void Thumb2_Dup(Thumb2_Info *jinfo, unsigned n)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned *stack = jstack->stack;
+  unsigned depth;
+  unsigned i;
+
+  Thumb2_Fill(jinfo, n+1);
+  depth = jstack->depth;
+  for (i = 0; i <= n; i++)
+    stack[depth-i] = stack[depth-i-1];
+  stack[depth-n-1] = stack[depth];
+  jstack->depth = depth + 1;
+}
+
+// Compile dup2/dup2_x1/dup2_x2 (n = 0, 1, 2): duplicate the top two
+// stack words, inserting the copies n slots down.  Pure register
+// renaming; stack[depth] and stack[depth+1] are used as scratch.
+void Thumb2_Dup2(Thumb2_Info *jinfo, unsigned n)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned *stack = jstack->stack;
+  unsigned depth;
+  unsigned i;
+
+  Thumb2_Fill(jinfo, n+2);
+  depth = jstack->depth;
+  for (i = 0; i <= n+1; i++)
+    stack[depth-i+1] = stack[depth-i-1];
+  stack[depth-n-1] = stack[depth+1];
+  stack[depth-n-2] = stack[depth];
+  jstack->depth = depth + 2;
+}
+
+// Compile the swap bytecode: exchange the two top stack words by
+// register renaming only (no code emitted).
+void Thumb2_Swap(Thumb2_Info *jinfo)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+
+  Thumb2_Fill(jinfo, 2);
+  SWAP(jstack);
+}
+
+// Compile a two-operand int ALU bytecode (iadd..ixor) via the dOps
+// table.  For shifts the count is masked to 0-31 first, matching Java
+// shift semantics.
+void Thumb2_iOp(Thumb2_Info *jinfo, u32 opc)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned r_lho, r_rho, r;
+
+  Thumb2_Fill(jinfo, 2);
+  r_rho = POP(jstack);
+  r_lho = POP(jstack);
+  Thumb2_Spill(jinfo, 1, 0);
+  r = JSTACK_REG(jstack);
+  PUSH(jstack, r);
+  switch (opc) {
+  case opc_ishl:
+  case opc_ishr:
+  case opc_iushr:
+    {
+      // Java defines shift counts mod 32; mask into a temp reg.
+      unsigned tmp_reg = Thumb2_Tmp(jinfo, 1 << r_lho | 1 << r_rho | 1 << r);
+      and_imm(jinfo->codebuf, tmp_reg, r_rho, 31);
+      r_rho = tmp_reg;
+      break;
+    }
+  }
+  dop_reg(jinfo->codebuf, dOps[opc-opc_iadd], r, r_lho, r_rho, 0, 0);
+}
+
+// Compile ineg: result = 0 - src via reverse-subtract immediate.
+void Thumb2_iNeg(Thumb2_Info *jinfo, u32 opc)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned r_src, r;
+
+  Thumb2_Fill(jinfo, 1);
+  r_src = POP(jstack);
+  Thumb2_Spill(jinfo, 1, 0);
+  r = JSTACK_REG(jstack);
+  PUSH(jstack, r);
+  rsb_imm(jinfo->codebuf, r, r_src, 0);
+}
+
+// Compile lneg (64-bit negate): lo = 0 - lo (RSB sets carry), then
+// hi = 0 - hi - borrow via SBC.  The exclude/prefer masks keep r_res_lo
+// from landing on r_hi, which is still needed for the high half.
+void Thumb2_lNeg(Thumb2_Info *jinfo, u32 opc)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned r_lo, r_hi, r_res_lo, r_res_hi;
+  unsigned r_tmp;
+
+  Thumb2_Fill(jinfo, 2);
+  r_lo = POP(jstack);
+  r_hi = POP(jstack);
+  Thumb2_Spill(jinfo, 1, 0);
+  r_res_hi = PUSH(jstack, JSTACK_REG(jstack));
+  Thumb2_Spill(jinfo, 1, (1<<r_hi));
+  r_res_lo = PUSH(jstack, JSTACK_PREFER(jstack, ~(1<<r_hi)));
+  JASSERT(r_res_lo != r_res_hi, "oops");
+  JASSERT(r_res_lo != r_hi, "r_res_lo != r_hi");
+  rsb_imm(jinfo->codebuf, r_res_lo, r_lo, 0);
+  r_tmp = Thumb2_Tmp(jinfo, (1<<r_hi)|(1<<r_res_lo));
+  mov_imm(jinfo->codebuf, r_tmp, 0);
+  dop_reg(jinfo->codebuf, DP_SBC, r_res_hi, r_tmp, r_hi, SHIFT_LSL, 0);
+}
+
+// Compile fneg: flip the IEEE-754 sign bit with an EOR of 0x80000000.
+void Thumb2_fNeg(Thumb2_Info *jinfo, u32 opc)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned r, r_result;
+
+  Thumb2_Fill(jinfo, 1);
+  r = POP(jstack);
+  Thumb2_Spill(jinfo, 1, 0);
+  r_result = PUSH(jstack, JSTACK_REG(jstack));
+  eor_imm(jinfo->codebuf, r_result, r, 0x80000000);
+}
+
+// arm_op is either DP_EOR (for dnegate) or DP_BIC (for dabs)
+// Applies arm_op with mask 0x80000000 to the high word (which holds the
+// IEEE-754 sign bit) and copies the low word through unchanged.
+static void Thumb2_dUnaryOp(Thumb2_Info *jinfo, u32 arm_op)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned r_lo, r_hi, r_res_lo, r_res_hi;
+
+  Thumb2_Fill(jinfo, 2);
+  r_lo = POP(jstack);
+  r_hi = POP(jstack);
+  Thumb2_Spill(jinfo, 1, 0);
+  r_res_hi = PUSH(jstack, JSTACK_REG(jstack));
+  Thumb2_Spill(jinfo, 1, (1<<r_hi));
+  r_res_lo = PUSH(jstack, JSTACK_PREFER(jstack, ~(1<<r_hi)));
+  JASSERT(r_res_lo != r_res_hi, "oops");
+  JASSERT(r_res_lo != r_hi, "r_res_lo != r_hi");
+  mov_reg(jinfo->codebuf, r_res_lo, r_lo);
+  dop_imm(jinfo->codebuf, arm_op, r_res_hi, r_hi, 0x80000000);
+}
+
+// Compile dneg: toggle the double's sign bit (EOR).
+void Thumb2_dNeg(Thumb2_Info *jinfo)
+{
+  Thumb2_dUnaryOp(jinfo, DP_EOR);
+}
+
+// Compile dabs (Math.abs intrinsic): clear the double's sign bit (BIC).
+void Thumb2_dAbs(Thumb2_Info *jinfo)
+{
+  Thumb2_dUnaryOp(jinfo, DP_BIC);
+}
+
+// Compile a 64-bit add/sub/logical bytecode: low-half op then high-half
+// op with carry, using consecutive dOps entries (e.g. ADD then ADC).
+// res_lo must avoid the hi inputs so the first op can't clobber them.
+void Thumb2_lOp(Thumb2_Info *jinfo, u32 opc)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned res_lo, res_hi;
+  unsigned lho_lo, lho_hi;
+  unsigned rho_lo, rho_hi;
+
+  Thumb2_Fill(jinfo, 4);
+  rho_lo = POP(jstack);
+  rho_hi = POP(jstack);
+  lho_lo = POP(jstack);
+  lho_hi = POP(jstack);
+  Thumb2_Spill(jinfo, 1, 0);
+  res_hi = PUSH(jstack, JSTACK_REG(jstack));
+  Thumb2_Spill(jinfo, 1, (1<<lho_hi)|(1<<rho_hi));
+  res_lo = PUSH(jstack, JSTACK_PREFER(jstack, ~((1<<lho_hi)|(1<<rho_hi))));
+  JASSERT(res_lo != rho_hi && res_lo != lho_hi, "res_lo != rho_hi && res_lo != lho_hi");
+  dop_reg(jinfo->codebuf, dOps[opc-opc_ladd], res_lo, lho_lo, rho_lo, SHIFT_LSL, 0);
+  dop_reg(jinfo->codebuf, dOps[opc-opc_ladd+1], res_hi, lho_hi, rho_hi, SHIFT_LSL, 0);
+}
+
+// Compile lmul (64 x 64 -> low 64 bits):
+//   tmp = (u64)rho_lo * lho_lo          (UMULL)
+//   tmp_hi += rho_lo * lho_hi           (MLA)
+//   res_hi = rho_hi * lho_lo + tmp_hi   (MLA)
+// Temps are diverted to scratch regs if the result regs alias an input.
+void Thumb2_lmul(Thumb2_Info *jinfo)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned res_lo, res_hi;
+  unsigned lho_lo, lho_hi;
+  unsigned rho_lo, rho_hi;
+  unsigned r_tmp_lo, r_tmp_hi;
+  unsigned op_mask;
+
+  Thumb2_Fill(jinfo, 4);
+  rho_lo = POP(jstack);
+  rho_hi = POP(jstack);
+  lho_lo = POP(jstack);
+  lho_hi = POP(jstack);
+  op_mask = (1<<rho_lo)|(1<<rho_hi)|(1<<lho_lo)|(1<<lho_hi);
+  Thumb2_Spill(jinfo, 2, 0);
+  res_hi = PUSH(jstack, JSTACK_PREFER(jstack, ~op_mask));
+  res_lo = PUSH(jstack, JSTACK_PREFER(jstack, ~op_mask));
+  r_tmp_lo = res_lo;
+  r_tmp_hi = res_hi;
+  if (op_mask & (1<<r_tmp_lo)) r_tmp_lo = Thumb2_Tmp(jinfo, op_mask);
+  if (op_mask & (1<<r_tmp_hi)) r_tmp_hi = Thumb2_Tmp(jinfo, op_mask|(1<<r_tmp_lo));
+  umull(jinfo->codebuf, r_tmp_lo, r_tmp_hi, rho_lo, lho_lo);
+  mla(jinfo->codebuf, r_tmp_hi, rho_lo, lho_hi, r_tmp_hi);
+  mla(jinfo->codebuf, res_hi, rho_hi, lho_lo, r_tmp_hi);
+  mov_reg(jinfo->codebuf, res_lo, r_tmp_lo);
+}
+
+// Compile a single-precision float ALU bytecode: move operands into VFP
+// S0/S1, perform the VFP op from dOps, move the result back to ARM.
+void Thumb2_fOp(Thumb2_Info *jinfo, u32 opc)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned rho, lho, res;
+
+  Thumb2_Fill(jinfo, 2);
+  rho = POP(jstack);
+  lho = POP(jstack);
+  Thumb2_Spill(jinfo, 1, 0);
+  res = PUSH(jstack, JSTACK_REG(jstack));
+  vmov_reg_s_toVFP(jinfo->codebuf, VFP_S0, lho);
+  vmov_reg_s_toVFP(jinfo->codebuf, VFP_S1, rho);
+  vop_reg_s(jinfo->codebuf, dOps[opc-opc_iadd], VFP_S0, VFP_S0, VFP_S1);
+  vmov_reg_s_toARM(jinfo->codebuf, res, VFP_S0);
+}
+
+// Compile a double-precision ALU bytecode: move the lo/hi register
+// pairs into VFP D0/D1, perform the VFP op from dOps, move the result
+// pair back to ARM registers.
+void Thumb2_dOp(Thumb2_Info *jinfo, u32 opc)
+{
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned rho_lo, rho_hi, lho_lo, lho_hi, res_lo, res_hi;
+
+  Thumb2_Fill(jinfo, 4);
+  rho_lo = POP(jstack);
+  rho_hi = POP(jstack);
+  lho_lo = POP(jstack);
+  lho_hi = POP(jstack);
+  Thumb2_Spill(jinfo, 2, 0);
+  res_hi = PUSH(jstack, JSTACK_REG(jstack));
+  res_lo = PUSH(jstack, JSTACK_REG(jstack));
+  vmov_reg_d_toVFP(jinfo->codebuf, VFP_D0, lho_lo, lho_hi);
+  vmov_reg_d_toVFP(jinfo->codebuf, VFP_D1, rho_lo, rho_hi);
+  vop_reg_d(jinfo->codebuf, dOps[opc-opc_iadd], VFP_D0, VFP_D0, VFP_D1);
+  vmov_reg_d_toARM(jinfo->codebuf, res_lo, res_hi, VFP_D0);
+}
+
+// Emit a deliberate trap: load R0/R1 with the opcode and bci for the
+// debugger, then store through a null pointer to force a fault.
+void Thumb2_Handler(Thumb2_Info *jinfo, unsigned handler, unsigned opcode, unsigned bci)
+{
+  mov_imm(jinfo->codebuf, ARM_R0, opcode);
+  mov_imm(jinfo->codebuf, ARM_R1, bci);
+  mov_imm(jinfo->codebuf, ARM_IP, 0);
+  str_imm(jinfo->codebuf, ARM_IP, ARM_IP, 0, 1, 0);
+}
+
+void Thumb2_codegen(Thumb2_Info *jinfo, unsigned start);
+
+// called from the SEGV handling code to see if a polling page read
+// is from a legitimate safepoint address
+// If so, advances the saved PC past the THUMB2_POLLING_PAGE_MAGIC word
+// so execution resumes in the safepoint check code.  Returns true on
+// success, false if the fault did not come from a safepoint poll.
+int Thumb2_Install_Safepoint_PC(ucontext_t *uc, int magicByteOffset)
+{
+  mcontext_t *mc = &uc->uc_mcontext;
+  unsigned long arm_pc = mc->arm_pc;
+  // ensure the faulting instruction lies in JITted code
+  if (arm_pc < (unsigned long)(thumb2_codebuf + 1)) {
+    return false;
+  }
+  if (arm_pc >= (unsigned long)thumb2_codebuf->sp) {
+    return false;
+  }
+  // skip to the MAGIC word and check it is valid
+  arm_pc +=magicByteOffset;
+  if (*((short*)arm_pc) != (short)THUMB2_POLLING_PAGE_MAGIC) {
+    return false;
+  }
+
+  // skip the magic word 
+  arm_pc += 2;
+  mc->arm_pc = arm_pc;
+
+  return true;
+}
+
+// Insert code to poll the SafepointSynchronize state and call
+// Helper_SafePoint.
+// -- if offset is negative it identifies a bytecode index which
+// should be jumped to via an unconditional backward branch
+// taken either before or after executing the safepoint check
+// -- if offset is zero or positive then a return or conditional
+// branch, respectively, needs to be compiled so control should
+// flow to end of the safepoint check whether or not it is executed
+
+void Thumb2_Safepoint(Thumb2_Info *jinfo, int stackdepth, int bci, int offset)
+{
+  // normal case: read the polling page and branch to skip
+  // the safepoint test
+  // abnormal case: read the polling page, trap to handler
+  // which resets return address into the safepoint check code
+  //
+  // with a negative offset the generated code will look like
+  //    movw r_tmp, #polling_page
+  //    movt r_tmp, #polling_page
+  //    ldr r_tmp, [r_tmp, #K] ; K == 2 * byte offset to the magic word
+  //    b.n #branchtarget
+  //    #POLLING_PAGE_MAGIC ; magic data word
+  //    <
+  //     safepoint check  code
+  //    >
+  //    b.n #branchtarget
+  //
+  // i.e. the generated code includes the branch backwards twice
+  // and relies on a fault at the ldr to skip into the safepoint code
+  //
+  // with a zero or positive offset the caller will plant the return
+  // (zero) or conditional branch (positive) code after the check so
+  // the normal path skips round the safepoint check code and the
+  // abnormal path just drops through. the generated code will look
+  // like
+  //
+  //    movw r_tmp, #polling_page
+  //    movt r_tmp, #polling_page
+  //    ldr r_tmp, [r_tmp, #0]
+  //    b.n L1
+  //    POLLING_PAGE_MAGIC ; data
+  //    <
+  //     safepoint check  code
+  //    >
+  // L1:
+  //    <caller plants branch/return here>
+  //
+  //  n.b. for a return there is no need save or restore locals
+
+  bool is_return = offset == 0; // This is some kind of return bytecode
+
+  int r_tmp = Thumb2_Tmp(jinfo, 0);
+  unsigned dest;
+  if (offset < 0) {
+    // the index of the backward branch target in the code buffer
+    dest = jinfo->bc_stackinfo[bci+offset] & ~BC_FLAGS_MASK;
+  } else {
+    dest = 0;
+  }
+  mov_imm(jinfo->codebuf, r_tmp, (u32)os::get_polling_page());
+  // this encodes the offset from the read instruction to the magic
+  // word into the fault address, assuming it is 4 bytes. however, if
+  // we need to plant a wide backwards branch we may need to rewrite
+  // this instruction with offset 6. so stash the instruction location
+  // here just in case. n.b. the offset is doubled to ensure the fault
+  // address in aligned -- aligned reads always use a single 16-bit
+  // instruction whereas non-aligned reads require 2 x 16 bit words
+  unsigned read_loc = out_loc(jinfo->codebuf);
+  ldr_imm(jinfo->codebuf, r_tmp, r_tmp, 8, 1, 0);
+  if (offset < 0) {
+    branch_uncond(jinfo->codebuf, dest);
+    unsigned magic_loc = out_loc(jinfo->codebuf);
+    if (magic_loc - read_loc != 4) {
+      JASSERT(magic_loc - read_loc == 6, "bad safepoint offset to magic word");
+      // must have needed a wide branch so patch the load instruction
+      jinfo->codebuf->idx = read_loc >> 1;
+      ldr_imm(jinfo->codebuf, r_tmp, r_tmp, 12, 1, 0);
+      jinfo->codebuf->idx = magic_loc >> 1;
+    }
+  } else {
+    // leave space for the forward skip branch
+    // location of branch instruction is read_loc + 2
+    forward_16(jinfo->codebuf);
+  }
+  // now write a magic word after the branch so the signal handler can
+  // test that a polling page read is kosher
+  out_16(jinfo->codebuf, THUMB2_POLLING_PAGE_MAGIC);
+
+  {
+    // Flush the stack to memory and save its register state.
+    SAVE_STACK(jinfo->jstack);
+    Thumb2_Flush(jinfo);
+
+    // We don't save or restore locals if we're returning.
+    if (! is_return)
+      Thumb2_save_local_refs(jinfo, stackdepth);
+
+    // now the safepoint polling code itself
+    mov_imm(jinfo->codebuf, ARM_R1, bci+CONSTMETHOD_CODEOFFSET);
+    add_imm(jinfo->codebuf, ARM_R2, ISTATE_REG(jinfo),
+	    ISTATE_OFFSET(jinfo, stackdepth, 0));
+    bl(jinfo->codebuf, handlers[H_SAFEPOINT]);
+
+    if (! is_return)
+      Thumb2_restore_local_refs(jinfo, stackdepth);
+
+    RESTORE_STACK(jinfo->jstack, jinfo->codebuf);
+
+    if (offset < 0) {
+      // needs another unconditional backward branch
+      branch_uncond(jinfo->codebuf, dest);
+    } else {
+      // patch in the forward skip branch
+      branch_narrow_patch(jinfo->codebuf, read_loc + 2);
+    }
+  }
+}
+
+// If the target of this conditional branch has already been compiled
+// (i.e. it is a backward branch), plant a safepoint check here.
+// Forward branches need no safepoint check.
+void Thumb2_Cond_Safepoint(Thumb2_Info *jinfo, int stackdepth, int bci) {
+  int offset = GET_JAVA_S2(jinfo->code_base + bci + 1);
+  unsigned dest_taken = bci + offset;
+
+  if (jinfo->bc_stackinfo[dest_taken] & BC_COMPILED) {
+    // pass offset as positive so the safepoint code plants a forward
+    // skip over the test rather than doing an unconditional backwards
+    // branch. that allows the condition test to be planted by
+    // whatever followed this call
+    Thumb2_Safepoint(jinfo, stackdepth, bci, -offset);
+  }
+}
+
+// Compile a conditional branch (condition COND) at BCI.  If the
+// target is already compiled (a backward branch) the branch is planted
+// directly and the fall-through bci is returned so codegen continues
+// there.  Otherwise the fall-through code is compiled first via a
+// recursive Thumb2_codegen call, the branch is patched afterwards, and
+// -1 is returned because the rest of the method has been consumed.
+int Thumb2_Branch(Thumb2_Info *jinfo, unsigned bci, unsigned cond)
+{
+    int offset = GET_JAVA_S2(jinfo->code_base + bci + 1);
+    unsigned dest_taken = bci + offset;
+    unsigned dest_not_taken = bci + 3;	// branch bytecodes are 3 bytes long
+    unsigned loc;
+
+    if (jinfo->bc_stackinfo[dest_taken] & BC_COMPILED) {
+      branch(jinfo->codebuf, cond, jinfo->bc_stackinfo[dest_taken] & ~BC_FLAGS_MASK);
+      return dest_not_taken;
+    }
+    // Forward branch: reserve a 32-bit slot, generate the not-taken
+    // path, then patch the slot with a branch to the now-compiled target.
+    loc = forward_32(jinfo->codebuf);
+    Thumb2_codegen(jinfo, dest_not_taken);
+    JASSERT(jinfo->bc_stackinfo[dest_taken] & BC_COMPILED, "dest in branch not compiled!!!");
+    branch_patch(jinfo->codebuf, cond, loc, jinfo->bc_stackinfo[dest_taken] & ~BC_FLAGS_MASK);
+    return -1;
+}
+
+// Compile an unconditional goto of LEN bytes at BCI with the given
+// branch OFFSET.  A negative stackdepth suppresses the safepoint
+// check.  Backward branches to compiled code go through
+// Thumb2_Safepoint (which plants the branch itself); forward branches
+// are compiled like Thumb2_Branch: generate the intervening code, then
+// patch.  Returns the fall-through bci, or -1 if codegen already
+// consumed the remainder.
+int Thumb2_Goto(Thumb2_Info *jinfo, unsigned bci, int offset, int len, int stackdepth = -1)
+{
+    unsigned dest_taken = bci + offset;
+    unsigned dest_not_taken = bci + len;
+    unsigned loc;
+
+    if (stackdepth >= 0
+	&& jinfo->bc_stackinfo[dest_taken] & BC_COMPILED) {
+      // n.b. the backwards branch will be planted by the safepoint routine
+      Thumb2_Safepoint(jinfo, stackdepth, bci, offset);
+      return dest_not_taken;
+    }
+    loc = forward_32(jinfo->codebuf);
+    Thumb2_codegen(jinfo, dest_not_taken);
+    JASSERT(jinfo->bc_stackinfo[dest_taken] & BC_COMPILED, "dest in goto not compiled!!!");
+    branch_uncond_patch(jinfo->codebuf, loc, jinfo->bc_stackinfo[dest_taken] & ~BC_FLAGS_MASK);
+    return -1;
+}
+
+// Write back to the frame every register-cached local that is an
+// object reference AND has been modified, so the VM/GC sees current
+// values.  The java expression stack must already be empty.
+void Thumb2_save_local_refs(Thumb2_Info *jinfo, unsigned stackdepth)
+{
+  int nlocals = jinfo->method->max_locals();
+  unsigned *locals_info = jinfo->locals_info;
+
+  JASSERT(jinfo->jstack->depth == 0, "stack not empty");
+  for (int slot = 0; slot < nlocals; slot++) {
+    Reg reg = jinfo->jregs->r_local[slot];
+    if (!reg) continue;	// local not cached in a register
+    unsigned linfo = locals_info[slot];
+    if ((linfo & (1 << LOCAL_REF)) && (linfo & (1 << LOCAL_MODIFIED)))
+      store_local(jinfo, reg, slot, stackdepth);
+  }
+}
+
+// Reload all register-cached object-reference locals from the frame
+// (a GC during the VM call may have moved the referents).  The java
+// expression stack must already be empty.
+void Thumb2_restore_local_refs(Thumb2_Info *jinfo, unsigned stackdepth)
+{
+  int nlocals = jinfo->method->max_locals();
+  unsigned *locals_info = jinfo->locals_info;
+
+  JASSERT(jinfo->jstack->depth == 0, "stack not empty");
+  for (int slot = 0; slot < nlocals; slot++) {
+    Reg reg = jinfo->jregs->r_local[slot];
+    if (reg && (locals_info[slot] & (1 << LOCAL_REF)))
+      load_local(jinfo, reg, slot, stackdepth);
+  }
+}
+
+// Write back to the frame every register-cached local that has been
+// modified, regardless of type.  The java expression stack must
+// already be empty.
+void Thumb2_save_all_locals(Thumb2_Info *jinfo, unsigned stackdepth)
+{
+  int nlocals = jinfo->method->max_locals();
+  unsigned *locals_info = jinfo->locals_info;
+
+  JASSERT(jinfo->jstack->depth == 0, "stack not empty");
+  for (int slot = 0; slot < nlocals; slot++) {
+    Reg reg = jinfo->jregs->r_local[slot];
+    if (reg && (locals_info[slot] & (1 << LOCAL_MODIFIED)))
+      store_local(jinfo, reg, slot, stackdepth);
+  }
+}
+
+// Reload every register-cached local from the frame, regardless of
+// type.  The java expression stack must already be empty.
+void Thumb2_restore_all_locals(Thumb2_Info *jinfo, unsigned stackdepth)
+{
+  int nlocals = jinfo->method->max_locals();
+
+  JASSERT(jinfo->jstack->depth == 0, "stack not empty");
+  for (int slot = 0; slot < nlocals; slot++) {
+    Reg reg = jinfo->jregs->r_local[slot];
+    if (reg)
+      load_local(jinfo, reg, slot, stackdepth);
+  }
+}
+
+// Leave compiled code through the out-of-line HANDLER (e.g.
+// H_DEADCODE, H_EXIT_TO_INTERPRETER).  Flushes the cached expression
+// stack and writes back all modified register-cached locals so the
+// handler sees a consistent interpreter frame; R0 carries the bci
+// encoded relative to the constMethod code base.
+void Thumb2_Exit(Thumb2_Info *jinfo, unsigned handler, unsigned bci, unsigned stackdepth)
+{
+    Thumb2_Flush(jinfo);
+    Thumb2_save_all_locals(jinfo, stackdepth);
+    mov_imm(jinfo->codebuf, ARM_R0, bci+CONSTMETHOD_CODEOFFSET);
+    bl(jinfo->codebuf, handlers[handler]);
+}
+
+// Compile a return bytecode (opc_return / opc_ireturn / opc_areturn /
+// opc_freturn / opc_lreturn / opc_dreturn).  Plants a return-point
+// safepoint check, releases the monitor for synchronized methods,
+// stores the result into the caller-visible stack slot, pops the
+// frame and returns to the caller.  Identical return sequences are
+// shared: the first one compiled is remembered and later returns just
+// branch to it.
+void Thumb2_Return(Thumb2_Info *jinfo, unsigned opcode, int bci, int stackdepth)
+{
+  // offset == 0 tells the safepoint code that this is a return
+  Thumb2_Safepoint(jinfo, stackdepth, bci, 0);
+
+  Reg r_lo, r;
+  Thumb2_Stack *jstack = jinfo->jstack;
+
+  // Methods containing explicit monitorenter/monitorexit bytecodes are
+  // handed back to the interpreter rather than compiled.
+  if (jinfo->method->has_monitor_bytecodes()) {
+    Thumb2_Exit(jinfo, H_EXIT_TO_INTERPRETER, bci, stackdepth);
+  }
+
+  if (jinfo->method->is_synchronized()) {
+    unsigned loc_success1, loc_success2, loc_failed, loc_retry, loc_exception;
+    unsigned loc_illegal_monitor_state;
+    Thumb2_Flush(jinfo);
+//    Thumb2_save_local_refs(jinfo);
+    // Free the monitor
+    //
+    // 		add	r1, #<stackdepth>-8
+    // 		ldr	r2, [r1, #4]
+    //		cbz	r2, throw_illegal_monitor_state
+    //		ldr	r0, [r1, #0]
+    //		mov	r3, #0
+    //		str	r3, [r1, #4]
+    //		cbz	r0, success
+    //	retry:
+    //		ldrex	r3, [r2, #0]
+    //		cmp	r1, r3
+    //		bne	failed
+    //		strex	r3, r0, [r2, #0]
+    //		cbz	r3, success
+    //		b	retry
+    //	failed:
+    //		str	r2, [r1, #4]
+    //		...
+    //  success:
+    //
+    // JAZ_V1 == tmp2
+    // JAZ_V2 == tmp1
+    // R1 = address of the monitor (BasicObjectLock) on the frame
+    add_imm(jinfo->codebuf, ARM_R1, ISTATE_REG(jinfo), ISTATE(jinfo, stackdepth) - frame::interpreter_frame_monitor_size()*wordSize);
+
+    ldr_imm(jinfo->codebuf, ARM_R2, ARM_R1, 4, 1, 0);
+    loc_illegal_monitor_state = forward_16(jinfo->codebuf);
+    ldr_imm(jinfo->codebuf, ARM_R0, ARM_R1, 0, 1, 0);
+    mov_imm(jinfo->codebuf, ARM_R3, 0);
+    str_imm(jinfo->codebuf, ARM_R3, ARM_R1, 4, 1, 0);
+    loc_success1 = forward_16(jinfo->codebuf);
+    loc_retry = out_loc(jinfo->codebuf);
+    // ldrex/strex retry loop: restore the displaced header iff the
+    // object header still points at our lock record.
+    ldrex_imm(jinfo->codebuf, ARM_R3, ARM_R2, 0);
+    cmp_reg(jinfo->codebuf, ARM_R1, ARM_R3);
+    loc_failed = forward_16(jinfo->codebuf);
+    strex_imm(jinfo->codebuf, ARM_R3, ARM_R0, ARM_R2, 0);
+    loc_success2 = forward_16(jinfo->codebuf);
+    branch_uncond(jinfo->codebuf, loc_retry);
+    bcc_patch(jinfo->codebuf, COND_NE, loc_failed);
+    // failed (contended): restore the lock word and take the slow path.
+    cbz_patch(jinfo->codebuf, ARM_R2, loc_illegal_monitor_state);
+    str_imm(jinfo->codebuf, ARM_R2, ARM_R1, 4, 1, 0);
+    mov_imm(jinfo->codebuf, ARM_R0, 0+CONSTMETHOD_CODEOFFSET);
+    bl(jinfo->codebuf, handlers[H_SYNCHRONIZED_EXIT]);
+    loc_exception = forward_16(jinfo->codebuf);
+    bl(jinfo->codebuf, handlers[H_HANDLE_EXCEPTION]);
+    cbz_patch(jinfo->codebuf, ARM_R0, loc_exception);
+    cbz_patch(jinfo->codebuf, ARM_R0, loc_success1);
+    cbz_patch(jinfo->codebuf, ARM_R3, loc_success2);
+  }
+
+  // Share a single compiled epilogue: void returns share one copy,
+  // one-word returns share one copy per result register.
+  if (opcode == opc_return) {
+    if (jinfo->compiled_return) {
+      unsigned ret_idx = jinfo->compiled_return;
+
+      branch_uncond(jinfo->codebuf, ret_idx);
+      return;
+    }
+    jinfo->compiled_return = jinfo->codebuf->idx * 2;
+  } else {
+    if (opcode == opc_lreturn || opcode == opc_dreturn) {
+      Thumb2_Fill(jinfo, 2);
+      r_lo = POP(jstack);
+      r = POP(jstack);
+    } else {
+      Thumb2_Fill(jinfo, 1);
+      r = POP(jstack);
+      if (jinfo->compiled_word_return[r]) {
+        unsigned ret_idx = jinfo->compiled_word_return[r];
+
+        branch_uncond(jinfo->codebuf, ret_idx);
+        return;
+      }
+      jinfo->compiled_word_return[r] = jinfo->codebuf->idx * 2;
+    }
+  }
+
+  // Pop the frame: clear last_Java_sp/fp, unlink the zero frame and
+  // store the result where the caller expects it.
+  mov_imm(jinfo->codebuf, ARM_LR, 0);
+  str_imm(jinfo->codebuf, ARM_LR, Rthread, THREAD_LAST_JAVA_SP, 1, 0);
+  str_imm(jinfo->codebuf, ARM_LR, Rthread, THREAD_LAST_JAVA_FP, 1, 0);
+  ldr_imm(jinfo->codebuf, Rstack, Rthread, THREAD_TOP_ZERO_FRAME, 1, 0);
+  ldr_imm(jinfo->codebuf, ARM_LR, Rstack, 0, 1, 0);
+
+  if (opcode == opc_return) {
+    add_imm(jinfo->codebuf, Rstack, Rstack, jinfo->method->max_locals() * sizeof(int) + 4);
+  } else {
+    if (opcode == opc_lreturn || opcode == opc_dreturn) {
+      str_imm(jinfo->codebuf, r, Rstack, jinfo->method->max_locals() * sizeof(int), 1, 0);
+      str_imm(jinfo->codebuf, r_lo, Rstack, jinfo->method->max_locals() * sizeof(int)-4, 1, 1);
+    } else {
+      str_imm(jinfo->codebuf, r, Rstack, jinfo->method->max_locals() * sizeof(int), 1, 1);
+    }
+  }
+
+  str_imm(jinfo->codebuf, ARM_LR, Rthread, THREAD_TOP_ZERO_FRAME, 1, 0);
+  str_imm(jinfo->codebuf, Rstack, Rthread, THREAD_JAVA_SP, 1, 0);
+
+  // deoptimized_frames = 0
+  // FIXME: This should be done in the slow entry, but only three
+  // words are allocated there for the instructions.
+  mov_imm(jinfo->codebuf, ARM_R0, 0);
+
+  ldm(jinfo->codebuf, C_REGSET + (1<<ARM_PC), ARM_SP, POP_FD, 1);
+}
+
+// Try to compile the method as a trivial accessor: the bytecode
+// pattern aload_0; getfield #n; ireturn/areturn.  Returns 1 on
+// success; returns 0 (compile normally) if the field is not yet
+// resolved in the constant pool cache.
+int Thumb2_Accessor(Thumb2_Info *jinfo)
+{
+  jubyte *code_base = jinfo->code_base;
+  constantPoolCacheOop  cp = jinfo->method->constants()->cache();
+  ConstantPoolCacheEntry* cache;
+  int index = GET_NATIVE_U2(code_base+2);
+  unsigned *bc_stackinfo = jinfo->bc_stackinfo;
+
+  JASSERT(code_base[0] == opc_aload_0 || code_base[0] == opc_iaccess_0, "not an aload_0 in accessor");
+  JASSERT(code_base[4] == opc_ireturn || code_base[4] == opc_areturn, "not an ireturn in accessor");
+  cache = cp->entry_at(index);
+  // Can only inline the field access once it has been resolved.
+  if (!cache->is_resolved((Bytecodes::Code)opc_getfield)) return 0;
+
+  TosState tos_type = cache->flag_state();
+  int field_offset = cache->f2();
+
+  // Slow entry point - callee save
+  // R0 = method
+  // R2 = thread
+  stm(jinfo->codebuf, (1<<Rthread) + (1<<ARM_LR), ARM_SP, PUSH_FD, 1);
+  mov_reg(jinfo->codebuf, Rthread, ARM_R2);
+  bl(jinfo->codebuf, out_pos(jinfo->codebuf) + FAST_ENTRY_OFFSET - 6);
+  ldm(jinfo->codebuf, (1<<Rthread) + (1<<ARM_PC), ARM_SP, POP_FD, 1);
+  out_16(jinfo->codebuf, 0);
+
+  // Fixed-layout method header words (same layout as Thumb2_Enter).
+  out_32(jinfo->codebuf, 0);	// pointer to osr table
+  out_32(jinfo->codebuf, 0);	// Space for exception_table pointer
+  out_32(jinfo->codebuf, 0);	// next compiled method
+
+  out_32(jinfo->codebuf, -1);    // regusage
+  out_32(jinfo->codebuf, -1);
+  out_32(jinfo->codebuf, -1);
+
+  out_align(jinfo->codebuf, CODE_ALIGN);
+
+  // fast entry point
+  bc_stackinfo[0] = (bc_stackinfo[0] & BC_FLAGS_MASK) | (jinfo->codebuf->idx * 2) | BC_COMPILED;
+  // Load the receiver from the top of the java stack and replace it
+  // in place with the field value, sign/zero extending as the field
+  // type requires.
+  ldr_imm(jinfo->codebuf, ARM_R1, Rthread, THREAD_JAVA_SP, 1, 0);
+  ldr_imm(jinfo->codebuf, ARM_R0, ARM_R1, 0, 1, 0);
+  if (tos_type == btos)
+    ldrsb_imm(jinfo->codebuf, ARM_R0, ARM_R0, field_offset, 1, 0);
+  else if (tos_type == ctos)
+    ldrh_imm(jinfo->codebuf, ARM_R0, ARM_R0, field_offset, 1, 0);
+  else if (tos_type == stos)
+    ldrsh_imm(jinfo->codebuf, ARM_R0, ARM_R0, field_offset, 1, 0);
+  else
+    ldr_imm(jinfo->codebuf, ARM_R0, ARM_R0, field_offset, 1, 0);
+  str_imm(jinfo->codebuf, ARM_R0, ARM_R1, 0, 1, 0);
+
+  if (cache->is_volatile())
+    fullBarrier(jinfo->codebuf);
+
+  // deoptimized_frames = 0
+  mov_imm(jinfo->codebuf, ARM_R0, 0);
+  mov_reg(jinfo->codebuf, ARM_PC, ARM_LR);
+
+  return 1;
+}
+
+// Java expression stack depth recorded in a bc_stackinfo word,
+// adjusted for the monitor slot that synchronized methods keep on the
+// frame.
+#define STACKDEPTH(jinfo, stackinfo) (((stackinfo) & ~BC_FLAGS_MASK) + \
+	((jinfo)->method->is_synchronized() ? frame::interpreter_frame_monitor_size() : 0))
+
+
+// Generate the method prologue: the slow (callee-save) entry, the
+// fixed method header words, the fast entry which builds the
+// interpreter-state frame, the monitor acquisition for synchronized
+// methods, and the initial load of register-cached locals.
+void Thumb2_Enter(Thumb2_Info *jinfo)
+{
+  int parms = jinfo->method->size_of_parameters();
+  int extra_locals = jinfo->method->max_locals() - parms;
+  unsigned *locals_info = jinfo->locals_info;
+  int i;
+  unsigned stackdepth = 0;
+
+  // Slow entry point - callee save
+  // R0 = method
+  // R2 = thread
+  stm(jinfo->codebuf, I_REGSET + (1<<ARM_LR), ARM_SP, PUSH_FD, 1);
+  mov_reg(jinfo->codebuf, Rthread, ARM_R2);
+  bl(jinfo->codebuf, out_pos(jinfo->codebuf) + FAST_ENTRY_OFFSET - 6);
+  ldm(jinfo->codebuf, I_REGSET + (1<<ARM_PC), ARM_SP, POP_FD, 1);
+  out_16(jinfo->codebuf, 0);
+
+  out_32(jinfo->codebuf, 0);	// Space for osr_table pointer
+  out_32(jinfo->codebuf, 0);	// Space for exception_table pointer
+  out_32(jinfo->codebuf, 0);	// Pointer to next method
+
+  out_32(jinfo->codebuf, 0);    // regusage
+  out_32(jinfo->codebuf, 0);
+  out_32(jinfo->codebuf, 0);
+
+  out_align(jinfo->codebuf, CODE_ALIGN);
+
+  // Fast entry point == Slow entry + 64 - caller save
+  // R0 = method
+  // R2 = thread
+  stm(jinfo->codebuf, C_REGSET + (1<<ARM_LR), ARM_SP, PUSH_FD, 1);
+  ldr_imm(jinfo->codebuf, Rstack, Rthread, THREAD_JAVA_SP, 1, 0);
+  {
+    // Java stack overflow check; leaf methods within LEAF_STACK_SIZE
+    // are covered by the pre-reserved slack and skip the check.
+    unsigned stacksize;
+
+    stacksize = (extra_locals + jinfo->method->max_stack()) * sizeof(int);
+    stacksize += FRAME_SIZE + STACK_SPARE;
+    if (!jinfo->is_leaf || stacksize > LEAF_STACK_SIZE) {
+      ldr_imm(jinfo->codebuf, ARM_R3, Rthread, THREAD_JAVA_STACK_BASE, 1, 0);
+      sub_imm(jinfo->codebuf, ARM_R1, Rstack, stacksize + LEAF_STACK_SIZE);
+      cmp_reg(jinfo->codebuf, ARM_R3, ARM_R1);
+      it(jinfo->codebuf, COND_CS, IT_MASK_T);
+      bl(jinfo->codebuf, handlers[H_STACK_OVERFLOW]);
+    }
+  }
+  mov_imm(jinfo->codebuf, ARM_R1, 0);
+
+  if (extra_locals > 0) {
+    sub_imm(jinfo->codebuf, Rstack, Rstack, extra_locals * 4);
+
+    // Zero-initialize the non-parameter locals that need it (refs,
+    // and slots with no recorded type usage).
+    for (i = 0; i < extra_locals; i++) {
+      unsigned linfo = locals_info[parms+i];
+      if (linfo & (1<< LOCAL_REF) || ((linfo >> LOCAL_INT) & 0x1f) == 0)
+	str_imm(jinfo->codebuf, ARM_R1, Rstack, (extra_locals-1 - i) * 4, 1, 0);
+    }
+  }
+
+  ldr_imm(jinfo->codebuf, ARM_IP, ARM_R0, METHOD_CONSTANTS, 1, 0);
+
+  // Rlocals points at local 0 (locals grow downwards from here).
+  add_imm(jinfo->codebuf, Rlocals, Rstack, (jinfo->method->max_locals()-1) * sizeof(int));
+
+  sub_imm(jinfo->codebuf, Rstack, Rstack, FRAME_SIZE);
+
+  // Populate the interpreter state (istate) fields in the new frame.
+  if (jinfo->use_istate) mov_reg(jinfo->codebuf, Ristate, Rstack);
+  store_istate(jinfo, Rstack, ISTATE_SELF_LINK, stackdepth);
+
+  store_istate(jinfo, Rstack, ISTATE_MONITOR_BASE, stackdepth);
+
+  store_istate(jinfo, Rlocals, ISTATE_LOCALS, stackdepth);
+
+  if (jinfo->method->is_synchronized()) {
+    // Reserve a monitor slot and stash the object to lock in it:
+    // the mirror for static methods, the receiver otherwise.
+    sub_imm(jinfo->codebuf, Rstack, Rstack, frame::interpreter_frame_monitor_size()*wordSize);
+    stackdepth = frame::interpreter_frame_monitor_size();
+    if (jinfo->method->is_static()) {
+      ldr_imm(jinfo->codebuf, ARM_R3, ARM_IP, CONSTANTPOOL_POOL_HOLDER, 1, 0);
+      ldr_imm(jinfo->codebuf, JAZ_V1, ARM_R3, KLASS_PART+KLASS_JAVA_MIRROR, 1, 0);
+    } else {
+      ldr_imm(jinfo->codebuf, JAZ_V1, Rlocals, 0, 1, 0);
+    }
+    str_imm(jinfo->codebuf, JAZ_V1, Rstack, 4, 1, 0);
+  }
+
+  store_istate(jinfo, ARM_R1, ISTATE_MSG, stackdepth);
+  store_istate(jinfo, ARM_R1, ISTATE_OOP_TEMP, stackdepth);
+
+  sub_imm(jinfo->codebuf, ARM_R3, Rstack, jinfo->method->max_stack() * sizeof(int));
+  str_imm(jinfo->codebuf, ARM_R3, Rthread, THREAD_JAVA_SP, 1, 0);
+
+  store_istate(jinfo, Rstack, ISTATE_STACK_BASE, stackdepth);
+
+  sub_imm(jinfo->codebuf, ARM_R3, ARM_R3, 4);
+  store_istate(jinfo, ARM_R3, ISTATE_STACK_LIMIT, stackdepth);
+
+  ldr_imm(jinfo->codebuf, ARM_R3, Rthread, THREAD_TOP_ZERO_FRAME, 1, 0);
+  store_istate(jinfo, ARM_R3, ISTATE_NEXT_FRAME, stackdepth);
+
+  mov_imm(jinfo->codebuf, ARM_R3, INTERPRETER_FRAME);
+  store_istate(jinfo, ARM_R3, ISTATE_FRAME_TYPE, stackdepth);
+
+  mov_imm(jinfo->codebuf, ARM_R1, 0);   // set last SP to zero before
+                                        // setting FP
+  str_imm(jinfo->codebuf, ARM_R1, Rthread, THREAD_LAST_JAVA_SP, 1, 0);
+  add_imm(jinfo->codebuf, ARM_R3, ISTATE_REG(jinfo), ISTATE(jinfo, stackdepth) + ISTATE_NEXT_FRAME);
+  str_imm(jinfo->codebuf, ARM_R3, Rthread, THREAD_TOP_ZERO_FRAME, 1, 0);
+  str_imm(jinfo->codebuf, ARM_R3, Rthread, THREAD_LAST_JAVA_FP, 1, 0);
+  ldr_imm(jinfo->codebuf, ARM_R3, Rthread, THREAD_JAVA_SP, 1, 0);
+  str_imm(jinfo->codebuf, ARM_R3, Rthread, THREAD_LAST_JAVA_SP, 1, 0);
+
+  ldr_imm(jinfo->codebuf, ARM_R3, ARM_IP, CONSTANTPOOL_CACHE, 1, 0);
+  store_istate(jinfo, ARM_R3, ISTATE_CONSTANTS, stackdepth);
+
+  store_istate(jinfo, Rthread, ISTATE_THREAD, stackdepth);
+  store_istate(jinfo, ARM_R0, ISTATE_METHOD, stackdepth);
+
+  if (jinfo->method->is_synchronized()) {
+    unsigned loc_retry, loc_failed, loc_success, loc_exception;
+
+    // JAZ_V1 == monitor object
+    //
+    // Try to acquire the monitor. Seems very sub-optimal
+    // 		ldr	r3, [JAZ_V1, #0]
+    // 		orr	r3, r3, #1
+    // 		str	r3, [Rstack, #0]
+    // 	retry:
+    // 		ldrex	r0, [JAZ_V1, #0]
+    // 		cmp	r3, r0
+    // 		bne	failed
+    // 		strex	r0, Rstack, [JAZ_V1, #0]
+    // 		cbz	r0, success
+    // 		b	retry
+    // 	failed:
+    // 		<failed - someone else has the monitor - must yield>
+    //  success:
+    // 		<success - acquired the monitor>
+    //
+    ldr_imm(jinfo->codebuf, ARM_R3, JAZ_V1, 0, 1, 0);
+    orr_imm(jinfo->codebuf, ARM_R3, ARM_R3, 1);
+    str_imm(jinfo->codebuf, ARM_R3, Rstack, 0, 1, 0);
+    loc_retry = out_loc(jinfo->codebuf);
+// retry:
+    ldrex_imm(jinfo->codebuf, ARM_R0, JAZ_V1, 0);
+    cmp_reg(jinfo->codebuf, ARM_R3, ARM_R0);
+    loc_failed = forward_16(jinfo->codebuf);
+    strex_imm(jinfo->codebuf, ARM_R0, Rstack, JAZ_V1, 0);
+    loc_success = forward_16(jinfo->codebuf);
+    branch_uncond(jinfo->codebuf, loc_retry);
+    bcc_patch(jinfo->codebuf, COND_NE, loc_failed);
+// failed:
+    mov_imm(jinfo->codebuf, ARM_R0, 0+CONSTMETHOD_CODEOFFSET);
+    bl(jinfo->codebuf, handlers[H_SYNCHRONIZED_ENTER]);
+    loc_exception = forward_16(jinfo->codebuf);
+    bl(jinfo->codebuf, handlers[H_HANDLE_EXCEPTION_NO_REGS]);
+    cbz_patch(jinfo->codebuf, ARM_R0, loc_exception);
+    cbz_patch(jinfo->codebuf, ARM_R0, loc_success);
+// success:
+
+  }
+
+  {
+    // Populate the register cache: parameters are loaded from the
+    // frame, other (zero-initialized) ref locals get the zero in R1.
+    int nlocals = jinfo->method->max_locals();
+
+    for (i = 0; i < nlocals; i++) {
+      Reg r = jinfo->jregs->r_local[i];
+      if (r) {
+	unsigned stackdepth = STACKDEPTH(jinfo, 0);
+        if (i < parms)
+	  load_local(jinfo, r, i, stackdepth);
+        else if (locals_info[i] & (1<<LOCAL_REF))
+          mov_reg(jinfo->codebuf, r, ARM_R1);
+      }
+    }
+  }
+}
+
+// Out-of-line handler index for each opcode in the contiguous range
+// opc_idiv .. opc_d2f; 0 means the operation is compiled inline.
+// Indexed through the OPCODE2HANDLER macro below.
+unsigned opcode2handler[] = {
+  H_IDIV,
+  H_LDIV,
+  0, 0,			// fdiv, ddiv
+  H_IREM,
+  H_LREM,
+  H_FREM,
+  H_DREM,
+  0, 0, 0, 0,		// ineg, lneg, fneg, dneg
+  0, 0, 0, 0, 0, 0,	// shifts
+  0, 0, 0, 0, 0, 0,	// and, or, xor
+  0,			// iinc
+  0,			// i2l
+  H_I2F,
+  H_I2D,
+  0,			// l2i
+  H_L2F,
+  H_L2D,
+  H_F2I,
+  H_F2L,
+  H_F2D,
+  H_D2I,
+  H_D2L,
+  H_D2F,
+};
+
+// Generate code for a load of a jlong.  Volatile loads use LDREXD,
+// which performs the 64-bit access atomically; non-volatile loads use
+// a plain LDRD.
+
+void Thumb2_load_long(Thumb2_Info *jinfo, Reg r_lo, Reg r_hi, Reg base,
+		      int field_offset,
+		      bool is_volatile = false)
+{
+  CodeBuf *codebuf = jinfo->codebuf;
+  if (is_volatile) {
+    Reg r_addr = base;
+    if (field_offset) {
+      // LDREXD takes no offset, so form the address in a scratch reg.
+      r_addr = Thumb2_Tmp(jinfo, (1<<r_lo) | (1<<r_hi) | (1<<base));
+      add_imm(jinfo->codebuf, r_addr, base, field_offset);
+    }
+    ldrexd(codebuf, r_lo, r_hi, r_addr);
+  } else {
+    ldrd_imm(codebuf, r_lo, r_hi, base, field_offset, 1, 0);
+  }
+}
+
+// Generate code for a store of a jlong.  If the operand is volatile,
+// generate a sequence of the form
+//
+// .Ldst
+// 	ldrexd 	r2, r3, [dst]
+// 	strexd 	r2, r0, r1, [dst]
+// 	cmp 	r2, #0
+// 	bne 	.Ldst
+//
+// i.e. an exclusive-access retry loop, which makes the 64-bit store
+// atomic.
+
+void Thumb2_store_long(Thumb2_Info *jinfo, Reg r_lo, Reg r_hi, Reg base,
+		      int field_offset,
+		      bool is_volatile = false)
+{
+  CodeBuf *codebuf = jinfo->codebuf;
+  if (is_volatile) {
+    Reg r_addr = base;
+    Reg tmp1 = Thumb2_Tmp(jinfo, (1<<r_lo) | (1<<r_hi) | (1<<base));
+    Reg tmp2 = Thumb2_Tmp(jinfo, (1<<r_lo) | (1<<r_hi) | (1<<base) | (1<<tmp1));
+    if (field_offset) {
+      // STREXD takes no offset, so form the address in a scratch reg.
+      r_addr = Thumb2_Tmp(jinfo, (1<<r_lo) | (1<<r_hi) | (1<<base) | (1<<tmp1) | (1<<tmp2));
+      add_imm(jinfo->codebuf, r_addr, base, field_offset);
+    }
+    int loc = out_loc(codebuf);
+    ldrexd(codebuf, tmp1, tmp2, r_addr);
+    strexd(codebuf, tmp1, r_lo, r_hi, r_addr);
+    cmp_imm(codebuf, tmp1, 0);
+    branch(codebuf, COND_NE, loc);
+  } else {
+    strd_imm(codebuf, r_lo, r_hi, base, field_offset, 1, 0);
+  }
+}
+
+// Look up the out-of-line handler for an opcode in opc_idiv..opc_d2f.
+#define OPCODE2HANDLER(opc) (handlers[opcode2handler[(opc)-opc_idiv]])
+
+// Mangled name of InterpreterRuntime::register_finalizer(JavaThread*, oopDesc*).
+extern "C" void _ZN18InterpreterRuntime18register_finalizerEP10JavaThreadP7oopDesc(void);
+
+// Push the double in VFP_REG to the java stack: allocate high word
+// first, then low word, and move the VFP pair into those ARM regs.
+static void vfp_to_jstack(Thumb2_Info *jinfo, int vfp_reg) {
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned r_lo, r_hi;
+  r_hi = PUSH(jstack, JSTACK_REG(jstack));
+  r_lo = PUSH(jstack, JSTACK_REG(jstack));
+  vmov_reg_d_toARM(jinfo->codebuf, r_lo, r_hi, vfp_reg);
+}
+
+// Pop a two-word (double) value from the java stack into VFP_REG,
+// then flush the remaining cached stack state to memory.
+static void jstack_to_vfp(Thumb2_Info *jinfo, int vfp_reg) {
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned r_lo, r_hi;
+  Thumb2_Fill(jinfo, 2);
+  r_lo = POP(jstack);
+  r_hi = POP(jstack);
+  vmov_reg_d_toVFP(jinfo->codebuf, vfp_reg, r_lo, r_hi);
+  Thumb2_Flush(jinfo);
+}
+
+// Expand a call to a "special" method.  These are usually inlines of
+// java.lang.Math methods.  Return true if the inlining succeeded.
+// Unhandled intrinsics fall through to the default case and return
+// false, in which case the call is compiled normally.
+static bool handle_special_method(methodOop callee, Thumb2_Info *jinfo,
+				  unsigned stackdepth) {
+  Thumb2_Stack *jstack = jinfo->jstack;
+  CodeBuf *codebuf = jinfo->codebuf;
+
+  const char *entry_name;
+
+  switch (callee->intrinsic_id()) {
+  case vmIntrinsics::_dabs:
+   {
+     Thumb2_dAbs(jinfo);
+     return true;
+    }
+
+#ifdef __ARM_PCS_VFP
+  // The StrictMath cases below set entry_name and share the common
+  // call sequence after the switch.
+  case vmIntrinsics::_dsin:
+    entry_name = "Java_java_lang_StrictMath_sin";
+    break;
+
+  case vmIntrinsics::_dcos:
+    entry_name = "Java_java_lang_StrictMath_cos";
+    break;
+
+  case vmIntrinsics::_dtan:
+    entry_name = "Java_java_lang_StrictMath_tan";
+    break;
+
+  case vmIntrinsics::_dsqrt:
+    {
+      void *entry_point = dlsym(NULL, "Java_java_lang_StrictMath_sqrt");
+      if (! entry_point)
+	return false;
+
+      unsigned r_lo, r_hi, r_res_lo, r_res_hi;
+
+      // Make sure that canonical NaNs are returned, as per the spec.
+      //
+      // Generate:
+      // vsqrt.f64 d0, d1
+      // vcmp.f64 d0, d0
+      // vmrs APSR_nzcv, fpscr
+      // beq.n 0f
+      // vmov.f64 d0, d1
+      // blx Java_java_lang_StrictMath_sqrt
+      // 0:
+      jstack_to_vfp(jinfo, VFP_D1);
+      vop_reg_d(jinfo->codebuf, VP_SQRT, VFP_D0, 0, VFP_D1);
+      vcmp_reg_d(jinfo->codebuf, VFP_D0, VFP_D0, 0);
+      vmrs(jinfo->codebuf, ARM_PC);
+      int loc = forward_16(jinfo->codebuf);
+      vmov_reg_d_VFP_to_VFP(jinfo->codebuf, VFP_D0, VFP_D1);
+      // FIXME: The JNI StrictMath routines don't use the JNIEnv *env
+      // parameter, so it's arguably pointless to pass it here.
+      add_imm(jinfo->codebuf, ARM_R0, Rthread, THREAD_JNI_ENVIRONMENT);
+      mov_imm(jinfo->codebuf, ARM_IP, (unsigned)entry_point);
+      blx_reg(jinfo->codebuf, ARM_IP);
+      bcc_patch(jinfo->codebuf, COND_EQ, loc);
+      vfp_to_jstack(jinfo, VFP_D0);
+
+      return true;
+    }
+
+  case vmIntrinsics::_dlog:
+    entry_name = "Java_java_lang_StrictMath_log";
+    break;
+
+  case vmIntrinsics::_dlog10:
+    entry_name = "Java_java_lang_StrictMath_log10";
+    break;
+#endif // __ARM_PCS_VFP
+
+  case vmIntrinsics::_compareAndSwapInt:
+   {
+      Thumb2_Fill(jinfo, 4);
+
+      unsigned update = POP(jstack);
+      unsigned expect = POP(jstack);
+      unsigned offset = POP(jstack);
+      POP(jstack);  // Actually the high part of the offset
+
+      // unsigned object = POP(jstack);
+      // unsigned unsafe = POP(jstack);  // Initially an instance of java.lang.Unsafe
+
+      Thumb2_Flush(jinfo);
+      // Get ourself a result reg that's not one of the inputs
+      unsigned exclude = (1<<update)|(1<<expect)|(1<<offset);
+      unsigned result = JSTACK_PREFER(jstack, ~exclude);
+
+      ldm(codebuf, (1<<ARM_IP)|(1<<ARM_LR), Rstack, POP_FD, 1); // Object addr
+      add_reg(codebuf, result, offset, ARM_IP); // result now points to word
+      ldr_imm(codebuf, ARM_LR, ARM_LR, 0, 0, 0);  // Security check
+
+      fullBarrier(codebuf);
+
+      // ldrex/strex compare-and-swap retry loop; the strex repeats
+      // until the exclusive store succeeds or the comparison fails.
+      int retry = out_loc(codebuf);
+      ldrex_imm(codebuf, ARM_LR, result, 0);
+      cmp_reg(codebuf, ARM_LR, expect);
+      int loc_failed = forward_16(codebuf);
+      strex_imm(codebuf, ARM_IP, update, result, 0);
+      cmp_imm(codebuf, ARM_IP, 0);
+      branch(codebuf, COND_NE, retry);
+      bcc_patch(jinfo->codebuf, COND_NE, loc_failed);
+
+      // result = 0 on failure (NE), 1 + barrier on success.
+      it(codebuf, COND_NE, IT_MASK_TEE);
+      mov_imm(codebuf, result, 0);
+      mov_imm(codebuf, result, 1);
+      fullBarrier(codebuf);
+
+      PUSH(jstack, result);
+    }
+    return true;
+
+  case vmIntrinsics::_compareAndSwapLong:
+    {
+      Thumb2_Fill(jinfo, 4);
+
+      unsigned update_lo = POP(jstack);
+      unsigned update_hi = POP(jstack);
+      unsigned expect_lo = POP(jstack);
+      unsigned expect_hi = POP(jstack);
+
+      Thumb2_Flush(jinfo);
+      // JAZ_V1..V3 are clobbered below, so spill the locals cache.
+      Thumb2_save_all_locals(jinfo, stackdepth - 4); // 4 args popped above
+
+      // instance of java.lang.Unsafe:
+      ldr_imm(jinfo->codebuf, ARM_LR, Rstack, 3 * wordSize, 1, 0);
+      ldr_imm(codebuf, ARM_LR, ARM_LR, 0, 0, 0);  // Security check
+
+      // Object:
+      ldr_imm(jinfo->codebuf, ARM_LR, Rstack, 2 * wordSize, 1, 0);
+      // Offset:
+      ldr_imm(jinfo->codebuf, ARM_IP, Rstack, 0 * wordSize, 1, 0);
+      add_reg(codebuf, ARM_LR, ARM_LR, ARM_IP); // ARM_LR now points to word
+
+      fullBarrier(codebuf);
+
+      // 64-bit ldrexd/strexd compare-and-swap retry loop.
+      int retry = out_loc(codebuf);
+      ldrexd(codebuf, JAZ_V2, JAZ_V3, ARM_LR);
+      cmp_reg(codebuf, JAZ_V2, expect_lo);
+      it(jinfo->codebuf, COND_EQ, IT_MASK_T);
+      cmp_reg(codebuf, JAZ_V3, expect_hi);
+
+      int loc_failed = forward_16(codebuf);
+      strexd(codebuf, JAZ_V1, update_lo, update_hi, ARM_LR);
+      cmp_imm(codebuf, JAZ_V1, 0);
+      branch(codebuf, COND_NE, retry);
+      bcc_patch(jinfo->codebuf, COND_NE, loc_failed);
+
+      unsigned result = JSTACK_REG(jinfo->jstack);
+
+      // result = 0 on failure (NE), 1 + barrier on success.
+      it(codebuf, COND_NE, IT_MASK_TEE);
+      mov_imm(codebuf, result, 0);
+      mov_imm(codebuf, result, 1);
+      fullBarrier(codebuf);
+
+      Thumb2_restore_all_locals(jinfo, stackdepth - 4); // 4 args popped above
+      add_imm(codebuf, Rstack, Rstack, 4 * wordSize);
+      PUSH(jstack, result);
+    }
+    return true;
+
+  default:
+    return false;
+  }
+
+  // Common path for the StrictMath entries selected above: look up the
+  // JNI implementation and call it with the argument in D0.
+  void *entry_point = dlsym(NULL, entry_name);
+  if (! entry_point)
+    return false;
+
+  jstack_to_vfp(jinfo, VFP_D0);
+  // FIXME: The JNI StrictMath routines don't use the JNIEnv *env
+  // parameter, so it's arguably pointless to pass it here.
+  add_imm(jinfo->codebuf, ARM_R0, Rthread, THREAD_JNI_ENVIRONMENT);
+  mov_imm(jinfo->codebuf, ARM_IP, (unsigned)entry_point);
+  blx_reg(jinfo->codebuf, ARM_IP);
+  vfp_to_jstack(jinfo, VFP_D0);
+
+  return true;
+}
+
+void Thumb2_codegen(Thumb2_Info *jinfo, unsigned start)
+{
+  unsigned code_size = jinfo->code_size;
+  jubyte *code_base = jinfo->code_base;
+  unsigned *bc_stackinfo = jinfo->bc_stackinfo;
+  CodeBuf *codebuf = jinfo->codebuf;
+  Thumb2_Stack *jstack = jinfo->jstack;
+  unsigned bci;
+  unsigned opcode;
+  unsigned stackinfo;
+  int len;
+  unsigned stackdepth;
+
+  for (bci = start; bci < code_size; ) {
+    opcode = code_base[bci];
+    stackinfo = bc_stackinfo[bci];
+#ifdef T2_PRINT_DISASS
+    unsigned start_idx;
+#endif
+
+    if (stackinfo & BC_BRANCH_TARGET) Thumb2_Flush(jinfo);
+
+    if (!OSPACE && (stackinfo & BC_BACK_TARGET)) {
+      if (out_pos(codebuf) & 0x02) nop_16(codebuf);
+      if (out_pos(codebuf) & 0x04) nop_32(codebuf);
+    }
+
+#ifdef T2_PRINT_DISASS
+    start_idx = jinfo->codebuf->idx;
+    if (start_bci[start_idx] == -1) start_bci[start_idx] = bci;
+#endif
+
+    JASSERT(!(stackinfo & BC_COMPILED), "code already compiled for this bytecode?");
+    stackdepth = STACKDEPTH(jinfo, stackinfo); // Stackdepth here is adjusted for monitors
+    bc_stackinfo[bci] = (stackinfo & BC_FLAGS_MASK) | (codebuf->idx * 2) | BC_COMPILED;
+
+    if (opcode > OPC_LAST_JAVA_OP)
+      switch (opcode) {
+      default:
+	if (Bytecodes::is_defined((Bytecodes::Code)opcode))
+	  opcode = (unsigned)Bytecodes::java_code((Bytecodes::Code)opcode);
+	break;
+      case opc_return_register_finalizer:
+      case opc_fast_aldc_w:
+      case opc_fast_aldc:
+	break;
+      }
+
+    len = Bytecodes::length_for((Bytecodes::Code)opcode);
+    if (len <= 0) {
+      Bytecodes::Code code = Bytecodes::code_at(NULL, (address)(code_base+bci));
+      len = (Bytecodes::special_length_at
+	     (code,
+	      (address)(code_base+bci), (address)(code_base+code_size)));
+    }
+
+    if (IS_DEAD(stackinfo)) {
+      unsigned zlen = 0;
+#ifdef T2_PRINT_DISASS
+      unsigned start_bci = bci;
+#endif
+
+      Thumb2_Exit(jinfo, H_DEADCODE, bci, stackdepth);
+      do {
+	zlen += len;
+	bci += len;
+	if (bci >= code_size) break;
+	opcode = code_base[bci];
+	stackinfo = bc_stackinfo[bci];
+
+	if (stackinfo & BC_BRANCH_TARGET) break;
+	if (!IS_DEAD(stackinfo)) break;
+
+	bc_stackinfo[bci] = (stackinfo & BC_FLAGS_MASK) | (codebuf->idx * 2);
+
+	if (opcode > OPC_LAST_JAVA_OP) {
+	  if (Bytecodes::is_defined((Bytecodes::Code)opcode))
+	    opcode = (unsigned)Bytecodes::java_code((Bytecodes::Code)opcode);
+	}
+
+	len = Bytecodes::length_for((Bytecodes::Code)opcode);
+	if (len <= 0) {
+	  Bytecodes::Code code = Bytecodes::code_at(NULL, (address)(code_base+bci));
+	  len = (Bytecodes::special_length_at
+		 (code,
+		  (address)(code_base+bci), (address)(code_base+code_size)));
+	}
+
+      } while (1);
+#ifdef T2_PRINT_DISASS
+      end_bci[start_idx] = start_bci + zlen;
+#endif
+      continue;
+    }
+
+#if 0
+    if (bci >= 4) {
+      unsigned zlen = 0;
+#ifdef T2_PRINT_DISASS
+      unsigned start_bci = bci;
+#endif
+
+      Thumb2_Exit(jinfo, H_DEADCODE, bci, stackdepth);
+      do {
+	zlen += len;
+	bci += len;
+	if (bci >= code_size) break;
+	opcode = code_base[bci];
+	stackinfo = bc_stackinfo[bci];
+
+	if (stackinfo & BC_BRANCH_TARGET) break;
+
+	bc_stackinfo[bci] = (stackinfo & BC_FLAGS_MASK) | (codebuf->idx * 2);
+
+	if (opcode > OPC_LAST_JAVA_OP) {
+	  if (Bytecodes::is_defined((Bytecodes::Code)opcode))
+	    opcode = (unsigned)Bytecodes::java_code((Bytecodes::Code)opcode);
+	}
+
+	len = Bytecodes::length_for((Bytecodes::Code)opcode);
+	if (len <= 0) {
+	  Bytecodes::Code code = Bytecodes::code_at(NULL, (address)(code_base+bci));
+	  len = (Bytecodes::special_length_at
+		 (code,
+		  (address)(code_base+bci), (address)(code_base+code_size)));
+	}
+
+      } while (1);
+#ifdef T2_PRINT_DISASS
+      end_bci[start_idx] = start_bci + zlen;
+#endif
+      continue;
+    }
+#endif
+
+#ifdef T2_PRINT_DISASS
+    end_bci[start_idx] = bci + len;
+#endif
+
+#ifdef THUMB2_JVMTI
+    // emit a start address --> bci map entry before
+    // generating machine code for this bytecode
+
+    void *addr = (void *)(codebuf->codebuf + codebuf->idx);
+    address_bci_map_add(addr, bci);
+#endif //THUMB2_JVMTI
+
+    switch (opcode) {
+      case opc_nop:
+	break;
+      case opc_aconst_null:
+	len += Thumb2_Imm(jinfo, 0, bci+1);
+	break;
+      case opc_iconst_m1:
+      case opc_iconst_0:
+      case opc_iconst_1:
+      case opc_iconst_2:
+      case opc_iconst_3:
+      case opc_iconst_4:
+      case opc_iconst_5:
+	len += Thumb2_Imm(jinfo, opcode - (unsigned)opc_iconst_0, bci+1);
+	break;
+      case opc_lconst_0:
+      case opc_lconst_1:
+	Thumb2_ImmX2(jinfo, opcode - (unsigned)opc_lconst_0, 0);
+	break;
+      case opc_fconst_0:
+      case opc_fconst_1:
+      case opc_fconst_2: {
+	unsigned v = 0;
+	if (opcode == (unsigned)opc_fconst_1) v = 0x3f800000;
+	if (opcode == (unsigned)opc_fconst_2) v = 0x40000000;
+	len += Thumb2_Imm(jinfo, v, bci+1);
+	break;
+      }
+      case opc_dconst_0:
+      case opc_dconst_1: {
+	unsigned v_hi = 0;
+	if (opcode == (unsigned)opc_dconst_1) v_hi = 0x3ff00000;
+	Thumb2_ImmX2(jinfo, 0, v_hi);
+	break;
+      }
+      case opc_bipush:
+	len += Thumb2_Imm(jinfo, GET_JAVA_S1(code_base+bci+1), bci+2);
+	break;
+      case opc_sipush:
+	len += Thumb2_Imm(jinfo, GET_JAVA_S2(code_base+bci+1), bci+3);
+	break;
+      case opc_ldc:
+      case opc_ldc_w:
+      case opc_ldc2_w: {
+	unsigned index = (opcode == (unsigned)opc_ldc) ?
+				code_base[bci+1] : GET_JAVA_U2(code_base+bci+1);
+	constantPoolOop constants = jinfo->method->constants();
+	unsigned v;
+
+	switch (v = constants->tag_at(index).value()) {
+	  case JVM_CONSTANT_Integer:
+	  case JVM_CONSTANT_Float:
+	    v = (unsigned)constants->int_at(index);
+	    len += Thumb2_Imm(jinfo, v, bci+len);
+	    break;
+	  case JVM_CONSTANT_Long:
+	  case JVM_CONSTANT_Double: {
+	    unsigned long long v;
+	    v = constants->long_at(index);
+	    Thumb2_ImmX2(jinfo, v & 0xffffffff, v >> 32);
+	    break;
+	  }
+	  case JVM_CONSTANT_Class:
+	  case JVM_CONSTANT_String: {
+	    Reg r;
+	    Thumb2_Spill(jinfo, 1, 0);
+	    r = JSTACK_REG(jstack);
+	    PUSH(jstack, r);
+	    load_istate(jinfo, r, ISTATE_METHOD, stackdepth+1);
+	    ldr_imm(jinfo->codebuf, r, r, METHOD_CONSTANTS, 1, 0);
+	    ldr_imm(jinfo->codebuf, r, r, CONSTANTPOOL_BASE + (index << 2), 1, 0);
+	    if (v == JVM_CONSTANT_Class)
+	      ldr_imm(jinfo->codebuf, r, r, KLASS_PART+KLASS_JAVA_MIRROR, 1, 0);
+	    break;
+	  }
+	  default:
+	    unsigned loc;
+
+	    JASSERT(opcode != opc_ldc2_w, "ldc2_w unresolved?");
+	    Thumb2_Flush(jinfo);
+	    mov_imm(jinfo->codebuf, ARM_R0, bci+CONSTMETHOD_CODEOFFSET);
+	  Thumb2_save_local_refs(jinfo, stackdepth);
+//	    mov_imm(jinfo->codebuf, ARM_R1, opcode != opc_ldc);
+	    bl(jinfo->codebuf, handlers[opcode == opc_ldc ? H_LDC : H_LDC_W]);
+	  Thumb2_restore_local_refs(jinfo, stackdepth);
+	    ldr_imm(jinfo->codebuf, ARM_R0, Rthread, THREAD_VM_RESULT, 1, 0);
+	    mov_imm(jinfo->codebuf, ARM_R2, 0);
+	    str_imm(jinfo->codebuf, ARM_R2, Rthread, THREAD_VM_RESULT, 1, 0);
+	    loc = forward_16(jinfo->codebuf);
+	    bl(jinfo->codebuf, handlers[H_HANDLE_EXCEPTION]);
+	    cbnz_patch(jinfo->codebuf, ARM_R0, loc);
+	    PUSH(jstack, ARM_R0);
+	    break;
+	}
+	break;
+      }
+
+      case opc_iload:
+      case opc_fload:
+      case opc_aload:
+	Thumb2_Load(jinfo, code_base[bci+1], stackdepth);
+	break;
+      case opc_lload:
+      case opc_dload:
+	Thumb2_LoadX2(jinfo, code_base[bci+1], stackdepth);
+	break;
+      case opc_iload_0:
+      case opc_iload_1:
+      case opc_iload_2:
+      case opc_iload_3:
+      case opc_fload_0:
+      case opc_fload_1:
+      case opc_fload_2:
+      case opc_fload_3:
+      case opc_aload_0:
+      case opc_aload_1:
+      case opc_aload_2:
+      case opc_aload_3:
+	Thumb2_Load(jinfo, (opcode - opc_iload_0) & 3, stackdepth);
+	break;
+      case opc_lload_0:
+      case opc_lload_1:
+      case opc_lload_2:
+      case opc_lload_3:
+      case opc_dload_0:
+      case opc_dload_1:
+      case opc_dload_2:
+      case opc_dload_3:
+	Thumb2_LoadX2(jinfo, (opcode - opc_iload_0) & 3, stackdepth);
+	break;
+      case opc_iaload:
+      case opc_faload:
+      case opc_aaload:
+      case opc_baload:
+      case opc_caload:
+      case opc_saload:
+	Thumb2_Xaload(jinfo, opcode);
+	break;
+      case opc_laload:
+      case opc_daload:
+	Thumb2_X2aload(jinfo);
+	break;
+      case opc_istore:
+      case opc_fstore:
+      case opc_astore:
+	Thumb2_Store(jinfo, code_base[bci+1], stackdepth);
+	break;
+      case opc_lstore:
+      case opc_dstore:
+	Thumb2_StoreX2(jinfo, code_base[bci+1], stackdepth);
+	break;
+      case opc_istore_0:
+      case opc_istore_1:
+      case opc_istore_2:
+      case opc_istore_3:
+      case opc_fstore_0:
+      case opc_fstore_1:
+      case opc_fstore_2:
+      case opc_fstore_3:
+      case opc_astore_0:
+      case opc_astore_1:
+      case opc_astore_2:
+      case opc_astore_3:
+	Thumb2_Store(jinfo, (opcode - opc_istore_0) & 3, stackdepth);
+	break;
+      case opc_lstore_0:
+      case opc_lstore_1:
+      case opc_lstore_2:
+      case opc_lstore_3:
+      case opc_dstore_0:
+      case opc_dstore_1:
+      case opc_dstore_2:
+      case opc_dstore_3:
+	Thumb2_StoreX2(jinfo, (opcode - opc_istore_0) & 3, stackdepth);
+	break;
+      case opc_iastore:
+      case opc_fastore:
+      case opc_bastore:
+      case opc_castore:
+      case opc_sastore:
+	Thumb2_Xastore(jinfo, opcode);
+	break;
+      case opc_lastore:
+      case opc_dastore:
+	Thumb2_X2astore(jinfo);
+	break;
+
+      case opc_pop:
+      case opc_pop2:
+	Thumb2_Pop(jinfo, opcode - opc_pop + 1);
+	break;
+
+      case opc_dup:
+      case opc_dup_x1:
+      case opc_dup_x2:
+	Thumb2_Dup(jinfo, opcode - opc_dup);
+	break;
+
+      case opc_dup2:
+      case opc_dup2_x1:
+      case opc_dup2_x2:
+	Thumb2_Dup2(jinfo, opcode - opc_dup2);
+	break;
+
+      case opc_swap:
+	Thumb2_Swap(jinfo);
+	break;
+
+      case opc_iadd:
+      case opc_isub:
+      case opc_imul:
+      case opc_ishl:
+      case opc_ishr:
+      case opc_iushr:
+      case opc_iand:
+      case opc_ior:
+      case opc_ixor:
+	Thumb2_iOp(jinfo, opcode);
+	break;
+
+      case opc_ladd:
+      case opc_lsub:
+      case opc_land:
+      case opc_lor:
+      case opc_lxor:
+	Thumb2_lOp(jinfo, opcode);
+	break;
+
+      case opc_lshl: {
+	Reg lho_lo, lho_hi, res_lo, res_hi, shift;
+	unsigned loc1, loc2;
+
+	Thumb2_Fill(jinfo, 3);
+	shift = POP(jstack);
+	lho_lo = POP(jstack);
+	lho_hi = POP(jstack);
+	Thumb2_Spill(jinfo, 2, (1<<lho_lo)|(1<<lho_hi));
+	res_hi = PUSH(jstack, JSTACK_PREFER(jstack, ~((1<<lho_lo)|(1<<lho_hi))));
+	res_lo = PUSH(jstack, JSTACK_PREFER(jstack, ~((1<<lho_lo)|(1<<lho_hi))));
+	JASSERT(res_lo != lho_lo && res_lo != lho_hi, "Spill failed");
+	JASSERT(res_hi != lho_lo && res_hi != lho_hi, "Spill failed");
+	and_imm(jinfo->codebuf, ARM_IP, shift, 31);
+	tst_imm(jinfo->codebuf, shift, 32);
+	loc1 = forward_16(jinfo->codebuf);
+	mov_imm(jinfo->codebuf, res_lo, 0);
+	dop_reg(jinfo->codebuf, DP_LSL, res_hi, lho_lo, ARM_IP, SHIFT_LSL, 0);
+	loc2 = forward_16(jinfo->codebuf);
+	bcc_patch(jinfo->codebuf, COND_EQ, loc1);
+	dop_reg(jinfo->codebuf, DP_LSL, res_lo, lho_lo, ARM_IP, SHIFT_LSL, 0);
+	dop_reg(jinfo->codebuf, DP_LSL, res_hi, lho_hi, ARM_IP, SHIFT_LSL, 0);
+	rsb_imm(jinfo->codebuf, ARM_IP, ARM_IP, 32);
+	dop_reg(jinfo->codebuf, DP_LSR, ARM_IP, lho_lo, ARM_IP, SHIFT_LSL, 0);
+	dop_reg(jinfo->codebuf, DP_ORR, res_hi, res_hi, ARM_IP, SHIFT_LSL, 0);
+	branch_narrow_patch(jinfo->codebuf, loc2);
+	break;
+      }
+
+      case opc_lushr: {
+	Reg lho_lo, lho_hi, res_lo, res_hi, shift;
+	unsigned loc1, loc2;
+
+	Thumb2_Fill(jinfo, 3);
+	shift = POP(jstack);
+	lho_lo = POP(jstack);
+	lho_hi = POP(jstack);
+	Thumb2_Spill(jinfo, 2, (1<<lho_lo)|(1<<lho_hi));
+	res_hi = PUSH(jstack, JSTACK_PREFER(jstack, ~((1<<lho_lo)|(1<<lho_hi))));
+	res_lo = PUSH(jstack, JSTACK_PREFER(jstack, ~((1<<lho_lo)|(1<<lho_hi))));
+	JASSERT(res_lo != lho_lo && res_lo != lho_hi, "Spill failed");
+	JASSERT(res_hi != lho_lo && res_hi != lho_hi, "Spill failed");
+	and_imm(jinfo->codebuf, ARM_IP, shift, 31);
+	tst_imm(jinfo->codebuf, shift, 32);
+	loc1 = forward_16(jinfo->codebuf);
+	mov_imm(jinfo->codebuf, res_hi, 0);
+	dop_reg(jinfo->codebuf, DP_LSR, res_lo, lho_hi, ARM_IP, SHIFT_LSL, 0);
+	loc2 = forward_16(jinfo->codebuf);
+	bcc_patch(jinfo->codebuf, COND_EQ, loc1);
+	dop_reg(jinfo->codebuf, DP_LSR, res_hi, lho_hi, ARM_IP, SHIFT_LSL, 0);
+	dop_reg(jinfo->codebuf, DP_LSR, res_lo, lho_lo, ARM_IP, SHIFT_LSL, 0);
+	rsb_imm(jinfo->codebuf, ARM_IP, ARM_IP, 32);
+	dop_reg(jinfo->codebuf, DP_LSL, ARM_IP, lho_hi, ARM_IP, SHIFT_LSL, 0);
+	dop_reg(jinfo->codebuf, DP_ORR, res_lo, res_lo, ARM_IP, SHIFT_LSL, 0);
+	branch_narrow_patch(jinfo->codebuf, loc2);
+	break;
+      }
+
+      case opc_lshr: {
+	Reg lho_lo, lho_hi, res_lo, res_hi, shift;
+	unsigned loc1, loc2;
+
+	Thumb2_Fill(jinfo, 3);
+	shift = POP(jstack);
+	lho_lo = POP(jstack);
+	lho_hi = POP(jstack);
+	Thumb2_Spill(jinfo, 2, (1<<lho_lo)|(1<<lho_hi));
+	res_hi = PUSH(jstack, JSTACK_PREFER(jstack, ~((1<<lho_lo)|(1<<lho_hi))));
+	res_lo = PUSH(jstack, JSTACK_PREFER(jstack, ~((1<<lho_lo)|(1<<lho_hi))));
+	JASSERT(res_lo != lho_lo && res_lo != lho_hi, "Spill failed");
+	JASSERT(res_hi != lho_lo && res_hi != lho_hi, "Spill failed");
+	and_imm(jinfo->codebuf, ARM_IP, shift, 31);
+	tst_imm(jinfo->codebuf, shift, 32);
+	loc1 = forward_16(jinfo->codebuf);
+	asr_imm(jinfo->codebuf, res_hi, lho_hi, 31);
+	dop_reg(jinfo->codebuf, DP_ASR, res_lo, lho_hi, ARM_IP, SHIFT_LSL, 0);
+	loc2 = forward_16(jinfo->codebuf);
+	bcc_patch(jinfo->codebuf, COND_EQ, loc1);
+	dop_reg(jinfo->codebuf, DP_ASR, res_hi, lho_hi, ARM_IP, SHIFT_LSL, 0);
+	dop_reg(jinfo->codebuf, DP_LSR, res_lo, lho_lo, ARM_IP, SHIFT_LSL, 0);
+	rsb_imm(jinfo->codebuf, ARM_IP, ARM_IP, 32);
+	dop_reg(jinfo->codebuf, DP_LSL, ARM_IP, lho_hi, ARM_IP, SHIFT_LSL, 0);
+	dop_reg(jinfo->codebuf, DP_ORR, res_lo, res_lo, ARM_IP, SHIFT_LSL, 0);
+	branch_narrow_patch(jinfo->codebuf, loc2);
+	break;
+      }
+
+      case opc_lmul:
+	Thumb2_lmul(jinfo);
+	break;
+
+      case opc_fadd:
+      case opc_fsub:
+      case opc_fmul:
+      case opc_fdiv:
+	Thumb2_fOp(jinfo, opcode);
+	break;
+
+      case opc_dadd:
+      case opc_dsub:
+      case opc_dmul:
+      case opc_ddiv:
+	Thumb2_dOp(jinfo, opcode);
+	break;
+
+      case opc_fcmpl:
+      case opc_fcmpg: {
+	Thumb2_Stack *jstack = jinfo->jstack;
+	unsigned rho, lho, res;
+	unsigned loc1, loc2, loc_ne;
+
+	Thumb2_Fill(jinfo, 2);
+	rho = POP(jstack);
+	lho = POP(jstack);
+	Thumb2_Spill(jinfo, 1, 0);
+	res = PUSH(jstack, JSTACK_REG(jstack));
+	vmov_reg_s_toVFP(jinfo->codebuf, VFP_S0, lho);
+	vmov_reg_s_toVFP(jinfo->codebuf, VFP_S1, rho);
+	vcmp_reg_s(jinfo->codebuf, VFP_S0, VFP_S1, 1);
+	mov_imm(jinfo->codebuf, res, opcode == opc_fcmpl ? 1 : -1);
+	vmrs(jinfo->codebuf, ARM_PC);
+	loc1 = forward_16(jinfo->codebuf);
+	dop_imm_preserve(jinfo->codebuf, DP_RSB, res, res, 0);
+	loc2 = forward_16(jinfo->codebuf);
+	vcmp_reg_s(jinfo->codebuf, VFP_S0, VFP_S1, 0);
+	loc_ne = forward_16(jinfo->codebuf);
+	mov_imm(jinfo->codebuf, res, 0);
+	bcc_patch(jinfo->codebuf, opcode == opc_fcmpl ? COND_GT : COND_MI, loc1);
+	bcc_patch(jinfo->codebuf, opcode == opc_fcmpl ? COND_MI : COND_GT, loc2);
+	bcc_patch(jinfo->codebuf, COND_NE, loc_ne);
+	break;
+      }
+
+      case opc_dcmpl:
+      case opc_dcmpg: {
+	Thumb2_Stack *jstack = jinfo->jstack;
+	unsigned rho_lo, rho_hi, lho_lo, lho_hi, res;
+	unsigned loc1, loc2, loc_ne;
+
+	Thumb2_Fill(jinfo, 4);
+	rho_lo = POP(jstack);
+	rho_hi = POP(jstack);
+	lho_lo = POP(jstack);
+	lho_hi = POP(jstack);
+	Thumb2_Spill(jinfo, 1, 0);
+	res = PUSH(jstack, JSTACK_REG(jstack));
+	vmov_reg_d_toVFP(jinfo->codebuf, VFP_S0, lho_lo, lho_hi);
+	vmov_reg_d_toVFP(jinfo->codebuf, VFP_S1, rho_lo, rho_hi);
+	vcmp_reg_d(jinfo->codebuf, VFP_S0, VFP_S1, 1);
+	mov_imm(jinfo->codebuf, res, opcode == opc_dcmpl ? 1 : -1);
+	vmrs(jinfo->codebuf, ARM_PC);
+	loc1 = forward_16(jinfo->codebuf);
+	dop_imm_preserve(jinfo->codebuf, DP_RSB, res, res, 0);
+	loc2 = forward_16(jinfo->codebuf);
+	vcmp_reg_d(jinfo->codebuf, VFP_S0, VFP_S1, 0);
+	loc_ne = forward_16(jinfo->codebuf);
+	mov_imm(jinfo->codebuf, res, 0);
+	bcc_patch(jinfo->codebuf, opcode == opc_dcmpl ? COND_GT : COND_MI, loc1);
+	bcc_patch(jinfo->codebuf, opcode == opc_dcmpl ? COND_MI : COND_GT, loc2);
+	bcc_patch(jinfo->codebuf, COND_NE, loc_ne);
+	break;
+      }
+
+      case opc_drem:
+      case opc_lrem:
+      case opc_ldiv: {
+	Reg src[4], dst[4];
+
+	Thumb2_Fill(jinfo, 4);
+	src[2] = POP(jstack);
+	src[3] = POP(jstack);
+	src[0] = POP(jstack);
+	src[1] = POP(jstack);
+	Thumb2_Flush(jinfo);
+	dst[0] = ARM_R0;
+	dst[1] = ARM_R1;
+	dst[2] = ARM_R2;
+	dst[3] = ARM_R3;
+	mov_multiple(jinfo->codebuf, dst, src, 4);
+	bl(jinfo->codebuf, OPCODE2HANDLER(opcode));
+	if (opcode != opc_lrem) {
+	  PUSH(jstack, ARM_R1);
+	  PUSH(jstack, ARM_R0);
+	} else {
+	  PUSH(jstack, ARM_R3);
+	  PUSH(jstack, ARM_R2);
+	}
+	break;
+      }
+
+      case opc_frem:
+      case opc_idiv:
+      case opc_irem: {
+	Reg r_rho, r_lho;
+
+	Thumb2_Fill(jinfo, 2);
+	r_rho = POP(jstack);
+	r_lho = POP(jstack);
+	Thumb2_Flush(jinfo);
+	if (r_rho == ARM_R0) {
+	  if (r_lho == ARM_R1) {
+	    mov_reg(jinfo->codebuf, ARM_IP, r_rho);
+	    mov_reg(jinfo->codebuf, ARM_R0, r_lho);
+	    mov_reg(jinfo->codebuf, ARM_R1, ARM_IP);
+	  } else {
+	    mov_reg(jinfo->codebuf, ARM_R1, r_rho);
+	    mov_reg(jinfo->codebuf, ARM_R0, r_lho);
+	  }
+	} else {
+	  mov_reg(jinfo->codebuf, ARM_R0, r_lho);
+	  mov_reg(jinfo->codebuf, ARM_R1, r_rho);
+	}
+	if (opcode == opc_frem)
+	  bl(jinfo->codebuf, OPCODE2HANDLER(opcode));
+	else
+	  blx(jinfo->codebuf, OPCODE2HANDLER(opcode));
+	PUSH(jstack, ARM_R0);
+	break;
+      }
+
+      case opc_f2i:
+      case opc_i2f: {
+	Reg r;
+
+	Thumb2_Fill(jinfo, 1);
+	r = POP(jstack);
+	Thumb2_Flush(jinfo);
+	mov_reg(jinfo->codebuf, ARM_R0, r);
+	bl(jinfo->codebuf, OPCODE2HANDLER(opcode));
+	PUSH(jstack, ARM_R0);
+	break;
+      }
+
+      case opc_f2d:
+      case opc_f2l:
+      case opc_i2d: {
+	Reg r;
+
+	Thumb2_Fill(jinfo, 1);
+	r = POP(jstack);
+	Thumb2_Flush(jinfo);
+	mov_reg(jinfo->codebuf, ARM_R0, r);
+	bl(jinfo->codebuf, OPCODE2HANDLER(opcode));
+	PUSH(jstack, ARM_R1);
+	PUSH(jstack, ARM_R0);
+	break;
+    }
+
+      case opc_d2f:
+      case opc_d2i:
+      case opc_l2d:
+      case opc_d2l:
+      case opc_l2f: {
+	Reg lo, hi;
+
+	Thumb2_Fill(jinfo, 2);
+	lo = POP(jstack);
+	hi = POP(jstack);
+	Thumb2_Flush(jinfo);
+	if (hi == ARM_R0) {
+	  if (lo == ARM_R1) {
+	    mov_reg(jinfo->codebuf, ARM_IP, hi);
+	    mov_reg(jinfo->codebuf, ARM_R0, lo);
+	    mov_reg(jinfo->codebuf, ARM_R1, ARM_IP);
+	  } else {
+	    mov_reg(jinfo->codebuf, ARM_R1, hi);
+	    mov_reg(jinfo->codebuf, ARM_R0, lo);
+	  }
+	} else {
+	  mov_reg(jinfo->codebuf, ARM_R0, lo);
+	  mov_reg(jinfo->codebuf, ARM_R1, hi);
+	}
+	bl(jinfo->codebuf, OPCODE2HANDLER(opcode));
+	if (opcode == opc_l2d || opcode == opc_d2l) PUSH(jstack, ARM_R1);
+	PUSH(jstack, ARM_R0);
+	break;
+      }
+
+      case opc_ineg:
+	Thumb2_iNeg(jinfo, opcode);
+	break;
+
+      case opc_lneg:
+	Thumb2_lNeg(jinfo, opcode);
+	break;
+
+      case opc_fneg:
+	Thumb2_fNeg(jinfo, opcode);
+	break;
+
+      case opc_dneg:
+	Thumb2_dNeg(jinfo);
+	break;
+
+      case opc_i2l: {
+	unsigned r, r_res_lo, r_res_hi;
+
+	Thumb2_Fill(jinfo, 1);
+	r = POP(jstack);
+	Thumb2_Spill(jinfo, 2, 0);
+	r_res_hi = PUSH(jstack, JSTACK_REG(jstack));
+	r_res_lo = PUSH(jstack, JSTACK_REG(jstack));
+	if (r == r_res_hi) {
+	  SWAP(jstack);
+	  r_res_hi = r_res_lo;
+	  r_res_lo = r;
+	}
+	mov_reg(jinfo->codebuf, r_res_lo, r);
+	asr_imm(jinfo->codebuf, r_res_hi, r, 31);
+	break;
+      }
+
+      case opc_l2i: {
+	unsigned r_lo, r_hi;
+	unsigned r;
+
+	Thumb2_Fill(jinfo, 2);
+	r_lo = POP(jstack);
+	r_hi = POP(jstack);
+	Thumb2_Spill(jinfo, 1, 0);
+	r = PUSH(jstack, r_lo);
+	break;
+      }
+
+      case opc_i2b: {
+	unsigned r_src, r_dst;
+
+	Thumb2_Fill(jinfo, 1);
+	r_src = POP(jstack);
+	Thumb2_Spill(jinfo, 1, 0);
+	r_dst = PUSH(jstack, JSTACK_REG(jstack));
+	sxtb(jinfo->codebuf, r_dst, r_src);
+	break;
+      }
+
+      case opc_i2s: {
+	unsigned r_src, r_dst;
+
+	Thumb2_Fill(jinfo, 1);
+	r_src = POP(jstack);
+	Thumb2_Spill(jinfo, 1, 0);
+	r_dst = PUSH(jstack, JSTACK_REG(jstack));
+	sxth(jinfo->codebuf, r_dst, r_src);
+	break;
+      }
+
+      case opc_i2c: {
+	unsigned r_src, r_dst;
+
+	Thumb2_Fill(jinfo, 1);
+	r_src = POP(jstack);
+	Thumb2_Spill(jinfo, 1, 0);
+	r_dst = PUSH(jstack, JSTACK_REG(jstack));
+	uxth(jinfo->codebuf, r_dst, r_src);
+	break;
+      }
+
+      case opc_lcmp: {
+	unsigned lho_lo, lho_hi;
+	unsigned rho_lo, rho_hi;
+	unsigned r_tmp_lo, r_tmp_hi;
+	unsigned res;
+	unsigned loc_lt, loc_eq;
+
+	Thumb2_Fill(jinfo, 4);
+	rho_lo = POP(jstack);
+	rho_hi = POP(jstack);
+	lho_lo = POP(jstack);
+	lho_hi = POP(jstack);
+	Thumb2_Spill(jinfo, 1, 0);
+	res = JSTACK_REG(jstack);
+	PUSH(jstack, res);
+	r_tmp_lo = Thumb2_Tmp(jinfo, (1<<rho_lo)|(1<<rho_hi)|(1<<lho_lo)|(1<<lho_hi));
+	r_tmp_hi = Thumb2_Tmp(jinfo, (1<<rho_lo)|(1<<rho_hi)|(1<<lho_lo)|(1<<lho_hi)|(1<<r_tmp_lo));
+	dop_reg(jinfo->codebuf, DP_SUB, r_tmp_lo, lho_lo, rho_lo, SHIFT_LSL, 0);
+	dop_reg(jinfo->codebuf, DP_SBC, r_tmp_hi, lho_hi, rho_hi, SHIFT_LSL, 0);
+	mov_imm(jinfo->codebuf, res, (unsigned)-1);
+	loc_lt = forward_16(jinfo->codebuf);
+	dop_reg(jinfo->codebuf, DP_ORR, res, r_tmp_lo, r_tmp_hi, SHIFT_LSL, 0);
+	loc_eq = forward_16(jinfo->codebuf);
+	mov_imm(jinfo->codebuf, res, 1);
+	bcc_patch(jinfo->codebuf, COND_LT, loc_lt);
+	bcc_patch(jinfo->codebuf, COND_EQ, loc_eq);
+	break;
+      }
+
+      case opc_iinc: {
+	unsigned local = code_base[bci+1];
+	int constant = GET_JAVA_S1(code_base+bci+2);
+	unsigned r = jinfo->jregs->r_local[local];
+
+	if (!r) {
+	  int nlocals = jinfo->method->max_locals();
+	  r = Thumb2_Tmp(jinfo, 0);
+	  stackdepth -= jstack->depth;
+	  load_local(jinfo, r, local, stackdepth);
+	  add_imm(jinfo->codebuf, r, r, constant);
+	  store_local(jinfo, r, local, stackdepth);
+	} else {
+	  Thumb2_Corrupt(jinfo, r, 0);
+	  add_imm(jinfo->codebuf, r, r, constant);
+	}
+	break;
+      }
+
+      case opc_getfield: {
+	constantPoolCacheOop  cp = jinfo->method->constants()->cache();
+        ConstantPoolCacheEntry* cache;
+	int index = GET_NATIVE_U2(code_base+bci+1);
+	Reg r_obj;
+
+        cache = cp->entry_at(index);
+        if (!cache->is_resolved((Bytecodes::Code)opcode)) {
+ 	  int java_index = GET_NATIVE_U2(code_base+bci+1);
+	  constantPoolOop pool = jinfo->method->constants();
+	  Symbol *sig = pool->signature_ref_at(java_index);
+	  const jbyte *base = sig->base();
+	  jbyte c = *base;
+	  int handler = H_GETFIELD_WORD;
+
+	  if (c == 'J' || c == 'D') handler = H_GETFIELD_DW;
+	  if (c == 'B' || c == 'Z') handler = H_GETFIELD_SB;
+	  if (c == 'C') handler = H_GETFIELD_H;
+	  if (c == 'S') handler = H_GETFIELD_SH;
+	  Thumb2_Flush(jinfo);
+	  Thumb2_save_local_refs(jinfo, stackdepth);
+	  mov_imm(jinfo->codebuf, ARM_R0, bci+CONSTMETHOD_CODEOFFSET);
+	  mov_imm(jinfo->codebuf, ARM_R1, index);
+	  blx(jinfo->codebuf, handlers[handler]);
+	  Thumb2_restore_local_refs(jinfo, STACKDEPTH(jinfo, bc_stackinfo[bci+len]));
+	  break;
+	}
+
+	TosState tos_type = cache->flag_state();
+	int field_offset = cache->f2();
+
+	if (tos_type == ltos || tos_type == dtos) {
+	  Reg r_lo, r_hi;
+	  Thumb2_Fill(jinfo, 1);
+	  r_obj = POP(jstack);
+	  Thumb2_Spill(jinfo, 2, 0);
+	  r_hi = PUSH(jstack, JSTACK_REG(jstack));
+	  r_lo = PUSH(jstack, JSTACK_REG(jstack));
+	  Thumb2_load_long(jinfo, r_lo, r_hi, r_obj, field_offset,
+			   cache->is_volatile());
+	} else {
+	  Reg r;
+
+	  Thumb2_Fill(jinfo, 1);
+	  r_obj = POP(jstack);
+	  Thumb2_Spill(jinfo, 1, 0);
+	  r = JSTACK_REG(jstack);
+	  PUSH(jstack, r);
+	  if (tos_type == btos)
+	    ldrsb_imm(jinfo->codebuf, r, r_obj, field_offset, 1, 0);
+	  else if (tos_type == ctos)
+	    ldrh_imm(jinfo->codebuf, r, r_obj, field_offset, 1, 0);
+	  else if (tos_type == stos)
+	    ldrsh_imm(jinfo->codebuf, r, r_obj, field_offset, 1, 0);
+	  else
+	    ldr_imm(jinfo->codebuf, r, r_obj, field_offset, 1, 0);
+	}
+
+	if (cache->is_volatile())
+	  fullBarrier(jinfo->codebuf);
+
+	break;
+      }
+
+      case opc_getstatic: {
+	constantPoolCacheOop  cp = jinfo->method->constants()->cache();
+        ConstantPoolCacheEntry* cache;
+	int index = GET_NATIVE_U2(code_base+bci+1);
+
+        cache = cp->entry_at(index);
+        if (!cache->is_resolved((Bytecodes::Code)opcode)) {
+	  int java_index = GET_NATIVE_U2(code_base+bci+1);
+	  constantPoolOop pool = jinfo->method->constants();
+	  Symbol *sig = pool->signature_ref_at(java_index);
+	  const jbyte *base = sig->base();
+	  jbyte c = *base;
+	  int handler = H_GETSTATIC_WORD;
+
+	  if (c == 'J' || c == 'D') handler = H_GETSTATIC_DW;
+	  if (c == 'B' || c == 'Z') handler = H_GETSTATIC_SB;
+	  if (c == 'C') handler = H_GETSTATIC_H;
+	  if (c == 'S') handler = H_GETSTATIC_SH;
+	  Thumb2_Flush(jinfo);
+	  Thumb2_save_local_refs(jinfo, stackdepth);
+	  mov_imm(jinfo->codebuf, ARM_R0, bci+CONSTMETHOD_CODEOFFSET);
+	  mov_imm(jinfo->codebuf, ARM_R1, index);
+	  blx(jinfo->codebuf, handlers[handler]);
+	  Thumb2_restore_local_refs(jinfo, STACKDEPTH(jinfo, bc_stackinfo[bci+len]));
+	  break;
+	}
+
+	TosState tos_type = cache->flag_state();
+	int field_offset = cache->f2();
+
+	if (tos_type == ltos || tos_type == dtos) {
+	  Reg r_lo, r_hi, r_addr;
+	  Thumb2_Spill(jinfo, 2, 0);
+	  r_hi = PUSH(jstack, JSTACK_REG(jstack));
+	  r_lo = PUSH(jstack, JSTACK_REG(jstack));
+	  r_addr = Thumb2_Tmp(jinfo, (1<<r_hi) | (1<<r_lo));
+	  load_istate(jinfo, r_lo, ISTATE_CONSTANTS, stackdepth+2);
+	  ldr_imm(jinfo->codebuf, r_addr, r_lo, CP_OFFSET + (index << 4) + 4, 1, 0);
+	  Thumb2_load_long(jinfo, r_lo, r_hi, r_addr, field_offset,
+			   cache->is_volatile());
+	} else {
+	  Reg r;
+	  Thumb2_Spill(jinfo, 1, 0);
+	  r = JSTACK_REG(jstack);
+	  PUSH(jstack, r);
+	  load_istate(jinfo, r, ISTATE_CONSTANTS, stackdepth+1);
+	  ldr_imm(jinfo->codebuf, r, r, CP_OFFSET + (index << 4) + 4, 1, 0);
+	  if (tos_type == btos)
+	    ldrsb_imm(jinfo->codebuf, r, r, field_offset, 1, 0);
+	  else if (tos_type == ctos)
+	    ldrh_imm(jinfo->codebuf, r, r, field_offset, 1, 0);
+	  else if (tos_type == stos)
+	    ldrsh_imm(jinfo->codebuf, r, r, field_offset, 1, 0);
+	  else
+	    ldr_imm(jinfo->codebuf, r, r, field_offset, 1, 0);
+	}
+
+	if (cache->is_volatile())
+	  fullBarrier(jinfo->codebuf);
+
+	break;
+      }
+
+      case opc_putfield: {
+	constantPoolCacheOop  cp = jinfo->method->constants()->cache();
+        ConstantPoolCacheEntry* cache;
+	int index = GET_NATIVE_U2(code_base+bci+1);
+	Reg r_obj;
+
+        cache = cp->entry_at(index);
+
+        if (!cache->is_resolved((Bytecodes::Code)opcode)) {
+	  int java_index = GET_NATIVE_U2(code_base+bci+1);
+	  constantPoolOop pool = jinfo->method->constants();
+	  Symbol *sig = pool->signature_ref_at(java_index);
+	  const jbyte *base = sig->base();
+	  jbyte c = *base;
+	  int handler = H_PUTFIELD_WORD;
+
+	  if (c == 'J' || c == 'D') handler = H_PUTFIELD_DW;
+	  if (c == 'B' || c == 'Z') handler = H_PUTFIELD_B;
+	  if (c == 'C' || c == 'S') handler = H_PUTFIELD_H;
+ 	  if (c == '[' || c == 'L') handler = H_PUTFIELD_A;
+	  Thumb2_Flush(jinfo);
+	  Thumb2_save_local_refs(jinfo, stackdepth);
+	  mov_imm(jinfo->codebuf, ARM_R0, bci+CONSTMETHOD_CODEOFFSET);
+	  mov_imm(jinfo->codebuf, ARM_R1, index);
+	  blx(jinfo->codebuf, handlers[handler]);
+	  Thumb2_restore_local_refs(jinfo, STACKDEPTH(jinfo, bc_stackinfo[bci+len]));
+
+	  break;
+	}
+
+	if (cache->is_volatile())
+	  storeBarrier(jinfo->codebuf);
+
+	TosState tos_type = cache->flag_state();
+	int field_offset = cache->f2();
+
+	if (tos_type == ltos || tos_type == dtos) {
+	  Reg r_lo, r_hi;
+	  Thumb2_Fill(jinfo, 3);
+	  r_lo = POP(jstack);
+	  r_hi = POP(jstack);
+	  r_obj = POP(jstack);
+	  Thumb2_store_long(jinfo, r_lo, r_hi, r_obj, field_offset, cache->is_volatile());
+	} else {
+	  Reg r;
+	  Thumb2_Fill(jinfo, 2);
+	  r = POP(jstack);
+	  r_obj = POP(jstack);
+	  if (tos_type == btos)
+	    strb_imm(jinfo->codebuf, r, r_obj, field_offset, 1, 0);
+	  else if (tos_type == ctos | tos_type == stos)
+	    strh_imm(jinfo->codebuf, r, r_obj, field_offset, 1, 0);
+	  else {
+	    str_imm(jinfo->codebuf, r, r_obj, field_offset, 1, 0);
+	    if (tos_type == atos) {
+	      Thumb2_Flush(jinfo);
+	      mov_reg(jinfo->codebuf, ARM_R0, r_obj);
+	      bl(jinfo->codebuf, handlers[H_APUTFIELD]);
+	    }
+	  }
+	}
+
+	if (cache->is_volatile())
+	  fullBarrier(jinfo->codebuf);
+
+	break;
+      }
+
+      case opc_putstatic: {
+	constantPoolCacheOop  cp = jinfo->method->constants()->cache();
+        ConstantPoolCacheEntry* cache;
+	int index = GET_NATIVE_U2(code_base+bci+1);
+
+        cache = cp->entry_at(index);
+        if (!cache->is_resolved((Bytecodes::Code)opcode)) {
+	  int java_index = GET_NATIVE_U2(code_base+bci+1);
+	  constantPoolOop pool = jinfo->method->constants();
+	  Symbol *sig = pool->signature_ref_at(java_index);
+	  const jbyte *base = sig->base();
+	  jbyte c = *base;
+	  int handler = H_PUTSTATIC_WORD;
+
+	  if (c == 'J' || c == 'D') handler = H_PUTSTATIC_DW;
+	  if (c == 'B' || c == 'Z') handler = H_PUTSTATIC_B;
+	  if (c == 'C' || c == 'S') handler = H_PUTSTATIC_H;
+	  if (c == '[' || c == 'L') handler = H_PUTSTATIC_A;
+	  Thumb2_Flush(jinfo);
+	  Thumb2_save_local_refs(jinfo, stackdepth);
+	  mov_imm(jinfo->codebuf, ARM_R0, bci+CONSTMETHOD_CODEOFFSET);
+	  mov_imm(jinfo->codebuf, ARM_R1, index);
+	  blx(jinfo->codebuf, handlers[handler]);
+	  Thumb2_restore_local_refs(jinfo, STACKDEPTH(jinfo, bc_stackinfo[bci+len]));
+	  break;
+	}
+
+	if (cache->is_volatile())
+	  storeBarrier(jinfo->codebuf);
+
+	TosState tos_type = cache->flag_state();
+	int field_offset = cache->f2();
+	Reg r_obj;
+
+	if (tos_type == ltos || tos_type == dtos) {
+	  Reg r_lo, r_hi;
+	  Thumb2_Fill(jinfo, 2);
+	  r_lo = POP(jstack);
+	  r_hi = POP(jstack);
+	  Thumb2_Spill(jinfo, 1, (1<<r_lo)|(1<<r_hi));
+	  r_obj = JSTACK_PREFER(jstack, ~((1<<r_lo)|(1<<r_hi)));
+	  JASSERT(r_obj != r_lo && r_obj != r_hi, "corruption in putstatic");
+	  load_istate(jinfo, r_obj, ISTATE_CONSTANTS, stackdepth-2);
+	  ldr_imm(jinfo->codebuf, r_obj, r_obj, CP_OFFSET + (index << 4) + 4, 1, 0);
+	  Thumb2_store_long(jinfo, r_lo, r_hi, r_obj, field_offset, cache->is_volatile());
+	} else {
+	  Reg r;
+	  Thumb2_Fill(jinfo, 1);
+	  r = POP(jstack);
+	  Thumb2_Spill(jinfo, 1, (1<<r));
+	  r_obj = JSTACK_PREFER(jstack, ~(1<<r));
+	  JASSERT(r_obj != r, "corruption in putstatic");
+	  load_istate(jinfo, r_obj, ISTATE_CONSTANTS, stackdepth-1);
+	  ldr_imm(jinfo->codebuf, r_obj, r_obj, CP_OFFSET + (index << 4) + 4, 1, 0);
+	  if (tos_type == btos)
+	    strb_imm(jinfo->codebuf, r, r_obj, field_offset, 1, 0);
+	  else if (tos_type == ctos | tos_type == stos)
+	    strh_imm(jinfo->codebuf, r, r_obj, field_offset, 1, 0);
+	  else {
+	    str_imm(jinfo->codebuf, r, r_obj, field_offset, 1, 0);
+	    if (tos_type == atos) {
+	      Thumb2_Flush(jinfo);
+	      mov_reg(jinfo->codebuf, ARM_R0, r_obj);
+	      bl(jinfo->codebuf, handlers[H_APUTFIELD]);
+	    }
+	  }
+	}
+
+	if (cache->is_volatile())
+	  fullBarrier(jinfo->codebuf);
+
+	break;
+      }
+
+      case opc_invokevirtual:
+      case opc_invokestatic:
+      case opc_invokespecial: {
+	constantPoolCacheOop  cp = jinfo->method->constants()->cache();
+        ConstantPoolCacheEntry* cache;
+	int index = GET_NATIVE_U2(code_base+bci+1);
+	unsigned loc;
+	methodOop callee;
+
+	// Call Debug if we're about to enter a synchronized method.
+#define DEBUG_REGSET ((1<<ARM_R0)|(1<<ARM_R1)|(1<<ARM_R2)|(1<<ARM_R3)|(1<<ARM_IP))
+	if (DebugSwitch && jinfo->method->is_synchronized()) {
+	  stm(jinfo->codebuf, DEBUG_REGSET | (1<<ARM_LR), ARM_SP, PUSH_FD, 1);
+	  add_imm(jinfo->codebuf, ARM_R0, ISTATE_REG(jinfo), ISTATE_OFFSET(jinfo, stackdepth, 0));
+	  mov_imm(jinfo->codebuf, ARM_IP, (u32)Debug);
+	  load_istate(jinfo, ARM_R2, ISTATE_METHOD, stackdepth);
+	  ldr_imm(jinfo->codebuf, ARM_R2, ARM_R2, METHOD_CONSTMETHOD, 1, 0);
+	  add_imm(jinfo->codebuf, ARM_R2, ARM_R2, bci+CONSTMETHOD_CODEOFFSET);
+	  store_istate(jinfo, ARM_R2, ISTATE_BCP, stackdepth);
+	  blx_reg(jinfo->codebuf, ARM_IP);
+	  ldm(jinfo->codebuf, DEBUG_REGSET | (1<<ARM_LR), ARM_SP, POP_FD, 1);
+	}
+#undef DEBUG_REGSET
+
+        cache = cp->entry_at(index);
+        if (!cache->is_resolved((Bytecodes::Code)opcode)) {
+	  Thumb2_Flush(jinfo);
+	  Thumb2_save_all_locals(jinfo, stackdepth);
+	  mov_imm(jinfo->codebuf, ARM_R0, bci+CONSTMETHOD_CODEOFFSET);
+	  mov_imm(jinfo->codebuf, ARM_R1, index);
+	  blx(jinfo->codebuf,
+	    handlers[opcode == opc_invokestatic ? H_INVOKESTATIC :
+		     opcode == opc_invokespecial ? H_INVOKESPECIAL : H_INVOKEVIRTUAL]);
+	  Thumb2_restore_all_locals(jinfo, STACKDEPTH(jinfo, bc_stackinfo[bci+len]));
+	  break;
+	}
+
+	callee = opcode == opc_invokevirtual ? (methodOop)cache->f2() : (methodOop)cache->f1();
+
+	if (opcode != opc_invokevirtual || cache->is_vfinal()) {
+	  if (handle_special_method(callee, jinfo, stackdepth))
+	    break;
+	}
+
+	if ((opcode != opc_invokevirtual || cache->is_vfinal()) && callee->is_accessor()) {
+	  u1 *code = callee->code_base();
+	  int index = GET_NATIVE_U2(&code[2]);
+	  constantPoolCacheOop callee_cache = callee->constants()->cache();
+	  ConstantPoolCacheEntry *entry = callee_cache->entry_at(index);
+	  Reg r_obj, r;
+
+	  if (entry->is_resolved(Bytecodes::_getfield)) {
+	    JASSERT(cache->parameter_size() == 1, "not 1 parameter to accessor");
+
+	    TosState tos_type = entry->flag_state();
+	    int field_offset = entry->f2();
+
+	    JASSERT(tos_type == btos || tos_type == ctos || tos_type == stos || tos_type == atos || tos_type == itos, "not itos or atos");
+
+	    Thumb2_Fill(jinfo, 1);
+	    r_obj = POP(jstack);
+	    Thumb2_Spill(jinfo, 1, 0);
+	    r = JSTACK_REG(jstack);
+	    PUSH(jstack, r);
+	    if (tos_type == btos)
+	      ldrb_imm(jinfo->codebuf, r, r_obj, field_offset, 1, 0);
+	    else if (tos_type == ctos)
+	      ldrh_imm(jinfo->codebuf, r, r_obj, field_offset, 1, 0);
+	    else if (tos_type == stos)
+	      ldrsh_imm(jinfo->codebuf, r, r_obj, field_offset, 1, 0);
+	    else
+	      ldr_imm(jinfo->codebuf, r, r_obj, field_offset, 1, 0);
+	    break;
+	  }
+	}
+
+ 	Thumb2_Flush(jinfo);
+	if (OSPACE) {
+	  Thumb2_save_all_locals(jinfo, stackdepth);
+	  mov_imm(jinfo->codebuf, ARM_R0, bci+CONSTMETHOD_CODEOFFSET);
+	  mov_imm(jinfo->codebuf, ARM_R1, index);
+	  blx(jinfo->codebuf, handlers[
+	      opcode == opc_invokestatic ? H_INVOKESTATIC_RESOLVED :
+	      opcode == opc_invokespecial ? H_INVOKESPECIAL_RESOLVED :
+	      cache->is_vfinal() ? H_INVOKEVFINAL : H_INVOKEVIRTUAL_RESOLVED]);
+	  Thumb2_restore_all_locals(jinfo, STACKDEPTH(jinfo, bc_stackinfo[bci+len]));
+	  break;
+	}
+
+	load_istate(jinfo, ARM_R2, ISTATE_METHOD, stackdepth);
+ 	mov_imm(jinfo->codebuf, ARM_R1, 0);
+	if (opcode != opc_invokestatic)
+ 	  ldr_imm(jinfo->codebuf, ARM_R3, Rstack, (cache->parameter_size()-1) * sizeof(int), 1, 0);
+	if (opcode != opc_invokevirtual || cache->is_vfinal())
+	  load_istate(jinfo, ARM_R0, ISTATE_CONSTANTS, stackdepth);
+	ldr_imm(jinfo->codebuf, ARM_R2, ARM_R2, METHOD_CONSTMETHOD, 1, 0);
+	if (opcode != opc_invokestatic)
+	  ldr_imm(jinfo->codebuf, ARM_R3, ARM_R3, 4, 1, 0);
+	if (opcode != opc_invokevirtual || cache->is_vfinal())
+	  ldr_imm(jinfo->codebuf, ARM_R0, ARM_R0,
+		CP_OFFSET + (index << 4) + (opcode == opc_invokevirtual ? 8 : 4), 1, 0);
+	else
+	  ldr_imm(jinfo->codebuf, ARM_R0, ARM_R3, INSTANCEKLASS_VTABLE_OFFSET + cache->f2() * 4, 1, 0);
+	add_imm(jinfo->codebuf, ARM_R2, ARM_R2, bci+CONSTMETHOD_CODEOFFSET);
+ 	str_imm(jinfo->codebuf, ARM_R1, Rthread, THREAD_LAST_JAVA_SP, 1, 0);
+	str_imm(jinfo->codebuf, ARM_R1, Rthread, THREAD_LAST_JAVA_FP, 1, 0);
+ 	ldr_imm(jinfo->codebuf, ARM_R1, ARM_R0, METHOD_FROM_INTERPRETED, 1, 0);
+	store_istate(jinfo, ARM_R2, ISTATE_BCP, stackdepth);
+ 	str_imm(jinfo->codebuf, Rstack, Rthread, THREAD_JAVA_SP, 1, 0);
+ 	Thumb2_save_all_locals(jinfo, stackdepth);
+	sub_imm(jinfo->codebuf, Rstack, Rstack, 4);
+ 	ldr_imm(jinfo->codebuf, ARM_R3, ARM_R1, 0, 1, 0);
+	store_istate(jinfo, Rstack, ISTATE_STACK, stackdepth+1);
+	add_imm(jinfo->codebuf, ARM_R3, ARM_R3, FAST_ENTRY_OFFSET);
+ 	blx_reg(jinfo->codebuf, ARM_R3);
+ 	JASSERT(!(bc_stackinfo[bci+len] & BC_COMPILED), "code already compiled for this bytecode?");
+	stackdepth = STACKDEPTH(jinfo, bc_stackinfo[bci+len]);
+	ldr_imm(jinfo->codebuf, Rstack, Rthread, THREAD_JAVA_SP, 1, 0);
+	load_istate(jinfo, ARM_R2, ISTATE_STACK_LIMIT, stackdepth);
+ 	ldr_imm(jinfo->codebuf, ARM_R1, Rthread, THREAD_TOP_ZERO_FRAME, 1, 0);
+	Thumb2_restore_all_locals(jinfo, stackdepth);
+	mov_imm(jinfo->codebuf, ARM_R0, 0);   // set last SP to zero
+					      // before setting FP
+	str_imm(jinfo->codebuf, ARM_R0, Rthread, THREAD_LAST_JAVA_SP, 1, 0);
+	ldr_imm(jinfo->codebuf, ARM_R1, Rthread, THREAD_TOP_ZERO_FRAME, 1, 0);
+	Thumb2_restore_all_locals(jinfo, stackdepth);
+	add_imm(jinfo->codebuf, ARM_R2, ARM_R2, 4);
+	ldr_imm(jinfo->codebuf, ARM_R3, Rthread, THREAD_PENDING_EXC, 1, 0);
+	str_imm(jinfo->codebuf, ARM_R2, Rthread, THREAD_JAVA_SP, 1, 0);
+	str_imm(jinfo->codebuf, ARM_R1, Rthread, THREAD_LAST_JAVA_FP, 1, 0);
+	str_imm(jinfo->codebuf, ARM_R2, Rthread, THREAD_LAST_JAVA_SP, 1, 0);
+	cmp_imm(jinfo->codebuf, ARM_R3, 0);
+	it(jinfo->codebuf, COND_NE, IT_MASK_T);
+	bl(jinfo->codebuf, handlers[H_HANDLE_EXCEPTION_NO_REGS]);
+	break;
+      }
+
+      case opc_invokeinterface: {
+	constantPoolCacheOop  cp = jinfo->method->constants()->cache();
+        ConstantPoolCacheEntry* cache;
+	int index = GET_NATIVE_U2(code_base+bci+1);
+	unsigned loc, loc_inc_ex;
+
+	// Currently we just call the unresolved invokeinterface entry for resolved /
+	// unresolved alike!
+	Thumb2_Flush(jinfo);
+	Thumb2_save_all_locals(jinfo, stackdepth);
+	mov_imm(jinfo->codebuf, ARM_R0, bci+CONSTMETHOD_CODEOFFSET);
+	mov_imm(jinfo->codebuf, ARM_R1, index);
+	blx(jinfo->codebuf, handlers[H_INVOKEINTERFACE]);
+	Thumb2_restore_all_locals(jinfo, STACKDEPTH(jinfo, bc_stackinfo[bci+len]));
+	break;
+      }
+
+    case opc_invokedynamic:
+      {
+	Thumb2_Exit(jinfo, H_EXIT_TO_INTERPRETER, bci, stackdepth);
+	break;
+      }
+
+    case opc_fast_aldc_w:
+    case opc_fast_aldc:
+      {
+	unsigned index = (opcode == (unsigned)opc_fast_aldc) ?
+				code_base[bci+1] : GET_NATIVE_U2(code_base+bci+1);
+	constantPoolOop constants = jinfo->method->constants();
+	ConstantPoolCacheEntry* cpce = constants->cache()->entry_at(index);
+        if (! cpce->is_f1_null()) {
+	  Thumb2_Spill(jinfo, 1, 0);
+	  int r = JSTACK_REG(jstack);
+	  PUSH(jstack, r);
+	  ldr_imm(jinfo->codebuf, r, Ristate, ISTATE_CONSTANTS, 1, 0);
+	  ldr_imm(jinfo->codebuf, r, r, CP_OFFSET + (index << 4) + 4, 1, 0); // offset to cache->f1()
+	} else {
+	  Thumb2_Exit(jinfo, H_EXIT_TO_INTERPRETER, bci, stackdepth);
+	}
+	break;
+      }
+
+      case opc_jsr_w:
+      case opc_jsr: {
+	int offset = opcode == opc_jsr ?
+		GET_JAVA_S2(jinfo->code_base + bci + 1) :
+		GET_JAVA_U4(jinfo->code_base + bci + 1);
+	Reg r;
+
+	Thumb2_Spill(jinfo, 1, 0);
+	r = JSTACK_REG(jstack);
+	PUSH(jstack, r);
+	mov_imm(jinfo->codebuf, r, bci + ((opcode == opc_jsr) ? 3 : 5));
+	Thumb2_Flush(jinfo);
+	bci = Thumb2_Goto(jinfo, bci, offset, len);
+	len = 0;
+	break;
+      }
+
+      case opc_ret: {
+	Thumb2_Exit(jinfo, H_RET, bci, stackdepth);
+	break;
+      }
+
+      case opc_goto:
+      case opc_goto_w: {
+	int offset = opcode == opc_goto ?
+		GET_JAVA_S2(jinfo->code_base + bci + 1) :
+		GET_JAVA_U4(jinfo->code_base + bci + 1);
+	Thumb2_Flush(jinfo);
+	bci = Thumb2_Goto(jinfo, bci, offset, len, stackdepth);
+	len = 0;
+	break;
+      }
+
+      case opc_athrow:
+	Thumb2_Exit(jinfo, H_ATHROW, bci, stackdepth);
+	break;
+
+      case opc_ifeq:
+      case opc_ifne:
+      case opc_iflt:
+      case opc_ifge:
+      case opc_ifgt:
+      case opc_ifle:
+      case opc_ifnull:
+      case opc_ifnonnull: {
+	Reg r;
+	unsigned cond = opcode - opc_ifeq;
+	Thumb2_Cond_Safepoint(jinfo, stackdepth, bci);
+	if (opcode >= opc_ifnull) cond = opcode - opc_ifnull;
+	Thumb2_Fill(jinfo, 1);
+	r = POP(jstack);
+	Thumb2_Flush(jinfo);
+	cmp_imm(jinfo->codebuf, r, 0);
+	bci = Thumb2_Branch(jinfo, bci, cond);
+	len = 0;
+	break;
+      }
+
+      case opc_if_icmpeq:
+      case opc_if_icmpne:
+      case opc_if_icmplt:
+      case opc_if_icmpge:
+      case opc_if_icmpgt:
+      case opc_if_icmple:
+      case opc_if_acmpeq:
+      case opc_if_acmpne: {
+	Reg r_lho, r_rho;
+	unsigned cond = opcode - opc_if_icmpeq;
+	Thumb2_Cond_Safepoint(jinfo, stackdepth, bci);
+	if (opcode >= opc_if_acmpeq) cond = opcode - opc_if_acmpeq;
+	Thumb2_Fill(jinfo, 2);
+	r_rho = POP(jstack);
+	r_lho = POP(jstack);
+	Thumb2_Flush(jinfo);
+	cmp_reg(jinfo->codebuf, r_lho, r_rho);
+	bci = Thumb2_Branch(jinfo, bci, cond);
+	len = 0;
+	break;
+      }
+
+      case opc_return:
+      case opc_dreturn:
+      case opc_lreturn:
+      case opc_ireturn:
+      case opc_freturn:
+      case opc_areturn:
+	Thumb2_Return(jinfo, opcode, bci, stackdepth);
+	break;
+
+      case opc_return_register_finalizer: {
+	Thumb2_Stack *jstack = jinfo->jstack;
+	Reg r, r_tmp;
+	unsigned loc_eq;
+
+	Thumb2_Flush(jinfo);
+	Thumb2_Load(jinfo, 0, stackdepth);
+	r = POP(jstack);
+	r_tmp = Thumb2_Tmp(jinfo, (1<<r));
+	ldr_imm(jinfo->codebuf, r_tmp, r, 4, 1, 0);
+	ldr_imm(jinfo->codebuf, r_tmp, r_tmp, KLASS_PART+KLASS_ACCESSFLAGS, 1, 0);
+	tst_imm(jinfo->codebuf, r_tmp, JVM_ACC_HAS_FINALIZER);
+	loc_eq = forward_16(jinfo->codebuf);
+	Thumb2_save_local_refs(jinfo, stackdepth);
+	mov_reg(jinfo->codebuf, ARM_R1, r);
+	load_istate(jinfo, ARM_R0, ISTATE_METHOD, stackdepth);
+	ldr_imm(jinfo->codebuf, ARM_R0, ARM_R0, METHOD_CONSTMETHOD, 1, 0);
+	add_imm(jinfo->codebuf, ARM_R0, ARM_R0, bci+CONSTMETHOD_CODEOFFSET);
+	store_istate(jinfo, ARM_R0, ISTATE_BCP, stackdepth);
+	sub_imm(jinfo->codebuf, ARM_R0, Rstack, 4);
+	store_istate(jinfo, ARM_R0, ISTATE_STACK, stackdepth);
+
+	mov_reg(jinfo->codebuf, ARM_R0, Rthread);
+	mov_imm(jinfo->codebuf, ARM_R3, (u32)_ZN18InterpreterRuntime18register_finalizerEP10JavaThreadP7oopDesc);
+	blx_reg(jinfo->codebuf, ARM_R3);
+
+	ldr_imm(jinfo->codebuf, ARM_R3, Rthread, THREAD_PENDING_EXC, 1, 0);
+	cmp_imm(jinfo->codebuf, ARM_R3, 0);
+	it(jinfo->codebuf, COND_NE, IT_MASK_T);
+	bl(jinfo->codebuf, handlers[H_HANDLE_EXCEPTION]);
+	bcc_patch(jinfo->codebuf, COND_EQ, loc_eq);
+	Thumb2_Return(jinfo, opc_return, bci, stackdepth);
+	break;
+      }
+
+      case opc_new: {
+	unsigned loc;
+
+	Thumb2_Flush(jinfo);
+	mov_imm(jinfo->codebuf, ARM_R1, GET_JAVA_U2(code_base+bci+1));
+	mov_imm(jinfo->codebuf, ARM_R3, bci+CONSTMETHOD_CODEOFFSET);
+      Thumb2_save_local_refs(jinfo, stackdepth);
+	bl(jinfo->codebuf, handlers[H_NEW]);
+      Thumb2_restore_local_refs(jinfo, stackdepth);
+	cmp_imm(jinfo->codebuf, ARM_R0, 0);
+	it(jinfo->codebuf, COND_EQ, IT_MASK_T);
+	bl(jinfo->codebuf, handlers[H_HANDLE_EXCEPTION]);
+	PUSH(jstack, ARM_R0);
+	break;
+      }
+
+      case opc_aastore: {
+	Reg src[3], dst[3];
+	unsigned loc;
+
+	Thumb2_Fill(jinfo, 3);
+	src[0] = POP(jstack);	// value
+	src[1] = POP(jstack);	// index
+	src[2] = POP(jstack);	// arrayref
+	Thumb2_Flush(jinfo);
+	dst[0] = ARM_R1;
+	dst[1] = ARM_R2;
+	dst[2] = ARM_R3;
+	mov_multiple(jinfo->codebuf, dst, src, 3);
+	mov_imm(jinfo->codebuf, ARM_R0, bci+CONSTMETHOD_CODEOFFSET);
+      Thumb2_save_local_refs(jinfo, stackdepth - 3);	// 3 args popped above
+	bl(jinfo->codebuf, handlers[H_AASTORE]);
+      Thumb2_restore_local_refs(jinfo, stackdepth - 3);
+	cmp_imm(jinfo->codebuf, ARM_R0, 0);
+	it(jinfo->codebuf, COND_NE, IT_MASK_T);
+	bl(jinfo->codebuf, handlers[H_HANDLE_EXCEPTION]);
+	break;
+      }
+
+      case opc_instanceof: {
+	unsigned loc;
+	Reg r;
+
+	Thumb2_Fill(jinfo, 1);
+	r = POP(jstack);
+	Thumb2_Flush(jinfo);
+	mov_reg(jinfo->codebuf, ARM_R2, r);
+	mov_imm(jinfo->codebuf, ARM_R1, GET_JAVA_U2(code_base+bci+1));
+	mov_imm(jinfo->codebuf, ARM_R3, bci+CONSTMETHOD_CODEOFFSET);
+      Thumb2_save_local_refs(jinfo, stackdepth - 1);
+	bl(jinfo->codebuf, handlers[H_INSTANCEOF]);
+      Thumb2_restore_local_refs(jinfo, stackdepth - 1);	// 1 arg popped above
+	cmp_imm(jinfo->codebuf, ARM_R0, (unsigned)-1);
+	it(jinfo->codebuf, COND_EQ, IT_MASK_T);
+	bl(jinfo->codebuf, handlers[H_HANDLE_EXCEPTION]);
+	PUSH(jstack, ARM_R0);
+	break;
+      }
+
+      case opc_checkcast: {
+	unsigned loc;
+	Reg r;
+
+	Thumb2_Fill(jinfo, 1);
+	r = TOS(jstack);
+	Thumb2_Flush(jinfo);
+	mov_reg(jinfo->codebuf, ARM_R2, r);
+	mov_imm(jinfo->codebuf, ARM_R1, GET_JAVA_U2(code_base+bci+1));
+	mov_imm(jinfo->codebuf, ARM_R3, bci+CONSTMETHOD_CODEOFFSET);
+      Thumb2_save_local_refs(jinfo, stackdepth);
+	bl(jinfo->codebuf, handlers[H_CHECKCAST]);
+      Thumb2_restore_local_refs(jinfo, stackdepth);
+	cmp_imm(jinfo->codebuf, ARM_R0, 0);
+	it(jinfo->codebuf, COND_NE, IT_MASK_T);
+	bl(jinfo->codebuf, handlers[H_HANDLE_EXCEPTION]);
+	break;
+      }
+
+      case opc_monitorenter:
+	Thumb2_Flush(jinfo);
+	Thumb2_save_all_locals(jinfo, stackdepth);
+	mov_imm(jinfo->codebuf, ARM_R0, bci+CONSTMETHOD_CODEOFFSET);
+	bl(jinfo->codebuf, handlers[H_MONITORENTER]);
+	Thumb2_restore_all_locals(jinfo, stackdepth);
+	break;
+
+      case opc_monitorexit: {
+	Reg r;
+
+	Thumb2_Fill(jinfo, 1);
+	r = POP(jstack);
+	Thumb2_Flush(jinfo);
+	mov_reg(jinfo->codebuf, ARM_R1, r);
+	mov_imm(jinfo->codebuf, ARM_R3, bci+CONSTMETHOD_CODEOFFSET);
+        Thumb2_save_local_refs(jinfo, stackdepth);
+	bl(jinfo->codebuf, handlers[H_MONITOREXIT]);
+        Thumb2_restore_local_refs(jinfo, stackdepth);
+	cmp_imm(jinfo->codebuf, ARM_R0, 0);
+	it(jinfo->codebuf, COND_NE, IT_MASK_T);
+	bl(jinfo->codebuf, handlers[H_HANDLE_EXCEPTION]);
+	break;
+      }
+
+      case opc_newarray: {
+	Reg r;
+	unsigned loc;
+
+	Thumb2_Fill(jinfo, 1);
+	r = POP(jstack);
+	Thumb2_Flush(jinfo);
+	mov_reg(jinfo->codebuf, ARM_R2, r);
+	mov_imm(jinfo->codebuf, ARM_R1, code_base[bci+1]);
+	mov_imm(jinfo->codebuf, ARM_R3, bci+CONSTMETHOD_CODEOFFSET);
+      Thumb2_save_local_refs(jinfo, stackdepth-1);
+	bl(jinfo->codebuf, handlers[H_NEWARRAY]);
+      Thumb2_restore_local_refs(jinfo, stackdepth-1);
+	ldr_imm(jinfo->codebuf, ARM_R0, Rthread, THREAD_VM_RESULT, 1, 0);
+	mov_imm(jinfo->codebuf, ARM_R2, 0);
+  	str_imm(jinfo->codebuf, ARM_R2, Rthread, THREAD_VM_RESULT, 1, 0);
+	cmp_imm(jinfo->codebuf, ARM_R0, 0);
+	it(jinfo->codebuf, COND_EQ, IT_MASK_T);
+	bl(jinfo->codebuf, handlers[H_HANDLE_EXCEPTION]);
+	PUSH(jstack, ARM_R0);
+	break;
+      }
+
+      case opc_anewarray: {
+	Reg r;
+	unsigned loc;
+
+	Thumb2_Fill(jinfo, 1);
+	r = POP(jstack);
+	Thumb2_Flush(jinfo);
+	mov_reg(jinfo->codebuf, ARM_R3, r);
+	mov_imm(jinfo->codebuf, ARM_R2, GET_JAVA_U2(code_base+bci+1));
+	mov_imm(jinfo->codebuf, ARM_R0, bci+CONSTMETHOD_CODEOFFSET);
+      Thumb2_save_local_refs(jinfo, stackdepth-1);
+	bl(jinfo->codebuf, handlers[H_ANEWARRAY]);
+      Thumb2_restore_local_refs(jinfo, stackdepth-1);
+	ldr_imm(jinfo->codebuf, ARM_R0, Rthread, THREAD_VM_RESULT, 1, 0);
+	mov_imm(jinfo->codebuf, ARM_R2, 0);
+  	str_imm(jinfo->codebuf, ARM_R2, Rthread, THREAD_VM_RESULT, 1, 0);
+	cmp_imm(jinfo->codebuf, ARM_R0, 0);
+	it(jinfo->codebuf, COND_EQ, IT_MASK_T);
+	bl(jinfo->codebuf, handlers[H_HANDLE_EXCEPTION]);
+	PUSH(jstack, ARM_R0);
+	break;
+      }
+
+      case opc_multianewarray: {
+	unsigned loc;
+
+	Thumb2_Flush(jinfo);
+	mov_imm(jinfo->codebuf, ARM_R0, bci+CONSTMETHOD_CODEOFFSET);
+	mov_imm(jinfo->codebuf, ARM_R1, code_base[bci+3] * 4);
+      Thumb2_save_local_refs(jinfo, stackdepth);
+	bl(jinfo->codebuf, handlers[H_MULTIANEWARRAY]);
+      Thumb2_restore_local_refs(jinfo, stackdepth - code_base[bci+3]);
+	ldr_imm(jinfo->codebuf, ARM_R0, Rthread, THREAD_VM_RESULT, 1, 0);
+	mov_imm(jinfo->codebuf, ARM_R2, 0);
+  	str_imm(jinfo->codebuf, ARM_R2, Rthread, THREAD_VM_RESULT, 1, 0);
+	cmp_imm(jinfo->codebuf, ARM_R0, 0);
+	it(jinfo->codebuf, COND_EQ, IT_MASK_T);
+	bl(jinfo->codebuf, handlers[H_HANDLE_EXCEPTION]);
+	PUSH(jstack, ARM_R0);
+	break;
+      }
+
+      case opc_arraylength: {
+	Reg r_obj, r_len;
+
+	Thumb2_Fill(jinfo, 1);
+	r_obj = POP(jstack);
+	Thumb2_Spill(jinfo, 1, 0);
+	r_len = JSTACK_REG(jstack);
+	PUSH(jstack, r_len);
+	ldr_imm(jinfo->codebuf, r_len, r_obj, 8, 1, 0);
+	break;
+      }
+
+      case opc_lookupswitch: {
+	unsigned w;
+	unsigned nbci;
+	int def;
+	int npairs;	// The Java spec says signed but must be >= 0??
+	unsigned *table, *tablep;
+	unsigned r;
+	unsigned oldidx;
+	unsigned table_loc;
+	int i;
+
+	nbci = bci & ~3;
+	w = *(unsigned int *)(code_base + nbci + 4);
+	def = bci + (int)BYTESEX_REVERSE(w);
+	w = *(unsigned int *)(code_base + nbci + 8);
+	npairs = (int)BYTESEX_REVERSE(w);
+	table = (unsigned int *)(code_base + nbci + 12);
+
+	Thumb2_Fill(jinfo, 1);
+	r = POP(jstack);
+
+	Thumb2_Flush(jinfo);
+
+	table_loc = out_loc(jinfo->codebuf);
+	for (i = 0, tablep = table; i < npairs; i++) {
+	  unsigned match;
+
+	  w = tablep[0];
+	  match = BYTESEX_REVERSE(w);
+	  tablep += 2;
+	  cmp_imm(jinfo->codebuf, r, match);
+	  t2_bug_align(jinfo->codebuf);
+	  forward_32(jinfo->codebuf);
+	}
+	t2_bug_align(jinfo->codebuf);
+	forward_32(jinfo->codebuf);
+	Thumb2_codegen(jinfo, bci+len);
+
+	oldidx = codebuf->idx;
+	codebuf->idx = table_loc >> 1;
+	for (i = 0, tablep = table; i < npairs; i++) {
+	  unsigned match;
+	  unsigned dest;
+	  unsigned loc;
+
+	  w = tablep[0];
+	  match = BYTESEX_REVERSE(w);
+	  w = tablep[1];
+	  dest = bci + (int)BYTESEX_REVERSE(w);
+	  tablep += 2;
+	  cmp_imm(jinfo->codebuf, r, match);
+	  JASSERT(jinfo->bc_stackinfo[dest] & BC_COMPILED, "code not compiled");
+	  t2_bug_align(jinfo->codebuf);
+	  loc = forward_32(jinfo->codebuf);
+	  branch_patch(jinfo->codebuf, COND_EQ, loc, jinfo->bc_stackinfo[dest] & ~BC_FLAGS_MASK);
+	}
+	JASSERT(jinfo->bc_stackinfo[def] & BC_COMPILED, "default in lookupswitch not compiled");
+	t2_bug_align(jinfo->codebuf);
+	branch_uncond_patch(jinfo->codebuf, out_loc(jinfo->codebuf), jinfo->bc_stackinfo[def] & ~BC_FLAGS_MASK);
+	codebuf->idx = oldidx;
+
+	bci = (unsigned)-1;
+	len = 0;
+
+	break;
+      }
+
+      case opc_tableswitch: {
+	int low, high, i;
+	unsigned w;
+	unsigned *table, *tablep;
+	unsigned nbci;
+	int def;
+	unsigned loc, table_loc;
+	unsigned r, rs;
+	unsigned oldidx;
+	unsigned negative_offsets, negative_branch_table;
+
+	nbci = bci & ~3;
+	w = *(unsigned int *)(code_base + nbci + 8);
+	low = (int)BYTESEX_REVERSE(w);
+	w = *(unsigned int *)(code_base + nbci + 12);
+	high = (int)BYTESEX_REVERSE(w);
+	w = *(unsigned int *)(code_base + nbci + 4);
+	def = bci + (int)BYTESEX_REVERSE(w);
+	table = (unsigned int *)(code_base + nbci + 16);
+
+	Thumb2_Fill(jinfo, 1);
+	rs = POP(jstack);
+	Thumb2_Flush(jinfo);
+	r = Thumb2_Tmp(jinfo, (1<<rs));
+	sub_imm(jinfo->codebuf, r, rs, low);
+	cmp_imm(jinfo->codebuf, r, (high-low)+1);
+	loc = 0;
+	if (jinfo->bc_stackinfo[def] & BC_COMPILED)
+	  branch(jinfo->codebuf, COND_CS, jinfo->bc_stackinfo[def] & ~BC_FLAGS_MASK);
+	else
+	  loc = forward_32(jinfo->codebuf);
+	tbh(jinfo->codebuf, ARM_PC, r);
+	table_loc = out_loc(jinfo->codebuf);
+	negative_offsets = 0;
+	for (i = low, tablep = table; i <= high; i++) {
+	  int offset;
+	  w = *tablep++;
+	  offset = (int)BYTESEX_REVERSE(w);
+	  if (offset < 0) negative_offsets++;
+	  out_16(jinfo->codebuf, 0);
+	}
+	negative_branch_table = out_loc(jinfo->codebuf);
+	for (i = 0; i < (int)negative_offsets; i++) {
+	  t2_bug_align(jinfo->codebuf);
+	  out_16x2(jinfo->codebuf, 0);
+	}
+
+	Thumb2_codegen(jinfo, bci+len);
+
+	if (loc) {
+	  JASSERT(jinfo->bc_stackinfo[def] & BC_COMPILED, "def not compiled in tableswitch");
+	  branch_patch(jinfo->codebuf, COND_CS, loc, jinfo->bc_stackinfo[def] & ~BC_FLAGS_MASK);
+	}
+
+	oldidx = codebuf->idx;
+	codebuf->idx = table_loc >> 1;
+	for (i = low, tablep = table; i <= high; i++) {
+	  unsigned dest;
+	  int offset;
+
+	  w = *tablep++;
+	  offset = (int)BYTESEX_REVERSE(w);
+	  dest = bci + offset;
+	  JASSERT(jinfo->bc_stackinfo[dest] & BC_COMPILED, "code not compiled");
+	  dest = jinfo->bc_stackinfo[dest] & ~BC_FLAGS_MASK;
+	  if (offset < 0) {
+	    unsigned oldidx;
+	    out_16(jinfo->codebuf, (negative_branch_table >> 1) - (table_loc >> 1));
+	    PATCH(negative_branch_table) {
+	      t2_bug_align(jinfo->codebuf);
+	      branch_uncond_patch(jinfo->codebuf, out_loc(jinfo->codebuf), dest);
+	      negative_branch_table = out_loc(jinfo->codebuf);
+	    } HCTAP;
+	  } else {
+	    JASSERT((dest & 1) == 0 && (table_loc & 1) == 0, "unaligned code");
+	    offset = (dest >> 1) - (table_loc >> 1);
+	    if (offset >= 65536) {
+	      longjmp(compiler_error_env, COMPILER_RESULT_FAILED);
+	    }
+	    out_16(jinfo->codebuf, offset);
+	  }
+	}
+	codebuf->idx = oldidx;
+	bci = (unsigned)-1;
+	len = 0;
+	break;
+      }
+
+      case opc_wide: {
+	unsigned local = GET_JAVA_U2(code_base + bci + 2);
+	opcode = code_base[bci+1];
+	if (opcode == opc_iinc) {
+	  int constant = GET_JAVA_S2(code_base + bci + 4);
+	  unsigned r = jinfo->jregs->r_local[local];
+	  
+	  if (!r) {
+	    int nlocals = jinfo->method->max_locals();
+	    r = ARM_IP;
+	    stackdepth -= jstack->depth;
+	    load_local(jinfo, r, local, stackdepth);
+	    add_imm(jinfo->codebuf, r, r, constant);
+	    store_local(jinfo, r, local, stackdepth);
+	  } else {
+	    Thumb2_Corrupt(jinfo, r, 0);
+	    add_imm(jinfo->codebuf, r, r, constant);
+	  }
+	} else if (opcode == opc_ret) {
+	  Thumb2_Exit(jinfo, H_RET, bci, stackdepth);
+	} else {
+	  if (opcode == opc_iload ||
+	  	opcode == opc_fload || opcode == opc_aload)
+	    Thumb2_Load(jinfo, local, stackdepth);
+	  else if (opcode == opc_lload || opcode == opc_dload)
+	    Thumb2_LoadX2(jinfo, local, stackdepth);
+	  else if (opcode == opc_istore ||
+	  	opcode == opc_fstore || opcode == opc_astore)
+	    Thumb2_Store(jinfo, local, stackdepth);
+	  else if (opcode == opc_lstore || opcode == opc_dstore)
+	    Thumb2_StoreX2(jinfo, local, stackdepth);
+	  else fatal(err_msg("Undefined wide opcode %d\n", opcode));
+	}
+	break;
+      }
+
+      default:
+	JASSERT(0, "unknown bytecode");
+	break;
+    }
+    bci += len;
+#ifdef T2_PRINT_DISASS
+    if (len == 0) {
+      if (start_idx == jinfo->codebuf->idx) start_bci[start_idx] = -1;
+    } else
+      end_bci[start_idx] = bci;
+#endif
+  }
+}
+
+#define BEG_BCI_OFFSET		0
+#define END_BCI_OFFSET		1
+#define HANDLER_BCI_OFFSET	2
+#define KLASS_INDEX_OFFSET	3
+#define ENTRY_SIZE		4
+
+// Map a compiled-code return address (lr) back to a bytecode index by
+// searching every compiled method's packed exception table for the
+// innermost protected region covering lr.  If regs is non-NULL, the
+// register-cached locals (per cmethod->regusage) are written back into the
+// interpreter locals array so the interpreter sees current values.
+// Returns the begin-bci of the chosen entry, or -1 if lr lies in no
+// compiled method's exception region.
+extern "C" int Thumb2_lr_to_bci(unsigned lr, methodOop method, Reg *regs, unsigned *locals)
+{
+  Compiled_Method *cmethod = compiled_method_list;
+  typeArrayOop table = method->exception_table();
+  int length = table->length();
+
+  while (cmethod) {
+    unsigned *exception_table = cmethod->exception_table;
+    if (exception_table) {
+      unsigned code_base = (unsigned)cmethod;
+      // The packed exception table is emitted immediately after the code,
+      // so [code_base, exception_table] bounds this method's code region.
+      if (code_base <= lr && lr <= (unsigned)exception_table) {
+	int exception_index = -1;
+	unsigned exception_found = 0;
+
+	for (int i = 0; i < length; i += ENTRY_SIZE) {
+	  unsigned offsets = *exception_table++;
+	  unsigned exc_beg = code_base + ((offsets >> 16) << 1);
+	  unsigned exc_end = code_base + ((offsets & 0xffff) << 1);
+
+	  if (exc_beg <= lr && lr <= exc_end) {
+	    if (exc_beg > exception_found) {
+	      // With nested try catch blocks, choose the most deeply nested
+	      exception_found = exc_beg;
+	      exception_index = i;
+	    }
+	  }
+	}
+	// Decide only after scanning ALL entries.  The previous code
+	// returned from inside the loop on the first match, which defeated
+	// the most-deeply-nested selection above.
+	if (exception_index >= 0) {
+	  if (regs) {
+	    for (unsigned i = 0; i < PREGS; i++) {
+	      int local = cmethod->regusage[i];
+	      if (local >= 0) {
+		// Locals are addressed downwards from the locals base.
+		locals[-local] = regs[i];
+	      }
+	    }
+	  }
+	  return table->int_at(exception_index + BEG_BCI_OFFSET);
+	}
+      }
+    }
+    cmethod = cmethod->next;
+  }
+  return -1;
+}
+
+// Emit the compiled-code exception table for cmethod into the code buffer:
+// one 32-bit word per handler entry, packing the halfword-scaled code
+// offsets of the protected region's begin (high 16 bits) and end (low 16
+// bits).  Offsets outside 16 bits, or an uncompiled begin bci, abort the
+// compilation via longjmp.
+void Thumb2_generate_exception_table(Compiled_Method *cmethod, Thumb2_Info *jinfo)
+{
+  methodOop method = jinfo->method;
+  typeArrayOop table = method->exception_table();
+  int length = table->length();
+  unsigned *bc_stackinfo = jinfo->bc_stackinfo;
+
+  cmethod->exception_table = (unsigned *)out_pos(jinfo->codebuf);
+  for (int i = 0; i < length; i += ENTRY_SIZE) {
+    int beg_bci = table->int_at(i + BEG_BCI_OFFSET);
+    int end_bci = table->int_at(i + END_BCI_OFFSET);
+    unsigned stackinfo;
+    unsigned beg_offset, end_offset;
+
+    // bc_stackinfo maps bci -> (flags | code offset); strip the flag bits
+    // and convert the byte offset to halfwords.
+    stackinfo = bc_stackinfo[beg_bci];
+    beg_offset = (stackinfo & ~BC_FLAGS_MASK) >> 1;
+    stackinfo = bc_stackinfo[end_bci];
+    end_offset = (stackinfo & ~BC_FLAGS_MASK) >> 1;
+    // beg_offset == 0 means the region's start was never compiled; offsets
+    // >= 64K cannot be represented in the packed 16-bit encoding.
+    if (!(beg_offset != 0 && end_offset >= beg_offset && end_offset < 65536)) {
+	longjmp(compiler_error_env, COMPILER_RESULT_FAILED);
+    }
+    out_32(jinfo->codebuf, (beg_offset << 16) | (end_offset));
+  }
+}
+
+// Generate the OSR (on-stack replacement) table for cmethod, then the
+// exception table if the method has handlers.  The OSR table is a count
+// word followed by one word per back-branch target, packing the bci (high
+// 16 bits) and the halfword-scaled code offset (low 16 bits).
+void Thumb2_tablegen(Compiled_Method *cmethod, Thumb2_Info *jinfo)
+{
+  unsigned code_size = jinfo->code_size;
+  jubyte *code_base = jinfo->code_base;
+  unsigned *bc_stackinfo = jinfo->bc_stackinfo;
+  unsigned bci;
+  unsigned count = 0;
+  unsigned i;
+  CodeBuf *codebuf = jinfo->codebuf;
+
+  // Reserve the count slot; it is back-patched after the scan below.
+  cmethod->osr_table = (unsigned *)out_pos(jinfo->codebuf);
+  out_32(codebuf, 0);
+  // bci 0 always gets an entry so the method entry itself is OSR-able.
+  bc_stackinfo[0] |= BC_BACK_TARGET;
+  for (bci = 0; bci < code_size;) {
+    unsigned stackinfo = bc_stackinfo[bci];
+    unsigned bytecodeinfo;
+    unsigned opcode;
+
+    if (stackinfo & BC_BACK_TARGET) {
+      unsigned code_offset = (stackinfo & ~BC_FLAGS_MASK) >> 1;
+      JASSERT(stackinfo & BC_COMPILED, "back branch target not compiled???");
+      // Offset must fit the packed 16-bit field; otherwise abandon.
+      if (code_offset >= 65536) {
+	longjmp(compiler_error_env, COMPILER_RESULT_FAILED);
+      }
+//      JASSERT(code_offset < (1<<16), "oops, codesize too big");
+      out_32(codebuf, (bci << 16) | code_offset);
+      count++;
+    }
+
+    // Advance bci by the bytecode's length: fast path via the bcinfo
+    // table, slow path (variable-length bytecodes such as tableswitch /
+    // lookupswitch / wide) via the Bytecodes helpers.
+    opcode = code_base[bci];
+    bytecodeinfo = bcinfo[opcode];
+    if (!BCI_SPECIAL(bytecodeinfo)) {
+      bci += BCI_LEN(bytecodeinfo);
+      continue;
+    } else {
+      int len = Bytecodes::length_for((Bytecodes::Code)opcode);
+      if (len <= 0) {
+	Bytecodes::Code code = Bytecodes::code_at(NULL, (address)(code_base+bci));
+	len = (Bytecodes::special_length_at
+	       (code,
+		(address)(code_base+bci), (address)(code_base+code_size)));
+      }
+      bci += len;
+    }
+  }
+  // Back-patch the entry count reserved above.  NOTE(review): local `i`
+  // is declared but never used here.
+  *cmethod->osr_table = count;
+  if (jinfo->method->has_exception_handler())
+    Thumb2_generate_exception_table(cmethod, jinfo);
+}
+
+extern "C" void Thumb2_Clear_Cache(char *base, char *limit);
+#define IS_COMPILED(e, cb) ((e) >= (unsigned)(cb) && (e) < (unsigned)(cb) + (cb)->size)
+
+// Look up the OSR entry point for bytecode index bci in cmethod's osr
+// table (a count word followed by count words, each packing a bci in the
+// top halfword and a halfword-scaled code offset in the bottom halfword).
+// Returns the byte offset of the compiled entry, or 0 when bci has no
+// entry (or the method has no osr table at all).
+unsigned Thumb2_osr_from_bci(Compiled_Method *cmethod, unsigned bci)
+{
+  unsigned *entry = cmethod->osr_table;
+
+  if (!entry) return 0;
+  unsigned remaining = *entry++;
+  while (remaining--) {
+    unsigned packed = *entry++;
+
+    if ((packed >> 16) == bci)
+      return (packed & 0xffff) << 1;
+  }
+  return 0;
+}
+
+// Debug hook invoked on method entry from compiled code.  The body is
+// compiled out (#if 0); when enabled it traces callee/caller names and
+// dumps the Java expression stack if DebugSwitch is set.
+extern "C" void Debug_MethodEntry(interpreterState istate, intptr_t *stack, methodOop callee)
+{
+#if 0
+  if (DebugSwitch) {
+    methodOop method = istate->method();
+    tty->print("Entering ");
+    callee->print_short_name(tty);
+    tty->print(" from ");
+    method->print_short_name(tty);
+    tty->cr();
+    Debug_Stack(stack);
+    tty->flush();
+  }
+#endif
+}
+
+// Debug hook invoked on method exit from compiled code.  The body is
+// compiled out (#if 0); when enabled it reports only exceptional exits:
+// it returns early unless the thread has a pending exception, then prints
+// the method, the stack, and the exception's value string.
+extern "C" void Debug_MethodExit(interpreterState istate, intptr_t *stack)
+{
+#if 0
+  if (DebugSwitch) {
+    methodOop method = istate->method();
+    JavaThread *thread = istate->thread();
+    oop exc = thread->pending_exception();
+
+    if (!exc) return;
+    tty->print("Leaving ");
+    method->print_short_name(tty);
+    tty->cr();
+    Debug_Stack(stack);
+    tty->flush();
+    if (exc) tty->print_cr("Exception %s", exc->print_value_string());
+  }
+#endif
+}
+
+// Debug hook invoked at a call site in compiled code.  The body is
+// compiled out (#if 0); when enabled it traces callee/caller names and
+// dumps the Java expression stack if DebugSwitch is set.
+extern "C" void Debug_MethodCall(interpreterState istate, intptr_t *stack, methodOop callee)
+{
+#if 0
+  if (DebugSwitch) {
+    methodOop method = istate->method();
+    tty->print("Calling ");
+    callee->print_short_name(tty);
+    tty->print(" from ");
+    method->print_short_name(tty);
+    tty->cr();
+    Debug_Stack(stack);
+    tty->flush();
+  }
+#endif
+}
+extern "C" void Thumb2_Install(methodOop mh, u32 entry);
+
+extern "C" unsigned cmpxchg_ptr(unsigned new_value, volatile unsigned *ptr, unsigned cmp_value);
+static volatile unsigned compiling;
+static unsigned CompileCount = 0;
+static unsigned MaxCompile = 10000;
+
+#define COMPILE_ONLY	0
+#define COMPILE_COUNT	0
+#define DISASS_AFTER	0
+//#define COMPILE_LIST
+
+#ifdef COMPILE_LIST
+static const char *compile_list[] = {
+	0
+};
+#endif
+
+static unsigned compiled_methods = 0;
+
+#ifdef T2_PRINT_STATISTICS
+static unsigned bytecodes_compiled = 0;
+static unsigned arm_code_generated = 0;
+static clock_t total_compile_time = 0;
+#endif
+
+extern unsigned CPUInfo;
+
+// Top-level JIT entry: compile the method of the current interpreter frame
+// with the Thumb-2 code generator and return a packed 64-bit
+// { compiled entrypoint for OSR at branch_pc, regusage pointer } pair
+// (via Thumb2_Entrypoint), or 0 when no compiled entry is available
+// (method not compilable, too big, compiler busy, or no OSR entry at
+// branch_pc).
+extern "C" unsigned long long Thumb2_Compile(JavaThread *thread, unsigned branch_pc)
+{
+  HandleMark __hm(thread);
+  frame fr = thread->last_frame();
+  methodOop method = fr.interpreter_frame_method();
+  Symbol *name = method->name();
+  Symbol *sig = method->signature();
+  const jbyte *base = sig->base();
+
+  jubyte *code_base = (jubyte *)method->code_base();
+  int code_size = method->code_size();
+  InvocationCounter* ic = method->invocation_counter();
+  InvocationCounter* bc = method->backedge_counter();
+  Thumb2_Info jinfo_str;
+  CodeBuf codebuf_str;
+  Thumb2_Stack jstack_str;
+  Thumb2_Registers jregs_str;
+  int idx;
+  u32 code_handle, slow_entry;
+  Thumb2_CodeBuf *cb = thumb2_codebuf;
+  int rc;
+  char *saved_hp;
+  Compiled_Method *cmethod;
+  u32 compiled_offset;
+  Thumb2_Entrypoint thumb_entry;
+  int compiled_accessor;
+
+  // No Thumb-2 support on this CPU: disable the compiler globally.
+  if (!(CPUInfo & ARCH_THUMB2))
+	UseCompiler = false;
+
+  {
+    bool ignore;
+    methodHandle mh(thread, method);
+    if (!UseCompiler || method->is_not_compilable()
+	|| CompilerOracle::should_exclude(mh, ignore)) {
+      // Reset the counters so we do not keep re-triggering compilation
+      // attempts for a method we will never compile.
+      ic->set(ic->state(), 1);
+      bc->set(ic->state(), 1);
+      return 0;
+    }
+  }
+
+  // Fast path: already compiled - just look up the OSR entry for branch_pc.
+  slow_entry = *(unsigned *)method->from_interpreted_entry();
+  if (IS_COMPILED(slow_entry, cb)) {
+    cmethod = (Compiled_Method *)(slow_entry & ~TBIT);
+    compiled_offset = Thumb2_osr_from_bci(cmethod, branch_pc);
+    if (compiled_offset == 0) return 0;
+    thumb_entry.compiled_entrypoint = slow_entry + compiled_offset;
+    thumb_entry.regusage = cmethod->regusage;
+    return *(unsigned long long *)&thumb_entry;
+  }
+
+  ic->decay();
+  bc->decay();
+
+  // Dont compile anything with code size >= 32K.
+  // We rely on the bytecode index fitting in 16 bits
+  //
+  // Dont compile anything with max stack + maxlocal > 1K
+  // The range of an LDR in T2 is -4092..4092
+  // Othersize we have difficulty access the locals from the stack pointer
+  //
+  if (code_size > THUMB2_MAX_BYTECODE_SIZE ||
+		(method->max_locals() + method->max_stack()) >= 1000) {
+        method->set_not_compilable();
+	return 0;
+  }
+
+  // Developer throttles: cap on number of compilations, or restrict to a
+  // single named method.  NOTE(review): COMPILE_ONLY is defined as 0, so
+  // the strcmp branch below is dead unless that macro is redefined.
+  if (COMPILE_COUNT && compiled_methods == COMPILE_COUNT) return 0;
+
+  if (COMPILE_ONLY) {
+    if (strcmp(name->as_C_string(), COMPILE_ONLY) != 0) return 0;
+  }
+
+#ifdef COMPILE_LIST
+  {
+	const char **argv = compile_list;
+	const char *s;
+	while (s = *argv++) {
+		if (strcmp(s, method->name_and_sig_as_C_string()) == 0)
+			break;
+	}
+	if (!s) {
+		method->set_not_compilable();
+		return 0;
+	}
+  }
+#endif
+
+  // Error recovery point: the code generator aborts via
+  // longjmp(compiler_error_env, rc).  Roll back the code heap pointer and
+  // release the compile lock.  (Intentional assignment in the condition.)
+  saved_hp = cb->hp;
+  if (rc = setjmp(compiler_error_env)) {
+    cb->hp = saved_hp;
+    if (rc == COMPILER_RESULT_FAILED)
+        method->set_not_compilable();
+    if (rc == COMPILER_RESULT_FATAL)
+	UseCompiler = false;
+    compiling = 0;
+    return 0;
+  }
+
+  // Single-compiler lock: give up (rather than block) if another thread
+  // is already compiling.
+  if (cmpxchg_ptr(1, &compiling, 0)) return 0;
+
+#ifdef T2_PRINT_STATISTICS
+  clock_t compile_time = clock();
+#endif
+
+#ifdef T2_PRINT_COMPILATION
+  if (PrintCompilation || PrintAssembly) {
+    fprintf(stderr, "Compiling %d %c%c %s\n",
+	compiled_methods,
+	method->is_synchronized() ? 'S' : ' ',
+	method->has_exception_handler() ? 'E' : ' ',
+	method->name_and_sig_as_C_string());
+  }
+#endif
+
+  // Clear the per-compilation scratch arrays (file-scope buffers).
+  memset(bc_stackinfo, 0, code_size * sizeof(unsigned));
+  memset(locals_info, 0, method->max_locals() * sizeof(unsigned));
+#ifdef T2_PRINT_DISASS
+  memset(start_bci, 0xff, sizeof(start_bci));
+  memset(end_bci, 0xff, sizeof(end_bci));
+#endif
+
+#ifdef THUMB2_JVMTI
+  address_bci_map_reset(thread);
+#endif // THUMB2_JVMTI
+
+  // Populate the compilation context shared by all codegen phases.
+  jinfo_str.thread = thread;
+  jinfo_str.method = method;
+  jinfo_str.code_base = code_base;
+  jinfo_str.code_size = code_size;
+  jinfo_str.bc_stackinfo = bc_stackinfo;
+  jinfo_str.locals_info = locals_info;
+  jinfo_str.compiled_return = 0;
+  for (int i = 0; i < 12; i++) jinfo_str.compiled_word_return[i] = 0;
+  jinfo_str.is_leaf = 1;
+  jinfo_str.use_istate = method->has_monitor_bytecodes();
+
+  Thumb2_local_info_from_sig(&jinfo_str, method, base);
+
+  // Pass 1: analyse bytecodes (stack depths, branch targets, locals usage).
+  Thumb2_pass1(&jinfo_str, 0, 0);
+
+  codebuf_str.codebuf = (unsigned short *)cb->hp;
+  codebuf_str.idx = 0;
+  codebuf_str.limit = (unsigned short *)cb->sp - (unsigned short *)cb->hp;
+
+  jstack_str.stack = stack;
+  jstack_str.depth = 0;
+
+  memset(r_local, 0, method->max_locals() * sizeof(unsigned));
+
+  jregs_str.r_local = r_local;
+
+  jinfo_str.codebuf = &codebuf_str;
+  jinfo_str.jstack = &jstack_str;
+  jinfo_str.jregs = &jregs_str;
+
+  jregs_str.pregs[0] = JAZ_V1;
+  jregs_str.pregs[1] = JAZ_V2;
+  jregs_str.pregs[2] = JAZ_V3;
+  jregs_str.pregs[3] = JAZ_V4;
+  jregs_str.pregs[4] = JAZ_V5;
+  jregs_str.pregs[5] = JAZ_V6;
+
+  jregs_str.npregs = PREGS;
+
+  Thumb2_RegAlloc(&jinfo_str);
+
+  // Place the Compiled_Method header; the slow entrypoint address carries
+  // the Thumb bit (TBIT).
+  slow_entry = out_align_offset(&codebuf_str, CODE_ALIGN, SLOW_ENTRY_OFFSET);
+  cmethod = (Compiled_Method *)slow_entry;
+  slow_entry |= TBIT;
+
+  // Commit the header and restart the code buffer for the body.
+  cb->hp += codebuf_str.idx * 2;
+  codebuf_str.codebuf = (unsigned short *)cb->hp;
+  codebuf_str.idx = 0;
+  codebuf_str.limit = (unsigned short *)cb->sp - (unsigned short *)cb->hp;
+
+  // Trivial accessors get a specialised stub; everything else goes through
+  // the full prologue + bytecode code generator.
+  compiled_accessor = 1;
+  if (!method->is_accessor() || !Thumb2_Accessor(&jinfo_str)) {
+    Thumb2_Enter(&jinfo_str);
+    Thumb2_codegen(&jinfo_str, 0);
+    compiled_accessor = 0;
+  }
+
+#ifdef T2_PRINT_DISASS
+  if (DISASS_AFTER == 0 || compiled_methods >= DISASS_AFTER)
+    if (PrintAssembly)
+      Thumb2_disass(&jinfo_str);
+#endif
+
+  for (int i = 0; i < PREGS; i++)
+    cmethod->regusage[i] = jregs_str.mapping[i];
+
+  // Flush I/D caches over the freshly generated code.
+  Thumb2_Clear_Cache(cb->hp, cb->hp + codebuf_str.idx * 2);
+
+#ifdef T2_PRINT_STATISTICS
+  compile_time = clock() - compile_time;
+  total_compile_time += compile_time;
+
+  if (t2_print_statistics) {
+    unsigned codegen = codebuf_str.idx * 2;
+    bytecodes_compiled += code_size;
+    arm_code_generated += codegen;
+    fprintf(stderr, "%d bytecodes => %d bytes code in %.2f sec, totals: %d => %d in %.2f sec\n",
+      code_size, codegen, (double)compile_time/(double)CLOCKS_PER_SEC,
+    bytecodes_compiled, arm_code_generated, (double)total_compile_time/(double)CLOCKS_PER_SEC);
+  }
+#endif
+
+  // The code handle slot stores the slow entrypoint; OSR/exception tables
+  // follow (non-accessor methods only).
+  code_handle = out_align(&codebuf_str, sizeof(address));
+
+  out_32(&codebuf_str, slow_entry);
+
+  if (!compiled_accessor)
+    Thumb2_tablegen(cmethod, &jinfo_str);
+
+  cb->hp += codebuf_str.idx * 2;
+
+  // Link the new method onto the global compiled-method list and install
+  // its entrypoints on the methodOop.
+  *compiled_method_list_tail_ptr = cmethod;
+  compiled_method_list_tail_ptr = &(cmethod->next);
+
+  Thumb2_Install(method, code_handle);
+
+  compiled_methods++;
+
+  // Release the compile lock before computing the OSR result.
+  compiling = 0;
+
+  compiled_offset = Thumb2_osr_from_bci(cmethod, branch_pc);
+  if (compiled_offset == 0) return 0;
+  thumb_entry.compiled_entrypoint = slow_entry + compiled_offset;
+  thumb_entry.regusage = cmethod->regusage;
+
+#ifdef THUMB2_JVMTI
+  {
+    // we need to dispatch a compiled_method_load event
+    // to all registered Jvmti agents
+
+    // notify the whole generated code region for this Java method
+    // from slow_entry through to the end of the osr table. some
+    // of it is data not code but that's not a problem.
+
+    const void *gen_code_start = (const void *)(slow_entry ^ TBIT);
+    unsigned gen_code_size = codebuf_str.idx * 2;
+
+    // address_bci_map translates start addresses for generated code
+    // sections to bytecode indices and contains address_bci_map_length
+    // entries
+
+    // the final compile_info argument is supposed to contain
+    // information about inlined code. we can supply NULL for now -
+    // oprofile doesn't use it anyway
+
+    void *compile_info = NULL;
+
+    // transition from in Java to in VM before calling into Jvmti
+    ThreadInVMfromJava transition(thread);
+
+    JvmtiExport::post_compiled_method_load(method, gen_code_size,
+		gen_code_start, address_bci_map_length,
+		address_bci_map, NULL);
+  }
+#endif // THUMB2_JVMTI
+
+  return *(unsigned long long *)&thumb_entry;
+}
+
+extern "C" void Thumb2_DivZero_Handler(void);
+extern "C" void Thumb2_ArrayBounds_Handler(void);
+extern "C" void Thumb2_Handle_Exception(void);
+extern "C" void Thumb2_Handle_Exception_NoRegs(void);
+extern "C" void Thumb2_Exit_To_Interpreter(void);
+extern "C" void Thumb2_Stack_Overflow(void);
+extern "C" void Thumb2_monitorenter(void);
+
+extern "C" void __divsi3(void);
+extern "C" void __aeabi_ldivmod(void);
+extern "C" void __aeabi_i2f(void);
+extern "C" void __aeabi_i2d(void);
+extern "C" void __aeabi_l2f(void);
+extern "C" void __aeabi_l2d(void);
+extern "C" void __aeabi_f2d(void);
+extern "C" void __aeabi_d2f(void);
+extern "C" void Helper_new(void);
+extern "C" void Helper_instanceof(void);
+extern "C" void Helper_checkcast(void);
+extern "C" void Helper_monitorexit(void);
+extern "C" void Helper_aastore(void);
+extern "C" void Helper_aputfield(void);
+extern "C" void Helper_synchronized_enter(void);
+extern "C" void Helper_synchronized_exit(void);
+extern "C" void Helper_SafePoint(void);
+
+extern "C" void _ZN13SharedRuntime3f2iEf(void);
+extern "C" void _ZN13SharedRuntime3f2lEf(void);
+extern "C" void _ZN13SharedRuntime3d2iEd(void);
+extern "C" void _ZN13SharedRuntime3d2lEd(void);
+extern "C" void _ZN18InterpreterRuntime8newarrayEP10JavaThread9BasicTypei(void);
+extern "C" void _ZN18InterpreterRuntime9anewarrayEP10JavaThreadP19constantPoolOopDescii(void);
+extern "C" void _ZN18InterpreterRuntime14multianewarrayEP10JavaThreadPi(void);
+extern "C" void _ZN18InterpreterRuntime3ldcEP10JavaThreadb(void);
+
+extern char Thumb2_stubs[];
+extern char Thumb2_stubs_end[];
+extern char Thumb2_idiv_stub[];
+extern char Thumb2_irem_stub[];
+extern char Thumb2_invokeinterface_stub[];
+extern char Thumb2_invokevirtual_stub[];
+extern char Thumb2_invokestatic_stub[];
+extern char Thumb2_invokespecial_stub[];
+extern char Thumb2_getfield_word_stub[];
+extern char Thumb2_getfield_sh_stub[];
+extern char Thumb2_getfield_h_stub[];
+extern char Thumb2_getfield_sb_stub[];
+extern char Thumb2_getfield_dw_stub[];
+extern char Thumb2_putfield_word_stub[];
+extern char Thumb2_putfield_h_stub[];
+extern char Thumb2_putfield_b_stub[];
+extern char Thumb2_putfield_a_stub[];
+extern char Thumb2_putfield_dw_stub[];
+extern char Thumb2_getstatic_word_stub[];
+extern char Thumb2_getstatic_sh_stub[];
+extern char Thumb2_getstatic_h_stub[];
+extern char Thumb2_getstatic_sb_stub[];
+extern char Thumb2_getstatic_dw_stub[];
+extern char Thumb2_putstatic_word_stub[];
+extern char Thumb2_putstatic_h_stub[];
+extern char Thumb2_putstatic_b_stub[];
+extern char Thumb2_putstatic_a_stub[];
+extern char Thumb2_putstatic_dw_stub[];
+
+extern char Thumb2_invokestaticresolved_stub[];
+extern char Thumb2_invokespecialresolved_stub[];
+extern char Thumb2_invokevirtualresolved_stub[];
+extern char Thumb2_invokevfinalresolved_stub[];
+
+#define STUBS_SIZE	(Thumb2_stubs_end-Thumb2_stubs)
+#define IDIV_STUB		(Thumb2_idiv_stub-Thumb2_stubs)
+#define IREM_STUB		(Thumb2_irem_stub-Thumb2_stubs)
+#define INVOKEINTERFACE_STUB	(Thumb2_invokeinterface_stub-Thumb2_stubs)
+#define INVOKEVIRTUAL_STUB	(Thumb2_invokevirtual_stub-Thumb2_stubs)
+#define INVOKESTATIC_STUB	(Thumb2_invokestatic_stub-Thumb2_stubs)
+#define INVOKESPECIAL_STUB	(Thumb2_invokespecial_stub-Thumb2_stubs)
+#define GETFIELD_WORD_STUB	(Thumb2_getfield_word_stub-Thumb2_stubs)
+#define GETFIELD_SH_STUB	(Thumb2_getfield_sh_stub-Thumb2_stubs)
+#define GETFIELD_H_STUB		(Thumb2_getfield_h_stub-Thumb2_stubs)
+#define GETFIELD_SB_STUB	(Thumb2_getfield_sb_stub-Thumb2_stubs)
+#define GETFIELD_DW_STUB	(Thumb2_getfield_dw_stub-Thumb2_stubs)
+#define PUTFIELD_WORD_STUB	(Thumb2_putfield_word_stub-Thumb2_stubs)
+#define PUTFIELD_H_STUB		(Thumb2_putfield_h_stub-Thumb2_stubs)
+#define PUTFIELD_B_STUB		(Thumb2_putfield_b_stub-Thumb2_stubs)
+#define PUTFIELD_A_STUB		(Thumb2_putfield_a_stub-Thumb2_stubs)
+#define PUTFIELD_DW_STUB	(Thumb2_putfield_dw_stub-Thumb2_stubs)
+#define GETSTATIC_WORD_STUB	(Thumb2_getstatic_word_stub-Thumb2_stubs)
+#define GETSTATIC_SH_STUB	(Thumb2_getstatic_sh_stub-Thumb2_stubs)
+#define GETSTATIC_H_STUB	(Thumb2_getstatic_h_stub-Thumb2_stubs)
+#define GETSTATIC_SB_STUB	(Thumb2_getstatic_sb_stub-Thumb2_stubs)
+#define GETSTATIC_DW_STUB	(Thumb2_getstatic_dw_stub-Thumb2_stubs)
+#define PUTSTATIC_WORD_STUB	(Thumb2_putstatic_word_stub-Thumb2_stubs)
+#define PUTSTATIC_H_STUB	(Thumb2_putstatic_h_stub-Thumb2_stubs)
+#define PUTSTATIC_B_STUB	(Thumb2_putstatic_b_stub-Thumb2_stubs)
+#define PUTSTATIC_A_STUB	(Thumb2_putstatic_a_stub-Thumb2_stubs)
+#define PUTSTATIC_DW_STUB	(Thumb2_putstatic_dw_stub-Thumb2_stubs)
+
+#define INVOKESTATIC_RESOLVED_STUB (Thumb2_invokestaticresolved_stub-Thumb2_stubs)
+#define INVOKESPECIAL_RESOLVED_STUB (Thumb2_invokespecialresolved_stub-Thumb2_stubs)
+#define INVOKEVIRTUAL_RESOLVED_STUB (Thumb2_invokevirtualresolved_stub-Thumb2_stubs)
+#define INVOKEVFINAL_RESOLVED_STUB (Thumb2_invokevfinalresolved_stub-Thumb2_stubs)
+
+extern "C" void Thumb2_NullPtr_Handler(void);
+
+
+extern "C" int Thumb2_Check_Null(unsigned *regs, unsigned pc)
+{
+  Thumb2_CodeBuf *cb = thumb2_codebuf;
+  if (!(CPUInfo & ARCH_THUMB2)) return 0;
+  if (IS_COMPILED(pc, cb)) {
+    regs[ARM_LR] = pc;
+    regs[ARM_PC] = (unsigned)Thumb2_NullPtr_Handler;
+    regs[ARM_CPSR] &= ~CPSR_THUMB_BIT;
+    return 1;
+  }
+  return 0;
+}
+
+extern "C" void Thumb2_Initialize(void)
+{
+  CodeBuf codebuf;
+  Thumb2_CodeBuf *cb;
+  u32 h_divzero;
+  u32 loc_irem, loc_idiv, loc_ldiv;
+  int rc;
+
+  if (!(CPUInfo & ARCH_THUMB2)) {
+    UseCompiler = false;
+    return;
+  }
+
+#ifdef T2_PRINT_COMPILATION
+  PrintCompilation |= getenv("T2_PRINT_COMPILATION") != NULL;
+#endif
+#ifdef T2_PRINT_STATISTICS
+  t2_print_statistics = getenv("T2_PRINT_STATISTICS");
+#endif
+#ifdef T2_PRINT_DISASS
+  PrintAssembly |= getenv("T2_PRINT_DISASS") != NULL;
+#endif
+#ifdef T2_PRINT_REGUSAGE
+  t2_print_regusage = getenv("T2_PRINT_REGUSAGE");
+#endif
+
+  cb = (Thumb2_CodeBuf *)mmap(0, THUMB2_CODEBUF_SIZE, PROT_EXEC|PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
+  if (cb == MAP_FAILED) {
+    UseCompiler = false;
+    return;
+  }
+
+  cb->size = THUMB2_CODEBUF_SIZE;
+  cb->hp = (char *)cb + sizeof(Thumb2_CodeBuf);
+  cb->sp = (char *)cb + THUMB2_CODEBUF_SIZE;
+
+  codebuf.codebuf = (unsigned short *)cb->hp;
+  codebuf.idx = 0;
+  codebuf.limit = (unsigned short *)cb->sp - (unsigned short *)cb->hp;
+
+  if (rc = setjmp(compiler_error_env)) {
+    UseCompiler = false;
+    return;
+  }
+
+#ifdef THUMB2_JVMTI
+  // cache the start of the generated stub region for notification later
+  stub_gen_code_start = cb->hp;
+#endif // THUMB2_JVMTI
+
+  memcpy(cb->hp, Thumb2_stubs, STUBS_SIZE);
+
+  // fprintf(stderr, "Thumb2_stubs offset: 0x%x\n",
+  // 	  (char*)(cb->hp) - (char*)Thumb2_stubs);
+
+  handlers[H_IDIV] = (unsigned)(cb->hp + IDIV_STUB);
+  handlers[H_IREM] = (unsigned)(cb->hp + IREM_STUB);
+  handlers[H_INVOKEINTERFACE] = (unsigned)(cb->hp + INVOKEINTERFACE_STUB);
+  handlers[H_INVOKEVIRTUAL] = (unsigned)(cb->hp + INVOKEVIRTUAL_STUB);
+  handlers[H_INVOKESTATIC] = (unsigned)(cb->hp + INVOKESTATIC_STUB);
+  handlers[H_INVOKESPECIAL] = (unsigned)(cb->hp + INVOKESPECIAL_STUB);
+
+  handlers[H_GETFIELD_WORD] = (unsigned)(cb->hp + GETFIELD_WORD_STUB);
+  handlers[H_GETFIELD_SH] = (unsigned)(cb->hp + GETFIELD_SH_STUB);
+  handlers[H_GETFIELD_H] = (unsigned)(cb->hp + GETFIELD_H_STUB);
+  handlers[H_GETFIELD_SB] = (unsigned)(cb->hp + GETFIELD_SB_STUB);
+  handlers[H_GETFIELD_DW] = (unsigned)(cb->hp + GETFIELD_DW_STUB);
+
+  handlers[H_INVOKESTATIC_RESOLVED] = (unsigned)(cb->hp + INVOKESTATIC_RESOLVED_STUB);
+  handlers[H_INVOKESPECIAL_RESOLVED] = (unsigned)(cb->hp + INVOKESPECIAL_RESOLVED_STUB);
+  handlers[H_INVOKEVIRTUAL_RESOLVED] = (unsigned)(cb->hp + INVOKEVIRTUAL_RESOLVED_STUB);
+  handlers[H_INVOKEVFINAL] = (unsigned)(cb->hp + INVOKEVFINAL_RESOLVED_STUB);
+
+  handlers[H_PUTFIELD_WORD] = (unsigned)(cb->hp + PUTFIELD_WORD_STUB);
+  handlers[H_PUTFIELD_H] = (unsigned)(cb->hp + PUTFIELD_H_STUB);
+  handlers[H_PUTFIELD_B] = (unsigned)(cb->hp + PUTFIELD_B_STUB);
+  handlers[H_PUTFIELD_A] = (unsigned)(cb->hp + PUTFIELD_A_STUB);
+  handlers[H_PUTFIELD_DW] = (unsigned)(cb->hp + PUTFIELD_DW_STUB);
+
+  handlers[H_GETSTATIC_WORD] = (unsigned)(cb->hp + GETSTATIC_WORD_STUB);
+  handlers[H_GETSTATIC_SH] = (unsigned)(cb->hp + GETSTATIC_SH_STUB);
+  handlers[H_GETSTATIC_H] = (unsigned)(cb->hp + GETSTATIC_H_STUB);
+  handlers[H_GETSTATIC_SB] = (unsigned)(cb->hp + GETSTATIC_SB_STUB);
+  handlers[H_GETSTATIC_DW] = (unsigned)(cb->hp + GETSTATIC_DW_STUB);
+
+  handlers[H_PUTSTATIC_WORD] = (unsigned)(cb->hp + PUTSTATIC_WORD_STUB);
+  handlers[H_PUTSTATIC_H] = (unsigned)(cb->hp + PUTSTATIC_H_STUB);
+  handlers[H_PUTSTATIC_B] = (unsigned)(cb->hp + PUTSTATIC_B_STUB);
+  handlers[H_PUTSTATIC_A] = (unsigned)(cb->hp + PUTSTATIC_A_STUB);
+  handlers[H_PUTSTATIC_DW] = (unsigned)(cb->hp + PUTSTATIC_DW_STUB);
+
+  codebuf.idx += (Thumb2_stubs_end-Thumb2_stubs) >> 1;
+
+  // Disassemble the codebuf we just created.  For debugging.  This
+  // first part is all ARM code; the part that we're about to create
+  // is Thumb code.
+  if (PrintAssembly) {
+    Hsdis hsdis;
+    hsdis.decode_instructions(cb->hp, cb->hp + codebuf.idx * 2,
+			      print_address, NULL, NULL, stderr,
+			      "");
+    fputc('\n', stderr);
+  }
+
+  char *begin_thumb_code = cb->hp + codebuf.idx * 2;
+
+  handlers[H_LDIV] = handlers[H_LREM] = out_pos(&codebuf);
+  dop_reg(&codebuf, DP_ORR, ARM_IP, ARM_R2, ARM_R3, 0, 0);
+  loc_ldiv = forward_16(&codebuf);
+  mov_imm(&codebuf, ARM_IP, (u32)__aeabi_ldivmod);
+  mov_reg(&codebuf, ARM_PC, ARM_IP);
+  bcc_patch(&codebuf, COND_EQ, loc_ldiv);
+  mov_imm(&codebuf, ARM_IP, (u32)Thumb2_DivZero_Handler);
+  mov_reg(&codebuf, ARM_PC, ARM_IP);
+
+  handlers[H_ARRAYBOUND] = out_pos(&codebuf);
+  mov_imm(&codebuf, ARM_R3, (u32)Thumb2_ArrayBounds_Handler);
+  mov_reg(&codebuf, ARM_PC, ARM_R3);
+
+  handlers[H_HANDLE_EXCEPTION] = out_pos(&codebuf);
+  mov_imm(&codebuf, ARM_R3, (u32)Thumb2_Handle_Exception);
+  mov_reg(&codebuf, ARM_PC, ARM_R3);
+
+  handlers[H_HANDLE_EXCEPTION_NO_REGS] = out_pos(&codebuf);
+  mov_imm(&codebuf, ARM_R3, (u32)Thumb2_Handle_Exception_NoRegs);
+  mov_reg(&codebuf, ARM_PC, ARM_R3);
+
+  handlers[H_STACK_OVERFLOW] = out_pos(&codebuf);
+  mov_imm(&codebuf, ARM_R3, (u32)Thumb2_Stack_Overflow);
+  mov_reg(&codebuf, ARM_PC, ARM_R3);
+
+  handlers[H_DREM] = out_pos(&codebuf);
+  stm(&codebuf, (1<<ARM_LR), ARM_SP, PUSH_FD, 1);
+  mov_imm(&codebuf, ARM_IP, (u32)fmod);
+#ifdef __ARM_PCS_VFP
+  vmov_reg_d_toVFP(&codebuf, VFP_D0, ARM_R0, ARM_R1);
+  vmov_reg_d_toVFP(&codebuf, VFP_D1, ARM_R2, ARM_R3);
+#endif
+  blx_reg(&codebuf, ARM_IP);
+#ifdef __ARM_PCS_VFP
+  vmov_reg_d_toARM(&codebuf, ARM_R0, ARM_R1, VFP_D0);
+#endif
+  ldm(&codebuf, (1<<ARM_PC), ARM_SP, POP_FD, 1);
+
+  handlers[H_FREM] = out_pos(&codebuf);
+  stm(&codebuf, (1<<ARM_LR), ARM_SP, PUSH_FD, 1);
+  mov_imm(&codebuf, ARM_R3, (u32)fmodf);
+#ifdef __ARM_PCS_VFP
+  vmov_reg_s_toVFP(&codebuf, VFP_S0, ARM_R0);
+  vmov_reg_s_toVFP(&codebuf, VFP_S1, ARM_R1);
+#endif
+  blx_reg(&codebuf, ARM_R3);
+#ifdef __ARM_PCS_VFP
+  vmov_reg_s_toARM(&codebuf, ARM_R0, VFP_S0);
+#endif
+  ldm(&codebuf, (1<<ARM_PC), ARM_SP, POP_FD, 1);
+
+  handlers[H_I2F] = out_pos(&codebuf);
+  mov_imm(&codebuf, ARM_IP, (u32)__aeabi_i2f);
+  mov_reg(&codebuf, ARM_PC, ARM_IP);
+
+  handlers[H_I2D] = out_pos(&codebuf);
+  mov_imm(&codebuf, ARM_IP, (u32)__aeabi_i2d);
+  mov_reg(&codebuf, ARM_PC, ARM_IP);
+
+  handlers[H_L2F] = out_pos(&codebuf);
+  mov_imm(&codebuf, ARM_IP, (u32)__aeabi_l2f);
+  mov_reg(&codebuf, ARM_PC, ARM_IP);
+
+  handlers[H_L2D] = out_pos(&codebuf);
+  mov_imm(&codebuf, ARM_IP, (u32)__aeabi_l2d);
+  mov_reg(&codebuf, ARM_PC, ARM_IP);
+
+  handlers[H_F2I] = out_pos(&codebuf);
+  mov_imm(&codebuf, ARM_IP, (u32)_ZN13SharedRuntime3f2iEf);
+#ifdef __ARM_PCS_VFP
+  vmov_reg_s_toVFP(&codebuf, VFP_S0, ARM_R0);
+#endif
+  mov_reg(&codebuf, ARM_PC, ARM_IP);
+
+  handlers[H_F2L] = out_pos(&codebuf);
+  mov_imm(&codebuf, ARM_IP, (u32)_ZN13SharedRuntime3f2lEf);
+#ifdef __ARM_PCS_VFP
+  vmov_reg_s_toVFP(&codebuf, VFP_S0, ARM_R0);
+#endif
+  mov_reg(&codebuf, ARM_PC, ARM_IP);
+
+  handlers[H_F2D] = out_pos(&codebuf);
+  mov_imm(&codebuf, ARM_IP, (u32)__aeabi_f2d);
+  mov_reg(&codebuf, ARM_PC, ARM_IP);
+
+  handlers[H_D2I] = out_pos(&codebuf);
+  mov_imm(&codebuf, ARM_IP, (u32)_ZN13SharedRuntime3d2iEd);
+#ifdef __ARM_PCS_VFP
+  vmov_reg_d_toVFP(&codebuf, VFP_S0, ARM_R0, ARM_R1);
+#endif
+  mov_reg(&codebuf, ARM_PC, ARM_IP);
+
+  handlers[H_D2L] = out_pos(&codebuf);
+  mov_imm(&codebuf, ARM_IP, (u32)_ZN13SharedRuntime3d2lEd);
+#ifdef __ARM_PCS_VFP
+  vmov_reg_d_toVFP(&codebuf, VFP_S0, ARM_R0, ARM_R1);
+#endif
+  mov_reg(&codebuf, ARM_PC, ARM_IP);
+
+  handlers[H_D2F] = out_pos(&codebuf);
+  mov_imm(&codebuf, ARM_IP, (u32)__aeabi_d2f);
+  mov_reg(&codebuf, ARM_PC, ARM_IP);
+
+// NEW Stub
+//   r1 = index
+//   r3 = bci
+//   result -> R0, == 0 => exception
+  handlers[H_NEW] = out_pos(&codebuf);
+  ldr_imm(&codebuf, ARM_R0, Rthread, THREAD_TOP_ZERO_FRAME, 1, 0);
+  sub_imm(&codebuf, ARM_R0, ARM_R0, ISTATE_NEXT_FRAME);
+  ldr_imm(&codebuf, ARM_R2, ARM_R0, ISTATE_METHOD, 1, 0);
+  mov_imm(&codebuf, ARM_IP, (u32)Helper_new);
+  ldr_imm(&codebuf, ARM_R2, ARM_R2, METHOD_CONSTMETHOD, 1, 0);
+  add_reg(&codebuf, ARM_R2, ARM_R2, ARM_R3);
+  sub_imm(&codebuf, ARM_R3, Rstack, 4);
+  str_imm(&codebuf, ARM_R3, ARM_R0, ISTATE_STACK, 1, 0);
+  str_imm(&codebuf, ARM_R2, ARM_R0, ISTATE_BCP, 1, 0);
+  mov_reg(&codebuf, ARM_PC, ARM_IP);
+
+// NEWARRAY Stub
+//   r1 = atype
+//   r2 = tos
+//   r3 = bci
+//   result -> thread->vm_result
+  handlers[H_NEWARRAY] = out_pos(&codebuf);
+  ldr_imm(&codebuf, ARM_IP, Rthread, THREAD_TOP_ZERO_FRAME, 1, 0);
+  ldr_imm(&codebuf, ARM_R0, ARM_IP, ISTATE_METHOD-ISTATE_NEXT_FRAME, 1, 0);
+  ldr_imm(&codebuf, ARM_R0, ARM_R0, METHOD_CONSTMETHOD, 1, 0);
+  add_reg(&codebuf, ARM_R3, ARM_R0, ARM_R3);
+  mov_reg(&codebuf, ARM_R0, Rthread);
+  str_imm(&codebuf, ARM_R3, ARM_IP, ISTATE_BCP-ISTATE_NEXT_FRAME, 1, 0);
+  sub_imm(&codebuf, ARM_R3, Rstack, 4);
+  str_imm(&codebuf, ARM_R3, ARM_IP, ISTATE_STACK-ISTATE_NEXT_FRAME, 1, 0);
+  mov_imm(&codebuf, ARM_IP, (u32)_ZN18InterpreterRuntime8newarrayEP10JavaThread9BasicTypei);
+  mov_reg(&codebuf, ARM_PC, ARM_IP);
+
+// ANEWARRAY Stub
+//   r0 = bci
+//   r2 = index
+//   r3 = tos
+//   result -> thread->vm_result
+  handlers[H_ANEWARRAY] = out_pos(&codebuf);
+  ldr_imm(&codebuf, ARM_IP, Rthread, THREAD_TOP_ZERO_FRAME, 1, 0);
+  ldr_imm(&codebuf, ARM_R1, ARM_IP, ISTATE_METHOD-ISTATE_NEXT_FRAME, 1, 0);
+  ldr_imm(&codebuf, ARM_R1, ARM_R1, METHOD_CONSTMETHOD, 1, 0);
+  add_reg(&codebuf, ARM_R0, ARM_R0, ARM_R1);
+  str_imm(&codebuf, ARM_R0, ARM_IP, ISTATE_BCP-ISTATE_NEXT_FRAME, 1, 0);
+
+  sub_imm(&codebuf, ARM_R1, Rstack, 4);
+  str_imm(&codebuf, ARM_R1, ARM_IP, ISTATE_STACK-ISTATE_NEXT_FRAME, 1, 0);
+
+  ldr_imm(&codebuf, ARM_R1, ARM_IP, ISTATE_METHOD-ISTATE_NEXT_FRAME, 1, 0);
+  ldr_imm(&codebuf, ARM_R1, ARM_R1, METHOD_CONSTANTS, 1, 0);
+  mov_imm(&codebuf, ARM_IP, (u32)_ZN18InterpreterRuntime9anewarrayEP10JavaThreadP19constantPoolOopDescii);
+  mov_reg(&codebuf, ARM_R0, Rthread);
+  mov_reg(&codebuf, ARM_PC, ARM_IP);
+
+// MULTIANEWARRAY Stub
+//   r0 = bci
+//   r1 = dimensions (*4)
+  handlers[H_MULTIANEWARRAY] = out_pos(&codebuf);
+  ldr_imm(&codebuf, ARM_IP, Rthread, THREAD_TOP_ZERO_FRAME, 1, 0);
+  ldr_imm(&codebuf, ARM_R2, ARM_IP, ISTATE_METHOD-ISTATE_NEXT_FRAME, 1, 0);
+  sub_imm(&codebuf, ARM_R3, Rstack, 4);
+  ldr_imm(&codebuf, ARM_R2, ARM_R2, METHOD_CONSTMETHOD, 1, 0);
+  str_imm(&codebuf, ARM_R3, ARM_IP, ISTATE_STACK-ISTATE_NEXT_FRAME, 1, 0);
+  add_reg(&codebuf, ARM_R0, ARM_R2, ARM_R0);
+  add_reg(&codebuf, Rstack, Rstack, ARM_R1);
+  mov_imm(&codebuf, ARM_R3, (u32)_ZN18InterpreterRuntime14multianewarrayEP10JavaThreadPi);
+  str_imm(&codebuf, ARM_R0, ARM_IP, ISTATE_BCP-ISTATE_NEXT_FRAME, 1, 0);
+  mov_reg(&codebuf, ARM_R0, Rthread);
+  sub_imm(&codebuf, ARM_R1, Rstack, 4);
+  mov_reg(&codebuf, ARM_PC, ARM_R3);
+
+// LDC Stub
+//   r0 = bci
+  handlers[H_LDC] = out_pos(&codebuf);
+  ldr_imm(&codebuf, ARM_IP, Rthread, THREAD_TOP_ZERO_FRAME, 1, 0);
+  ldr_imm(&codebuf, ARM_R2, ARM_IP, ISTATE_METHOD-ISTATE_NEXT_FRAME, 1, 0);
+  sub_imm(&codebuf, ARM_R3, Rstack, 4);
+  ldr_imm(&codebuf, ARM_R2, ARM_R2, METHOD_CONSTMETHOD, 1, 0);
+  str_imm(&codebuf, ARM_R3, ARM_IP, ISTATE_STACK-ISTATE_NEXT_FRAME, 1, 0);
+  add_reg(&codebuf, ARM_R0, ARM_R2, ARM_R0);
+  mov_imm(&codebuf, ARM_R3, (u32)_ZN18InterpreterRuntime3ldcEP10JavaThreadb);
+  str_imm(&codebuf, ARM_R0, ARM_IP, ISTATE_BCP-ISTATE_NEXT_FRAME, 1, 0);
+  mov_reg(&codebuf, ARM_R0, Rthread);
+  mov_imm(&codebuf, ARM_R1, 0);
+  mov_reg(&codebuf, ARM_PC, ARM_R3);
+
+// LDC_W Stub
+//   r0 = bci
+  handlers[H_LDC_W] = out_pos(&codebuf);
+  ldr_imm(&codebuf, ARM_IP, Rthread, THREAD_TOP_ZERO_FRAME, 1, 0);
+  ldr_imm(&codebuf, ARM_R2, ARM_IP, ISTATE_METHOD-ISTATE_NEXT_FRAME, 1, 0);
+  sub_imm(&codebuf, ARM_R3, Rstack, 4);
+  ldr_imm(&codebuf, ARM_R2, ARM_R2, METHOD_CONSTMETHOD, 1, 0);
+  str_imm(&codebuf, ARM_R3, ARM_IP, ISTATE_STACK-ISTATE_NEXT_FRAME, 1, 0);
+  add_reg(&codebuf, ARM_R0, ARM_R2, ARM_R0);
+  mov_imm(&codebuf, ARM_R3, (u32)_ZN18InterpreterRuntime3ldcEP10JavaThreadb);
+  str_imm(&codebuf, ARM_R0, ARM_IP, ISTATE_BCP-ISTATE_NEXT_FRAME, 1, 0);
+  mov_reg(&codebuf, ARM_R0, Rthread);
+  mov_imm(&codebuf, ARM_R1, 1);
+  mov_reg(&codebuf, ARM_PC, ARM_R3);
+
+// INSTANCEOF Stub
+//   r1 = index
+//   r2 = tos
+//   r3 = bci
+//   result -> R0, == -1 => exception
+  handlers[H_INSTANCEOF] = out_pos(&codebuf);
+  ldr_imm(&codebuf, ARM_R0, Rthread, THREAD_TOP_ZERO_FRAME, 1, 0);
+  sub_imm(&codebuf, ARM_R0, ARM_R0, ISTATE_NEXT_FRAME);
+  ldr_imm(&codebuf, ARM_IP, ARM_R0, ISTATE_METHOD, 1, 0);
+  ldr_imm(&codebuf, ARM_IP, ARM_IP, METHOD_CONSTMETHOD, 1, 0);
+  add_reg(&codebuf, ARM_R3, ARM_IP, ARM_R3);
+  str_imm(&codebuf, ARM_R3, ARM_R0, ISTATE_BCP, 1, 0);
+  sub_imm(&codebuf, ARM_R3, Rstack, 4);
+  str_imm(&codebuf, ARM_R3, ARM_R0, ISTATE_STACK, 1, 0);
+  mov_imm(&codebuf, ARM_IP, (u32)Helper_instanceof);
+  mov_reg(&codebuf, ARM_PC, ARM_IP);
+
+// CHECKCAST Stub
+//   r1 = index
+//   r2 = tos
+//   r3 = bci
+//   result -> R0, != 0 => exception
+  handlers[H_CHECKCAST] = out_pos(&codebuf);
+  ldr_imm(&codebuf, ARM_R0, Rthread, THREAD_TOP_ZERO_FRAME, 1, 0);
+  sub_imm(&codebuf, ARM_R0, ARM_R0, ISTATE_NEXT_FRAME);
+  ldr_imm(&codebuf, ARM_IP, ARM_R0, ISTATE_METHOD, 1, 0);
+  ldr_imm(&codebuf, ARM_IP, ARM_IP, METHOD_CONSTMETHOD, 1, 0);
+  add_reg(&codebuf, ARM_R3, ARM_IP, ARM_R3);
+  str_imm(&codebuf, ARM_R3, ARM_R0, ISTATE_BCP, 1, 0);
+  sub_imm(&codebuf, ARM_R3, Rstack, 4);
+  str_imm(&codebuf, ARM_R3, ARM_R0, ISTATE_STACK, 1, 0);
+  mov_imm(&codebuf, ARM_IP, (u32)Helper_checkcast);
+  mov_reg(&codebuf, ARM_PC, ARM_IP);
+
+// MONITORENTER
+//   r0 = bci
+  handlers[H_MONITORENTER] = out_pos(&codebuf);
+  ldr_imm(&codebuf, ARM_R2, Rthread, THREAD_TOP_ZERO_FRAME, 1, 0);
+  mov_imm(&codebuf, ARM_R3, (u32)Thumb2_monitorenter);
+  ldr_imm(&codebuf, ARM_R1, ARM_R2, ISTATE_METHOD-ISTATE_NEXT_FRAME, 1, 0);
+  ldr_imm(&codebuf, ARM_IP, ARM_R1, METHOD_CONSTMETHOD, 1, 0);
+  add_reg(&codebuf, Rint_jpc, ARM_IP, ARM_R0);
+  mov_reg(&codebuf, ARM_PC, ARM_R3);
+
+// MONITOREXIT Stub
+//   r1 = tos
+//   r3 = bci
+//   result -> R0, != 0 => exception
+  handlers[H_MONITOREXIT] = out_pos(&codebuf);
+  ldr_imm(&codebuf, ARM_R0, Rthread, THREAD_TOP_ZERO_FRAME, 1, 0);
+  sub_imm(&codebuf, ARM_R0, ARM_R0, ISTATE_NEXT_FRAME);
+  ldr_imm(&codebuf, ARM_IP, ARM_R0, ISTATE_METHOD, 1, 0);
+  ldr_imm(&codebuf, ARM_IP, ARM_IP, METHOD_CONSTMETHOD, 1, 0);
+  add_reg(&codebuf, ARM_R3, ARM_IP, ARM_R3);
+  str_imm(&codebuf, ARM_R3, ARM_R0, ISTATE_BCP, 1, 0);
+  sub_imm(&codebuf, ARM_R3, Rstack, 4);
+  str_imm(&codebuf, ARM_R3, ARM_R0, ISTATE_STACK, 1, 0);
+  mov_imm(&codebuf, ARM_IP, (u32)Helper_monitorexit);
+  mov_reg(&codebuf, ARM_PC, ARM_IP);
+
+// AASTORE Stub
+//   r0 = bci
+//   r1 = value
+//   r2 = index
+//   r3 = arrayref
+  handlers[H_AASTORE] = out_pos(&codebuf);
+  ldr_imm(&codebuf, ARM_IP, Rthread, THREAD_TOP_ZERO_FRAME, 1, 0);
+  ldr_imm(&codebuf, ARM_IP, ARM_IP, ISTATE_METHOD-ISTATE_NEXT_FRAME, 1, 0);
+  ldr_imm(&codebuf, ARM_IP, ARM_IP, METHOD_CONSTMETHOD, 1, 0);
+  add_reg(&codebuf, ARM_IP, ARM_IP, ARM_R0);
+  ldr_imm(&codebuf, ARM_R0, Rthread, THREAD_TOP_ZERO_FRAME, 1, 0);
+  sub_imm(&codebuf, ARM_R0, ARM_R0, ISTATE_NEXT_FRAME);
+  str_imm(&codebuf, ARM_IP, ARM_R0, ISTATE_BCP, 1, 0);
+  sub_imm(&codebuf, ARM_IP, Rstack, 4);
+  str_imm(&codebuf, ARM_IP, ARM_R0, ISTATE_STACK, 1, 0);
+  mov_imm(&codebuf, ARM_IP, (u32)Helper_aastore);
+  mov_reg(&codebuf, ARM_PC, ARM_IP);
+
+// APUTFIELD Stub
+//   r0 = obj
+  handlers[H_APUTFIELD] = out_pos(&codebuf);
+  mov_imm(&codebuf, ARM_R3, (u32)Helper_aputfield);
+  mov_reg(&codebuf, ARM_PC, ARM_R3);
+
+// SYNCHRONIZED_ENTER Stub
+//   r0 = bci
+//   Rstack = monitor
+  handlers[H_SYNCHRONIZED_ENTER] = out_pos(&codebuf);
+  ldr_imm(&codebuf, ARM_R1, Rthread, THREAD_TOP_ZERO_FRAME, 1, 0);
+  ldr_imm(&codebuf, ARM_R2, ARM_R1, ISTATE_METHOD-ISTATE_NEXT_FRAME, 1, 0);
+  ldr_imm(&codebuf, ARM_R2, ARM_R2, METHOD_CONSTMETHOD, 1, 0);
+  add_reg(&codebuf, ARM_R2, ARM_R2, ARM_R0);
+  str_imm(&codebuf, ARM_R2, ARM_R1, ISTATE_BCP-ISTATE_NEXT_FRAME, 1, 0);
+
+  sub_imm(&codebuf, ARM_R0, Rstack, 4);
+  str_imm(&codebuf, ARM_R0, ARM_R1, ISTATE_STACK-ISTATE_NEXT_FRAME, 1, 0);
+
+  mov_imm(&codebuf, ARM_IP, (u32)Helper_synchronized_enter);
+  mov_reg(&codebuf, ARM_R0, Rthread);
+  mov_reg(&codebuf, ARM_R1, Rstack);
+  mov_reg(&codebuf, ARM_PC, ARM_IP);
+
+//
+// SYNCHRONIZED_EXIT Stub
+//   r0 = bci
+//   r1 = monitor
+  handlers[H_SYNCHRONIZED_EXIT] = out_pos(&codebuf);
+  ldr_imm(&codebuf, ARM_R2, Rthread, THREAD_TOP_ZERO_FRAME, 1, 0);
+
+  ldr_imm(&codebuf, ARM_IP, ARM_R2, ISTATE_METHOD-ISTATE_NEXT_FRAME, 1, 0);
+  ldr_imm(&codebuf, ARM_IP, ARM_IP, METHOD_CONSTMETHOD, 1, 0);
+  add_reg(&codebuf, ARM_IP, ARM_IP, ARM_R0);
+  sub_imm(&codebuf, ARM_R0, Rstack, 4);
+  str_imm(&codebuf, ARM_R0, ARM_R2, ISTATE_STACK-ISTATE_NEXT_FRAME, 1, 0);
+  str_imm(&codebuf, ARM_IP, ARM_R2, ISTATE_BCP-ISTATE_NEXT_FRAME, 1, 0);
+  mov_imm(&codebuf, ARM_IP, (u32)Helper_synchronized_exit);
+  mov_reg(&codebuf, ARM_R0, Rthread);
+  mov_reg(&codebuf, ARM_PC, ARM_IP);
+
+#define DEBUG_REGSET ((1<<ARM_R0)|(1<<ARM_R1)|(1<<ARM_R2)|(1<<ARM_R3)|(1<<ARM_IP))
+
+// DEBUG_METHDENTRY
+  handlers[H_DEBUG_METHODENTRY] = out_pos(&codebuf);
+  stm(&codebuf, DEBUG_REGSET | (1<<ARM_LR), ARM_SP, PUSH_FD, 1);
+  mov_reg(&codebuf, ARM_R2, ARM_R0);
+  mov_reg(&codebuf, ARM_R0, ARM_R8);
+  mov_reg(&codebuf, ARM_R1, ARM_R4);
+  mov_imm(&codebuf, ARM_IP, (u32)Debug_MethodEntry);
+  blx_reg(&codebuf, ARM_IP);
+  ldm(&codebuf, DEBUG_REGSET | (1<<ARM_PC), ARM_SP, POP_FD, 1);
+
+// DEBUG_METHODEXIT
+  handlers[H_DEBUG_METHODEXIT] = out_pos(&codebuf);
+  stm(&codebuf, DEBUG_REGSET | (1<<ARM_LR), ARM_SP, PUSH_FD, 1);
+  mov_reg(&codebuf, ARM_R0, ARM_R8);
+  mov_reg(&codebuf, ARM_R1, ARM_R4);
+  mov_imm(&codebuf, ARM_IP, (u32)Debug_MethodExit);
+  blx_reg(&codebuf, ARM_IP);
+  ldm(&codebuf, DEBUG_REGSET | (1<<ARM_PC), ARM_SP, POP_FD, 1);
+
+// DEBUG_METHODCALL
+  handlers[H_DEBUG_METHODCALL] = out_pos(&codebuf);
+  stm(&codebuf, DEBUG_REGSET | (1<<ARM_LR), ARM_SP, PUSH_FD, 1);
+  mov_reg(&codebuf, ARM_R2, ARM_R0);
+  mov_reg(&codebuf, ARM_R0, ARM_R8);
+  mov_reg(&codebuf, ARM_R1, ARM_R4);
+  mov_imm(&codebuf, ARM_IP, (u32)Debug_MethodCall);
+  blx_reg(&codebuf, ARM_IP);
+  ldm(&codebuf, DEBUG_REGSET | (1<<ARM_PC), ARM_SP, POP_FD, 1);
+
+// EXIT_TO_INTERPRETER
+//   r0 = bci
+  handlers[H_EXIT_TO_INTERPRETER] = out_pos(&codebuf);
+  ldr_imm(&codebuf, ARM_R2, Rthread, THREAD_TOP_ZERO_FRAME, 1, 0);
+  mov_imm(&codebuf, ARM_R3, (u32)Thumb2_Exit_To_Interpreter);
+  ldr_imm(&codebuf, ARM_R1, ARM_R2, ISTATE_METHOD-ISTATE_NEXT_FRAME, 1, 0);
+  ldr_imm(&codebuf, ARM_IP, ARM_R1, METHOD_CONSTMETHOD, 1, 0);
+  add_reg(&codebuf, Rint_jpc, ARM_IP, ARM_R0);
+  mov_reg(&codebuf, ARM_PC, ARM_R3);
+
+// H_SAFEPOINT
+  handlers[H_SAFEPOINT] = out_pos(&codebuf);
+  stm(&codebuf, (1<<ARM_LR), ARM_SP, PUSH_FD, 1);
+
+  // The frame walking code used by the garbage collector
+  // (frame::interpreter_frame_tos_address()) assumes that the stack
+  // pointer points one word below the top item on the stack, so we
+  // have to adjust the SP saved in istate accordingly.  If we don't,
+  // the value on TOS won't be seen by the GC and we will crash later.
+  sub_imm(&codebuf, ARM_R0, Rstack, 4);
+  str_imm(&codebuf, ARM_R0, ARM_R2, ISTATE_STACK, 1, 0);
+
+  // Set up BytecodeInterpreter->_bcp for the GC
+  // bci+CONSTMETHOD_CODEOFFSET is passed in ARM_R1
+  // istate is passed in ARM_R2
+  ldr_imm(&codebuf, ARM_R0, ARM_R2, ISTATE_METHOD, 1, 0);
+  ldr_imm(&codebuf, ARM_R0, ARM_R0, METHOD_CONSTMETHOD, 1, 0);
+  add_reg(&codebuf, ARM_R0, ARM_R0, ARM_R1);
+  str_imm(&codebuf, ARM_R0, ARM_R2, ISTATE_BCP, 1, 0);
+
+  mov_imm(&codebuf, ARM_IP, (u32)Helper_SafePoint);
+  mov_reg(&codebuf, ARM_R0, Rthread);
+  blx_reg(&codebuf, ARM_IP);
+  ldm(&codebuf, (1<<ARM_LR), ARM_SP, POP_FD, 1);
+  cmp_imm(&codebuf, ARM_R0, 0);
+
+  // The sequence here is delicate.  We need to set things up so that
+  // it looks as though Thumb2_Handle_Exception_NoRegs was called
+  // directly from a compiled method.
+  it(&codebuf, COND_EQ, IT_MASK_T);
+  mov_reg(&codebuf, ARM_PC, ARM_LR);
+  mov_imm(&codebuf, ARM_R3, (u32)Thumb2_Handle_Exception_NoRegs);
+  mov_reg(&codebuf, ARM_PC, ARM_R3);
+
+  // Disassemble the codebuf we just created.  For debugging
+  if (PrintAssembly) {
+    Hsdis hsdis;
+    hsdis.decode_instructions(begin_thumb_code, cb->hp + codebuf.idx * 2,
+			      print_address, NULL, NULL, stderr,
+			      "force-thumb");
+    fputc('\n', stderr);
+  }
+
+  Thumb2_Clear_Cache(cb->hp, cb->hp + codebuf.idx * 2);
+  cb->hp += codebuf.idx * 2;
+
+  thumb2_codebuf = cb;
+
+#ifdef THUMB2_JVMTI
+  // cache the end of the generated stub region for notification later
+  stub_gen_code_end = cb->hp;
+#endif // THUMB2_JVMTI
+}
+
+#endif // T2JIT
+
+#endif // __arm__
--- a/src/cpu/zero/vm/vm_version_zero.hpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/cpu/zero/vm/vm_version_zero.hpp	Fri Aug 09 12:21:36 2013 +0100
@@ -30,7 +30,18 @@
 #include "runtime/vm_version.hpp"
 
 class VM_Version : public Abstract_VM_Version {
+
  public:
+  static void get_processor_features() {
+#ifdef __ARM_ARCH_7A__
+    Abstract_VM_Version::_supports_cx8 = true;
+#endif
+  }
+
+  static void initialize() {
+    get_processor_features();
+  }
+
   static const char* cpu_features() {
     return "";
   }
--- a/src/os/linux/vm/os_linux.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/os/linux/vm/os_linux.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -129,6 +129,11 @@
 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
 
 #define LARGEPAGES_BIT (1 << 6)
+
+#ifndef EM_AARCH64
+#define EM_AARCH64	183		/* ARM AARCH64 */
+#endif
+
 ////////////////////////////////////////////////////////////////////////////////
 // global variables
 julong os::Linux::_physical_memory = 0;
@@ -1889,7 +1894,9 @@
     {EM_MIPS_RS3_LE, EM_MIPS_RS3_LE, ELFCLASS32, ELFDATA2LSB, (char*)"MIPSel"},
     {EM_MIPS,        EM_MIPS,    ELFCLASS32, ELFDATA2MSB, (char*)"MIPS"},
     {EM_PARISC,      EM_PARISC,  ELFCLASS32, ELFDATA2MSB, (char*)"PARISC"},
-    {EM_68K,         EM_68K,     ELFCLASS32, ELFDATA2MSB, (char*)"M68k"}
+    {EM_68K,         EM_68K,     ELFCLASS32, ELFDATA2MSB, (char*)"M68k"},
+    {EM_SH,          EM_SH,      ELFCLASS32, ELFDATA2LSB, (char*)"SH"}, /* Support little endian only*/
+    {EM_AARCH64,     EM_AARCH64, ELFCLASS64, ELFDATA2LSB, (char*)"AARCH64"} /* Support little endian only*/
   };
 
   #if  (defined IA32)
@@ -1920,9 +1927,13 @@
     static  Elf32_Half running_arch_code=EM_MIPS;
   #elif  (defined M68K)
     static  Elf32_Half running_arch_code=EM_68K;
+  #elif  (defined SH)
+    static  Elf32_Half running_arch_code=EM_SH;
+  #elif  (defined AARCH64)
+    static  Elf32_Half running_arch_code=EM_AARCH64;
   #else
     #error Method os::dll_load requires that one of following is defined:\
-         IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K
+         IA32, AMD64, IA64, __sparc, __powerpc__, ARM, S390, ALPHA, MIPS, MIPSEL, PARISC, M68K, SH, AARCH64
   #endif
 
   // Identify compatability class for VM's architecture and library's architecture
@@ -2007,6 +2018,37 @@
   return true;
 }
 
+bool _print_lsb_file(const char* filename, outputStream* st) {
+  int fd = open(filename, O_RDONLY);
+  if (fd == -1) {
+     return false;
+  }
+
+  char buf[512], *d_i, *d_r, *d_c;
+  int bytes;
+
+  if ((bytes = read(fd, buf, sizeof(buf)-1)) < 0 || bytes == (int)(sizeof(buf)-1)) {
+     close(fd);
+     return false;
+  }
+  close(fd);
+
+  buf[bytes] = '\n';
+  buf[bytes+1] = '\0';
+  d_i = strstr(buf, "DISTRIB_ID=");
+  d_r = strstr(buf, "DISTRIB_RELEASE=");
+  d_c = strstr(buf, "DISTRIB_CODENAME=");
+  if (!d_i || !d_r || !d_c) {
+     return false;
+  }
+  d_i = strchr(d_i, '=') + 1;  *strchrnul(d_i, '\n') = '\0';
+  d_r = strchr(d_r, '=') + 1;  *strchrnul(d_r, '\n') = '\0';
+  d_c = strchr(d_c, '=') + 1;  *strchrnul(d_c, '\n') = '\0';
+  st->print("%s %s (%s)", d_i, d_r, d_c);
+
+  return true;
+}
+
 void os::print_dll_info(outputStream *st) {
    st->print_cr("Dynamic libraries:");
 
@@ -2063,6 +2105,7 @@
       !_print_ascii_file("/etc/SuSE-release", st) &&
       !_print_ascii_file("/etc/turbolinux-release", st) &&
       !_print_ascii_file("/etc/gentoo-release", st) &&
+      !_print_lsb_file("/etc/lsb-release", st) &&
       !_print_ascii_file("/etc/debian_version", st) &&
       !_print_ascii_file("/etc/ltib-release", st) &&
       !_print_ascii_file("/etc/angstrom-version", st)) {
--- a/src/os_cpu/linux_sparc/vm/assembler_linux_sparc.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/os_cpu/linux_sparc/vm/assembler_linux_sparc.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -28,7 +28,16 @@
 #include "runtime/os.hpp"
 #include "runtime/threadLocalStorage.hpp"
 
-#include <asm-sparc/traps.h>
+/* Headers for 32bit sparc with a 32bit userland end up in asm/
+ * Headers for 32bit sparc with a 64bit userland end up in asm-sparc/
+ * There is no traps.h in asm-sparc64/
+ */
+
+#if defined(__sparc__) && defined(__arch64__)
+# include <asm-sparc/traps.h>
+#else 
+# include <asm/traps.h>
+#endif
 
 void MacroAssembler::read_ccr_trap(Register ccr_save) {
   // No implementation
--- a/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -309,29 +309,30 @@
   if (context == NULL) return;
 
   ucontext_t *uc = (ucontext_t*)context;
+  sigcontext* sc = (sigcontext*)context;
   intptr_t *sp = (intptr_t *)os::Linux::ucontext_get_sp(uc);
 
   st->print_cr("Register to memory mapping:");
   st->cr();
 
   // this is only for the "general purpose" registers
-  st->print("G1="); print_location(st, SIG_REGS(sc).u_regs[CON__G1]);
-  st->print("G2="); print_location(st, SIG_REGS(sc).u_regs[CON__G2]);
-  st->print("G3="); print_location(st, SIG_REGS(sc).u_regs[CON__G3]);
-  st->print("G4="); print_location(st, SIG_REGS(sc).u_regs[CON__G4]);
-  st->print("G5="); print_location(st, SIG_REGS(sc).u_regs[CON__G5]);
-  st->print("G6="); print_location(st, SIG_REGS(sc).u_regs[CON__G6]);
-  st->print("G7="); print_location(st, SIG_REGS(sc).u_regs[CON__G7]);
+  st->print("G1="); print_location(st, SIG_REGS(sc).u_regs[CON_G1]);
+  st->print("G2="); print_location(st, SIG_REGS(sc).u_regs[CON_G2]);
+  st->print("G3="); print_location(st, SIG_REGS(sc).u_regs[CON_G3]);
+  st->print("G4="); print_location(st, SIG_REGS(sc).u_regs[CON_G4]);
+  st->print("G5="); print_location(st, SIG_REGS(sc).u_regs[CON_G5]);
+  st->print("G6="); print_location(st, SIG_REGS(sc).u_regs[CON_G6]);
+  st->print("G7="); print_location(st, SIG_REGS(sc).u_regs[CON_G7]);
   st->cr();
 
-  st->print("O0="); print_location(st, SIG_REGS(sc).u_regs[CON__O0]);
-  st->print("O1="); print_location(st, SIG_REGS(sc).u_regs[CON__O1]);
-  st->print("O2="); print_location(st, SIG_REGS(sc).u_regs[CON__O2]);
-  st->print("O3="); print_location(st, SIG_REGS(sc).u_regs[CON__O3]);
-  st->print("O4="); print_location(st, SIG_REGS(sc).u_regs[CON__O4]);
-  st->print("O5="); print_location(st, SIG_REGS(sc).u_regs[CON__O5]);
-  st->print("O6="); print_location(st, SIG_REGS(sc).u_regs[CON__O6]);
-  st->print("O7="); print_location(st, SIG_REGS(sc).u_regs[CON__O7]);
+  st->print("O0="); print_location(st, SIG_REGS(sc).u_regs[CON_O0]);
+  st->print("O1="); print_location(st, SIG_REGS(sc).u_regs[CON_O1]);
+  st->print("O2="); print_location(st, SIG_REGS(sc).u_regs[CON_O2]);
+  st->print("O3="); print_location(st, SIG_REGS(sc).u_regs[CON_O3]);
+  st->print("O4="); print_location(st, SIG_REGS(sc).u_regs[CON_O4]);
+  st->print("O5="); print_location(st, SIG_REGS(sc).u_regs[CON_O5]);
+  st->print("O6="); print_location(st, SIG_REGS(sc).u_regs[CON_O6]);
+  st->print("O7="); print_location(st, SIG_REGS(sc).u_regs[CON_O7]);
   st->cr();
 
   st->print("L0="); print_location(st, sp[L0->sp_offset_in_saved_window()]);
--- a/src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/os_cpu/linux_zero/vm/atomic_linux_zero.inline.hpp	Fri Aug 09 12:21:36 2013 +0100
@@ -160,6 +160,16 @@
         return prev;
     }
 }
+
+#ifdef __ARM_ARCH_7A__
+/* Perform an atomic compare and swap: if the current value of `*PTR'
+   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
+   `*PTR' before the operation.*/
+extern "C" jlong arm_val_compare_and_swap_long(volatile void *ptr,
+					       jlong oldval,
+					       jlong newval);
+
+#endif	// __ARM_ARCH_7A__
 #endif // ARM
 
 inline void Atomic::store(jint store_value, volatile jint* dest) {
@@ -274,7 +284,11 @@
                              volatile jlong* dest,
                              jlong compare_value) {
 
+#ifndef	__ARM_ARCH_7A__
   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
+#else
+  return arm_val_compare_and_swap_long(dest, compare_value, exchange_value);
+#endif
 }
 
 inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
--- a/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -116,6 +116,11 @@
   ShouldNotCallThis();
 }
 
+#ifdef HOTSPOT_ASM
+extern "C" int asm_check_null_ptr(ucontext_t *uc);
+extern int Thumb2_Install_Safepoint_PC(ucontext_t *uc, int magicBytes);
+#endif // HOTSPOT_ASM
+
 extern "C" JNIEXPORT int
 JVM_handle_linux_signal(int sig,
                         siginfo_t* info,
@@ -123,6 +128,26 @@
                         int abort_if_unrecognized) {
   ucontext_t* uc = (ucontext_t*) ucVoid;
 
+#ifdef HOTSPOT_ASM
+  if (sig == SIGSEGV) {
+    // check to see if this was the result of a back edge safepoint check
+    if (os::is_poll_address((address)info->si_addr)) {
+      // check that this is a legitimate safepoint rather
+      // than any old illegal access to the polling page.
+      // if the the check code returns true it will patch
+      // the return address to enter the safepoint check code
+      // n.b. the offset into the page gives us twice the offset to
+      // the magic word in bytes
+      int magicByteOffset = ((address)info->si_addr - (address)os::get_polling_page()) / 2;
+      if (Thumb2_Install_Safepoint_PC(uc, magicByteOffset)) {
+	return true;
+      }
+    } else if (asm_check_null_ptr(uc)) {
+      return 1;
+    }
+  }
+#endif // HOTSPOT_ASM
+
   Thread* t = ThreadLocalStorage::get_thread_slow();
 
   SignalHandlerMark shm(t);
--- a/src/os_cpu/linux_zero/vm/os_linux_zero.hpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/os_cpu/linux_zero/vm/os_linux_zero.hpp	Fri Aug 09 12:21:36 2013 +0100
@@ -48,6 +48,12 @@
                   "std %0, 0(%2)\n"
                   : "=r"(tmp)
                   : "a"(src), "a"(dst));
+#elif defined(__ARM_ARCH_7A__)
+    jlong tmp;
+    asm volatile ("ldrexd  %0, [%1]\n"
+                  : "=r"(tmp)
+                  : "r"(src), "m"(src));
+    *(jlong *) dst = tmp;
 #else
     *(jlong *) dst = *(jlong *) src;
 #endif
--- a/src/share/tools/hsdis/Makefile	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/tools/hsdis/Makefile	Fri Aug 09 12:21:36 2013 +0100
@@ -68,14 +68,18 @@
 CONFIGURE_ARGS= --host=$(MINGW) --target=$(MINGW)
 else
 CPU             = $(shell uname -m)
+ifneq		 ($(findstring arm,$(CPU)),)
+ARCH=arm
+else
 ARCH1=$(CPU:x86_64=amd64)
 ARCH=$(ARCH1:i686=i386)
 CFLAGS/i386	+= -m32
 CFLAGS/sparc	+= -m32
 CFLAGS/sparcv9	+= -m64
 CFLAGS/amd64	+= -m64
+endif
 CFLAGS		+= $(CFLAGS/$(ARCH))
-CFLAGS		+= -fPIC
+CFLAGS		+= -fPIC -g
 OS		= linux
 LIB_EXT		= .so
 CC 		= gcc
@@ -118,7 +122,7 @@
 BINUTILSDIR	= $(shell cd $(BINUTILS);pwd)
 endif
 
-CPPFLAGS	+= -I$(BINUTILSDIR)/include -I$(BINUTILS)/bfd -I$(TARGET_DIR)/bfd
+CPPFLAGS	+= -I$(BINUTILSDIR)/include -I$(BINUTILSDIR)/bfd -I$(TARGET_DIR)/bfd
 CPPFLAGS	+= -DLIBARCH_$(LIBARCH) -DLIBARCH=\"$(LIBARCH)\" -DLIB_EXT=\"$(LIB_EXT)\"
 
 TARGET_DIR	= build/$(OS)-$(JDKARCH)
@@ -145,7 +149,7 @@
 demo: $(TARGET) $(DEMO_TARGET)
 
 $(LIBRARIES): $(TARGET_DIR) $(TARGET_DIR)/Makefile
-	if [ ! -f $@ ]; then cd $(TARGET_DIR); make all-opcodes; fi
+	if [ ! -f $@ ]; then cd $(TARGET_DIR); make all-opcodes "CFLAGS=$(CFLAGS)"; fi
 
 $(TARGET_DIR)/Makefile:
 	(cd $(TARGET_DIR); CC=$(CC) CFLAGS="$(CFLAGS)" $(BINUTILSDIR)/configure --disable-nls $(CONFIGURE_ARGS))
--- a/src/share/tools/hsdis/hsdis.c	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/tools/hsdis/hsdis.c	Fri Aug 09 12:21:36 2013 +0100
@@ -35,6 +35,8 @@
 #include <dis-asm.h>
 #include <inttypes.h>
 
+#include <string.h>
+
 #ifndef bool
 #define bool int
 #define true 1
@@ -418,6 +420,9 @@
 #ifdef LIBARCH_sparcv9
     res = "sparc:v9b";
 #endif
+#ifdef LIBARCH_arm
+    res = "arm";
+#endif
   if (res == NULL)
     res = "architecture not set in Makefile!";
   return res;
--- a/src/share/vm/asm/codeBuffer.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/asm/codeBuffer.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -674,7 +674,7 @@
     }
   }
 
-  if (dest->blob() == NULL) {
+  if ((dest->blob() == NULL) && dest_filled ) {
     // Destination is a final resting place, not just another buffer.
     // Normalize uninitialized bytes in the final padding.
     Copy::fill_to_bytes(dest_filled, dest_end - dest_filled,
--- a/src/share/vm/asm/codeBuffer.hpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/asm/codeBuffer.hpp	Fri Aug 09 12:21:36 2013 +0100
@@ -93,7 +93,7 @@
   address     _locs_point;      // last relocated position (grows upward)
   bool        _locs_own;        // did I allocate the locs myself?
   bool        _frozen;          // no more expansion of this section
-  char        _index;           // my section number (SECT_INST, etc.)
+  signed char _index;           // my section number (SECT_INST, etc.)
   CodeBuffer* _outer;           // enclosing CodeBuffer
 
   // (Note:  _locs_point used to be called _last_reloc_offset.)
--- a/src/share/vm/ci/ciTypeFlow.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/ci/ciTypeFlow.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -403,7 +403,7 @@
   // Set the rest of the locals to bottom.
   Cell cell = state->next_cell(state->tos());
   state->set_stack_size(0);
-  int limit = state->limit_cell();
+  Cell limit = state->limit_cell();
   for (; cell < limit; cell = state->next_cell(cell)) {
     state->set_type_at(cell, state->bottom_type());
   }
--- a/src/share/vm/classfile/systemDictionary.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/classfile/systemDictionary.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -2252,7 +2252,6 @@
 
   // Better never do a GC while we're holding these oops
   No_Safepoint_Verifier nosafepoint;
-
   klassOop klass1 = find_class(d_index1, d_hash1, constraint_name, class_loader1);
   klassOop klass2 = find_class(d_index2, d_hash2, constraint_name, class_loader2);
   return constraints()->add_entry(constraint_name, klass1, class_loader1,
--- a/src/share/vm/compiler/methodLiveness.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/compiler/methodLiveness.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -572,15 +572,15 @@
 
 
 MethodLiveness::BasicBlock::BasicBlock(MethodLiveness *analyzer, int start, int limit) :
-         _gen((uintptr_t*)analyzer->arena()->Amalloc(BytesPerWord * analyzer->bit_map_size_words()),
+         _gen((size_t*)analyzer->arena()->Amalloc(BytesPerWord * analyzer->bit_map_size_words()),
                          analyzer->bit_map_size_bits()),
-         _kill((uintptr_t*)analyzer->arena()->Amalloc(BytesPerWord * analyzer->bit_map_size_words()),
+         _kill((size_t*)analyzer->arena()->Amalloc(BytesPerWord * analyzer->bit_map_size_words()),
                          analyzer->bit_map_size_bits()),
-         _entry((uintptr_t*)analyzer->arena()->Amalloc(BytesPerWord * analyzer->bit_map_size_words()),
+         _entry((size_t*)analyzer->arena()->Amalloc(BytesPerWord * analyzer->bit_map_size_words()),
                          analyzer->bit_map_size_bits()),
-         _normal_exit((uintptr_t*)analyzer->arena()->Amalloc(BytesPerWord * analyzer->bit_map_size_words()),
+         _normal_exit((size_t*)analyzer->arena()->Amalloc(BytesPerWord * analyzer->bit_map_size_words()),
                          analyzer->bit_map_size_bits()),
-         _exception_exit((uintptr_t*)analyzer->arena()->Amalloc(BytesPerWord * analyzer->bit_map_size_words()),
+         _exception_exit((size_t*)analyzer->arena()->Amalloc(BytesPerWord * analyzer->bit_map_size_words()),
                          analyzer->bit_map_size_bits()),
          _last_bci(-1) {
   _analyzer = analyzer;
@@ -998,7 +998,7 @@
 }
 
 MethodLivenessResult MethodLiveness::BasicBlock::get_liveness_at(ciMethod* method, int bci) {
-  MethodLivenessResult answer(NEW_RESOURCE_ARRAY(uintptr_t, _analyzer->bit_map_size_words()),
+  MethodLivenessResult answer(NEW_RESOURCE_ARRAY(size_t, _analyzer->bit_map_size_words()),
                 _analyzer->bit_map_size_bits());
   answer.set_is_valid();
 
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -957,7 +957,7 @@
   if (free_percentage < desired_free_percentage) {
     size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
     assert(desired_capacity >= capacity(), "invalid expansion size");
-    expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
+    expand_bytes = MAX2((long unsigned int) (desired_capacity - capacity()), (long unsigned int) MinHeapDeltaBytes);
   }
   if (expand_bytes > 0) {
     if (PrintGCDetails && Verbose) {
@@ -6276,7 +6276,7 @@
     HeapWord* curAddr = _markBitMap.startWord();
     while (curAddr < _markBitMap.endWord()) {
       size_t remaining  = pointer_delta(_markBitMap.endWord(), curAddr);
-      MemRegion chunk(curAddr, MIN2(CMSBitMapYieldQuantum, remaining));
+      MemRegion chunk(curAddr, MIN2((size_t) CMSBitMapYieldQuantum, remaining));
       _markBitMap.clear_large_range(chunk);
       if (ConcurrentMarkSweepThread::should_yield() &&
           !foregroundGCIsActive() &&
@@ -6569,7 +6569,7 @@
     return;
   }
   // Double capacity if possible
-  size_t new_capacity = MIN2(_capacity*2, MarkStackSizeMax);
+  size_t new_capacity = MIN2((size_t) _capacity*2, (size_t) MarkStackSizeMax);
   // Do not give up existing stack until we have managed to
   // get the double capacity that we desired.
   ReservedSpace rs(ReservedSpace::allocation_align_size_up(
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -116,7 +116,7 @@
 #ifndef PRODUCT
 bool CMBitMapRO::covers(ReservedSpace rs) const {
   // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
-  assert(((size_t)_bm.size() * (size_t)(1 << _shifter)) == _bmWordSize,
+  assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
          "size inconsistency");
   return _bmStartWord == (HeapWord*)(rs.base()) &&
          _bmWordSize  == rs.size()>>LogHeapWordSize;
@@ -5407,7 +5407,7 @@
                  G1PPRL_SUM_BYTE_FORMAT("region-size"),
                  g1_committed.start(), g1_committed.end(),
                  g1_reserved.start(), g1_reserved.end(),
-                 HeapRegion::GrainBytes);
+                 (size_t)HeapRegion::GrainBytes);
   _out->print_cr(G1PPRL_LINE_PREFIX);
   _out->print_cr(G1PPRL_LINE_PREFIX
                  G1PPRL_TYPE_H_FORMAT
--- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -520,7 +520,7 @@
   if (_max_fine_entries == 0) {
     assert(_mod_max_fine_entries_mask == 0, "Both or none.");
     size_t max_entries_log = (size_t)log2_long((jlong)G1RSetRegionEntries);
-    _max_fine_entries = (size_t)(1 << max_entries_log);
+    _max_fine_entries = (size_t)1 << max_entries_log;
     _mod_max_fine_entries_mask = _max_fine_entries - 1;
 #if SAMPLE_FOR_EVICTION
     assert(_fine_eviction_sample_size == 0
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -897,8 +897,8 @@
 void PSParallelCompact::initialize_dead_wood_limiter()
 {
   const size_t max = 100;
-  _dwl_mean = double(MIN2(ParallelOldDeadWoodLimiterMean, max)) / 100.0;
-  _dwl_std_dev = double(MIN2(ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
+  _dwl_mean = double(MIN2((size_t) ParallelOldDeadWoodLimiterMean, max)) / 100.0;
+  _dwl_std_dev = double(MIN2((size_t) ParallelOldDeadWoodLimiterStdDev, max)) / 100.0;
   _dwl_first_term = 1.0 / (sqrt(2.0 * M_PI) * _dwl_std_dev);
   DEBUG_ONLY(_dwl_initialized = true;)
   _dwl_adjustment = normal_distribution(1.0);
--- a/src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPermGen.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -69,7 +69,7 @@
   _last_used = current_live;
 
   // We have different alignment constraints than the rest of the heap.
-  const size_t alignment = MAX2(MinPermHeapExpansion,
+  const size_t alignment = MAX2((size_t) MinPermHeapExpansion,
                                 virtual_space()->alignment());
 
   // Compute the desired size:
--- a/src/share/vm/memory/blockOffsetTable.hpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/memory/blockOffsetTable.hpp	Fri Aug 09 12:21:36 2013 +0100
@@ -289,7 +289,7 @@
   };
 
   static size_t power_to_cards_back(uint i) {
-    return (size_t)(1 << (LogBase * i));
+    return (size_t)1 << (LogBase * i);
   }
   static size_t power_to_words_back(uint i) {
     return power_to_cards_back(i) * N_words;
--- a/src/share/vm/memory/collectorPolicy.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/memory/collectorPolicy.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -330,7 +330,7 @@
     // yield a size that is too small) and bound it by MaxNewSize above.
     // Ergonomics plays here by previously calculating the desired
     // NewSize and MaxNewSize.
-    max_new_size = MIN2(MAX2(max_new_size, NewSize), MaxNewSize);
+    max_new_size = MIN2(MAX2(max_new_size, (size_t) NewSize), (size_t) MaxNewSize);
   }
   assert(max_new_size > 0, "All paths should set max_new_size");
 
--- a/src/share/vm/memory/threadLocalAllocBuffer.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/memory/threadLocalAllocBuffer.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -243,7 +243,7 @@
   size_t init_sz;
 
   if (TLABSize > 0) {
-    init_sz = MIN2(TLABSize / HeapWordSize, max_size());
+    init_sz = MIN2((size_t) (TLABSize / HeapWordSize), max_size());
   } else if (global_stats() == NULL) {
     // Startup issue - main thread initialized before heap initialized.
     init_sz = min_size();
--- a/src/share/vm/opto/cfgnode.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/opto/cfgnode.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -941,6 +941,8 @@
       { assert(ft == _type, ""); } // Uplift to interface
     else if( !t->empty() && ttkp && ttkp->is_loaded() && ttkp->klass()->is_interface() )
       { assert(ft == _type, ""); } // Uplift to interface
+    else if( !t->empty() && ttkp && ttkp->is_loaded() && ttkp->klass()->is_interface() )
+      { assert(ft == _type, ""); } // Uplift to interface
     // Otherwise it's something stupid like non-overlapping int ranges
     // found on dying counted loops.
     else
--- a/src/share/vm/opto/type.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/opto/type.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -2577,6 +2577,8 @@
       return kills;             // Uplift to interface
     if (!empty() && ktkp != NULL && ktkp->klass()->is_loaded() && ktkp->klass()->is_interface())
       return kills;             // Uplift to interface
+    if (!empty() && ktkp != NULL && ktkp->klass()->is_loaded() && ktkp->klass()->is_interface())
+      return kills;             // Uplift to interface
 
     return Type::TOP;           // Canonical empty value
   }
--- a/src/share/vm/prims/jni.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/prims/jni.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -4488,7 +4488,7 @@
   if (!directBufferSupportInitializeEnded) {
     if (!initializeDirectBufferSupport(env, thread)) {
 #ifndef USDT2
-      DTRACE_PROBE1(hotspot_jni, NewDirectByteBuffer__return, NULL);
+	DTRACE_PROBE1(hotspot_jni, NewDirectByteBuffer__return, (uintptr_t) NULL);
 #else /* USDT2 */
       HOTSPOT_JNI_NEWDIRECTBYTEBUFFER_RETURN(
                                              NULL);
--- a/src/share/vm/prims/jvmtiEnv.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/prims/jvmtiEnv.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -2794,6 +2794,9 @@
   (*entry_count_ptr) = num_entries;
   (*table_ptr) = jvmti_table;
 
+  if (num_entries == 0)
+    return JVMTI_ERROR_ABSENT_INFORMATION;
+
   return JVMTI_ERROR_NONE;
 } /* end GetLineNumberTable */
 
--- a/src/share/vm/prims/jvmtiExport.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/prims/jvmtiExport.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -1774,6 +1774,47 @@
   }
 }
 
+#if defined(ZERO) && defined(ARM)
+
+// special compiled_method_load notify API for thumb2 compiler
+
+void JvmtiExport::post_compiled_method_load(const methodOop method, const jint length,
+                                            const void *code_begin, const jint map_length,
+                                            const jvmtiAddrLocationMap* map,
+					    const void *compile_info)
+{
+  JavaThread* thread = JavaThread::current();
+  jmethodID methodId = method->jmethod_id();
+
+  EVT_TRIG_TRACE(JVMTI_EVENT_COMPILED_METHOD_LOAD,
+                 ("JVMTI [%s] method compile load event triggered (by thumb2_compile)",
+                 JvmtiTrace::safe_get_thread_name(thread)));
+
+  JvmtiEnvIterator it;
+  for (JvmtiEnv* env = it.first(); env != NULL; env = it.next(env)) {
+    if (env->is_enabled(JVMTI_EVENT_COMPILED_METHOD_LOAD)) {
+
+      EVT_TRACE(JVMTI_EVENT_COMPILED_METHOD_LOAD,
+                ("JVMTI [%s] class compile method load event sent %s.%s   (by thumb2_compile)",
+                JvmtiTrace::safe_get_thread_name(thread),
+                method->klass_name()->as_C_string(),
+		 method->name()->as_C_string()));
+
+      JvmtiEventMark jem(thread);
+      JvmtiJavaThreadEventTransition jet(thread);
+      jvmtiEventCompiledMethodLoad callback = env->callbacks()->CompiledMethodLoad;
+
+      if (callback != NULL) {
+        (*callback)(env->jvmti_external(), methodId,
+                    length, code_begin, map_length,
+                    map, compile_info);
+      }
+    }
+  }
+}
+
+#endif // defined(ZERO) && defined(ARM)
+
 
 // post a COMPILED_METHOD_LOAD event for a given environment
 void JvmtiExport::post_compiled_method_load(JvmtiEnv* env, const jmethodID method, const jint length,
--- a/src/share/vm/prims/jvmtiExport.hpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/prims/jvmtiExport.hpp	Fri Aug 09 12:21:36 2013 +0100
@@ -295,6 +295,12 @@
                                         jint *cached_length_ptr);
   static void post_native_method_bind(methodOop method, address* function_ptr) KERNEL_RETURN;
   static void post_compiled_method_load(nmethod *nm) KERNEL_RETURN;
+#ifdef __arm__
+  static void post_compiled_method_load(const methodOop method, const jint length,
+					const void *code_begin, const jint map_length,
+					const jvmtiAddrLocationMap* map,
+					const void *compile_info) KERNEL_RETURN;
+#endif // __arm__
   static void post_dynamic_code_generated(const char *name, const void *code_begin, const void *code_end) KERNEL_RETURN;
 
   // used to post a CompiledMethodUnload event
--- a/src/share/vm/runtime/arguments.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/runtime/arguments.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -53,8 +53,7 @@
 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
 #endif
 
-// Note: This is a special bug reporting site for the JVM
-#define DEFAULT_VENDOR_URL_BUG "http://bugreport.sun.com/bugreport/crash.jsp"
+#define DEFAULT_VENDOR_URL_BUG "http://icedtea.classpath.org/bugzilla"
 #define DEFAULT_JAVA_LAUNCHER  "generic"
 
 char**  Arguments::_jvm_flags_array             = NULL;
@@ -1168,7 +1167,7 @@
     // NewSize was set on the command line and it is larger than
     // preferred_max_new_size.
     if (!FLAG_IS_DEFAULT(NewSize)) {   // NewSize explicitly set at command-line
-      FLAG_SET_ERGO(uintx, MaxNewSize, MAX2(NewSize, preferred_max_new_size));
+      FLAG_SET_ERGO(uintx, MaxNewSize, MAX2((size_t) NewSize, preferred_max_new_size));
     } else {
       FLAG_SET_ERGO(uintx, MaxNewSize, preferred_max_new_size);
     }
@@ -1197,8 +1196,8 @@
       // Unless explicitly requested otherwise, make young gen
       // at least min_new, and at most preferred_max_new_size.
       if (FLAG_IS_DEFAULT(NewSize)) {
-        FLAG_SET_ERGO(uintx, NewSize, MAX2(NewSize, min_new));
-        FLAG_SET_ERGO(uintx, NewSize, MIN2(preferred_max_new_size, NewSize));
+        FLAG_SET_ERGO(uintx, NewSize, MAX2((size_t) NewSize, min_new));
+        FLAG_SET_ERGO(uintx, NewSize, MIN2(preferred_max_new_size, (size_t) NewSize));
         if (PrintGCDetails && Verbose) {
           // Too early to use gclog_or_tty
           tty->print_cr("CMS ergo set NewSize: " SIZE_FORMAT, NewSize);
--- a/src/share/vm/runtime/globals.hpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/runtime/globals.hpp	Fri Aug 09 12:21:36 2013 +0100
@@ -3209,7 +3209,7 @@
   product(uintx, InitialHeapSize, 0,                                        \
           "Initial heap size (in bytes); zero means OldSize + NewSize")     \
                                                                             \
-  product(uintx, MaxHeapSize, ScaleForWordSize(96*M),                       \
+  product(uintx, MaxHeapSize, ScaleForWordSize(512*M),                      \
           "Maximum heap size (in bytes)")                                   \
                                                                             \
   product(uintx, OldSize, ScaleForWordSize(4*M),                            \
--- a/src/share/vm/runtime/os.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/runtime/os.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -1101,6 +1101,9 @@
         "%/lib/jsse.jar:"
         "%/lib/jce.jar:"
         "%/lib/charsets.jar:"
+        "%/lib/netx.jar:"
+        "%/lib/plugin.jar:"
+        "%/lib/rhino.jar:"
         "%/lib/jfr.jar:"
 #ifdef __APPLE__
         "%/lib/JObjC.jar:"
--- a/src/share/vm/runtime/vmStructs.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/runtime/vmStructs.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -827,10 +827,10 @@
   /* CodeBlobs (NOTE: incomplete, but only a little) */                                                                              \
   /***************************************************/                                                                              \
                                                                                                                                      \
-  X86_ONLY(nonstatic_field(MethodHandles::RicochetFrame, _sender_pc,                                     address))                   \
-  X86_ONLY(nonstatic_field(MethodHandles::RicochetFrame, _exact_sender_sp,                              intptr_t*))                  \
-  X86_ONLY(nonstatic_field(MethodHandles::RicochetFrame, _sender_link,                                  intptr_t*))                  \
-  X86_ONLY(nonstatic_field(MethodHandles::RicochetFrame, _saved_args_base,                              intptr_t*))                  \
+  NOT_ZERO(X86_ONLY(nonstatic_field(MethodHandles::RicochetFrame, _sender_pc,                                     address)))                   \
+  NOT_ZERO(X86_ONLY(nonstatic_field(MethodHandles::RicochetFrame, _exact_sender_sp,                              intptr_t*)))                  \
+  NOT_ZERO(X86_ONLY(nonstatic_field(MethodHandles::RicochetFrame, _sender_link,                                  intptr_t*)))                  \
+  NOT_ZERO(X86_ONLY(nonstatic_field(MethodHandles::RicochetFrame, _saved_args_base,                              intptr_t*)))                  \
                                                                                                                                      \
      static_field(SharedRuntime,               _ricochet_blob,                                RicochetBlob*)                         \
                                                                                                                                      \
@@ -2529,7 +2529,7 @@
   /* frame              */                                                \
   /**********************/                                                \
                                                                           \
-  X86_ONLY(declare_constant(frame::entry_frame_call_wrapper_offset))      \
+  NOT_ZERO(X86_ONLY(declare_constant(frame::entry_frame_call_wrapper_offset)))      \
   declare_constant(frame::pc_return_offset)                               \
                                                                           \
   /*************/                                                         \
--- a/src/share/vm/shark/sharkCompiler.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/shark/sharkCompiler.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright 2008, 2009, 2010, 2011 Red Hat, Inc.
+ * Copyright 2008, 2009, 2010, 2011, 2012 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -319,7 +319,7 @@
   // finish with the exception of the VM thread, so we can consider
   // ourself the owner of the execution engine lock even though we
   // can't actually acquire it at this time.
-  assert(Thread::current()->is_VM_thread(), "must be called by VM thread");
+  assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
 
   SharkEntry *entry = (SharkEntry *) code;
--- a/src/share/vm/utilities/bitMap.hpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/utilities/bitMap.hpp	Fri Aug 09 12:21:36 2013 +0100
@@ -79,7 +79,7 @@
 
   // Set a word to a specified value or to all ones; clear a word.
   void set_word  (idx_t word, bm_word_t val) { _map[word] = val; }
-  void set_word  (idx_t word)            { set_word(word, ~(uintptr_t)0); }
+  void set_word  (idx_t word)            { set_word(word, ~(idx_t)0); }
   void clear_word(idx_t word)            { _map[word] = 0; }
 
   // Utilities for ranges of bits.  Ranges are half-open [beg, end).
--- a/src/share/vm/utilities/macros.hpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/utilities/macros.hpp	Fri Aug 09 12:21:36 2013 +0100
@@ -177,6 +177,22 @@
 #define NOT_WIN64(code) code
 #endif
 
+#if defined(ZERO)
+#define ZERO_ONLY(code) code
+#define NOT_ZERO(code)
+#else
+#define ZERO_ONLY(code) 
+#define NOT_ZERO(code) code
+#endif
+
+#if defined(SHARK)
+#define SHARK_ONLY(code) code
+#define NOT_SHARK(code)
+#else
+#define SHARK_ONLY(code) 
+#define NOT_SHARK(code) code
+#endif
+
 #if defined(IA32) || defined(AMD64)
 #define X86
 #define X86_ONLY(code) code
--- a/src/share/vm/utilities/ostream.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/utilities/ostream.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -1047,7 +1047,7 @@
   server.sin_port = htons(port);
 
   server.sin_addr.s_addr = inet_addr(ip);
-  if (server.sin_addr.s_addr == (uint32_t)-1) {
+  if (server.sin_addr.s_addr == (in_addr_t)-1) {
     struct hostent* host = os::get_host_by_name((char*)ip);
     if (host != NULL) {
       memcpy(&server.sin_addr, host->h_addr_list[0], host->h_length);
--- a/src/share/vm/utilities/vmError.cpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/utilities/vmError.cpp	Fri Aug 09 12:21:36 2013 +0100
@@ -192,7 +192,8 @@
 
 static void print_bug_submit_message(outputStream *out, Thread *thread) {
   if (out == NULL) return;
-  out->print_raw_cr("# If you would like to submit a bug report, please visit:");
+  out->print_raw_cr("# If you would like to submit a bug report, please include");
+  out->print_raw_cr("# instructions on how to reproduce the bug and visit:");
   out->print_raw   ("#   ");
   out->print_raw_cr(Arguments::java_vendor_url_bug());
   // If the crash is in native code, encourage user to submit a bug to the
@@ -253,6 +254,19 @@
   return buf;
 }
 
+#ifdef PRODUCT
+extern "C" void _ps() {
+  fdStream out(defaultStream::output_fd());
+  JavaThread* thread = JavaThread::active();
+  char *buf = new char[1024*1024];
+  VMError err(thread, "", 0, "", "");
+
+  err.print_stack_trace(&out, thread, buf, 1024*1024, true);
+
+  delete[] buf;
+}
+#endif // PRODUCT
+
 void VMError::print_stack_trace(outputStream* st, JavaThread* jt,
                                 char* buf, int buflen, bool verbose) {
 #ifdef ZERO
@@ -458,6 +472,13 @@
                    UseCompressedOops ? "compressed oops" : ""
                  );
 
+#ifdef DERIVATIVE_ID
+     st->print_cr("# Derivative: %s", DERIVATIVE_ID);
+#endif
+#ifdef DISTRIBUTION_ID
+     st->print_cr("# Distribution: %s", DISTRIBUTION_ID);
+#endif
+
   STEP(60, "(printing problematic frame)")
 
      // Print current frame if we have a context (i.e. it's a crash)
--- a/src/share/vm/utilities/vmError.hpp	Tue Jun 04 10:47:35 2013 -0700
+++ b/src/share/vm/utilities/vmError.hpp	Fri Aug 09 12:21:36 2013 +0100
@@ -30,6 +30,10 @@
 class Decoder;
 class VM_ReportJavaOutOfMemory;
 
+#ifdef PRODUCT
+extern "C" void _ps();
+#endif // PRODUCT
+
 class VMError : public StackObj {
   friend class VM_ReportJavaOutOfMemory;
   friend class Decoder;
@@ -98,6 +102,10 @@
   const char* detail_msg() const { return _detail_msg; }
   bool should_report_bug(unsigned int id) { return id != oom_error; }
 
+#ifdef PRODUCT
+  friend void _ps();
+#endif // PRODUCT
+
 public:
   // Constructor for crashes
   VMError(Thread* thread, unsigned int sig, address pc, void* siginfo,
--- a/test/runtime/6929067/Test6929067.sh	Tue Jun 04 10:47:35 2013 -0700
+++ b/test/runtime/6929067/Test6929067.sh	Fri Aug 09 12:21:36 2013 +0100
@@ -42,7 +42,31 @@
     ;;
 esac
 
-LD_LIBRARY_PATH=.:${TESTJAVA}/jre/lib/i386/client:/usr/openwin/lib:/usr/dt/lib:/usr/lib:$LD_LIBRARY_PATH
+COMP_FLAG="-m32"
+
+# Test if JDK is 32 or 64 bits
+${TESTJAVA}/bin/java -d64 -version 2> /dev/null
+
+if [ $? -eq 0 ]
+then
+    COMP_FLAG="-m64"
+fi
+
+# Get ARCH specifics
+ARCH=`uname -m`
+case "$ARCH" in
+  x86_64)
+    ARCH=amd64
+    ;;
+  i586)
+    ARCH=i386
+    ;;
+  i686)
+    ARCH=i386
+esac
+
+LD_LIBRARY_PATH=.:${TESTJAVA}/jre/lib/${ARCH}/client:${TESTJAVA}/jre/lib/${ARCH}/server:/usr/openwin/lib:/usr/dt/lib:/usr/lib:$LD_LIBRARY_PATH
+
 export LD_LIBRARY_PATH
 
 THIS_DIR=`pwd`
@@ -55,6 +79,13 @@
 
 ${TESTJAVA}${FS}bin${FS}javac T.java
 
-gcc -o invoke -I${TESTJAVA}/include -I${TESTJAVA}/include/linux invoke.c ${TESTJAVA}/jre/lib/i386/client/libjvm.so
+echo "Architecture: ${ARCH}"
+echo "Compilation flag: ${COMP_FLAG}"
+
+gcc ${COMP_FLAG} -o invoke \
+-L${TESTJAVA}/jre/lib/${ARCH}/client \
+-L${TESTJAVA}/jre/lib/${ARCH}/server \
+-ljvm -lpthread -I${TESTJAVA}/include -I${TESTJAVA}/include/linux invoke.c
+
 ./invoke
 exit $?
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/runtime/7020373/GenOOMCrashClass.java	Fri Aug 09 12:21:36 2013 +0100
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2011, Red Hat Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * 
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ * 
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ * 
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+import java.applet.Applet;
+import java.io.IOException;
+
+import com.sun.org.apache.bcel.internal.Constants;
+import com.sun.org.apache.bcel.internal.generic.AALOAD;
+import com.sun.org.apache.bcel.internal.generic.ACONST_NULL;
+import com.sun.org.apache.bcel.internal.generic.ALOAD;
+import com.sun.org.apache.bcel.internal.generic.ArrayType;
+import com.sun.org.apache.bcel.internal.generic.ClassGen;
+import com.sun.org.apache.bcel.internal.generic.ConstantPoolGen;
+import com.sun.org.apache.bcel.internal.generic.GOTO;
+import com.sun.org.apache.bcel.internal.generic.ICONST;
+import com.sun.org.apache.bcel.internal.generic.IFEQ;
+import com.sun.org.apache.bcel.internal.generic.ILOAD;
+import com.sun.org.apache.bcel.internal.generic.INVOKESTATIC;
+import com.sun.org.apache.bcel.internal.generic.ISTORE;
+import com.sun.org.apache.bcel.internal.generic.InstructionHandle;
+import com.sun.org.apache.bcel.internal.generic.InstructionList;
+import com.sun.org.apache.bcel.internal.generic.JSR;
+import com.sun.org.apache.bcel.internal.generic.MethodGen;
+import com.sun.org.apache.bcel.internal.generic.RETURN;
+import com.sun.org.apache.bcel.internal.generic.Type;
+
+
+public class GenOOMCrashClass {
+
+    public static  String genOOMCrashClass(int maxmeth, int nums/*String[] a*/) throws Exception {
+        String theClassFile = "OOMCrashClass"+nums+"_"+maxmeth;
+        ClassGen cg = new ClassGen(theClassFile, "java.applet.Applet",
+                "<generated>", Constants.ACC_PUBLIC | Constants.ACC_SUPER, null);
+        ConstantPoolGen cp = cg.getConstantPool(); // cg creates constant pool
+
+        //      int br0 = cp.addClass("marc.schoenefeld.2008");
+
+        int br2 = cp.addMethodref("java.lang.Integer", "parseInt",
+                "(Ljava/lang/String;)I");
+
+        Type[] argtype = new Type[] {
+            new ArrayType(Type.STRING, 1)
+        };
+
+        for (int j = 0; j < maxmeth; j++) {
+
+            InstructionList il = new InstructionList();
+
+            String methodName = maxmeth == 1 ? "main" : "main" + j;
+            MethodGen mg = new MethodGen(Constants.ACC_STATIC
+                    | Constants.ACC_PUBLIC,// access flags
+                    Type.VOID, // return type
+                    argtype, new String[] { "argv" }, // arg
+                    // names
+                    methodName, theClassFile, // method, class
+                    il, cp);
+
+            il.append(new ALOAD(0));
+            il.append(new ICONST(0));
+            il.append(new AALOAD()); // load something unpredictable, no folding
+                                     // please
+
+            il.append(new INVOKESTATIC(br2));
+            il.append(new ISTORE(1));
+
+            GOTO gototail = new GOTO(null);
+
+            il.append(gototail);
+
+            InstructionHandle ret = il.append(new RETURN());
+            InstructionHandle ih = null;
+            for (int i = 0; i < nums; i++) {
+                ih = il.append(new ILOAD(1));
+                IFEQ ifeq = new IFEQ(null);
+                il.append(ifeq);
+
+                JSR jsr = new JSR(null);
+                GOTO next = new GOTO(null);
+
+                InstructionHandle h_jsr = il.append(jsr);
+                InstructionHandle h_goto = il.append(next);
+                InstructionHandle h_ret = il.append(new RETURN());
+
+                InstructionHandle danach = il.append(new ACONST_NULL());
+                jsr.setTarget(h_ret);
+                next.setTarget(danach);
+
+                il.append(new GOTO(ih));
+                ifeq.setTarget(ret);
+                ret = ih;
+            }
+
+            gototail.setTarget(ih);
+
+            mg.setMaxStack(Integer.MAX_VALUE); // Needed stack size
+
+            mg.setMaxLocals();
+            cg.addMethod(mg.getMethod());
+        }
+        /* Add public <init> method, i.e. empty constructor */
+        cg.addEmptyConstructor(Constants.ACC_PUBLIC);
+
+        /* Get JavaClass object and dump it to file. */
+        try {
+            System.out.println("dumping:"+theClassFile);
+            cg.getJavaClass().dump(theClassFile + ".class");
+        } catch (java.io.IOException e) {
+            System.err.println(e);
+        }
+        return theClassFile;
+    }
+
+    public static void main(String[] a) throws Exception {
+        int maxmeth_default = 250;
+        int nums_default = 20;
+        int maxmeth;
+        int nums;
+        try {
+            maxmeth = Integer.parseInt(a[0]);
+        }
+        catch (Exception e) {
+            maxmeth = maxmeth_default;
+        }
+        try {
+            nums = Integer.parseInt(a[1]);
+        }
+        catch (Exception e) {
+            nums = nums_default;
+        }       
+        String classname = genOOMCrashClass(maxmeth,nums);
+        System.out.println("Generated");
+        // System.out.println(a[0]);
+        // System.out.println("Loading");
+
+        // long t = System.currentTimeMillis();
+        // Class g2 = Class.forName(classname);
+        // long u = System.currentTimeMillis();
+        // System.out.println(g2 + ":" + (u - t));
+    }
+
+}
--- a/test/runtime/7020373/Test7020373.sh	Tue Jun 04 10:47:35 2013 -0700
+++ b/test/runtime/7020373/Test7020373.sh	Fri Aug 09 12:21:36 2013 +0100
@@ -61,8 +61,13 @@
 
 ${TESTJAVA}${FS}bin${FS}java ${BIT_FLAG} -version
 
-${TESTJAVA}${FS}bin${FS}jar xvf ${TESTSRC}${FS}testcase.jar
+# first step: compile GenOOMCrash generator
+${TESTJAVA}${FS}bin${FS}javac GenOOMCrashClass.java
 
+# second step: run the generator to create test class
+${TESTJAVA}${FS}bin${FS}java GenOOMCrashClass 1 4000
+
+# third step: run the reproducer
 ${TESTJAVA}${FS}bin${FS}java ${BIT_FLAG} OOMCrashClass4000_1 > test.out 2>&1
 
 cat test.out
Binary file test/runtime/7020373/testcase.jar has changed
--- a/test/runtime/7158804/Test7158804.sh	Tue Jun 04 10:47:35 2013 -0700
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,30 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-
-##
-## @test Test7158804.sh
-## @bug 7158804
-## @summary Improve config file parsing
-## @run shell Test7158804.sh
-##
-
-if [ "${TESTJAVA}" = "" ]
-then
-  echo "TESTJAVA not set.  Test cannot execute.  Failed."
-  exit 1
-fi
-echo "TESTJAVA=${TESTJAVA}"
-
-rm -f .hotspotrc
-echo -XX:+aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa >.hotspotrc
-${TESTJAVA}/bin/java ${TESTVMOPTS} -XX:+IgnoreUnrecognizedVMOptions -XX:Flags=.hotspotrc -version
-if [ $? -ne 0 ]
-then
-    echo "Test Failed"
-    exit 1
-fi
-rm -f .hotspotrc
-exit 0
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/mkbc.c	Fri Aug 09 12:21:36 2013 +0100
@@ -0,0 +1,607 @@
+/*
+ * Copyright 2009 Edward Nevill
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+
+#define DEFAULT_PREFIX	"do_"
+
+static char *prefix = (char *)DEFAULT_PREFIX;
+
+#define ISALPHA(c) (isalpha(c) || (c) == '_')
+#define ISALNUM(c) (isalnum(c) || (c) == '_')
+
+FILE *source_f, *bci_f;
+
+typedef struct Bytecode {
+	char	*name;
+	int	len;
+} Bytecode;
+
+typedef struct StringList {
+	struct StringList *next;
+	char 		*line;
+} StringList;
+
+typedef struct OpcodeList {
+	struct OpcodeList *next;
+	long	 	opcode;
+} OpcodeList;
+
+typedef struct OpcodeSequence {
+	struct OpcodeSequence *next;
+	OpcodeList	*opcode_list;
+} OpcodeSequence;
+
+typedef struct BytecodeImpl {
+	struct BytecodeImpl *next;
+	OpcodeSequence	*opcode_seq;
+	StringList	*macro_impl;
+	StringList	*direct_impl;
+	int		len;
+	char		*name;
+	char		*do_name;
+} BytecodeImpl;
+
+Bytecode bytecodes[256];
+
+BytecodeImpl *the_impl = 0;
+BytecodeImpl **the_impl_ptr = &the_impl;
+
+#define BUFLEN 1024
+
+static int lineno = 1;
+
+void fatal(const char *s)
+{
+	fputs(s, stderr);
+	fputc('\n', stderr);
+	exit(1);
+}
+
+void outmem(void)
+{
+	fprintf(stderr, "Out of memory\n");
+	exit(1);
+}
+
+void synerr(void)
+{
+	fprintf(stderr, "Syntax error at line %d\n", lineno);
+	exit(1);
+}
+
+int readchar()
+{
+	int c;
+
+	c = getc(source_f);
+	if (c == '\n') lineno++;
+	return c;
+}
+
+int readwhitespace(int c, char *buf, int len)
+{
+	int i = 0;
+
+	while ((isspace)(c)) {
+		if (buf && i < len-1) buf[i++] = c;
+		c = (readchar)();
+	}
+	if (buf && i < len) buf[i] = 0;
+	return c;
+}
+
+int skipwhitespace(int c)
+{
+	while ((isspace)(c)) {
+		c = (readchar)();
+	}
+	return c;
+}
+
+int readeol(int c, char *buf, int len)
+{
+	int i = 0;
+
+	while (c != '\n' && c != EOF) {
+		if (buf && i < len-1) buf[i++] = c;
+		c = (readchar)();
+	}
+	if (buf && i < len) buf[i] = 0;
+	if (c == '\n') c = (readchar)();
+	return c;
+}
+
+int skipeol(int c)
+{
+	while (c != '\n' && c != EOF) c = (readchar)();
+	if (c == '\n') c = (readchar)();
+	return c;
+}
+
+int readsymbol(int c, char *buf, int len)
+{
+	int i = 0;
+
+	while (ISALNUM(c)) {
+		if (buf && i < len-1) buf[i++] = c;
+		c = (readchar)();
+	}
+	if (buf && i < len) buf[i] = 0;
+	return c;
+}
+
+int bcdef(int c, char *buf, int len)
+{
+	BytecodeImpl *def;
+	OpcodeSequence *seq;
+	OpcodeSequence **seqp;
+	OpcodeList *opc;
+	OpcodeList **opcp;
+	StringList *macro, **macrop;
+	StringList *direct, **directp;
+	char *name;
+	char *line;
+	int i;
+	int length, overall_len;
+
+	def = (BytecodeImpl *)malloc(sizeof(BytecodeImpl));
+	if (!def) outmem();
+	def->next = 0;
+	def->opcode_seq = 0;
+	def->macro_impl = 0;
+	def->direct_impl = 0;
+	def->len = -1;
+	*the_impl_ptr = def;
+	the_impl_ptr = &(def->next);
+	seqp = &(def->opcode_seq);
+	overall_len = 0;
+	do {
+		seq = (OpcodeSequence *)malloc(sizeof(OpcodeSequence));
+		if (!seq) outmem();
+		seq->next = 0;
+		seq->opcode_list = 0;
+		*seqp = seq;
+		seqp = &(seq->next);
+		opcp = &(seq->opcode_list);
+		length = -2;
+		do {
+			c = (readchar)();
+			c = skipwhitespace(c);
+			if (!ISALPHA(c)) synerr();
+			c = readsymbol(c, buf, len);
+			c = skipwhitespace(c);
+			opc = (OpcodeList *)malloc(sizeof(OpcodeList));
+			if (!opc) outmem();
+			opc->next = 0;
+			opc->opcode = -1;
+			*opcp = opc;
+			opcp = &(opc->next);
+			name = strdup(buf);
+			if (!name) outmem();
+			for (i = 0; i < 256; i++) {
+				if (strcmp(name, bytecodes[i].name) == 0) {
+					opc->opcode = i;
+					break;
+				}
+			}
+			if (i == 256) {
+				fprintf(stderr, "No such opcode '%s'\n", name);
+				exit(1);
+			}
+			if (length == -2) length = bytecodes[i].len;
+		} while (c == ',');
+		overall_len += length;
+		if (c != ')') synerr();
+		c = (readchar)();
+		c = skipwhitespace(c);
+	} while (c == '(');
+//	strcpy(buf, "do_");
+	*buf = 0;
+	if (ISALPHA(c)) {
+		c = readsymbol(c, buf, len);
+		c = skipwhitespace(c);
+	} else {
+		seq = def->opcode_seq;
+//		strcat(buf, "bytecode");
+		while (seq) {
+			opc = seq->opcode_list;
+			if (*buf) strcat(buf, "_");
+			strcat(buf, bytecodes[opc->opcode].name);
+//			sprintf(buf+strlen(buf), "_%ld", opc->opcode);
+			seq = seq->next;
+		}
+	}
+	name = strdup(buf);
+	if (!name) outmem();
+	def->name = name;
+	def->do_name = name;
+	def->len = overall_len;
+	if (c != '{') synerr();
+	c = (readchar)();
+	while (c != '\n' && isspace(c)) c = (readchar)();
+	if (c != '\n') synerr();
+	c = (readchar)();
+	c = readwhitespace(c, buf, len);
+	macrop = &(def->macro_impl);
+	while (c != '}' && c != EOF) {
+		c = readeol(c, buf + strlen(buf), len - strlen(buf));
+		line = strdup(buf);
+		if (!line) outmem();
+		macro = (StringList *)malloc(sizeof(StringList));
+		if (!macro) outmem();
+		*macrop = macro;
+		macrop = &(macro->next);
+		macro->next = 0;
+		macro->line = line;
+		c = readwhitespace(c, buf, len);
+	}
+	if (c != '}') synerr();
+	c = (readchar)();
+	c = skipwhitespace(c);
+	if (ISALPHA(c)) {
+		c = readsymbol(c, buf, len);
+		c = skipwhitespace(c);
+		name = strdup(buf);
+		if (!name) outmem();
+		def->do_name = name;
+	}
+	if (c == '[') {
+		c = (readchar)();
+		while (c != '\n' && isspace(c)) c = (readchar)();
+		if (c != '\n') synerr();
+		c = (readchar)();
+		c = readwhitespace(c, buf, len);
+		directp = &(def->direct_impl);
+		while (c != ']' && c != EOF) {
+			c = readeol(c, buf + strlen(buf), len - strlen(buf));
+			line = strdup(buf);
+			if (!line) outmem();
+			direct = (StringList *)malloc(sizeof(StringList));
+			if (!direct) outmem();
+			*directp = direct;
+			directp = &(direct->next);
+			direct->next = 0;
+			direct->line = line;
+			c = readwhitespace(c, buf, len);
+		}
+		if (c != ']') synerr();
+		c = (readchar)();
+	}
+	return c;
+}
+
+void mkbc(void)
+{
+	char buf[BUFLEN];
+	char *endptr;
+	int c;
+	char *name;
+	long opcode, len;
+
+	c = (readchar)();
+	c = skipwhitespace(c);
+	while (c != EOF) {
+		if (c == '@' || c == '#') {
+			c = skipeol(c);
+		} else if (ISALPHA(c)) {
+			c = readsymbol(c, buf, BUFLEN);
+			c = skipwhitespace(c);
+			if (c == '=') {
+				name = strdup(buf);
+				if (!name) outmem();
+				c = (readchar)();
+				c = skipwhitespace(c);
+				if (!(isdigit)(c)) synerr();
+				c = readsymbol(c, buf, BUFLEN);
+				opcode = strtol(buf, &endptr, 0);
+				if (*endptr != 0) synerr();
+				c = skipwhitespace(c);
+				if (c != ',') synerr();
+				c = (readchar)();
+				c = skipwhitespace(c);
+				if (!(isdigit)(c)) synerr();
+				c = readsymbol(c, buf, BUFLEN);
+				len = strtol(buf, &endptr, 0);
+				if (*endptr != 0) synerr();
+				bytecodes[opcode].name = name;
+				bytecodes[opcode].len = len;
+			}
+		} else if (c == '(') {
+			c = bcdef(c, buf, BUFLEN);
+		} else synerr();
+		c = skipwhitespace(c);
+	}
+}
+
+typedef struct TableEntry {
+	BytecodeImpl *impl;
+	char *impl_name;
+	char *def_name;
+	struct TableEntry *subtable;
+} TableEntry;
+
+TableEntry *the_table;
+
+int is_duplicate(TableEntry *a, TableEntry *b)
+{
+	int i;
+	char buf[256];
+
+	for (i = 0; i < 256; i++) {
+		if (a[i].subtable || b[i].subtable) {
+			if (!(a[i].subtable) || !(b[i].subtable)) return 0;
+			if (!is_duplicate(a[i].subtable, b[i].subtable)) return 0;
+		} else if (a[i].impl_name && b[i].impl_name) {
+			if (strcmp(a[i].impl_name, b[i].impl_name) != 0)
+				return 0;
+		} else if (a[i].def_name && b[i].def_name) {
+			if (strcmp(a[i].def_name, b[i].def_name) != 0)
+				return 0;
+		} else return 0;
+	}
+	return 1;
+}
+
+void remove_duplicates(TableEntry *table, int start, int *table_indices, int depth)
+{
+	TableEntry *start_entry = table[start].subtable;
+	int i, j;
+
+	if (!start_entry) fatal("Subtable is NULL in remove_duplicates!!!");
+	for (i = start+1; i < 256; i++) {
+		if (table[i].subtable) {
+			if (is_duplicate(start_entry, table[i].subtable)) {
+				fputs("dispatch", bci_f);
+				for (j = 0; j < depth; j++) {
+					fputc('_', bci_f);
+					fputs(bytecodes[table_indices[j]].name, bci_f);
+				}
+				fputc('_', bci_f);
+				fputs(bytecodes[i].name, bci_f);
+				fputs(":\n", bci_f);
+				free(table[i].subtable);
+				table[i].subtable = 0;
+			}
+		}
+	}
+}
+
+void writeouttable(TableEntry *table, int *table_indices, int depth)
+{
+	int i, j;
+	int len;
+
+	for (i = 0; i < 256; i++) {
+		if (table[i].subtable) {
+			len = 0;
+			fputs("\t.word\tdispatch", bci_f);
+			table_indices[depth] = i;
+			for (j = 0; j <= depth; j++) {
+				fputc('_', bci_f);
+				fputs(bytecodes[table_indices[j]].name, bci_f);
+				len += bytecodes[table_indices[j]].len;
+			}
+			fprintf(bci_f, "+%d\n", len);
+		} else {
+			if (table[i].impl_name)
+				fprintf(bci_f, "\t.word\t%s%s \t@ %d 0x%02x\n", prefix, table[i].impl_name, i, i);
+			else
+				fprintf(bci_f, "\t.word\t%s%s \t@ %d 0x%02x\n", prefix, table[i].def_name, i, i);
+		}
+	}
+	if (depth == 0) {
+		fputs("\t.endm\n", bci_f);
+		fputs("\t.macro\tSUB_DISPATCH_TABLES\n", bci_f);
+	}
+	for (i = 0; i < 256; i++) {
+		if (table[i].subtable) {
+			fputs("dispatch", bci_f);
+			table_indices[depth] = i;
+			for (j = 0; j <= depth; j++) {
+				fputc('_', bci_f);
+				fputs(bytecodes[table_indices[j]].name, bci_f);
+			}
+			fprintf(bci_f, ":\t@ %d 0x%02x\n", i, i);
+			remove_duplicates(table, i, table_indices, depth);
+			writeouttable(table[i].subtable, table_indices, depth+1);
+		}
+	}
+}
+
+void do_tableentry(BytecodeImpl *impl, TableEntry **tablep, int *table_indices, int depth)
+{
+	TableEntry *table;
+	char *def = (char *)"undefined";
+	int i,j;
+
+	if (depth == 0) fatal("Depth = 0 for tableentry\n");
+	for (i = 0; i < depth; i++) {
+		table = *tablep;
+		if (!table) {
+			table = (TableEntry *)malloc(sizeof(TableEntry) * 256);
+			if (!table) outmem();
+			*tablep = table;
+			def = strdup(def);
+			if (!def) outmem();
+			for (j = 0; j < 256; j++) {
+				table[j].impl_name = 0;
+				table[j].def_name = def;
+				table[j].subtable = 0;
+			}
+		}
+		table = &table[table_indices[i]];
+		tablep = &(table->subtable);
+		if (table->impl_name) def = table->def_name;
+	}
+	if (!table->impl_name)
+		table->impl_name = impl->do_name;
+	table->def_name = impl->do_name;
+}
+
+void dumpseq(BytecodeImpl *impl, OpcodeSequence *seq, int *table_indices, int depth)
+{
+	OpcodeList *opc;
+
+	opc = seq->opcode_list;
+	while (opc) {
+		table_indices[depth++] = opc->opcode;
+		if (seq->next != NULL) {
+			dumpseq(impl, seq->next, table_indices, depth);
+		} else {
+			do_tableentry(impl, &the_table, table_indices, depth);
+		}
+		depth--;
+		opc = opc->next;
+	}
+}
+
+void dumptable(void)
+{
+	BytecodeImpl *impl = the_impl;
+	int table_indices[256];
+	int j;
+	char	buf[256];
+	char *def;
+
+	the_table = (TableEntry *)malloc(sizeof(TableEntry) * 256);
+	if (!the_table) outmem();
+	for (j = 0; j < 256; j++) {
+		sprintf(buf, "%s", bytecodes[j].name);
+		def = strdup(buf);
+		if (!def) outmem();
+		the_table[j].impl_name = 0;
+		the_table[j].def_name = def;
+		the_table[j].subtable = 0;
+	}
+	while (impl) {
+		dumpseq(impl, impl->opcode_seq, table_indices, 0);
+		impl = impl->next;
+	}
+	fputs("\t.macro\tMAIN_DISPATCH_TABLE\n", bci_f);
+	writeouttable(the_table, table_indices, 0);
+	fputs("\t.endm\n", bci_f);
+}
+
+void dumpimpl(void)
+{
+	BytecodeImpl *impl = the_impl;
+	OpcodeList *opc;
+	StringList *code;
+	StringList *sl;
+	char buf[BUFLEN];
+	char macro[BUFLEN];
+
+	while (impl) {
+		buf[0] = 0;
+		fprintf(bci_f, "@-----------------------------------------------------------------------------\n");
+		fprintf(bci_f, "\t.macro\t%s\tjpc_off=0, seq_len=%d\n", impl->name, impl->len);
+		sl = impl->macro_impl;
+		while (sl) {
+			fputs(sl->line, bci_f);
+			fputc('\n', bci_f);
+			sl = sl->next;
+		}
+		fprintf(bci_f, "\t.endm\n\n");
+		sl = impl->direct_impl;
+		if (sl) {
+			do {
+				fputs(sl->line, bci_f);
+				fputc('\n', bci_f);
+				sl = sl->next;
+			} while (sl);
+		} else {
+			fprintf(bci_f, "\tOpcode\t%s\n", impl->do_name);
+//			fprintf(bci_f, "%s:\n", impl->do_name);
+			fprintf(bci_f, "\t%s\n", impl->name);
+//			fprintf(bci_f, "\tDISPATCH\t%d\n", impl->len);
+		}
+		impl = impl->next;
+	}
+}
+
+void dumpbc()
+{
+	int i;
+
+	for (i = 0; i < 256; i++) {
+		if (strcmp(bytecodes[i].name, "undefined") != 0)
+			fprintf(bci_f, "#define opc_%s\t\t0x%02x\n", bytecodes[i].name, i);
+	}
+	fputc('\n', bci_f);
+	dumpimpl();
+	dumptable();
+}
+
+void usage(void)
+{
+	fatal("Usage: mkbc <bytecode definition file> <asm output file>");
+}
+
+int main(int argc, char **argv)
+{
+	int i;
+	char *source, *bci;
+	char *s;
+
+	source = bci = 0;
+	while (s = *++argv) {
+		if (s[0] == '-' && s[1] != 0) {
+			if (s[1] == 'P') {
+				prefix = s+2;
+			} else {
+				fprintf(stderr, "Unrecognized option %s\n", s);
+				usage();
+			}
+		} else {
+			if (!source) source = s;
+			else if (!bci) bci = s;
+			else {
+				fprintf(stderr, "Too many arguments\n");
+				usage();
+			}
+		}
+	}
+	if (!bci) {
+		fprintf(stderr, "Too few arguments\n");
+		usage();
+	}
+	if (strcmp(source, "-") == 0) {
+		source_f = stdin;
+	} else {
+		source_f = fopen(source, "r");
+		if (!source_f) fatal("Error opening source file");
+	}
+	if (strcmp(bci, "-") == 0) {
+		bci_f = stdout;
+	} else {
+		bci_f = fopen(bci, "w");
+		if (!bci_f) fatal("Error opening bci file for write");
+	}
+	for (i = 0; i < 256; i++) {
+		bytecodes[i].name = (char *)"undefined";
+		bytecodes[i].len = -1;
+	}
+	mkbc();
+	dumpbc();
+	if (ferror(source_f)) fatal("Error reading source");
+	if (ferror(bci_f)) fatal("Error writing bci");
+	if (source_f != stdin) fclose(source_f);
+	if (bci_f != stdout) fclose(bci_f);
+
+	return 0;
+}