changeset 5117:1efaab66c81d ppc-aix-port-b02

Basic AIX adaption. With this change the VM can successfully run 'HelloWorld' on AIX
author simonis
date Mon, 22 Oct 2012 18:15:38 +0200
parents 50fbe70b4f3f
children aba346eb84ac
files make/aix/makefiles/build_vm_def.sh make/aix/makefiles/jvmg.make make/aix/makefiles/ppc64.make make/aix/makefiles/vm.make src/cpu/ppc/vm/assembler_ppc.cpp src/cpu/ppc/vm/assembler_ppc.hpp src/cpu/ppc/vm/assembler_ppc.inline.hpp src/cpu/ppc/vm/globals_ppc.hpp src/cpu/ppc/vm/nativeInst_ppc.hpp src/os/aix/vm/attachListener_aix.cpp src/os/aix/vm/globals_aix.hpp src/os/aix/vm/libperfstat_aix.cpp src/os/aix/vm/libperfstat_aix.hpp src/os/aix/vm/loadlib_aix.cpp src/os/aix/vm/osThread_aix.cpp src/os/aix/vm/os_aix.cpp src/os/aix/vm/os_aix.hpp src/os/aix/vm/perfMemory_aix.cpp src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp src/share/vm/runtime/sharedRuntime.hpp src/share/vm/runtime/stubRoutines.hpp src/share/vm/utilities/globalDefinitions.hpp src/share/vm/utilities/globalDefinitions_xlc.hpp
diffstat 23 files changed, 3567 insertions(+), 1968 deletions(-) [+]
line wrap: on
line diff
--- a/make/aix/makefiles/build_vm_def.sh	Wed Sep 26 16:54:25 2012 +0200
+++ b/make/aix/makefiles/build_vm_def.sh	Mon Oct 22 18:15:38 2012 +0200
@@ -4,13 +4,15 @@
 if [ "$CROSS_COMPILE_ARCH" != "" ]; then 
 NM=$ALT_COMPILER_PATH/nm
 else
-NM=nm
+# On AIX we have to avoid picking up the 'nm' version from the GNU binutils,
+# which may be installed under /opt/freeware/bin. So better use an absolute path here!
+NM=/usr/bin/nm
 fi
 
-$NM --defined-only $* \
+$NM -X64 -B -C $* \
     | awk '{
-              if ($3 ~ /^_ZTV/ || $3 ~ /^gHotSpotVM/) print "\t" $3 ";"
+              if (($2 == "d" || $2 == "D") && ($3 ~ /^__vft/ || $3 ~ /^gHotSpotVM/)) print "\t" $3 ";"
               if ($3 ~ /^UseSharedSpaces$/) print "\t" $3 ";"
-              if ($3 ~ /^_ZN9Arguments17SharedArchivePathE$/) print "\t" $3 ";"
+              if ($3 ~ /^SharedArchivePath__9Arguments$/) print "\t" $3 ";"
           }' \
     | sort -u
--- a/make/aix/makefiles/jvmg.make	Wed Sep 26 16:54:25 2012 +0200
+++ b/make/aix/makefiles/jvmg.make	Mon Oct 22 18:15:38 2012 +0200
@@ -36,9 +36,6 @@
 # Linker mapfile
 MAPFILE = $(GAMMADIR)/make/aix/makefiles/mapfile-vers-debug
 
-# xlc 10.01 parameters for ipa linkage: none for dbg/jvmg build.
-LFLAGS_QIPA=-DLUCY_JVMG
-
 G_SUFFIX = _g
 VERSION = debug
 SYSDEFS += -DASSERT -DDEBUG
--- a/make/aix/makefiles/ppc64.make	Wed Sep 26 16:54:25 2012 +0200
+++ b/make/aix/makefiles/ppc64.make	Mon Oct 22 18:15:38 2012 +0200
@@ -37,7 +37,9 @@
 OPT_CFLAGS += -qarch=ppc64 -qtune=$(QTUNE) -qinlglue
 
 # We need variable length arrays
-CFLAGS += -qlanglvl=extc99
+CFLAGS += -qlanglvl=c99vla
+# Just to check for unwanted macro redefinitions
+CFLAGS += -qlanglvl=noredefmac
 
 # Surpress those "implicit private" warnings xlc gives.
 #  - The omitted keyword "private" is assumed for base class "...".
--- a/make/aix/makefiles/vm.make	Wed Sep 26 16:54:25 2012 +0200
+++ b/make/aix/makefiles/vm.make	Mon Oct 22 18:15:38 2012 +0200
@@ -273,11 +273,7 @@
   LIBS_VM                  += $(STATIC_STDCXX) $(LIBS)
 endif
 
-LINK_VM = $(LINK_LIB.CC)
-
-# xlc 10.1 parameters for ipa linkage.
-# LFLAGS_QIPA set in debug.make/fastdebug.make/jvmg.make/optimized.make
-LFLAGS_VM += $(LFLAGS_QIPA)
+LINK_VM = $(LINK_LIB.CXX)
 
 # create loadmap for libjvm.so by default. Helps in diagnosing some problems.
 LFLAGS_VM += -bloadmap:libjvm.loadmap
--- a/src/cpu/ppc/vm/assembler_ppc.cpp	Wed Sep 26 16:54:25 2012 +0200
+++ b/src/cpu/ppc/vm/assembler_ppc.cpp	Mon Oct 22 18:15:38 2012 +0200
@@ -1187,6 +1187,36 @@
   call_VM_leaf(entry_point);
 }
 
+// Check whether ppc_instruction is a read access to the polling page 
+// which was emitted by load_from_polling_page(..).
+bool MacroAssembler::is_load_from_polling_page(int ppc_instruction, void* ucontext) {
+  if (!is_ppc_ld(ppc_instruction))
+    return false; // it's not a ppc_ld. Fail.
+
+  int rt = inv_rt_field(ppc_instruction);
+  int ra = inv_ra_field(ppc_instruction);
+  int ds = inv_ds_field(ppc_instruction);
+
+  if (!(ds == 0 && ra != 0 && rt == 0)) {
+    return false; // it's not a ppc_ld(r0, X, ra). Fail.
+  }
+
+  if (!ucontext) {
+    return true; // no ucontext given. Can't check value of ra. Assume true
+  }
+
+#ifdef LINUX
+  // Ucontext given. Check that register ra contains the address of
+  // the safepoint polling page.
+  ucontext_t* uc = (ucontext_t*) ucontext;
+  return (address)uc->uc_mcontext.regs->gpr[ra] == os::get_polling_page();
+#else
+  // Not on Linux, ucontext must be NULL.
+  ShouldNotReachHere();
+  return false;
+#endif
+}
+
 void MacroAssembler::bang_stack_with_offset(int offset) {
   Unimplemented(); // TODO: PPC port
 }
--- a/src/cpu/ppc/vm/assembler_ppc.hpp	Wed Sep 26 16:54:25 2012 +0200
+++ b/src/cpu/ppc/vm/assembler_ppc.hpp	Mon Oct 22 18:15:38 2012 +0200
@@ -2199,9 +2199,13 @@
   // Java utilities
   //
 
-  // Check whether ppc_instruction is a write access to the memory
-  // serialization page realized by one of the instructions stw, stwu,
-  // stwx, or stwux.
+  // Read from the polling page, its address is already in a register.
+  inline void load_from_polling_page(Register polling_page_address);
+  // Check whether ppc_instruction is a read access to the polling page
+  // which was emitted by load_from_polling_page(..).
+  static bool is_load_from_polling_page(int ppc_instruction, void* ucontext/*may be NULL*/);
+  // Check whether ppc_instruction is a write access to the memory serialization
+  // page realized by one of the instructions stw, stwu, stwx, or stwux.
   static bool is_memory_serialization(int ppc_instruction, JavaThread* thread, void* ucontext);
 
   // Support for NULL-checks
@@ -2265,9 +2269,7 @@
   static bool needs_explicit_null_check(intptr_t offset);
 
   // Trap-instruction-based checks.
-#if defined(AIX)
   inline void ppc_trap_null_check(Register a);
-#endif
   static bool is_ppc_trap_null_check(int x)         { return is_ppc_tdi(x, traptoEqual, -1/*any reg*/, 0); }
 
   static bool is_ppc_trap_zombie_not_entrant(int x) { return is_ppc_tdi(x, traptoUnconditional, 0/*reg 0*/, 1); }
@@ -2294,6 +2296,26 @@
   void reset_last_Java_frame(void);
   void set_top_ijava_frame_at_SP_as_last_Java_frame(Register sp, Register tmp1);
 
+  // 'should not reach here'.
+  inline void ppc_trap_should_not_reach_here();
+  static bool is_ppc_trap_should_not_reach_here(int x) { return is_ppc_tdi(x, traptoUnconditional, 0/*reg 0*/, 2); }
+
+  // SIGTRAP-based range checks for arrays.
+  inline void ppc_trap_range_check_le(Register a, int si16);
+  static bool is_ppc_trap_range_check_le(int x) { 
+    return is_ppc_twi(x, traptoEqual | traptoLessThanUnsigned, -1/*any reg*/);
+  }
+  inline void ppc_trap_range_check_ge(Register a, Register b);
+  inline void ppc_trap_range_check_ge(Register a, int si16);
+  static bool is_ppc_trap_range_check_ge(int x) { 
+    return (is_ppc_tw (x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/, -1/*any reg*/) ||
+            is_ppc_twi(x, traptoEqual | traptoGreaterThanUnsigned, -1/*any reg*/)                  );
+  }
+
+  static bool is_ppc_trap_range_check(int x) { 
+    return is_ppc_trap_range_check_le(x) || is_ppc_trap_range_check_ge(x); 
+  }
+
   // 
   // Debugging
   //
--- a/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Wed Sep 26 16:54:25 2012 +0200
+++ b/src/cpu/ppc/vm/assembler_ppc.inline.hpp	Mon Oct 22 18:15:38 2012 +0200
@@ -1177,9 +1177,7 @@
 }
 
 // Trap-instruction-based checks.
-#if defined(AIX)
 inline void MacroAssembler::ppc_trap_null_check(Register a)  { ppc_tdi(traptoEqual,         a/*reg a*/, 0); }
-#endif
 
 inline void MacroAssembler::ppc_trap_ic_miss_check(Register a, Register b) {
   ppc_td(traptoGreaterThanUnsigned | traptoLessThanUnsigned, a, b);
--- a/src/cpu/ppc/vm/globals_ppc.hpp	Wed Sep 26 16:54:25 2012 +0200
+++ b/src/cpu/ppc/vm/globals_ppc.hpp	Mon Oct 22 18:15:38 2012 +0200
@@ -66,7 +66,7 @@
           "Reoptimize code-sequences of calls at runtime.")                 \
   product(bool, UseLoadInstructionsForStackBangingPPC64, false,             \
           "Use load instructions for stack banging.")                       \
-  product(bool, UseSIGTRAP, false, /* set false for dev in gamma launcher TODO: PPC port*/ \
+  product(bool, UseSIGTRAP, true,                                           \
           "Allow trap instructions that make use of SIGTRAP.")              \
                                                                             \
   /* Temporarily introduced option, will be deleted after stablized */      \
@@ -82,6 +82,10 @@
   product(bool, UseStaticBranchPredictionInCompareAndSwapPPC64, true,       \
           "Use static branch prediction hints in CAS operations.")          \
                                                                             \
+  /* implicit range checks */                                               \
+  product(bool, ImplicitRangeChecks, true,                                  \
+          "generate code for implicit range checks for array accesses")     \
+                                                                            \
 
 
 
--- a/src/cpu/ppc/vm/nativeInst_ppc.hpp	Wed Sep 26 16:54:25 2012 +0200
+++ b/src/cpu/ppc/vm/nativeInst_ppc.hpp	Mon Oct 22 18:15:38 2012 +0200
@@ -75,9 +75,21 @@
   }  
   static bool is_sigill_zombie_not_entrant_at(address addr);
 
+  // SIGTRAP-based implicit range checks
+  bool is_sigtrap_range_check() {
+    assert(UseSIGTRAP && ImplicitRangeChecks, "precondition");
+    return MacroAssembler::is_ppc_trap_range_check(long_at(0));
+  }
+
+  // 'should not reach here'.
+  bool is_sigtrap_should_not_reach_here() {
+    return MacroAssembler::is_ppc_trap_should_not_reach_here(long_at(0));
+  }
+
   bool is_safepoint_poll() {
-    Unimplemented(); // TODO: PPC port
-    return false;
+    // Is the current instruction a POTENTIAL read access to the polling page?
+    // The current arguments of the instruction are not checked!
+    return MacroAssembler::is_load_from_polling_page(long_at(0), NULL);
   }
 
   bool is_memory_serialization(JavaThread *thread, void *ucontext) {
--- a/src/os/aix/vm/attachListener_aix.cpp	Wed Sep 26 16:54:25 2012 +0200
+++ b/src/os/aix/vm/attachListener_aix.cpp	Mon Oct 22 18:15:38 2012 +0200
@@ -99,6 +99,9 @@
   static char* path()                   { return _path; }
   static bool has_path()                { return _has_path; }
   static int listener()                 { return _listener; }
+  // Shutdown marker to prevent accept blocking during clean-up
+  static void set_shutdown(bool shutdown) { _shutdown = shutdown; }
+  static bool is_shutdown()     { return _shutdown; }
 
   // write the given buffer to a socket
   static int write_fully(int s, char* buf, int len);
@@ -169,7 +172,18 @@
 //    should be sufficient for cleanup.
 extern "C" {
   static void listener_cleanup() {
-    Unimplemented();
+    static int cleanup_done;
+    if (!cleanup_done) {
+      cleanup_done = 1;
+      AixAttachListener::set_shutdown(true);
+      int s = AixAttachListener::listener();
+      if (s != -1) {
+        ::shutdown(s, 2);
+      }
+      if (AixAttachListener::has_path()) {
+        ::unlink(AixAttachListener::path());
+      }
+    }
   }
 }
 
--- a/src/os/aix/vm/globals_aix.hpp	Wed Sep 26 16:54:25 2012 +0200
+++ b/src/os/aix/vm/globals_aix.hpp	Mon Oct 22 18:15:38 2012 +0200
@@ -30,7 +30,22 @@
 // Defines Aix specific flags. They are not available on other platforms.
 //
 #define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
-                                                                        \
+                                                                                    \
+  /* If UseLargePages == true allow or deny usage of 16M pages. 16M pages are  */   \
+  /* a scarce resource and there may be situations where we do not want the VM */   \
+  /* to run with 16M pages. (Will fall back to 64K pages).                     */   \
+  product_pd(bool, Use16MPages,                                                     \
+          "Use 16M pages if available.")                                            \
+                                                                                    \
+  /*  use optimized addresses for the polling page, */                              \
+  /* e.g. map it to a special 32-bit address.       */                              \
+  product_pd(bool, OptimizePollingPageLocation,                                     \
+          "Optimize the location of the polling page used for Safepoints")          \
+                                                                                    \
+
+// By default, do not allow 16M pages. 16M pages have to be switched on specifically.
+define_pd_global(bool, Use16MPages, false);
+define_pd_global(bool, OptimizePollingPageLocation, true);
 
 //
 // Defines Aix-specific default values. The flags are available on all
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/libperfstat_aix.cpp	Mon Oct 22 18:15:38 2012 +0200
@@ -0,0 +1,100 @@
+#include "runtime/arguments.hpp"
+#include "libperfstat_aix.hpp"
+
+// For dlopen and friends
+#include <fcntl.h>
+
+// handle to the libperfstat
+static void* g_libhandle = NULL;
+
+// whether initialization worked
+static bool g_initialized = false;
+
+
+typedef int (*fun_perfstat_cpu_total_t) (perfstat_id_t *name, perfstat_cpu_total_t* userbuff,
+                                         int sizeof_userbuff, int desired_number);
+
+typedef int (*fun_perfstat_memory_total_t) (perfstat_id_t *name, perfstat_memory_total_t* userbuff,
+                                            int sizeof_userbuff, int desired_number);
+
+typedef void (*fun_perfstat_reset_t) ();
+
+static fun_perfstat_cpu_total_t	    g_fun_perfstat_cpu_total = NULL;
+static fun_perfstat_memory_total_t  g_fun_perfstat_memory_total = NULL;
+static fun_perfstat_reset_t         g_fun_perfstat_reset = NULL;
+
+bool libperfstat::init() {
+  
+  if (g_initialized) {
+    return true;
+  }
+
+  g_initialized = false;
+
+  // dynamically load the libperfstat porting library.
+  g_libhandle = dlopen("/usr/lib/libperfstat.a(shr_64.o)", RTLD_MEMBER | RTLD_NOW);
+  if (!g_libhandle) {
+    if (Verbose) {
+      fprintf(stderr, "Cannot load libperfstat.a (dlerror: %s)", dlerror());
+    }
+    return false;
+  }
+
+  // resolve function pointers
+
+#define RESOLVE_FUN_NO_ERROR(name) \
+  g_fun_##name = (fun_##name##_t) dlsym(g_libhandle, #name); 
+
+#define RESOLVE_FUN(name) \
+  RESOLVE_FUN_NO_ERROR(name) \
+  if (!g_fun_##name) { \
+    if (Verbose) { \
+      fprintf(stderr, "Cannot resolve " #name "() from libperfstat.a\n" \
+                      "   (dlerror: %s)", dlerror()); \
+      } \
+    return false; \
+  }
+
+  RESOLVE_FUN(perfstat_cpu_total);
+  RESOLVE_FUN(perfstat_memory_total);
+  RESOLVE_FUN(perfstat_reset);
+
+  g_initialized = true;
+
+  return true;
+}
+
+void libperfstat::cleanup() {
+
+  g_initialized = false;
+
+  if (g_libhandle) {
+    dlclose(g_libhandle);
+    g_libhandle = NULL;
+  }
+
+  g_fun_perfstat_cpu_total = NULL;
+  g_fun_perfstat_memory_total = NULL;
+  g_fun_perfstat_reset = NULL;
+}
+
+int libperfstat::perfstat_memory_total(perfstat_id_t *name, 
+                                       perfstat_memory_total_t* userbuff, 
+                                       int sizeof_userbuff, int desired_number) {
+  assert(g_initialized, "libperfstat not initialized");
+  assert(g_fun_perfstat_memory_total, "");
+  return g_fun_perfstat_memory_total(name, userbuff, sizeof_userbuff, desired_number);
+}
+
+int libperfstat::perfstat_cpu_total(perfstat_id_t *name, perfstat_cpu_total_t* userbuff, 
+                                    int sizeof_userbuff, int desired_number) {
+  assert(g_initialized, "libperfstat not initialized");
+  assert(g_fun_perfstat_cpu_total, "");
+  return g_fun_perfstat_cpu_total(name, userbuff, sizeof_userbuff, desired_number);
+}
+
+void libperfstat::perfstat_reset() {
+  assert(g_initialized, "libperfstat not initialized");
+  assert(g_fun_perfstat_reset, "");
+  g_fun_perfstat_reset();
+}
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/os/aix/vm/libperfstat_aix.hpp	Mon Oct 22 18:15:38 2012 +0200
@@ -0,0 +1,35 @@
+// encapsulates the libperfstat library. 
+//
+// The purpose of this code is to dynamically load the libperfstat library
+// instead of statically linking against it. The libperfstat library is an
+// AIX-specific library which only exists on AIX, not on PASE. To share
+// binaries between AIX and PASE, we cannot directly link against libperfstat.so.
+
+#ifndef OS_AIX_VM_LIBPERFSTAT_AIX_HPP
+#define OS_AIX_VM_LIBPERFSTAT_AIX_HPP
+
+#include <libperfstat.h>
+
+class libperfstat {
+
+public:
+
+  // Load the libperfstat library (must be in LIBPATH).
+  // Returns true if succeeded, false if error.
+  static bool init();
+
+  // Cleanup of the dynamically loaded libperfstat library.
+  static void cleanup();
+
+  // direct wrappers for the libperfstat functionality. All they do is
+  // to call the functions with the same name via function pointers.
+  static int perfstat_cpu_total(perfstat_id_t *name, perfstat_cpu_total_t* userbuff, 
+                                int sizeof_userbuff, int desired_number);
+
+  static int perfstat_memory_total(perfstat_id_t *name, perfstat_memory_total_t* userbuff, 
+                                   int sizeof_userbuff, int desired_number);
+
+  static void perfstat_reset();
+};
+
+#endif // OS_AIX_VM_LIBPERFSTAT_AIX_HPP
--- a/src/os/aix/vm/loadlib_aix.cpp	Wed Sep 26 16:54:25 2012 +0200
+++ b/src/os/aix/vm/loadlib_aix.cpp	Mon Oct 22 18:15:38 2012 +0200
@@ -30,9 +30,15 @@
 // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp
 //      ?topic=/com.ibm.aix.basetechref/doc/basetrf1/loadquery.htm
 
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+// 'allocation.inline.hpp' triggers the inclusion of 'inttypes.h' which defines macros
+// required by the definitions in 'globalDefinitions.hpp'. But these macros in 'inttypes.h'
+// are only defined if '__STDC_FORMAT_MACROS' is defined!
+#include "memory/allocation.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/threadCritical.hpp"
-//#include "sap/utils_buffer.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/ostream.hpp"
 #include "loadlib_aix.hpp"
@@ -40,22 +46,6 @@
 
 // For loadquery()
 #include <sys/ldr.h>
-#include <demangle.h>
-
-#define trcVerbose(fmt, ...) {           \
-  if (Verbose) {                         \
-    fprintf(stderr, fmt, ##__VA_ARGS__); \
-    fputc('\n', stderr); fflush(stderr); \
-  }                                      \
-}
-#define ERRBYE(s) { trcVerbose( s ); return -1; }
-
-// The semantics in this file are thus that codeptr_t is a *real code ptr*
-// This means that any function taking codeptr_t as arguments will assume
-// a real codeptr and won't handle function descriptors (eg getFuncName),
-// whereas functions taking address as args will deal with function 
-// descriptors (eg os::dll_address_to_library_name)
-typedef unsigned int* codeptr_t;
 
 ///////////////////////////////////////////////////////////////////////////////    
 // Implementation for LoadedLibraryModule
@@ -120,15 +110,16 @@
   // discard old content
   num_loaded = 0;
 
-#if 0
   // Call loadquery(L_GETINFO..) to get a list of all loaded Dlls from AIX.
-  MiscUtils::SpongeBuffer loadquery_buf(4000, 0);
-  loadquery_buf.ensure_size(1000, false);
+  size_t buf_size = 4096;
+  char* loadquery_buf = AllocateHeap(buf_size, "LoadedLibraries::reload");
 
-  while(loadquery(L_GETINFO, loadquery_buf.get_ptr(), loadquery_buf.get_size()) == -1) {
+  while(loadquery(L_GETINFO, loadquery_buf, buf_size) == -1) {
     if (errno == ENOMEM) {
-      loadquery_buf.ensure_size(loadquery_buf.get_size() * 2, false);
+      buf_size *= 2;
+      loadquery_buf = ReallocateHeap(loadquery_buf, buf_size, "LoadedLibraries::reload");
     } else {
+      FreeHeap(loadquery_buf);
       // Ensure that the uintptr_t pointer is valid
       assert(errno != EFAULT, "loadquery: Invalid uintptr_t in info buffer.");
       fprintf(stderr, "loadquery failed (%d %s)", errno, strerror(errno));
@@ -137,11 +128,8 @@
   }
 
   // Iterate over the loadquery result. For details see sys/ldr.h on AIX.
-  const struct ld_info* p = (struct ld_info*) loadquery_buf.get_ptr();
-#else
-  Unimplemented();
-  const struct ld_info* p = NULL;
-#endif
+  const struct ld_info* p = (struct ld_info*) loadquery_buf;
+
   // Ensure we have all loaded libs.
   bool all_loaded = false;
   while(num_loaded < MAX_MODULES) {
@@ -177,6 +165,8 @@
     }
   }
 
+  FreeHeap(loadquery_buf);
+
   // Ensure we have all loaded libs
   assert(all_loaded, "loadquery returned more entries then expected. Please increase MAX_MODULES");
 
--- a/src/os/aix/vm/osThread_aix.cpp	Wed Sep 26 16:54:25 2012 +0200
+++ b/src/os/aix/vm/osThread_aix.cpp	Mon Oct 22 18:15:38 2012 +0200
@@ -45,7 +45,6 @@
   _expanding_stack = 0;
   _alt_sig_stack = NULL;
 
-  Unimplemented();
   _last_cpu_times.sys = _last_cpu_times.user = 0L;
 
   sigemptyset(&_caller_sigmask);
--- a/src/os/aix/vm/os_aix.cpp	Wed Sep 26 16:54:25 2012 +0200
+++ b/src/os/aix/vm/os_aix.cpp	Mon Oct 22 18:15:38 2012 +0200
@@ -22,6 +22,11 @@
  * questions.
  *
  */
+
+// According to the AIX OS doc #pragma alloca must be used
+// with C++ compiler before referencing the function alloca()
+#pragma alloca
+
 // no precompiled headers
 #include "classfile/classLoader.hpp"
 #include "classfile/systemDictionary.hpp"
@@ -30,7 +35,6 @@
 #include "code/vtableStubs.hpp"
 #include "compiler/compileBroker.hpp"
 #include "interpreter/interpreter.hpp"
-#include "jvm_aix.h"
 #include "memory/allocation.inline.hpp"
 #include "memory/filemap.hpp"
 #include "mutex_aix.inline.hpp"
@@ -62,6 +66,9 @@
 #include "utilities/events.hpp"
 #include "utilities/growableArray.hpp"
 #include "utilities/vmError.hpp"
+#include "jvm_aix.h"
+#include "loadlib_aix.hpp"
+#include "libperfstat_aix.hpp"
 #ifdef TARGET_ARCH_ppc
 # include "assembler_ppc.inline.hpp"
 # include "nativeInst_ppc.hpp"
@@ -73,112 +80,143 @@
 #include "opto/runtime.hpp"
 #endif
 
-// put OS-includes here
-# include <sys/types.h>
-# include <sys/mman.h>
-# include <sys/select.h>
-# include <pthread.h>
-# include <signal.h>
-# include <errno.h>
-# include <stdio.h>
-# include <unistd.h>
-# include <sys/resource.h>
-# include <pthread.h>
-# include <sys/stat.h>
-# include <sys/time.h>
-# include <sys/times.h>
-# include <sys/utsname.h>
-# include <sys/socket.h>
-# include <sys/wait.h>
-# include <pwd.h>
-# include <poll.h>
-# include <semaphore.h>
-# include <fcntl.h>
-# include <string.h>
-# include <sys/sysinfo.h>
-# include <sys/ipc.h>
-# include <sys/shm.h>
-# include <stdint.h>
-# include <inttypes.h>
-# include <sys/ioctl.h>
+// put OS-includes here (sorted alphabetically)
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <poll.h>
+#include <pthread.h>
+#include <pwd.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/ipc.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/select.h>
+#include <sys/shm.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/sysinfo.h>
+#include <sys/systemcfg.h>
+#include <sys/time.h>
+#include <sys/times.h>
+#include <sys/types.h>
+#include <sys/utsname.h>
+#include <sys/vminfo.h>
+#include <sys/wait.h>
+
+// Excerpts from systemcfg.h definitions newer than AIX 5.2 and XLC 8
+// On AIX 5.3/ xlC10 some of the definitions are available but 
+// e.g. PV_/ stuff is still missing so keep the definitions
+//
+// XLC 8 build is using the systemcfg.h from /usr/vacpp/include/sys instead of /usr/include/sys
+// Missing values for the version field
+#define PV_5_2 0x0F0001        /* Power PC 5 */
+#define PV_5_3 0x0F0002        /* Power PC 5 */
+#define PV_6 0x100000          /* Power PC 6 */
+#define PV_6_1 0x100001        /* Power PC 6 DD1.x */
+#define PV_7 0x200000          /* Power PC 7 */
+#define PV_5_Compat 0x0F8000   /* Power PC 5 */
+#define PV_6_Compat 0x108000   /* Power PC 6 */
+#define PV_7_Compat 0x208000   /* Power PC 7 */
 
 #define MAX_PATH    (2 * K)
 
 // for timer info max values which include all bits
 #define ALL_64_BITS CONST64(0xFFFFFFFFFFFFFFFF)
-
-// #define LARGEPAGES_BIT (1 << 6)
+// for multipage initialization error analysis (in 'g_multipage_error')
+#define ERROR_MP_OS_TOO_OLD                          100
+#define ERROR_MP_EXTSHM_ACTIVE                       101
+#define ERROR_MP_VMGETINFO_FAILED                    102
+#define ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K 103
+
+// the semantics in this file are thus that codeptr_t is a *real code ptr*
+// This means that any function taking codeptr_t as arguments will assume
+// a real codeptr and won't handle function descriptors (eg getFuncName),
+// whereas functions taking address as args will deal with function
+// descriptors (eg os::dll_address_to_library_name)
+typedef unsigned int* codeptr_t;
+
+// typedefs for stackslots, stack pointers, pointers to op codes
+typedef unsigned long stackslot_t;
+typedef stackslot_t* stackptr_t;
+
+// query dimensions of the stack of the calling thread
+static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size);
+
+// function to check a given stack pointer against given stack limits
+inline bool is_valid_stackpointer(stackptr_t sp, stackptr_t stack_base, size_t stack_size) {
+  if (((uintptr_t)sp) & 0x7) {
+    return false;
+  }
+  if (sp > stack_base) {
+    return false;
+  }
+  if (sp < (stackptr_t) ((address)stack_base - stack_size)) {
+    return false;
+  }
+  return true;
+}
+
+// returns true if function is a valid codepointer
+inline bool is_valid_codepointer(codeptr_t p) {
+  if (!p) {
+    return false;
+  }
+  if (((uintptr_t)p) & 0x3) {
+    return false;
+  }
+  if (LoadedLibraries::find_for_text_address((address)p) == NULL) {
+    return false;
+  }
+  return true;
+}
+
+// macro to check a given stack pointer against given stack limits and to die if test fails
+#define CHECK_STACK_PTR(sp, stack_base, stack_size) { \
+    guarantee(is_valid_stackpointer((stackptr_t)(sp), (stackptr_t)(stack_base), stack_size), "Stack Pointer Invalid"); \
+}
+
+// macro to check the current stack pointer against given stacklimits
+#define CHECK_CURRENT_STACK_PTR(stack_base, stack_size) { \
+  address sp; \
+  sp = os::current_stack_pointer(); \
+  CHECK_STACK_PTR(sp, stack_base, stack_size); \
+}
+
 ////////////////////////////////////////////////////////////////////////////////
-// global variables
-julong os::Aix::_physical_memory = 0;
-
-// address   os::Aix::_initial_thread_stack_bottom = NULL;
-// uintptr_t os::Aix::_initial_thread_stack_size   = 0;
-// 
-// int (*os::Aix::_clock_gettime)(clockid_t, struct timespec *) = NULL;
-// int (*os::Aix::_pthread_getcpuclockid)(pthread_t, clockid_t *) = NULL;
-// Mutex* os::Aix::_createThread_lock = NULL;
+// global variables (for a description see os_aix.hpp)
+
+julong    os::Aix::_physical_memory = 0;
 pthread_t os::Aix::_main_thread = ((pthread_t)0);
-int os::Aix::_page_size = -1;
-// bool os::Aix::_is_floating_stack = false;
-// bool os::Aix::_is_NPTL = false;
-// bool os::Aix::_supports_fast_thread_cpu_time = false;
-// const char * os::Aix::_glibc_version = NULL;
-// const char * os::Aix::_libpthread_version = NULL;
-
-static jlong initial_time_count = 0;
-
-static int clock_tics_per_sec = 100;
-
-// For diagnostics to print a message once. see run_periodic_checks
-static sigset_t check_signal_done;
-static bool check_signals = true;
-
-static pid_t _initial_pid = 0;
-
-/* Signal number used to suspend/resume a thread */
-
-/* do not use any signal number less than SIGSEGV, see 4355769 */
-static int SR_signum = SIGUSR2;
-sigset_t SR_sigset;
-
-/* Used to protect dlsym() calls */
-static pthread_mutex_t dl_mutex;
-
-// #ifdef JAVASE_EMBEDDED
-// class MemNotifyThread: public Thread {
-//   friend class VMStructs;
-//  public:
-//   virtual void run();
-// 
-//  private:
-//   static MemNotifyThread* _memnotify_thread;
-//   int _fd;
-// 
-//  public:
-// 
-//   // Constructor
-//   MemNotifyThread(int fd);
-// 
-//   // Tester
-//   bool is_memnotify_thread() const { return true; }
-// 
-//   // Printing
-//   char* name() const { return (char*)"Linux MemNotify Thread"; }
-// 
-//   // Returns the single instance of the MemNotifyThread
-//   static MemNotifyThread* memnotify_thread() { return _memnotify_thread; }
-// 
-//   // Create and start the single instance of MemNotifyThread
-//   static void start();
-// };
-// #endif // JAVASE_EMBEDDED
-// 
-// ////////////////////////////////////////////////////////////////////////////////
-// // utility functions
-// 
-// static int SR_initialize();
-// static int SR_finalize();
+int       os::Aix::_page_size = -1;
+int       os::Aix::_on_pase = -1;
+int       os::Aix::_os_version = -1;
+int       os::Aix::_stack_page_size = -1;
+size_t    os::Aix::_shm_default_page_size = -1;
+int       os::Aix::_can_use_64K_pages = -1;
+int       os::Aix::_can_use_16M_pages = -1;
+int       os::Aix::_xpg_sus_mode = -1;
+int       os::Aix::_extshm = -1;
+int       os::Aix::_logical_cpus = -1;
+
+////////////////////////////////////////////////////////////////////////////////
+// local variables
+
+static int      g_multipage_error  = -1;   // error analysis for multipage initialization
+static jlong    initial_time_count = 0;
+static int      clock_tics_per_sec = 100;
+static sigset_t check_signal_done;         // For diagnostics to print a message once (see run_periodic_checks)
+static bool     check_signals      = true;
+static pid_t    _initial_pid       = 0;
+static int      SR_signum          = SIGUSR2; // Signal used to suspend/resume a thread (must be > SIGSEGV, see 4355769)
+static sigset_t SR_sigset;
+static pthread_mutex_t dl_mutex;           // Used to protect dlsym() calls
 
 julong os::available_memory() {
   return Aix::available_memory();
@@ -228,89 +266,299 @@
 }
 
 
-// #ifndef SYS_gettid
-// // i386: 224, ia64: 1105, amd64: 186, sparc 143
-// #ifdef __ia64__
-// #define SYS_gettid 1105
-// #elif __i386__
-// #define SYS_gettid 224
-// #elif __amd64__
-// #define SYS_gettid 186
-// #elif __sparc__
-// #define SYS_gettid 143
-// #else
-// #error define gettid for the arch
-// #endif
-// #endif
-// 
-// // Cpu architecture string
-// #if   defined(ZERO)
-// static char cpu_arch[] = ZERO_LIBARCH;
-// #elif defined(IA64)
-// static char cpu_arch[] = "ia64";
-// #elif defined(IA32)
-// static char cpu_arch[] = "i386";
-// #elif defined(AMD64)
-// static char cpu_arch[] = "amd64";
-// #elif defined(ARM)
-// static char cpu_arch[] = "arm";
-// #elif defined(PPC)
-// static char cpu_arch[] = "ppc";
-// #elif defined(PPC64)
-// static char cpu_arch[] = "ppc64";
-// #elif defined(SPARC)
-// #  ifdef _LP64
-// static char cpu_arch[] = "sparcv9";
-// #  else
-// static char cpu_arch[] = "sparc";
-// #  endif
-// #else
-// #error Add appropriate cpu_arch setting
-// #endif
-
-
-// pid_t gettid()
-//
-// Returns the kernel thread id of the currently running thread. Kernel
-// thread id is used to access /proc.
-//
-// (Note that getpid() on LinuxThreads returns kernel thread id too; but
-// on NPTL, it returns the same pid for all threads, as required by POSIX.)
-//
-// Return the kernel thread id here instead of the pthread id
+// Cpu architecture string
+#if defined(PPC)
+static char cpu_arch[] = "ppc";
+#elif defined(PPC64)
+static char cpu_arch[] = "ppc64";
+#else
+#error Add appropriate cpu_arch setting
+#endif
+
+
+// Given an address, returns the size of the page backing that address
+size_t os::Aix::query_pagesize(void* addr) {
+
+  vm_page_info pi;
+  pi.addr = (uint64_t)addr;
+  if (::vmgetinfo(&pi, VM_PAGE_INFO, sizeof(pi)) == 0) {
+    return pi.pagesize;
+  } else {
+    fprintf(stderr, "vmgetinfo failed to retrieve page size for address %p (errno %d).\n", addr, errno);
+    assert(false, "vmgetinfo failed to retrieve page size");
+    return SIZE_4K;
+  }
+
+}
+
+// Returns the kernel thread id of the currently running thread.
 pid_t os::Aix::gettid() {
-  Unimplemented();
-  return 0;
+  return (pid_t) thread_self();
+}
+
+void os::Aix::initialize_system_info() {
+
+  // get the number of online(logical) cpus instead of configured
+  os::_processor_count = sysconf(_SC_NPROCESSORS_ONLN);
+  assert(_processor_count > 0, "_processor_count must be > 0");
+
+  // retrieve total physical storage
+  os::Aix::meminfo_t mi;
+  if (!os::Aix::get_meminfo(&mi)) {
+    fprintf(stderr, "os::Aix::get_meminfo failed.\n"); fflush(stderr);
+    assert(false, "os::Aix::get_meminfo failed.");
+  }
+  _physical_memory = (julong) mi.real_total;
+
+}
+
+// Helper function for tracing page sizes.
+static const char* describe_pagesize(size_t pagesize) {
+  switch (pagesize) {
+    case SIZE_4K : return "4K";
+    case SIZE_64K: return "64K";
+    case SIZE_16M: return "16M";
+    case SIZE_16G: return "16G";
+    default:
+      assert(false, "surprise");
+      return "??";
+  }
 }
 
-// // Most versions of linux have a bug where the number of processors are
-// // determined by looking at the /proc file system.  In a chroot environment,
-// // the system call returns 1.  This causes the VM to act as if it is
-// // a single processor and elide locking (see is_MP() call).
-// static bool unsafe_chroot_detected = false;
-// static const char *unstable_chroot_error = "/proc file system not found.\n"
-//                      "Java may be unstable running multithreaded in a chroot "
-//                      "environment on Linux when /proc filesystem is not mounted.";
-// 
-// void os::Aix::initialize_system_info() {
-//   set_processor_count(sysconf(_SC_NPROCESSORS_CONF));
-//   if (processor_count() == 1) {
-//     pid_t pid = os::Aix::gettid();
-//     char fname[32];
-//     jio_snprintf(fname, sizeof(fname), "/proc/%d", pid);
-//     FILE *fp = fopen(fname, "r");
-//     if (fp == NULL) {
-//       unsafe_chroot_detected = true;
-//     } else {
-//       fclose(fp);
-//     }
-//   }
-//   _physical_memory = (julong)sysconf(_SC_PHYS_PAGES) * (julong)sysconf(_SC_PAGESIZE);
-//   assert(processor_count() > 0, "linux error");
-// }
-
+// Retrieve information about multipage size support. Will initialize
+// Aix::_page_size, Aix::_stack_page_size, Aix::_can_use_64K_pages,
+// Aix::_can_use_16M_pages. 
+// Must be called before calling os::large_page_init().
+void os::Aix::query_multipage_support() {
+
+  guarantee(_page_size == -1 &&
+            _stack_page_size == -1 &&
+            _can_use_64K_pages == -1 &&
+            _can_use_16M_pages == -1 &&
+            g_multipage_error == -1, 
+            "do not call twice");
+
+  _page_size = ::sysconf(_SC_PAGESIZE);
+
+  // This really would surprise me.
+  assert(_page_size == SIZE_4K, "surprise!");
+
+
+  // query default data page size (default page size for C-Heap, pthread stacks and .bss).
+  // Default data page size is influenced either by linker options (-bdatapsize)
+  // or by environment variable LDR_CNTRL (suboption DATAPSIZE). If none is given,
+  // default should be 4K.
+  size_t data_page_size = SIZE_4K;
+  {
+    void* p = ::malloc(SIZE_16M);
+    data_page_size = os::Aix::query_pagesize(p);
+    ::free(p);
+  }
+
+  // query default shm page size (LDR_CNTRL SHMPSIZE)
+  {
+    const int shmid = ::shmget(IPC_PRIVATE, 1, IPC_CREAT | S_IRUSR | S_IWUSR);
+    guarantee(shmid != -1, "shmget failed");
+    void* p = ::shmat(shmid, NULL, 0);
+    ::shmctl(shmid, IPC_RMID, NULL);
+    guarantee(p != (void*) -1, "shmat failed");
+    _shm_default_page_size = os::Aix::query_pagesize(p);
+    ::shmdt(p);
+  }
+
+  // before querying the stack page size, make sure we are not running as primordial
+  // thread (because primordial thread's stack may have different page size than 
+  // pthread thread stacks). Running a VM on the primordial thread won't work for a 
+  // number of reasons so we may just as well guarantee it here
+  guarantee(!os::Aix::is_primordial_thread(), "Must not be called for primordial thread");
+
+  // query stack page size
+  {
+    int dummy = 0;
+    _stack_page_size = os::Aix::query_pagesize(&dummy);
+    // everything else would surprise me and should be looked into
+    guarantee(_stack_page_size == SIZE_4K || _stack_page_size == SIZE_64K, "Wrong page size");
+    // also, just for completeness: pthread stacks are allocated from C heap, so 
+    // stack page size should be the same as data page size
+    guarantee(_stack_page_size == data_page_size, "stack page size should be the same as data page size");
+  }
+
+  // EXTSHM is bad: among other things, it prevents setting pagesize dynamically
+  // for system V shm.
+  if (Aix::extshm()) {
+    if (Verbose) {
+      fprintf(stderr, "EXTSHM is active - will disable large page support. "
+                      "Please make sure EXTSHM is OFF for large page support.");
+    }
+    g_multipage_error = ERROR_MP_EXTSHM_ACTIVE;
+    _can_use_64K_pages = _can_use_16M_pages = 0;
+    goto query_multipage_support_end;
+  }
+
+  // now check which page sizes the OS claims it supports, and of those, which actually can be used.
+  {
+    const int MAX_PAGE_SIZES = 4;
+    psize_t sizes[MAX_PAGE_SIZES];
+    const int num_psizes = ::vmgetinfo(sizes, VMINFO_GETPSIZES, MAX_PAGE_SIZES);
+    if (num_psizes == -1) {
+      if (Verbose) {
+        fprintf(stderr, "vmgetinfo(VMINFO_GETPSIZES) failed (errno: %d)\n", errno);
+        fprintf(stderr, "disabling multipage support.\n");
+      }
+      g_multipage_error = ERROR_MP_VMGETINFO_FAILED;
+      _can_use_64K_pages = _can_use_16M_pages = 0;
+      goto query_multipage_support_end;
+    }
+    guarantee(num_psizes > 0, "vmgetinfo(.., VMINFO_GETPSIZES, ...) failed.");
+    assert(num_psizes <= MAX_PAGE_SIZES, "Surprise! more than 4 page sizes?");
+    if (Verbose) {
+      fprintf(stderr, "vmgetinfo(.., VMINFO_GETPSIZES, ...) returns %d supported page sizes: ", num_psizes);
+      for (int i = 0; i < num_psizes; i ++) {
+        fprintf(stderr, " %s ", describe_pagesize(sizes[i]));
+      }
+      fprintf(stderr, " .\n");
+    }
+
+    // Can we use 64K, 16M pages?
+    _can_use_64K_pages = 0;
+    _can_use_16M_pages = 0;
+    for (int i = 0; i < num_psizes; i ++) {
+      if (sizes[i] == SIZE_64K) {
+        _can_use_64K_pages = 1;
+      } else if (sizes[i] == SIZE_16M) {
+        _can_use_16M_pages = 1;
+      }
+    }
+
+    if (!_can_use_64K_pages) {
+      g_multipage_error = ERROR_MP_VMGETINFO_CLAIMS_NO_SUPPORT_FOR_64K;
+    }
+
+    // Double-check for 16M pages: Even if AIX claims to be able to use 16M pages,
+    // there must be an actual 16M page pool, and we must run with enough rights.
+    if (_can_use_16M_pages) {
+      const int shmid = ::shmget(IPC_PRIVATE, SIZE_16M, IPC_CREAT | S_IRUSR | S_IWUSR);
+      guarantee(shmid != -1, "shmget failed");
+      struct shmid_ds shm_buf = { 0 };
+      shm_buf.shm_pagesize = SIZE_16M;
+      const bool can_set_pagesize = ::shmctl(shmid, SHM_PAGESIZE, &shm_buf) == 0 ? true : false;
+      const int en = errno;
+      ::shmctl(shmid, IPC_RMID, NULL);
+      if (!can_set_pagesize) {
+        if (Verbose) {
+          fprintf(stderr, "Failed to allocate even one misely 16M page. shmctl failed with %d (%s).\n"
+                          "Will deactivate 16M support.", en, strerror(en));
+        }
+        _can_use_16M_pages = 0;  
+      }
+    }
+
+  } // end: check which pages can be used for shared memory
+
+query_multipage_support_end:
+
+  guarantee(_page_size != -1 &&
+            _stack_page_size != -1 &&
+            _can_use_64K_pages != -1 &&
+            _can_use_16M_pages != -1, "Page sizes not properly initialized");
+  
+  if (_can_use_64K_pages) {
+    g_multipage_error = 0;
+  }
+
+  if (Verbose) {
+    fprintf(stderr, "Data page size (C-Heap, bss, etc): %s\n", describe_pagesize(data_page_size));
+    fprintf(stderr, "Thread stack page size (pthread): %s\n", describe_pagesize(_stack_page_size));
+    fprintf(stderr, "Default shared memory page size: %s\n", describe_pagesize(_shm_default_page_size));
+    fprintf(stderr, "Can use 64K pages dynamically with shared meory: %s\n", (_can_use_64K_pages ? "yes" :"no"));
+    fprintf(stderr, "Can use 16M pages dynamically with shared memory: %s\n", (_can_use_16M_pages ? "yes" :"no"));
+    fprintf(stderr, "Multipage error details: %d\n", g_multipage_error);
+  }
+
+} // end os::Aix::query_multipage_support()
+
+
+// The code for this method was initially derived from the version in os_linux.cpp
 void os::init_system_properties_values() {
-  Unimplemented();
+  // The next few definitions allow the code to be verbatim:
+#define malloc(n) (char*)NEW_C_HEAP_ARRAY(char, (n))
+#define DEFAULT_LIBPATH "/usr/lib:/lib"
+#define EXTENSIONS_DIR  "/lib/ext"
+#define ENDORSED_DIR    "/lib/endorsed"
+
+  // sysclasspath, java_home, dll_dir
+  char *home_path;
+  char *dll_path;
+  char *pslash;
+  char buf[MAXPATHLEN];
+  os::jvm_path(buf, sizeof(buf));
+
+  // Found the full path to libjvm.so.
+  // Now cut the path to <java_home>/jre if we can.
+  *(strrchr(buf, '/')) = '\0'; // get rid of /libjvm.so
+  pslash = strrchr(buf, '/');
+  if (pslash != NULL) {
+    *pslash = '\0';            // get rid of /{client|server|hotspot}
+  }
+
+  dll_path = malloc(strlen(buf) + 1);
+  strcpy(dll_path, buf);
+  Arguments::set_dll_dir(dll_path);
+  
+  if (pslash != NULL) {
+    pslash = strrchr(buf, '/');
+    if (pslash != NULL) {
+      *pslash = '\0';          // get rid of /<arch>
+      pslash = strrchr(buf, '/');
+      if (pslash != NULL) {
+        *pslash = '\0';        // get rid of /lib
+      }
+    }
+  }
+
+  home_path = malloc(strlen(buf) + 1);
+  strcpy(home_path, buf);
+  Arguments::set_java_home(home_path);
+
+  if (!set_boot_path('/', ':')) return;
+
+  // Where to look for native libraries
+
+  // On Aix we get the user setting of LIBPATH
+  // Eventually, all the library path setting will be done here.
+  char *ld_library_path;
+
+  // Construct the invariant part of ld_library_path.
+  ld_library_path = (char *) malloc(sizeof(DEFAULT_LIBPATH));
+  sprintf(ld_library_path, DEFAULT_LIBPATH);
+
+  // Get the user setting of LIBPATH, and prepend it.
+  char *v = ::getenv("LIBPATH");
+  if (v == NULL) {
+    v = "";
+  }
+
+  char *t = ld_library_path;
+  /* That's +1 for the colon and +1 for the trailing '\0' */
+  ld_library_path = (char *) malloc(strlen(v) + 1 + strlen(t) + 1);
+  sprintf(ld_library_path, "%s:%s", v, t);
+  
+  Arguments::set_library_path(ld_library_path);
+  
+  // Extensions directories
+  char* cbuf = malloc(strlen(Arguments::get_java_home()) + sizeof(EXTENSIONS_DIR));
+  sprintf(cbuf, "%s" EXTENSIONS_DIR, Arguments::get_java_home());
+  Arguments::set_ext_dirs(cbuf);
+
+  // Endorsed standards default directory.
+  cbuf = malloc(strlen(Arguments::get_java_home()) + sizeof(ENDORSED_DIR));
+  sprintf(cbuf, "%s" ENDORSED_DIR, Arguments::get_java_home());
+  Arguments::set_endorsed_dirs(cbuf);
+
+#undef malloc
+#undef DEFAULT_LIBPATH
+#undef EXTENSIONS_DIR
+#undef ENDORSED_DIR
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -341,51 +589,54 @@
            return false;
 }
 
-// void os::Aix::signal_sets_init() {
-//   // Should also have an assertion stating we are still single-threaded.
-//   assert(!signal_sets_initialized, "Already initialized");
-//   // Fill in signals that are necessarily unblocked for all threads in
-//   // the VM. Currently, we unblock the following signals:
-//   // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
-//   //                         by -Xrs (=ReduceSignalUsage));
-//   // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
-//   // other threads. The "ReduceSignalUsage" boolean tells us not to alter
-//   // the dispositions or masks wrt these signals.
-//   // Programs embedding the VM that want to use the above signals for their
-//   // own purposes must, at this time, use the "-Xrs" option to prevent
-//   // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
-//   // (See bug 4345157, and other related bugs).
-//   // In reality, though, unblocking these signals is really a nop, since
-//   // these signals are not blocked by default.
-//   sigemptyset(&unblocked_sigs);
-//   sigemptyset(&allowdebug_blocked_sigs);
-//   sigaddset(&unblocked_sigs, SIGILL);
-//   sigaddset(&unblocked_sigs, SIGSEGV);
-//   sigaddset(&unblocked_sigs, SIGBUS);
-//   sigaddset(&unblocked_sigs, SIGFPE);
-//   sigaddset(&unblocked_sigs, SR_signum);
-// 
-//   if (!ReduceSignalUsage) {
-//    if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
-//       sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
-//       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
-//    }
-//    if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
-//       sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
-//       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
-//    }
-//    if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
-//       sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
-//       sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
-//    }
-//   }
-//   // Fill in signals that are blocked by all but the VM thread.
-//   sigemptyset(&vm_sigs);
-//   if (!ReduceSignalUsage)
-//     sigaddset(&vm_sigs, BREAK_SIGNAL);
-//   debug_only(signal_sets_initialized = true);
-// 
-// }
+
+void os::Aix::signal_sets_init() {
+  // Should also have an assertion stating we are still single-threaded.
+  assert(!signal_sets_initialized, "Already initialized");
+  // Fill in signals that are necessarily unblocked for all threads in
+  // the VM. Currently, we unblock the following signals:
+  // SHUTDOWN{1,2,3}_SIGNAL: for shutdown hooks support (unless over-ridden
+  //                         by -Xrs (=ReduceSignalUsage));
+  // BREAK_SIGNAL which is unblocked only by the VM thread and blocked by all
+  // other threads. The "ReduceSignalUsage" boolean tells us not to alter
+  // the dispositions or masks wrt these signals.
+  // Programs embedding the VM that want to use the above signals for their
+  // own purposes must, at this time, use the "-Xrs" option to prevent
+  // interference with shutdown hooks and BREAK_SIGNAL thread dumping.
+  // (See bug 4345157, and other related bugs).
+  // In reality, though, unblocking these signals is really a nop, since
+  // these signals are not blocked by default.
+  sigemptyset(&unblocked_sigs);
+  sigemptyset(&allowdebug_blocked_sigs);
+  sigaddset(&unblocked_sigs, SIGILL);
+  sigaddset(&unblocked_sigs, SIGSEGV);
+  sigaddset(&unblocked_sigs, SIGBUS);
+  sigaddset(&unblocked_sigs, SIGFPE);
+  sigaddset(&unblocked_sigs, SIGTRAP);
+  sigaddset(&unblocked_sigs, SIGDANGER);
+  sigaddset(&unblocked_sigs, SR_signum);
+
+  if (!ReduceSignalUsage) {
+   if (!os::Aix::is_sig_ignored(SHUTDOWN1_SIGNAL)) {
+      sigaddset(&unblocked_sigs, SHUTDOWN1_SIGNAL);
+      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN1_SIGNAL);
+   }
+   if (!os::Aix::is_sig_ignored(SHUTDOWN2_SIGNAL)) {
+      sigaddset(&unblocked_sigs, SHUTDOWN2_SIGNAL);
+      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN2_SIGNAL);
+   }
+   if (!os::Aix::is_sig_ignored(SHUTDOWN3_SIGNAL)) {
+      sigaddset(&unblocked_sigs, SHUTDOWN3_SIGNAL);
+      sigaddset(&allowdebug_blocked_sigs, SHUTDOWN3_SIGNAL);
+   }
+  }
+  // Fill in signals that are blocked by all but the VM thread.
+  sigemptyset(&vm_sigs);
+  if (!ReduceSignalUsage)
+    sigaddset(&vm_sigs, BREAK_SIGNAL);
+  debug_only(signal_sets_initialized = true);
+
+}
 
 // These are signals that are unblocked while a thread is running Java.
 // (For some reason, they get blocked by default.)
@@ -429,275 +680,264 @@
   }
 }
 
-// //////////////////////////////////////////////////////////////////////////////
-// // detecting pthread library
-// 
-// void os::Aix::libpthread_init() {
-//   // Save glibc and pthread version strings. Note that _CS_GNU_LIBC_VERSION
-//   // and _CS_GNU_LIBPTHREAD_VERSION are supported in glibc >= 2.3.2. Use a
-//   // generic name for earlier versions.
-//   // Define macros here so we can build HotSpot on old systems.
-// # ifndef _CS_GNU_LIBC_VERSION
-// # define _CS_GNU_LIBC_VERSION 2
-// # endif
-// # ifndef _CS_GNU_LIBPTHREAD_VERSION
-// # define _CS_GNU_LIBPTHREAD_VERSION 3
-// # endif
-// 
-//   size_t n = confstr(_CS_GNU_LIBC_VERSION, NULL, 0);
-//   if (n > 0) {
-//      char *str = (char *)malloc(n);
-//      confstr(_CS_GNU_LIBC_VERSION, str, n);
-//      os::Aix::set_glibc_version(str);
-//   } else {
-//      // _CS_GNU_LIBC_VERSION is not supported, try gnu_get_libc_version()
-//      static char _gnu_libc_version[32];
-//      jio_snprintf(_gnu_libc_version, sizeof(_gnu_libc_version),
-//               "glibc %s %s", gnu_get_libc_version(), gnu_get_libc_release());
-//      os::Aix::set_glibc_version(_gnu_libc_version);
-//   }
-// 
-//   n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);
-//   if (n > 0) {
-//      char *str = (char *)malloc(n);
-//      confstr(_CS_GNU_LIBPTHREAD_VERSION, str, n);
-//      // Vanilla RH-9 (glibc 2.3.2) has a bug that confstr() always tells
-//      // us "NPTL-0.29" even we are running with LinuxThreads. Check if this
-//      // is the case. LinuxThreads has a hard limit on max number of threads.
-//      // So sysconf(_SC_THREAD_THREADS_MAX) will return a positive value.
-//      // On the other hand, NPTL does not have such a limit, sysconf()
-//      // will return -1 and errno is not changed. Check if it is really NPTL.
-//      if (strcmp(os::Aix::glibc_version(), "glibc 2.3.2") == 0 &&
-//          strstr(str, "NPTL") &&
-//          sysconf(_SC_THREAD_THREADS_MAX) > 0) {
-//        free(str);
-//        os::Aix::set_libpthread_version("linuxthreads");
-//      } else {
-//        os::Aix::set_libpthread_version(str);
-//      }
-//   } else {
-//     // glibc before 2.3.2 only has LinuxThreads.
-//     os::Aix::set_libpthread_version("linuxthreads");
-//   }
-// 
-//   if (strstr(libpthread_version(), "NPTL")) {
-//      os::Aix::set_is_NPTL();
-//   } else {
-//      os::Aix::set_is_LinuxThreads();
-//   }
-// 
-//   // LinuxThreads have two flavors: floating-stack mode, which allows variable
-//   // stack size; and fixed-stack mode. NPTL is always floating-stack.
-//   if (os::Aix::is_NPTL() || os::Aix::supports_variable_stack_size()) {
-//      os::Aix::set_is_floating_stack();
-//   }
-// }
-// 
-// /////////////////////////////////////////////////////////////////////////////
-// // thread stack
-// 
-// // Force Linux kernel to expand current thread stack. If "bottom" is close
-// // to the stack guard, caller should block all signals.
-// //
-// // MAP_GROWSDOWN:
-// //   A special mmap() flag that is used to implement thread stacks. It tells
-// //   kernel that the memory region should extend downwards when needed. This
-// //   allows early versions of LinuxThreads to only mmap the first few pages
-// //   when creating a new thread. Linux kernel will automatically expand thread
-// //   stack as needed (on page faults).
-// //
-// //   However, because the memory region of a MAP_GROWSDOWN stack can grow on
-// //   demand, if a page fault happens outside an already mapped MAP_GROWSDOWN
-// //   region, it's hard to tell if the fault is due to a legitimate stack
-// //   access or because of reading/writing non-exist memory (e.g. buffer
-// //   overrun). As a rule, if the fault happens below current stack pointer,
-// //   Linux kernel does not expand stack, instead a SIGSEGV is sent to the
-// //   application (see Linux kernel fault.c).
-// //
-// //   This Linux feature can cause SIGSEGV when VM bangs thread stack for
-// //   stack overflow detection.
-// //
-// //   Newer version of LinuxThreads (since glibc-2.2, or, RH-7.x) and NPTL do
-// //   not use this flag. However, the stack of initial thread is not created
-// //   by pthread, it is still MAP_GROWSDOWN. Also it's possible (though
-// //   unlikely) that user code can create a thread with MAP_GROWSDOWN stack
-// //   and then attach the thread to JVM.
-// //
-// // To get around the problem and allow stack banging on Linux, we need to
-// // manually expand thread stack after receiving the SIGSEGV.
-// //
-// // There are two ways to expand thread stack to address "bottom", we used
-// // both of them in JVM before 1.5:
-// //   1. adjust stack pointer first so that it is below "bottom", and then
-// //      touch "bottom"
-// //   2. mmap() the page in question
-// //
-// // Now alternate signal stack is gone, it's harder to use 2. For instance,
-// // if current sp is already near the lower end of page 101, and we need to
-// // call mmap() to map page 100, it is possible that part of the mmap() frame
-// // will be placed in page 100. When page 100 is mapped, it is zero-filled.
-// // That will destroy the mmap() frame and cause VM to crash.
-// //
-// // The following code works by adjusting sp first, then accessing the "bottom"
-// // page to force a page fault. Linux kernel will then automatically expand the
-// // stack mapping.
-// //
-// // _expand_stack_to() assumes its frame size is less than page size, which
-// // should always be true if the function is not inlined.
-// 
-// #if __GNUC__ < 3    // gcc 2.x does not support noinline attribute
-// #define NOINLINE
-// #else
-// #define NOINLINE __attribute__ ((noinline))
-// #endif
-// 
-// static void _expand_stack_to(address bottom) NOINLINE;
-// 
-// static void _expand_stack_to(address bottom) {
-//   address sp;
-//   size_t size;
-//   volatile char *p;
-// 
-//   // Adjust bottom to point to the largest address within the same page, it
-//   // gives us a one-page buffer if alloca() allocates slightly more memory.
-//   bottom = (address)align_size_down((uintptr_t)bottom, os::Aix::page_size());
-//   bottom += os::Aix::page_size() - 1;
-// 
-//   // sp might be slightly above current stack pointer; if that's the case, we
-//   // will alloca() a little more space than necessary, which is OK. Don't use
-//   // os::current_stack_pointer(), as its result can be slightly below current
-//   // stack pointer, causing us to not alloca enough to reach "bottom".
-//   sp = (address)&sp;
-// 
-//   if (sp > bottom) {
-//     size = sp - bottom;
-//     p = (volatile char *)alloca(size);
-//     assert(p != NULL && p <= (volatile char *)bottom, "alloca problem?");
-//     p[0] = '\0';
-//   }
-// }
-// 
-// bool os::Aix::manually_expand_stack(JavaThread * t, address addr) {
-//   assert(t!=NULL, "just checking");
-//   assert(t->osthread()->expanding_stack(), "expand should be set");
-//   assert(t->stack_base() != NULL, "stack_base was not initialized");
-// 
-//   if (addr <  t->stack_base() && addr >= t->stack_yellow_zone_base()) {
-//     sigset_t mask_all, old_sigset;
-//     sigfillset(&mask_all);
-//     pthread_sigmask(SIG_SETMASK, &mask_all, &old_sigset);
-//     _expand_stack_to(addr);
-//     pthread_sigmask(SIG_SETMASK, &old_sigset, NULL);
-//     return true;
-//   }
-//   return false;
-// }
-// 
-// //////////////////////////////////////////////////////////////////////////////
-// // create new thread
-// 
-// static address highest_vm_reserved_address();
-// 
-// // check if it's safe to start a new thread
-// static bool _thread_safety_check(Thread* thread) {
-//   if (os::Aix::is_LinuxThreads() && !os::Aix::is_floating_stack()) {
-//     // Fixed stack LinuxThreads (SuSE Linux/x86, and some versions of Redhat)
-//     //   Heap is mmap'ed at lower end of memory space. Thread stacks are
-//     //   allocated (MAP_FIXED) from high address space. Every thread stack
-//     //   occupies a fixed size slot (usually 2Mbytes, but user can change
-//     //   it to other values if they rebuild LinuxThreads).
-//     //
-//     // Problem with MAP_FIXED is that mmap() can still succeed even part of
-//     // the memory region has already been mmap'ed. That means if we have too
-//     // many threads and/or very large heap, eventually thread stack will
-//     // collide with heap.
-//     //
-//     // Here we try to prevent heap/stack collision by comparing current
-//     // stack bottom with the highest address that has been mmap'ed by JVM
-//     // plus a safety margin for memory maps created by native code.
-//     //
-//     // This feature can be disabled by setting ThreadSafetyMargin to 0
-//     //
-//     if (ThreadSafetyMargin > 0) {
-//       address stack_bottom = os::current_stack_base() - os::current_stack_size();
-// 
-//       // not safe if our stack extends below the safety margin
-//       return stack_bottom - ThreadSafetyMargin >= highest_vm_reserved_address();
-//     } else {
-//       return true;
-//     }
-//   } else {
-//     // Floating stack LinuxThreads or NPTL:
-//     //   Unlike fixed stack LinuxThreads, thread stacks are not MAP_FIXED. When
-//     //   there's not enough space left, pthread_create() will fail. If we come
-//     //   here, that means enough space has been reserved for stack.
-//     return true;
-//   }
-// }
-// 
-// // Thread start routine for all newly created threads
-// static void *java_start(Thread *thread) {
-//   // Try to randomize the cache line index of hot stack frames.
-//   // This helps when threads of the same stack traces evict each other's
-//   // cache lines. The threads can be either from the same JVM instance, or
-//   // from different JVM instances. The benefit is especially true for
-//   // processors with hyperthreading technology.
-//   static int counter = 0;
-//   int pid = os::current_process_id();
-//   alloca(((pid ^ counter++) & 7) * 128);
-// 
-//   ThreadLocalStorage::set_thread(thread);
-// 
-//   OSThread* osthread = thread->osthread();
-//   Monitor* sync = osthread->startThread_lock();
-// 
-//   // non floating stack LinuxThreads needs extra check, see above
-//   if (!_thread_safety_check(thread)) {
-//     // notify parent thread
-//     MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
-//     osthread->set_state(ZOMBIE);
-//     sync->notify_all();
-//     return NULL;
-//   }
-// 
-//   // thread_id is kernel thread id (similar to Solaris LWP id)
-//   osthread->set_thread_id(os::Aix::gettid());
-// 
-//   if (UseNUMA) {
-//     int lgrp_id = os::numa_get_group_id();
-//     if (lgrp_id != -1) {
-//       thread->set_lgrp_id(lgrp_id);
-//     }
-//   }
-//   // initialize signal mask for this thread
-//   os::Aix::hotspot_sigmask(thread);
-// 
-//   // initialize floating point control register
-//   os::Aix::init_thread_fpu_state();
-// 
-//   // handshaking with parent thread
-//   {
-//     MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
-// 
-//     // notify parent thread
-//     osthread->set_state(INITIALIZED);
-//     sync->notify_all();
-// 
-//     // wait until os::start_thread()
-//     while (osthread->get_state() == INITIALIZED) {
-//       sync->wait(Mutex::_no_safepoint_check_flag);
-//     }
-//   }
-// 
-//   // call one more level start routine
-//   thread->run();
-// 
-//   return 0;
-// }
+// retrieve memory information.
+// Returns false if something went wrong;
+// content of pmi undefined in this case.
+bool os::Aix::get_meminfo(meminfo_t* pmi) {
+
+  assert(pmi, "get_meminfo: invalid parameter");
+
+  memset(pmi, 0, sizeof(meminfo_t));
+
+  if (os::Aix::on_pase()) {
+
+    Unimplemented();
+    return false;
+
+  } else {
+
+    // On AIX, I use the (dynamically loaded) perfstat library to retrieve memory statistics
+    // See:
+    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
+    //        ?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_memtot.htm
+    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
+    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
+
+    perfstat_memory_total_t psmt;
+    memset (&psmt, '\0', sizeof(psmt));
+    const int rc = libperfstat::perfstat_memory_total(NULL, &psmt, sizeof(psmt), 1);
+    if (rc == -1) {
+      fprintf(stderr, "perfstat_memory_total() failed (errno=%d)", errno);
+      assert(0, "perfstat_memory_total() failed");
+      return false;
+    }
+
+    assert(rc == 1, "perfstat_memory_total() - weird return code");
+
+    // excerpt from
+    // http://publib.boulder.ibm.com/infocenter/systems/index.jsp
+    //        ?topic=/com.ibm.aix.files/doc/aixfiles/libperfstat.h.htm
+    // The fields of perfstat_memory_total_t:
+    // u_longlong_t virt_total         Total virtual memory (in 4 KB pages).
+    // u_longlong_t real_total         Total real memory (in 4 KB pages).
+    // u_longlong_t real_free          Free real memory (in 4 KB pages).
+    // u_longlong_t pgsp_total         Total paging space (in 4 KB pages).
+    // u_longlong_t pgsp_free          Free paging space (in 4 KB pages).
+
+    pmi->virt_total = psmt.virt_total * 4096;
+    pmi->real_total = psmt.real_total * 4096;
+    pmi->real_free = psmt.real_free * 4096;
+    pmi->pgsp_total = psmt.pgsp_total * 4096;
+    pmi->pgsp_free = psmt.pgsp_free * 4096;
+
+    return true;
+
+  }
+} // end os::Aix::get_meminfo
+
+// Retrieve global cpu information.
+// Returns false if something went wrong;
+// the content of pci is undefined in this case.
+bool os::Aix::get_cpuinfo(cpuinfo_t* pci) {
+  assert(pci, "get_cpuinfo: invalid parameter");
+  memset(pci, 0, sizeof(cpuinfo_t));
+
+  perfstat_cpu_total_t psct;
+  memset (&psct, '\0', sizeof(psct));
+
+  if (-1 == libperfstat::perfstat_cpu_total(NULL, &psct, sizeof(perfstat_cpu_total_t), 1)) {
+    fprintf(stderr, "perfstat_cpu_total() failed (errno=%d)", errno);
+    assert(0, "perfstat_cpu_total() failed");
+    return false;
+  }
+
+  // global cpu information
+  strcpy (pci->description, psct.description);
+  pci->processorHZ = psct.processorHZ;
+  pci->ncpus = psct.ncpus;
+  os::Aix::_logical_cpus = psct.ncpus;
+  for (int i = 0; i < 3; i++) {
+    pci->loadavg[i] = (double) psct.loadavg[i] / (1 << SBITS);
+  }
+
+  // get the processor version from _system_configuration
+  switch (_system_configuration.version) {
+  case PV_7:
+    strcpy(pci->version, "Power PC 7");
+    break;
+  case PV_6_1:
+    strcpy(pci->version, "Power PC 6 DD1.x");
+    break;
+  case PV_6:
+    strcpy(pci->version, "Power PC 6");
+    break;
+  case PV_5:
+    strcpy(pci->version, "Power PC 5");
+    break;
+  case PV_5_2:
+    strcpy(pci->version, "Power PC 5_2");
+    break;
+  case PV_5_3:
+    strcpy(pci->version, "Power PC 5_3");
+    break;
+  case PV_5_Compat:
+    strcpy(pci->version, "PV_5_Compat");
+    break;
+  case PV_6_Compat:
+    strcpy(pci->version, "PV_6_Compat");
+    break;
+  case PV_7_Compat:
+    strcpy(pci->version, "PV_7_Compat");
+    break;
+  default:
+    strcpy(pci->version, "unknown");
+  }
+
+  return true;
+
+} //end os::Aix::get_cpuinfo
+
+//////////////////////////////////////////////////////////////////////////////
+// detecting pthread library
+
+void os::Aix::libpthread_init() {
+  return;
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// create new thread
+
+// Thread start routine for all newly created threads
+static void *java_start(Thread *thread) {
+
+  // find out my own stack dimensions
+  {
+    // actually, this should do exactly the same as thread->record_stack_base_and_size...
+    address base = 0;
+    size_t size = 0;
+    query_stack_dimensions(&base, &size);
+    thread->set_stack_base(base);
+    thread->set_stack_size(size);
+  }
+
+  // Do some sanity checks.
+  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
+
+  // Try to randomize the cache line index of hot stack frames.
+  // This helps when threads of the same stack traces evict each other's
+  // cache lines. The threads can be either from the same JVM instance, or
+  // from different JVM instances. The benefit is especially true for
+  // processors with hyperthreading technology.
+
+  static int counter = 0;
+  int pid = os::current_process_id();
+  alloca(((pid ^ counter++) & 7) * 128);
+
+  ThreadLocalStorage::set_thread(thread);
+
+  OSThread* osthread = thread->osthread();
+
+  // thread_id is kernel thread id (similar to Solaris LWP id)
+  osthread->set_thread_id(os::Aix::gettid());
+
+  // initialize signal mask for this thread
+  os::Aix::hotspot_sigmask(thread);
+
+  // initialize floating point control register
+  os::Aix::init_thread_fpu_state();
+
+  assert(osthread->get_state() == RUNNABLE, "invalid os thread state");
+
+  // call one more level start routine
+  thread->run();
+
+  return 0;
+}
 
 bool os::create_thread(Thread* thread, ThreadType thr_type, size_t stack_size) {
-  Unimplemented();
-  return false;
+
+  // We want the whole function to be synchronized
+  ThreadCritical cs;
+
+  assert(thread->osthread() == NULL, "caller responsible");
+
+  // Allocate the OSThread object
+  OSThread* osthread = new OSThread(NULL, NULL);
+  if (osthread == NULL) {
+    return false;
+  }
+
+  // set the correct thread state
+  osthread->set_thread_type(thr_type);
+
+  // Initial state is ALLOCATED but not INITIALIZED
+  osthread->set_state(ALLOCATED);
+
+  thread->set_osthread(osthread);
+
+  // init thread attributes
+  pthread_attr_t attr;
+  pthread_attr_init(&attr);
+  guarantee(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) == 0, "???");
+
+  // Make sure we run in 1:1 kernel-user-thread mode
+  if (os::Aix::on_aix()) {
+    guarantee(pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM) == 0, "???");
+    guarantee(pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED) == 0, "???");
+  } // end: aix
+
+  // Start in suspended state, and in os::thread_start, wake the thread up
+  guarantee(pthread_attr_setsuspendstate_np(&attr, PTHREAD_CREATE_SUSPENDED_NP) == 0, "???");
+
+  // calculate stack size if it's not specified by caller
+  if (os::Aix::supports_variable_stack_size()) {
+    if (stack_size == 0) {
+      stack_size = os::Aix::default_stack_size(thr_type);
+
+      switch (thr_type) {
+      case os::java_thread:
+        // Java threads use ThreadStackSize whose default value can be changed with the flag -Xss
+        assert(JavaThread::stack_size_at_create() > 0, "this should be set");
+        stack_size = JavaThread::stack_size_at_create();
+        break;
+      case os::compiler_thread:
+        if (CompilerThreadStackSize > 0) {
+          stack_size = (size_t)(CompilerThreadStackSize * K);
+          break;
+        } // else fall through:
+          // use VMThreadStackSize if CompilerThreadStackSize is not defined
+      case os::vm_thread:
+      case os::pgc_thread:
+      case os::cgc_thread:
+      case os::watcher_thread:
+        if (VMThreadStackSize > 0) stack_size = (size_t)(VMThreadStackSize * K);
+        break;
+      }
+    }
+
+    stack_size = MAX2(stack_size, os::Aix::min_stack_allowed);
+    pthread_attr_setstacksize(&attr, stack_size);
+  } //else let thread_create() pick the default value (96 K on AIX)
+
+  pthread_t tid;
+  int ret = pthread_create(&tid, &attr, (void* (*)(void*)) java_start, thread);
+
+  pthread_attr_destroy(&attr);
+
+  if (ret != 0) {
+    if (PrintMiscellaneous && (Verbose || WizardMode)) {
+      perror("pthread_create()");
+    }
+    // Need to clean up stuff we've allocated so far
+    thread->set_osthread(NULL);
+    delete osthread;
+    return false;
+  }
+
+  // Store pthread info into the OSThread
+  osthread->set_pthread_id(tid);
+
+  return true;
 }
 
 /////////////////////////////////////////////////////////////////////////////
@@ -728,12 +968,11 @@
   // initialize floating point control register
   os::Aix::init_thread_fpu_state();
 
+  // some sanity checks
+  CHECK_CURRENT_STACK_PTR(thread->stack_base(), thread->stack_size());
+
   // Initial thread state is RUNNABLE
-  {
-    // SAPJVM GL 2012-06-14 Do we need this lock, or is it a SAPJVM5 left over?
-    MutexLockerEx ml(thread->SR_lock(), Mutex::_no_safepoint_check_flag);
-    osthread->set_state(RUNNABLE);
-  }
+  osthread->set_state(RUNNABLE);
 
   thread->set_osthread(osthread);
 
@@ -744,30 +983,6 @@
     }
   }
 
-#if 0
-  if (os::Aix::is_initial_thread()) {
-    // If current thread is initial thread, its stack is mapped on demand,
-    // see notes about MAP_GROWSDOWN. Here we try to force kernel to map
-    // the entire stack region to avoid SEGV in stack banging.
-    // It is also useful to get around the heap-stack-gap problem on SuSE
-    // kernel (see 4821821 for details). We first expand stack to the top
-    // of yellow zone, then enable stack yellow zone (order is significant,
-    // enabling yellow zone first will crash JVM on SuSE Linux), so there
-    // is no gap between the last two virtual memory regions.
-
-    JavaThread *jt = (JavaThread *)thread;
-    address addr = jt->stack_yellow_zone_base();
-    assert(addr != NULL, "initialization problem?");
-    assert(jt->stack_available(addr) > 0, "stack guard should not be enabled");
-
-    osthread->set_expanding_stack();
-    os::Aix::manually_expand_stack(jt, addr);
-    osthread->clear_expanding_stack();
-  }
-#else
-  Unimplemented();
-#endif
-
   // initialize signal mask for this thread
   // and save the caller's signal mask
   os::Aix::hotspot_sigmask(thread);
@@ -776,7 +991,8 @@
 }
 
 void os::pd_start_thread(Thread* thread) {
-  Unimplemented();  
+  int status = pthread_continue_np(thread->osthread()->pthread_id());
+  assert(status == 0, "thr_continue failed");
 }
 
 // Free OS resources related to the OSThread
@@ -818,246 +1034,6 @@
   return ThreadLocalStorage::thread();
 }
 
-// //////////////////////////////////////////////////////////////////////////////
-// // initial thread
-// 
-// // Check if current thread is the initial thread, similar to Solaris thr_main.
-// bool os::Aix::is_initial_thread(void) {
-//   char dummy;
-//   // If called before init complete, thread stack bottom will be null.
-//   // Can be called if fatal error occurs before initialization.
-//   if (initial_thread_stack_bottom() == NULL) return false;
-//   assert(initial_thread_stack_bottom() != NULL &&
-//          initial_thread_stack_size()   != 0,
-//          "os::init did not locate initial thread's stack region");
-//   if ((address)&dummy >= initial_thread_stack_bottom() &&
-//       (address)&dummy < initial_thread_stack_bottom() + initial_thread_stack_size())
-//        return true;
-//   else return false;
-// }
-// 
-// // Find the virtual memory area that contains addr
-// static bool find_vma(address addr, address* vma_low, address* vma_high) {
-//   FILE *fp = fopen("/proc/self/maps", "r");
-//   if (fp) {
-//     address low, high;
-//     while (!feof(fp)) {
-//       if (fscanf(fp, "%p-%p", &low, &high) == 2) {
-//         if (low <= addr && addr < high) {
-//            if (vma_low)  *vma_low  = low;
-//            if (vma_high) *vma_high = high;
-//            fclose (fp);
-//            return true;
-//         }
-//       }
-//       for (;;) {
-//         int ch = fgetc(fp);
-//         if (ch == EOF || ch == (int)'\n') break;
-//       }
-//     }
-//     fclose(fp);
-//   }
-//   return false;
-// }
-// 
-// // Locate initial thread stack. This special handling of initial thread stack
-// // is needed because pthread_getattr_np() on most (all?) Linux distros returns
-// // bogus value for initial thread.
-// void os::Aix::capture_initial_stack(size_t max_size) {
-//   // stack size is the easy part, get it from RLIMIT_STACK
-//   size_t stack_size;
-//   struct rlimit rlim;
-//   getrlimit(RLIMIT_STACK, &rlim);
-//   stack_size = rlim.rlim_cur;
-// 
-//   // 6308388: a bug in ld.so will relocate its own .data section to the
-//   //   lower end of primordial stack; reduce ulimit -s value a little bit
-//   //   so we won't install guard page on ld.so's data section.
-//   stack_size -= 2 * page_size();
-// 
-//   // 4441425: avoid crash with "unlimited" stack size on SuSE 7.1 or Redhat
-//   //   7.1, in both cases we will get 2G in return value.
-//   // 4466587: glibc 2.2.x compiled w/o "--enable-kernel=2.4.0" (RH 7.0,
-//   //   SuSE 7.2, Debian) can not handle alternate signal stack correctly
-//   //   for initial thread if its stack size exceeds 6M. Cap it at 2M,
-//   //   in case other parts in glibc still assumes 2M max stack size.
-//   // FIXME: alt signal stack is gone, maybe we can relax this constraint?
-// #ifndef IA64
-//   if (stack_size > 2 * K * K) stack_size = 2 * K * K;
-// #else
-//   // Problem still exists RH7.2 (IA64 anyway) but 2MB is a little small
-//   if (stack_size > 4 * K * K) stack_size = 4 * K * K;
-// #endif
-// 
-//   // Try to figure out where the stack base (top) is. This is harder.
-//   //
-//   // When an application is started, glibc saves the initial stack pointer in
-//   // a global variable "__libc_stack_end", which is then used by system
-//   // libraries. __libc_stack_end should be pretty close to stack top. The
-//   // variable is available since the very early days. However, because it is
-//   // a private interface, it could disappear in the future.
-//   //
-//   // Linux kernel saves start_stack information in /proc/<pid>/stat. Similar
-//   // to __libc_stack_end, it is very close to stack top, but isn't the real
-//   // stack top. Note that /proc may not exist if VM is running as a chroot
-//   // program, so reading /proc/<pid>/stat could fail. Also the contents of
-//   // /proc/<pid>/stat could change in the future (though unlikely).
-//   //
-//   // We try __libc_stack_end first. If that doesn't work, look for
-//   // /proc/<pid>/stat. If neither of them works, we use current stack pointer
-//   // as a hint, which should work well in most cases.
-// 
-//   uintptr_t stack_start;
-// 
-//   // try __libc_stack_end first
-//   uintptr_t *p = (uintptr_t *)dlsym(RTLD_DEFAULT, "__libc_stack_end");
-//   if (p && *p) {
-//     stack_start = *p;
-//   } else {
-//     // see if we can get the start_stack field from /proc/self/stat
-//     FILE *fp;
-//     int pid;
-//     char state;
-//     int ppid;
-//     int pgrp;
-//     int session;
-//     int nr;
-//     int tpgrp;
-//     unsigned long flags;
-//     unsigned long minflt;
-//     unsigned long cminflt;
-//     unsigned long majflt;
-//     unsigned long cmajflt;
-//     unsigned long utime;
-//     unsigned long stime;
-//     long cutime;
-//     long cstime;
-//     long prio;
-//     long nice;
-//     long junk;
-//     long it_real;
-//     uintptr_t start;
-//     uintptr_t vsize;
-//     intptr_t rss;
-//     uintptr_t rsslim;
-//     uintptr_t scodes;
-//     uintptr_t ecode;
-//     int i;
-// 
-//     // Figure what the primordial thread stack base is. Code is inspired
-//     // by email from Hans Boehm. /proc/self/stat begins with current pid,
-//     // followed by command name surrounded by parentheses, state, etc.
-//     char stat[2048];
-//     int statlen;
-// 
-//     fp = fopen("/proc/self/stat", "r");
-//     if (fp) {
-//       statlen = fread(stat, 1, 2047, fp);
-//       stat[statlen] = '\0';
-//       fclose(fp);
-// 
-//       // Skip pid and the command string. Note that we could be dealing with
-//       // weird command names, e.g. user could decide to rename java launcher
-//       // to "java 1.4.2 :)", then the stat file would look like
-//       //                1234 (java 1.4.2 :)) R ... ...
-//       // We don't really need to know the command string, just find the last
-//       // occurrence of ")" and then start parsing from there. See bug 4726580.
-//       char * s = strrchr(stat, ')');
-// 
-//       i = 0;
-//       if (s) {
-//         // Skip blank chars
-//         do s++; while (isspace(*s));
-// 
-// #define _UFM UINTX_FORMAT
-// #define _DFM INTX_FORMAT
-// 
-//         /*                                     1   1   1   1   1   1   1   1   1   1   2   2    2    2    2    2    2    2    2 */
-//         /*              3  4  5  6  7  8   9   0   1   2   3   4   5   6   7   8   9   0   1    2    3    4    5    6    7    8 */
-//         i = sscanf(s, "%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu %ld %ld %ld %ld %ld %ld " _UFM _UFM _DFM _UFM _UFM _UFM _UFM,
-//              &state,          /* 3  %c  */
-//              &ppid,           /* 4  %d  */
-//              &pgrp,           /* 5  %d  */
-//              &session,        /* 6  %d  */
-//              &nr,             /* 7  %d  */
-//              &tpgrp,          /* 8  %d  */
-//              &flags,          /* 9  %lu  */
-//              &minflt,         /* 10 %lu  */
-//              &cminflt,        /* 11 %lu  */
-//              &majflt,         /* 12 %lu  */
-//              &cmajflt,        /* 13 %lu  */
-//              &utime,          /* 14 %lu  */
-//              &stime,          /* 15 %lu  */
-//              &cutime,         /* 16 %ld  */
-//              &cstime,         /* 17 %ld  */
-//              &prio,           /* 18 %ld  */
-//              &nice,           /* 19 %ld  */
-//              &junk,           /* 20 %ld  */
-//              &it_real,        /* 21 %ld  */
-//              &start,          /* 22 UINTX_FORMAT */
-//              &vsize,          /* 23 UINTX_FORMAT */
-//              &rss,            /* 24 INTX_FORMAT  */
-//              &rsslim,         /* 25 UINTX_FORMAT */
-//              &scodes,         /* 26 UINTX_FORMAT */
-//              &ecode,          /* 27 UINTX_FORMAT */
-//              &stack_start);   /* 28 UINTX_FORMAT */
-//       }
-// 
-// #undef _UFM
-// #undef _DFM
-// 
-//       if (i != 28 - 2) {
-//          assert(false, "Bad conversion from /proc/self/stat");
-//          // product mode - assume we are the initial thread, good luck in the
-//          // embedded case.
-//          warning("Can't detect initial thread stack location - bad conversion");
-//          stack_start = (uintptr_t) &rlim;
-//       }
-//     } else {
-//       // For some reason we can't open /proc/self/stat (for example, running on
-//       // FreeBSD with a Linux emulator, or inside chroot), this should work for
-//       // most cases, so don't abort:
-//       warning("Can't detect initial thread stack location - no /proc/self/stat");
-//       stack_start = (uintptr_t) &rlim;
-//     }
-//   }
-// 
-//   // Now we have a pointer (stack_start) very close to the stack top, the
-//   // next thing to do is to figure out the exact location of stack top. We
-//   // can find out the virtual memory area that contains stack_start by
-//   // reading /proc/self/maps, it should be the last vma in /proc/self/maps,
-//   // and its upper limit is the real stack top. (again, this would fail if
-//   // running inside chroot, because /proc may not exist.)
-// 
-//   uintptr_t stack_top;
-//   address low, high;
-//   if (find_vma((address)stack_start, &low, &high)) {
-//     // success, "high" is the true stack top. (ignore "low", because initial
-//     // thread stack grows on demand, its real bottom is high - RLIMIT_STACK.)
-//     stack_top = (uintptr_t)high;
-//   } else {
-//     // failed, likely because /proc/self/maps does not exist
-//     warning("Can't detect initial thread stack location - find_vma failed");
-//     // best effort: stack_start is normally within a few pages below the real
-//     // stack top, use it as stack top, and reduce stack size so we won't put
-//     // guard page outside stack.
-//     stack_top = stack_start;
-//     stack_size -= 16 * page_size();
-//   }
-// 
-//   // stack_top could be partially down the page so align it
-//   stack_top = align_size_up(stack_top, page_size());
-// 
-//   if (max_size && stack_size > max_size) {
-//      _initial_thread_stack_size = max_size;
-//   } else {
-//      _initial_thread_stack_size = stack_size;
-//   }
-// 
-//   _initial_thread_stack_size = align_size_down(_initial_thread_stack_size, page_size());
-//   _initial_thread_stack_bottom = (address)stack_top - _initial_thread_stack_size;
-// }
-
 ////////////////////////////////////////////////////////////////////////////////
 // time support
 
@@ -1095,90 +1071,35 @@
   return jlong(time.tv_sec) * 1000  +  jlong(time.tv_usec / 1000);
 }
 
-// #ifndef CLOCK_MONOTONIC
-// #define CLOCK_MONOTONIC (1)
-// #endif
-// 
-// void os::Aix::clock_init() {
-//   // we do dlopen's in this particular order due to bug in linux
-//   // dynamical loader (see 6348968) leading to crash on exit
-//   void* handle = dlopen("librt.so.1", RTLD_LAZY);
-//   if (handle == NULL) {
-//     handle = dlopen("librt.so", RTLD_LAZY);
-//   }
-// 
-//   if (handle) {
-//     int (*clock_getres_func)(clockid_t, struct timespec*) =
-//            (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_getres");
-//     int (*clock_gettime_func)(clockid_t, struct timespec*) =
-//            (int(*)(clockid_t, struct timespec*))dlsym(handle, "clock_gettime");
-//     if (clock_getres_func && clock_gettime_func) {
-//       // See if monotonic clock is supported by the kernel. Note that some
-//       // early implementations simply return kernel jiffies (updated every
-//       // 1/100 or 1/1000 second). It would be bad to use such a low res clock
-//       // for nano time (though the monotonic property is still nice to have).
-//       // It's fixed in newer kernels, however clock_getres() still returns
-//       // 1/HZ. We check if clock_getres() works, but will ignore its reported
-//       // resolution for now. Hopefully as people move to new kernels, this
-//       // won't be a problem.
-//       struct timespec res;
-//       struct timespec tp;
-//       if (clock_getres_func (CLOCK_MONOTONIC, &res) == 0 &&
-//           clock_gettime_func(CLOCK_MONOTONIC, &tp)  == 0) {
-//         // yes, monotonic clock is supported
-//         _clock_gettime = clock_gettime_func;
-//       } else {
-//         // close librt if there is no monotonic clock
-//         dlclose(handle);
-//       }
-//     }
-//   }
-// }
-// 
-// #ifndef SYS_clock_getres
-// 
-// #if defined(IA32) || defined(AMD64)
-// #define SYS_clock_getres IA32_ONLY(266)  AMD64_ONLY(229)
-// #define sys_clock_getres(x,y)  ::syscall(SYS_clock_getres, x, y)
-// #else
-// #warning "SYS_clock_getres not defined for this platform, disabling fast_thread_cpu_time"
-// #define sys_clock_getres(x,y)  -1
-// #endif
-// 
-// #else
-// #define sys_clock_getres(x,y)  ::syscall(SYS_clock_getres, x, y)
-// #endif
-// 
-// void os::Aix::fast_thread_clock_init() {
-//   if (!UseLinuxPosixThreadCPUClocks) {
-//     return;
-//   }
-//   clockid_t clockid;
-//   struct timespec tp;
-//   int (*pthread_getcpuclockid_func)(pthread_t, clockid_t *) =
-//       (int(*)(pthread_t, clockid_t *)) dlsym(RTLD_DEFAULT, "pthread_getcpuclockid");
-// 
-//   // Switch to using fast clocks for thread cpu time if
-//   // the sys_clock_getres() returns 0 error code.
-//   // Note, that some kernels may support the current thread
-//   // clock (CLOCK_THREAD_CPUTIME_ID) but not the clocks
-//   // returned by the pthread_getcpuclockid().
-//   // If the fast Posix clocks are supported then the sys_clock_getres()
-//   // must return at least tp.tv_sec == 0 which means a resolution
-//   // better than 1 sec. This is extra check for reliability.
-// 
-//   if(pthread_getcpuclockid_func &&
-//      pthread_getcpuclockid_func(_main_thread, &clockid) == 0 &&
-//      sys_clock_getres(clockid, &tp) == 0 && tp.tv_sec == 0) {
-// 
-//     _supports_fast_thread_cpu_time = true;
-//     _pthread_getcpuclockid = pthread_getcpuclockid_func;
-//   }
-// }
+// We need to manually declare mread_real_time,
+// because IBM didn't provide a prototype in time.h.
+// (they probably only ever tested in C, not C++)
+extern "C"
+int mread_real_time(timebasestruct_t *t, size_t size_of_timebasestruct_t);
 
 jlong os::javaTimeNanos() {
-  Unimplemented();
-  return 0;
+  if (os::Aix::on_pase()) {
+    Unimplemented();
+    return 0;
+  }
+  else {
+    // On AIX use the precision of processors real time clock
+    // or time base registers
+    timebasestruct_t time;
+    int rc;
+
+    // If the CPU has a time register, it will be used and
+    // we have to convert to real time first. After convertion we have following data:
+    // time.tb_high [seconds since 00:00:00 UTC on 1.1.1970]
+    // time.tb_low  [nanoseconds after the last full second above]
+    // We better use mread_real_time here instead of read_real_time
+    // to ensure that we will get a monotonic increasing time.
+    if (mread_real_time(&time, TIMEBASE_SZ) != RTC_POWER) {
+      rc = time_base_to_time(&time, TIMEBASE_SZ);
+      assert(rc != -1, "aix error at time_base_to_time()");
+    }
+    return jlong(time.tb_high) * (1000 * 1000 * 1000) + jlong(time.tb_low);
+  }
 }
 
 void os::javaTimeNanos_info(jvmtiTimerInfo *info_ptr) {
@@ -1379,50 +1300,6 @@
   return false;
 }
 
-// struct _address_to_library_name {
-//   address addr;          // input : memory address
-//   size_t  buflen;        //         size of fname
-//   char*   fname;         // output: library name
-//   address base;          //         library base addr
-// };
-// 
-// static int address_to_library_name_callback(struct dl_phdr_info *info,
-//                                             size_t size, void *data) {
-//   int i;
-//   bool found = false;
-//   address libbase = NULL;
-//   struct _address_to_library_name * d = (struct _address_to_library_name *)data;
-// 
-//   // iterate through all loadable segments
-//   for (i = 0; i < info->dlpi_phnum; i++) {
-//     address segbase = (address)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
-//     if (info->dlpi_phdr[i].p_type == PT_LOAD) {
-//       // base address of a library is the lowest address of its loaded
-//       // segments.
-//       if (libbase == NULL || libbase > segbase) {
-//         libbase = segbase;
-//       }
-//       // see if 'addr' is within current segment
-//       if (segbase <= d->addr &&
-//           d->addr < segbase + info->dlpi_phdr[i].p_memsz) {
-//         found = true;
-//       }
-//     }
-//   }
-// 
-//   // dlpi_name is NULL or empty if the ELF file is executable, return 0
-//   // so dll_address_to_library_name() can fall through to use dladdr() which
-//   // can figure out executable name from argv[0].
-//   if (found && info->dlpi_name && info->dlpi_name[0]) {
-//     d->base = libbase;
-//     if (d->fname) {
-//       jio_snprintf(d->fname, d->buflen, "%s", info->dlpi_name);
-//     }
-//     return 1;
-//   }
-//   return 0;
-// }
-
 bool os::dll_address_to_library_name(address addr, char* buf,
                                      int buflen, int* offset) {
   Unimplemented();
@@ -1432,7 +1309,31 @@
 // Loads .dll/.so and in case of error it checks if .dll/.so was built
 // for the same architecture as Hotspot is running on
 void * os::dll_load(const char *filename, char *ebuf, int ebuflen) {
-  Unimplemented();
+
+  if (ebuf && ebuflen > 0) {
+    ebuf[0] = '\0';
+    ebuf[ebuflen - 1] = '\0';
+  }
+
+  if (!filename || strlen(filename) == 0) {
+    if (ebuf && ebuflen > 0) ::strncpy(ebuf, "dll_load: empty filename specified", ebuflen - 1);
+    return NULL;
+  }
+
+  // RTLD_LAZY is currently not implemented. The dl is loaded immediately with all its dependants.
+  void * result= ::dlopen(filename, RTLD_LAZY);
+  if (result != NULL) {
+    // Reload dll cache. Don't do this in signal handling.
+    LoadedLibraries::reload();
+    return result;
+  } else {
+    // error analysis when dlopen fails
+    const char* const error_report = ::dlerror();
+    if (error_report && ebuf && ebuflen > 0) {
+      snprintf(ebuf, ebuflen - 1, "%s, LIBPATH=%s, LD_LIBRARY_PATH=%s : %s",
+               filename, ::getenv("LIBPATH") ? ::getenv("LIBPATH") : "", ::getenv("LD_LIBRARY_PATH") ? ::getenv("LD_LIBRARY_PATH") : "", error_report);
+    }
+  }
   return NULL;
 }
 
@@ -1446,101 +1347,120 @@
   return res;
 }
 
-// 
-// static bool _print_ascii_file(const char* filename, outputStream* st) {
-//   int fd = ::open(filename, O_RDONLY);
-//   if (fd == -1) {
-//      return false;
-//   }
-// 
-//   char buf[32];
-//   int bytes;
-//   while ((bytes = ::read(fd, buf, sizeof(buf))) > 0) {
-//     st->print_raw(buf, bytes);
-//   }
-// 
-//   ::close(fd);
-// 
-//   return true;
-// }
-
 void os::print_dll_info(outputStream *st) {
-  Unimplemented();
+  st->print_cr("Dynamic libraries:");
+  LoadedLibraries::print(st);
 }
 
-// void os::print_os_info_brief(outputStream* st) {
-//   os::Aix::print_distro_info(st);
-// 
-//   os::Posix::print_uname_info(st);
-// 
-//   os::Aix::print_libversion_info(st);
-// 
-// }
-
 void os::print_os_info(outputStream* st) {
-  Unimplemented();
+  st->print("OS:");
+
+  st->print("uname:");
+  struct utsname name;
+  uname(&name);
+  st->print("%s ", name.sysname);
+  st->print("%s ", name.nodename);
+  st->print("%s ", name.release);
+  st->print("%s ", name.version);
+  st->print("%s", name.machine);
+  st->cr();
+
+  // rlimit
+  st->print("rlimit:");
+  struct rlimit rlim;
+
+  st->print(" STACK ");
+  getrlimit(RLIMIT_STACK, &rlim);
+  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
+  else st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) >> 10);
+
+  st->print(", CORE ");
+  getrlimit(RLIMIT_CORE, &rlim);
+  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
+  else st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) >> 10);
+
+  st->print(", NPROC ");
+  st->print("%ld", sysconf(_SC_CHILD_MAX));
+
+  st->print(", NOFILE ");
+  getrlimit(RLIMIT_NOFILE, &rlim);
+  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
+  else st->print(UINT64_FORMAT, uint64_t(rlim.rlim_cur));
+
+  st->print(", AS ");
+  getrlimit(RLIMIT_AS, &rlim);
+  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
+  else st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) >> 10);
+
+  // Print limits on DATA, because it limits the C-heap
+  st->print(", DATA ");
+  getrlimit(RLIMIT_DATA, &rlim);
+  if (rlim.rlim_cur == RLIM_INFINITY) st->print("infinity");
+  else st->print(UINT64_FORMAT "k", uint64_t(rlim.rlim_cur) >> 10);
+  st->cr();
+
+  // load average
+  st->print("load average:");
+  double loadavg[3] = {-1.L, -1.L, -1.L};
+  os::loadavg(loadavg, 3);
+  st->print("%0.02f %0.02f %0.02f", loadavg[0], loadavg[1], loadavg[2]);
+  st->cr();
 }
 
-// // Try to identify popular distros.
-// // Most Linux distributions have /etc/XXX-release file, which contains
-// // the OS version string. Some have more than one /etc/XXX-release file
-// // (e.g. Mandrake has both /etc/mandrake-release and /etc/redhat-release.),
-// // so the order is important.
-// void os::Aix::print_distro_info(outputStream* st) {
-//   if (!_print_ascii_file("/etc/mandrake-release", st) &&
-//       !_print_ascii_file("/etc/sun-release", st) &&
-//       !_print_ascii_file("/etc/redhat-release", st) &&
-//       !_print_ascii_file("/etc/SuSE-release", st) &&
-//       !_print_ascii_file("/etc/turbolinux-release", st) &&
-//       !_print_ascii_file("/etc/gentoo-release", st) &&
-//       !_print_ascii_file("/etc/debian_version", st) &&
-//       !_print_ascii_file("/etc/ltib-release", st) &&
-//       !_print_ascii_file("/etc/angstrom-version", st)) {
-//       st->print("Linux");
-//   }
-//   st->cr();
-// }
-// 
-// void os::Aix::print_libversion_info(outputStream* st) {
-//   // libc, pthread
-//   st->print("libc:");
-//   st->print(os::Aix::glibc_version()); st->print(" ");
-//   st->print(os::Aix::libpthread_version()); st->print(" ");
-//   if (os::Aix::is_LinuxThreads()) {
-//      st->print("(%s stack)", os::Aix::is_floating_stack() ? "floating" : "fixed");
-//   }
-//   st->cr();
-// }
-// 
-// void os::Aix::print_full_memory_info(outputStream* st) {
-//    st->print("\n/proc/meminfo:\n");
-//    _print_ascii_file("/proc/meminfo", st);
-//    st->cr();
-// }
-
 void os::print_memory_info(outputStream* st) {
-  Unimplemented();
+
+  // SAPJVM stuefe 2010-12-16: print out (more) detailed page information
+  st->print_cr("Memory:");
+
+  st->print_cr("  default page size: %s", describe_pagesize(os::vm_page_size()));
+  st->print_cr("  default stack page size: %s", describe_pagesize(os::vm_page_size()));
+  st->print_cr("  default shm page size: %s", describe_pagesize(os::Aix::shm_default_page_size()));
+  st->print_cr("  can use 64K pages dynamically: %s", (os::Aix::can_use_64K_pages() ?  "yes" :"no"));
+  st->print_cr("  can use 16M pages dynamically: %s", (os::Aix::can_use_16M_pages() ?  "yes" :"no"));
+  if (g_multipage_error != 0) {
+    st->print_cr("  multipage error: %d", g_multipage_error);
+  }
+
+  // print out LDR_CNTRL because it affects the default page sizes
+  const char* const ldr_cntrl = ::getenv("LDR_CNTRL");
+  st->print_cr("  LDR_CNTRL=%s.", ldr_cntrl ? ldr_cntrl : "<unset>");
+
+  const char* const extshm = ::getenv("EXTSHM");
+  st->print_cr("  EXTSHM=%s.", extshm ? extshm : "<unset>");
+
+  // Call os::Aix::get_meminfo() to retrieve memory statistics.
+  os::Aix::meminfo_t mi;
+  if (os::Aix::get_meminfo(&mi)) {
+    char buffer[256];
+    if (os::Aix::on_aix()) {
+      jio_snprintf(buffer, sizeof(buffer),
+                   "  physical total : " UINT64_FORMAT "\n"
+                   "  physical free  : " UINT64_FORMAT "\n"
+                   "  swap total     : " UINT64_FORMAT "\n"
+                   "  swap free      : " UINT64_FORMAT "\n",
+                   mi.real_total,
+                   mi.real_free,
+                   mi.pgsp_total,
+                   mi.pgsp_free);
+    } else {
+      Unimplemented();
+    }
+    st->print_raw(buffer);
+  } else {
+    st->print_cr("  (no more information available)");
+  }
 }
 
 void os::pd_print_cpu_info(outputStream* st) {
-  Unimplemented();
+  // cpu
+  st->print("CPU:");
+  st->print("total %d", os::processor_count());
+  // It's not safe to query number of active processors after crash
+  // st->print("(active %d)", os::active_processor_count());
+  st->print(" %s", VM_Version::cpu_features());
+  st->cr();
 }
 
-// // Taken from /usr/include/bits/siginfo.h  Supposed to be architecture specific
-// // but they're the same for all the linux arch that we support
-// // and they're the same for solaris but there's no common place to put this.
-// const char *ill_names[] = { "ILL0", "ILL_ILLOPC", "ILL_ILLOPN", "ILL_ILLADR",
-//                           "ILL_ILLTRP", "ILL_PRVOPC", "ILL_PRVREG",
-//                           "ILL_COPROC", "ILL_BADSTK" };
-// 
-// const char *fpe_names[] = { "FPE0", "FPE_INTDIV", "FPE_INTOVF", "FPE_FLTDIV",
-//                           "FPE_FLTOVF", "FPE_FLTUND", "FPE_FLTRES",
-//                           "FPE_FLTINV", "FPE_FLTSUB", "FPE_FLTDEN" };
-// 
-// const char *segv_names[] = { "SEGV0", "SEGV_MAPERR", "SEGV_ACCERR" };
-// 
-// const char *bus_names[] = { "BUS0", "BUS_ADRALN", "BUS_ADRERR", "BUS_OBJERR" };
-
 void os::print_siginfo(outputStream* st, void* siginfo) {
   Unimplemented();
 }
@@ -1571,7 +1491,25 @@
 
 // Find the full path to the current module, libjvm.so or libjvm_g.so
 void os::jvm_path(char *buf, jint buflen) {
-  Unimplemented();
+  // Error checking.
+  if (buflen < MAXPATHLEN) {
+    assert(false, "must use a large-enough buffer");
+    buf[0] = '\0';
+    return;
+  }
+  // Lazy resolve the path to current module.
+  if (saved_jvm_path[0] != 0) {
+    strcpy(buf, saved_jvm_path);
+    return;
+  }
+
+  Dl_info dlinfo;
+  int ret = dladdr(CAST_FROM_FN_PTR(void *, os::jvm_path), &dlinfo);
+  assert(ret != 0, "cannot locate libjvm");
+  char* rp = realpath((char *)dlinfo.dli_fname, buf);
+  assert(rp != NULL, "error in realpath(): maybe the 'path' argument is too long?");
+
+  strcpy(saved_jvm_path, buf);
 }
 
 void os::print_jni_name_prefix_on(outputStream* st, int args_size) {
@@ -1614,8 +1552,36 @@
 }
 
 void* os::signal(int signal_number, void* handler) {
-  Unimplemented();
-  return NULL;
+  struct sigaction sigAct, oldSigAct;
+
+  sigfillset(&(sigAct.sa_mask));
+
+  // Do not block out synchronous signals in the signal handler.
+  // Blocking synchronous signals only makes sense
+  // if you can really be sure that those signals won't happen during signal
+  // handling, when the blocking applies.
+  // Normal signal handlers are lean and do not cause signals. But our
+  // signal handlers tend to be "risky" - secondary SIGSEGV, SIGILL, SIGBUS'
+  // may and do happen.
+  // On AIX, PASE there was a case where a SIGSEGV happened, followed
+  // by a SIGILL, which was blocked due to the signal mask. The process
+  // just hung forever. Better to crash from a secondary signal than to hang.
+  sigdelset(&(sigAct.sa_mask), SIGSEGV);
+  sigdelset(&(sigAct.sa_mask), SIGBUS);
+  sigdelset(&(sigAct.sa_mask), SIGILL);
+  sigdelset(&(sigAct.sa_mask), SIGFPE);
+  sigdelset(&(sigAct.sa_mask), SIGTRAP);
+
+  sigAct.sa_flags   = SA_RESTART|SA_SIGINFO;
+
+  sigAct.sa_handler = CAST_TO_FN_PTR(sa_handler_t, handler);
+
+  if (sigaction(signal_number, &sigAct, &oldSigAct)) {
+    // -1 means registration failed
+    return (void *)-1;
+  }
+
+  return CAST_FROM_FN_PTR(void*, oldSigAct.sa_handler);
 }
 
 void os::signal_raise(int signal_number) {
@@ -1643,17 +1609,52 @@
   ::memset((void*)pending_signals, 0, sizeof(pending_signals));
 
   // Initialize signal semaphore
-  Unimplemented();
+  int rc = ::sem_init(&sig_sem, 0, 0);
+  guarantee(rc != -1, "sem_init failed");
 }
 
 void os::signal_notify(int sig) {
   Atomic::inc(&pending_signals[sig]);
-  Unimplemented();
+  ::sem_post(&sig_sem);
 }
 
 static int check_pending_signals(bool wait) {
-  Unimplemented();
-  return 0;
+  Atomic::store(0, &sigint_count);
+  for (;;) {
+    for (int i = 0; i < NSIG + 1; i++) {
+      jint n = pending_signals[i];
+      if (n > 0 && n == Atomic::cmpxchg(n - 1, &pending_signals[i], n)) {
+        return i;
+      }
+    }
+    if (!wait) {
+      return -1;
+    }
+    JavaThread *thread = JavaThread::current();
+    ThreadBlockInVM tbivm(thread);
+
+    bool threadIsSuspended;
+    do {
+      thread->set_suspend_equivalent();
+      // cleared by handle_special_suspend_equivalent_condition() or java_suspend_self()
+
+      ::sem_wait(&sig_sem);
+
+      // were we externally suspended while we were waiting?
+      threadIsSuspended = thread->handle_special_suspend_equivalent_condition();
+      if (threadIsSuspended) {
+        //
+        // The semaphore has been incremented, but while we were waiting
+        // another thread suspended us. We don't want to continue running
+        // while suspended because that would surprise the thread that
+        // suspended us.
+        //
+        ::sem_post(&sig_sem);
+
+        thread->java_suspend_self();
+      }
+    } while (threadIsSuspended);
+  }
 }
 
 int os::signal_lookup() {
@@ -1667,6 +1668,297 @@
 ////////////////////////////////////////////////////////////////////////////////
 // Virtual Memory
 
+// AddrRange describes an immutable address range
+//
+// This is a helper class for the 'shared memory bookkeeping' below
+class AddrRange {
+
+  char* const _start;
+  const size_t _size;
+
+public:
+
+  AddrRange(char* start, size_t size)
+    : _start(start), _size(size)
+  {}
+
+  AddrRange(const AddrRange& r) 
+    : _start(r.start()), _size(r.size())
+  {}
+
+  char* start() const { return _start; }
+  size_t size() const { return _size; }
+  char* end() const { return _start + _size; }
+  bool is_empty() const { return _size == 0 ? true : false; }
+
+  static AddrRange empty_range() { return AddrRange(NULL, 0); }
+ 
+  bool contains(const char* p) const { 
+    return start() <= p && end() > p;
+  }
+
+  bool contains(const AddrRange& range) const {
+    return start() <= range.start() && end() >= range.end();
+  }
+
+  bool intersects(const AddrRange& range) const {
+    return (range.start() <= start() && range.end() > start()) ||
+           (range.start() < end() && range.end() >= end()) ||
+           contains(range);
+  }
+
+  bool is_same_range(const AddrRange& range) const { 
+    return start() == range.start() && size() == range.size();
+  }
+
+  // return the closest inside range consisting of whole pages
+  AddrRange find_closest_aligned_range(size_t pagesize) const {
+    if (pagesize == 0 || is_empty()) {
+      return empty_range();
+    }
+    char* const from = (char*)align_size_up((intptr_t)_start, pagesize);
+    char* const to = (char*)align_size_down((intptr_t)end(), pagesize);
+    if (from > to) {
+      return empty_range();
+    }
+    return AddrRange(from, to - from);
+  }
+};
+
+////////////////////////////////////////////////////////////////////////////
+// shared memory bookkeeping
+//
+// The os::reserve_memory() API and friends hand out different kinds of memory, depending
+// on need and circumstances. Memory may be allocated with mmap() or with shmget/shmat.
+//
+// But these memory types have to be treated differently. For example, to uncommit
+// mmap-based memory, msync(MS_INVALIDATE) is needed, to uncommit shmat-based memory,
+// disclaim64() is needed.
+//
+// Therefore we need to keep track of the allocated memory segments and their
+// properties.
+
+// ShmBkBlock: base class for all blocks in the shared memory bookkeeping
+class ShmBkBlock {
+
+  ShmBkBlock* _next;
+
+protected:
+
+  const AddrRange _range;
+  const size_t _pagesize;
+  const bool _pinned;
+
+public:
+
+  ShmBkBlock(AddrRange range, size_t pagesize, bool pinned)
+    : _range(range), _pagesize(pagesize), _pinned(pinned) , _next(NULL) {
+    
+    assert(_pagesize == SIZE_4K || _pagesize == SIZE_64K || _pagesize == SIZE_16M, "invalid page size");
+    assert(!_range.is_empty(), "invalid range");
+  }
+  
+  virtual void print(outputStream* st) const {
+    st->print("0x%I64X ... 0x%I64X (%I64B) - %d %s pages - %s",
+              _range.start(), _range.end(), _range.size(),
+              _range.size() / _pagesize, describe_pagesize(_pagesize),
+              _pinned ? "pinned" : "");
+  }
+  
+  char* base() const { return _range.start(); }
+  size_t size() const { return _range.size(); }
+  
+  bool containsAddress(const char* p) const {
+    return _range.contains(p);
+  }
+  
+  bool containsRange(const char* p, size_t size) const {
+    return _range.contains(AddrRange((char*)p, size));
+  }
+  
+  bool isSameRange(const char* p, size_t size) const {
+    return _range.is_same_range(AddrRange((char*)p, size));
+  }
+
+  virtual bool disclaim(char* p, size_t size) = 0;
+  virtual bool release() = 0;
+  
+  // blocks live in a list.
+  ShmBkBlock* next() const { return _next; }
+  void set_next(ShmBkBlock* blk) { _next = blk; }
+
+}; // end: ShmBkBlock
+
+
+// ShmBkMappedBlock: describes a block allocated with mmap()
+class ShmBkMappedBlock : public ShmBkBlock {
+public:
+
+  ShmBkMappedBlock(AddrRange range)
+    : ShmBkBlock(range, SIZE_4K, false) {} // mmap: always 4K, never pinned
+  
+  void print(outputStream* st) const {
+    ShmBkBlock::print(st);
+    st->print_cr(" - mmap'ed");
+  }
+
+  bool disclaim(char* p, size_t size) {
+
+    AddrRange r(p, size);
+
+    guarantee(_range.contains(r), "invalid disclaim");
+
+    // only disclaim whole ranges.
+    const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
+    if (r2.is_empty()) {
+      return true;
+    }
+
+    const int rc = ::msync(r2.start(), r2.size(), MS_INVALIDATE);
+
+    if (rc != 0) {
+      warning("msync(0x%p, %llu, MS_INVALIDATE) failed (%d)\n", r2.start(), r2.size(), errno);
+    }
+    
+    return rc == 0 ? true : false;
+  }
+
+  bool release() {
+    // mmap'ed blocks are released using munmap
+    if (::munmap(_range.start(), _range.size()) != 0) {
+      warning("munmap(0x%p, %llu) failed (%d)\n", _range.start(), _range.size(), errno);
+      return false;
+    }
+    return true;
+  }
+}; // end: ShmBkMappedBlock
+
+// ShmBkShmatedBlock: describes a block allocated with shmget/shmat()
+class ShmBkShmatedBlock : public ShmBkBlock {
+public:
+
+  ShmBkShmatedBlock(AddrRange range, size_t pagesize, bool pinned)
+    : ShmBkBlock(range, pagesize, pinned) {}
+
+  void print(outputStream* st) const {
+    ShmBkBlock::print(st);
+    st->print_cr(" - shmat'ed");
+  }
+
+  bool disclaim(char* p, size_t size) {
+
+    AddrRange r(p, size);
+
+    if (_pinned) {
+      return true;
+    }
+
+    // shmat'ed blocks are disclaimed using disclaim64
+    guarantee(_range.contains(r), "invalid disclaim");
+
+    // only disclaim whole ranges.
+    const AddrRange r2 = r.find_closest_aligned_range(_pagesize);
+    if (r2.is_empty()) {
+      return true;
+    }
+
+    const bool rc = ::disclaim64(r2.start(), r2.size(), DISCLAIM_ZEROMEM);
+
+    if (!rc) {
+      warning("failed to disclaim shm %p-%p\n", r2.start(), r2.end());
+    }
+
+    return rc;
+  }
+
+  bool release() {
+    bool rc = false;
+    if (::shmdt(_range.start()) != 0) {
+      warning("shmdt(0x%p) failed (%d)\n", _range.start(), errno);
+    } else {
+      rc = true;
+    }
+    return rc;
+  }
+
+}; // end: ShmBkShmatedBlock
+
+static ShmBkBlock* g_shmbk_list = NULL;
+static volatile jint g_shmbk_table_lock = 0;
+
+// keep some usage statistics
+static struct {
+  int nodes;    // number of nodes in list
+  size_t bytes; // reserved - not committed - bytes.
+  int reserves; // how often reserve was called
+  int lookups;  // how often a lookup was made
+} g_shmbk_stats = { 0, 0, 0, 0 };
+
+// add information about a shared memory segment to the bookkeeping
+static void shmbk_register(ShmBkBlock* p_block) {
+  guarantee(p_block, "logic error");
+  p_block->set_next(g_shmbk_list);
+  g_shmbk_list = p_block;
+  g_shmbk_stats.reserves ++;
+  g_shmbk_stats.bytes += p_block->size();
+  g_shmbk_stats.nodes ++;
+}
+
+// remove information about a shared memory segment by its starting address
+static void shmbk_unregister(ShmBkBlock* p_block) {
+  ShmBkBlock* p = g_shmbk_list;
+  ShmBkBlock* prev = NULL;
+  while (p) {
+    if (p == p_block) {
+      if (prev) {
+        prev->set_next(p->next());
+      } else {
+        g_shmbk_list = p->next();
+      }
+      g_shmbk_stats.nodes --;
+      g_shmbk_stats.bytes -= p->size();
+      return;
+    }
+    prev = p;
+    p = p->next();
+  }
+  assert(false, "should not happen");
+}
+
+// given a pointer, return shared memory bookkeeping record for the segment it points into
+// using the returned block info must happen under lock protection
+static ShmBkBlock* shmbk_find_by_containing_address(const char* addr) {
+  g_shmbk_stats.lookups ++;
+  ShmBkBlock* p = g_shmbk_list;
+  while (p) {
+    if (p->containsAddress(addr)) {
+      return p;
+    }
+    p = p->next();
+  }
+  return NULL;
+}
+
+// dump all information about all memory segments allocated with os::reserve_memory()
+void shmbk_dump_info() {
+  tty->print_cr("-- shared mem bookkeeping (alive: %d segments, %llu bytes, "
+    "total reserves: %d total lookups: %d)",
+    g_shmbk_stats.nodes, g_shmbk_stats.bytes, g_shmbk_stats.reserves, g_shmbk_stats.lookups);
+  const ShmBkBlock* p = g_shmbk_list;
+  int i = 0;
+  while (p) {
+    p->print(tty);
+    p = p->next();
+    i ++;
+  }
+}
+
+#define LOCK_SHMBK     { ThreadCritical _LOCK_SHMBK;
+#define UNLOCK_SHMBK   }
+
+// End: shared memory bookkeeping
+////////////////////////////////////////////////////////////////////////////////////////////////////
+
 int os::vm_page_size() {
   // Seems redundant as all get out
   assert(os::Aix::page_size() != -1, "must call os::init");
@@ -1679,92 +1971,100 @@
   return os::Aix::page_size();
 }
 
-// // Rationale behind this function:
-// //  current (Mon Apr 25 20:12:18 MSD 2005) oprofile drops samples without executable
-// //  mapping for address (see lookup_dcookie() in the kernel module), thus we cannot get
-// //  samples for JITted code. Here we create private executable mapping over the code cache
-// //  and then we can use standard (well, almost, as mapping can change) way to provide
-// //  info for the reporting script by storing timestamp and location of symbol
-// void linux_wrap_code(char* base, size_t size) {
-//   static volatile jint cnt = 0;
-// 
-//   if (!UseOprofile) {
-//     return;
-//   }
-// 
-//   char buf[PATH_MAX+1];
-//   int num = Atomic::add(1, &cnt);
-// 
-//   snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
-//            os::get_temp_directory(), os::current_process_id(), num);
-//   unlink(buf);
-// 
-//   int fd = ::open(buf, O_CREAT | O_RDWR, S_IRWXU);
-// 
-//   if (fd != -1) {
-//     off_t rv = ::lseek(fd, size-2, SEEK_SET);
-//     if (rv != (off_t)-1) {
-//       if (::write(fd, "", 1) == 1) {
-//         mmap(base, size,
-//              PROT_READ|PROT_WRITE|PROT_EXEC,
-//              MAP_PRIVATE|MAP_FIXED|MAP_NORESERVE, fd, 0);
-//       }
-//     }
-//     ::close(fd);
-//     unlink(buf);
-//   }
-// }
-
 bool os::commit_memory(char* addr, size_t size, bool exec) {
-  Unimplemented();
-  return false;
+
+  // Commit is a noop. There is no explicit commit
+  // needed on AIX. Memory is committed when touched.
+  //
+  // Debug: check address range for validity
+#ifdef ASSERT
+  LOCK_SHMBK
+    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
+    if (!block) {
+      fprintf(stderr, "invalid pointer: " INTPTR_FORMAT "\n", addr);
+      shmbk_dump_info();
+      assert(false, "invalid pointer");
+      return false;
+    } else if (!block->containsRange(addr, size)) {
+      fprintf(stderr, "invalid range: " INTPTR_FORMAT " .. " INTPTR_FORMAT "\n", addr, addr + size);
+      shmbk_dump_info();
+      assert(false, "invalid range");
+      return false;
+    }
+  UNLOCK_SHMBK
+#endif // ASSERT
+
+  return true;
+}
+
+bool os::commit_memory(char* addr, size_t size, size_t alignment_hint, bool exec) {
+  return commit_memory(addr, size, exec);
 }
 
-// // Define MAP_HUGETLB here so we can build HotSpot on old systems.
-// #ifndef MAP_HUGETLB
-// #define MAP_HUGETLB 0x40000
-// #endif
-// 
-// // Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
-// #ifndef MADV_HUGEPAGE
-// #define MADV_HUGEPAGE 14
-// #endif
-
-bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
-                       bool exec) {
-  Unimplemented();
-  return false;
+bool os::uncommit_memory(char* addr, size_t size) {
+
+  // Delegate to ShmBkBlock class which knows how to uncommit its memory.
+
+  bool rc = false;
+  LOCK_SHMBK
+    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
+    if (!block) {
+      fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
+      shmbk_dump_info();
+      assert(false, "invalid pointer");
+      return false;
+    } else if (!block->containsRange(addr, size)) {
+      fprintf(stderr, "invalid range: 0x%p .. 0x%p.\n", addr, addr + size);
+      shmbk_dump_info();
+      assert(false, "invalid range");
+      return false;
+    }
+    rc = block->disclaim(addr, size);
+  UNLOCK_SHMBK
+
+  if (!rc) {
+    warning("failed to disclaim 0x%p .. 0x%p (0x%llX bytes).", addr, addr + size, size);
+  }
+  return rc;
+}
+
+bool os::create_stack_guard_pages(char* addr, size_t size) {
+  return os::guard_memory(addr, size);
+}
+
+bool os::remove_stack_guard_pages(char* addr, size_t size) {
+  return os::unguard_memory(addr, size);
 }
 
 void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
 }
 
 void os::free_memory(char *addr, size_t bytes, size_t alignment_hint) {
-  Unimplemented();
 }
 
 void os::numa_make_global(char *addr, size_t bytes) {
-  Unimplemented();
 }
 
 void os::numa_make_local(char *addr, size_t bytes, int lgrp_hint) {
-  Unimplemented();
 }
 
-bool os::numa_topology_changed()   { return false; }
+bool os::numa_topology_changed()   { 
+  return false;
+}
 
 size_t os::numa_get_groups_num() {
-  Unimplemented();
-  return 0;
+  return 1;
 }
 
 int os::numa_get_group_id() {
-  Unimplemented();
   return 0;
 }
 
 size_t os::numa_get_leaf_groups(int *ids, size_t size) {
-  Unimplemented();
+  if (size > 0) {
+    ids[0] = 0;
+    return 1;
+  }
   return 0;
 }
 
@@ -1776,459 +2076,452 @@
   return end;
 }
 
-
-// int os::Aix::sched_getcpu_syscall(void) {
-//   unsigned int cpu;
-//   int retval = -1;
-// 
-// #if defined(IA32)
-// # ifndef SYS_getcpu
-// # define SYS_getcpu 318
-// # endif
-//   retval = syscall(SYS_getcpu, &cpu, NULL, NULL);
-// #elif defined(AMD64)
-// // Unfortunately we have to bring all these macros here from vsyscall.h
-// // to be able to compile on old linuxes.
-// # define __NR_vgetcpu 2
-// # define VSYSCALL_START (-10UL << 20)
-// # define VSYSCALL_SIZE 1024
-// # define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
-//   typedef long (*vgetcpu_t)(unsigned int *cpu, unsigned int *node, unsigned long *tcache);
-//   vgetcpu_t vgetcpu = (vgetcpu_t)VSYSCALL_ADDR(__NR_vgetcpu);
-//   retval = vgetcpu(&cpu, NULL, NULL);
-// #endif
-// 
-//   return (retval == -1) ? retval : cpu;
-// }
-// 
-// // Something to do with the numa-aware allocator needs these symbols
-// extern "C" JNIEXPORT void numa_warn(int number, char *where, ...) { }
-// extern "C" JNIEXPORT void numa_error(char *where) { }
-// extern "C" JNIEXPORT int fork1() { return fork(); }
-// 
-// 
-// // If we are running with libnuma version > 2, then we should
-// // be trying to use symbols with versions 1.1
-// // If we are running with earlier version, which did not have symbol versions,
-// // we should use the base version.
-// void* os::Aix::libnuma_dlsym(void* handle, const char *name) {
-//   void *f = dlvsym(handle, name, "libnuma_1.1");
-//   if (f == NULL) {
-//     f = dlsym(handle, name);
-//   }
-//   return f;
-// }
-// 
-// bool os::Aix::libnuma_init() {
-//   // sched_getcpu() should be in libc.
-//   set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t,
-//                                   dlsym(RTLD_DEFAULT, "sched_getcpu")));
-// 
-//   // If it's not, try a direct syscall.
-//   if (sched_getcpu() == -1)
-//     set_sched_getcpu(CAST_TO_FN_PTR(sched_getcpu_func_t, (void*)&sched_getcpu_syscall));
-// 
-//   if (sched_getcpu() != -1) { // Does it work?
-//     void *handle = dlopen("libnuma.so.1", RTLD_LAZY);
-//     if (handle != NULL) {
-//       set_numa_node_to_cpus(CAST_TO_FN_PTR(numa_node_to_cpus_func_t,
-//                                            libnuma_dlsym(handle, "numa_node_to_cpus")));
-//       set_numa_max_node(CAST_TO_FN_PTR(numa_max_node_func_t,
-//                                        libnuma_dlsym(handle, "numa_max_node")));
-//       set_numa_available(CAST_TO_FN_PTR(numa_available_func_t,
-//                                         libnuma_dlsym(handle, "numa_available")));
-//       set_numa_tonode_memory(CAST_TO_FN_PTR(numa_tonode_memory_func_t,
-//                                             libnuma_dlsym(handle, "numa_tonode_memory")));
-//       set_numa_interleave_memory(CAST_TO_FN_PTR(numa_interleave_memory_func_t,
-//                                             libnuma_dlsym(handle, "numa_interleave_memory")));
-// 
-// 
-//       if (numa_available() != -1) {
-//         set_numa_all_nodes((unsigned long*)libnuma_dlsym(handle, "numa_all_nodes"));
-//         // Create a cpu -> node mapping
-//         _cpu_to_node = new (ResourceObj::C_HEAP) GrowableArray<int>(0, true);
-//         rebuild_cpu_to_node_map();
-//         return true;
-//       }
-//     }
-//   }
-//   return false;
-// }
-// 
-// // rebuild_cpu_to_node_map() constructs a table mapping cpud id to node id.
-// // The table is later used in get_node_by_cpu().
-// void os::Aix::rebuild_cpu_to_node_map() {
-//   const size_t NCPUS = 32768; // Since the buffer size computation is very obscure
-//                               // in libnuma (possible values are starting from 16,
-//                               // and continuing up with every other power of 2, but less
-//                               // than the maximum number of CPUs supported by kernel), and
-//                               // is a subject to change (in libnuma version 2 the requirements
-//                               // are more reasonable) we'll just hardcode the number they use
-//                               // in the library.
-//   const size_t BitsPerCLong = sizeof(long) * CHAR_BIT;
-// 
-//   size_t cpu_num = os::active_processor_count();
-//   size_t cpu_map_size = NCPUS / BitsPerCLong;
-//   size_t cpu_map_valid_size =
-//     MIN2((cpu_num + BitsPerCLong - 1) / BitsPerCLong, cpu_map_size);
-// 
-//   cpu_to_node()->clear();
-//   cpu_to_node()->at_grow(cpu_num - 1);
-//   size_t node_num = numa_get_groups_num();
-// 
-//   unsigned long *cpu_map = NEW_C_HEAP_ARRAY(unsigned long, cpu_map_size);
-//   for (size_t i = 0; i < node_num; i++) {
-//     if (numa_node_to_cpus(i, cpu_map, cpu_map_size * sizeof(unsigned long)) != -1) {
-//       for (size_t j = 0; j < cpu_map_valid_size; j++) {
-//         if (cpu_map[j] != 0) {
-//           for (size_t k = 0; k < BitsPerCLong; k++) {
-//             if (cpu_map[j] & (1UL << k)) {
-//               cpu_to_node()->at_put(j * BitsPerCLong + k, i);
-//             }
-//           }
-//         }
-//       }
-//     }
-//   }
-//   FREE_C_HEAP_ARRAY(unsigned long, cpu_map);
-// }
-// 
-// int os::Aix::get_node_by_cpu(int cpu_id) {
-//   if (cpu_to_node() != NULL && cpu_id >= 0 && cpu_id < cpu_to_node()->length()) {
-//     return cpu_to_node()->at(cpu_id);
-//   }
-//   return -1;
-// }
-// 
-// GrowableArray<int>* os::Aix::_cpu_to_node;
-// os::Aix::sched_getcpu_func_t os::Aix::_sched_getcpu;
-// os::Aix::numa_node_to_cpus_func_t os::Aix::_numa_node_to_cpus;
-// os::Aix::numa_max_node_func_t os::Aix::_numa_max_node;
-// os::Aix::numa_available_func_t os::Aix::_numa_available;
-// os::Aix::numa_tonode_memory_func_t os::Aix::_numa_tonode_memory;
-// os::Aix::numa_interleave_memory_func_t os::Aix::_numa_interleave_memory;
-// unsigned long* os::Aix::_numa_all_nodes;
-
-bool os::uncommit_memory(char* addr, size_t size) {
-  Unimplemented();
-  return false;
-}
-
-// // Linux uses a growable mapping for the stack, and if the mapping for
-// // the stack guard pages is not removed when we detach a thread the
-// // stack cannot grow beyond the pages where the stack guard was
-// // mapped.  If at some point later in the process the stack expands to
-// // that point, the Linux kernel cannot expand the stack any further
-// // because the guard pages are in the way, and a segfault occurs.
-// //
-// // However, it's essential not to split the stack region by unmapping
-// // a region (leaving a hole) that's already part of the stack mapping,
-// // so if the stack mapping has already grown beyond the guard pages at
-// // the time we create them, we have to truncate the stack mapping.
-// // So, we need to know the extent of the stack mapping when
-// // create_stack_guard_pages() is called.
-// 
-// // Find the bounds of the stack mapping.  Return true for success.
-// //
-// // We only need this for stacks that are growable: at the time of
-// // writing thread stacks don't use growable mappings (i.e. those
-// // creeated with MAP_GROWSDOWN), and aren't marked "[stack]", so this
-// // only applies to the main thread.
-// 
-// static
-// bool get_stack_bounds(uintptr_t *bottom, uintptr_t *top) {
-// 
-//   char buf[128];
-//   int fd, sz;
-// 
-//   if ((fd = ::open("/proc/self/maps", O_RDONLY)) < 0) {
-//     return false;
-//   }
-// 
-//   const char kw[] = "[stack]";
-//   const int kwlen = sizeof(kw)-1;
-// 
-//   // Address part of /proc/self/maps couldn't be more than 128 bytes
-//   while ((sz = os::get_line_chars(fd, buf, sizeof(buf))) > 0) {
-//      if (sz > kwlen && ::memcmp(buf+sz-kwlen, kw, kwlen) == 0) {
-//         // Extract addresses
-//         if (sscanf(buf, "%" SCNxPTR "-%" SCNxPTR, bottom, top) == 2) {
-//            uintptr_t sp = (uintptr_t) __builtin_frame_address(0);
-//            if (sp >= *bottom && sp <= *top) {
-//               ::close(fd);
-//               return true;
-//            }
-//         }
-//      }
-//   }
-// 
-//  ::close(fd);
-//   return false;
-// }
-
-
-bool os::create_stack_guard_pages(char* addr, size_t size) {
-  Unimplemented();
-  return false;
-}
-
-bool os::remove_stack_guard_pages(char* addr, size_t size) {
-  Unimplemented();
-  return false;
-}
-
-// static address _highest_vm_reserved_address = NULL;
-// 
-// // If 'fixed' is true, anon_mmap() will attempt to reserve anonymous memory
-// // at 'requested_addr'. If there are existing memory mappings at the same
-// // location, however, they will be overwritten. If 'fixed' is false,
-// // 'requested_addr' is only treated as a hint, the return value may or
-// // may not start from the requested address. Unlike Linux mmap(), this
-// // function returns NULL to indicate failure.
-// static char* anon_mmap(char* requested_addr, size_t bytes, bool fixed) {
-//   char * addr;
-//   int flags;
-// 
-//   flags = MAP_PRIVATE | MAP_NORESERVE | MAP_ANONYMOUS;
-//   if (fixed) {
-//     assert((uintptr_t)requested_addr % os::Aix::page_size() == 0, "unaligned address");
-//     flags |= MAP_FIXED;
-//   }
-// 
-//   // Map uncommitted pages PROT_READ and PROT_WRITE, change access
-//   // to PROT_EXEC if executable when we commit the page.
-//   addr = (char*)::mmap(requested_addr, bytes, PROT_READ|PROT_WRITE,
-//                        flags, -1, 0);
-// 
-//   if (addr != MAP_FAILED) {
-//     // anon_mmap() should only get called during VM initialization,
-//     // don't need lock (actually we can skip locking even it can be called
-//     // from multiple threads, because _highest_vm_reserved_address is just a
-//     // hint about the upper limit of non-stack memory regions.)
-//     if ((address)addr + bytes > _highest_vm_reserved_address) {
-//       _highest_vm_reserved_address = (address)addr + bytes;
-//     }
-//   }
-// 
-//   return addr == MAP_FAILED ? NULL : addr;
-// }
-// 
-// // Don't update _highest_vm_reserved_address, because there might be memory
-// // regions above addr + size. If so, releasing a memory region only creates
-// // a hole in the address space, it doesn't help prevent heap-stack collision.
-// //
-// static int anon_munmap(char * addr, size_t size) {
-//   return ::munmap(addr, size) == 0;
-// }
-
-char* os::reserve_memory(size_t bytes, char* requested_addr,
-                         size_t alignment_hint) {
-  Unimplemented();
-  return NULL;
+// Flags for reserve_shmatted_memory:
+#define RESSHM_WISHADDR_OR_FAIL                     1
+#define RESSHM_TRY_16M_PAGES                        2
+#define RESSHM_16M_PAGES_OR_FAIL                    4
+
+// Result of reserve_shmatted_memory:
+struct shmatted_memory_info_t {
+  char* addr;
+  size_t pagesize;
+  bool pinned;
+};
+
+// Reserve a section of shmatted memory.
+// params:
+// bytes [in]: size of memory, in bytes
+// requested_addr [in]: wish address.
+//                      NULL = no wish.
+//                      If RESSHM_WISHADDR_OR_FAIL is set in flags and wish address cannot
+//                      be obtained, function will fail. Otherwise wish address is treated as hint and
+//                      another pointer is returned.
+// flags [in]:          some flags. Valid flags are:
+//                      RESSHM_WISHADDR_OR_FAIL - fail if wish address is given and cannot be obtained.
+//                      RESSHM_TRY_16M_PAGES - try to allocate from 16M page pool
+//                          (requires UseLargePages and Use16MPages)
+//                      RESSHM_16M_PAGES_OR_FAIL - if you cannot allocate from 16M page pool, fail.
+//                          Otherwise any other page size will do.
+// p_info [out] :       holds information about the created shared memory segment.
+static bool reserve_shmatted_memory(size_t bytes, char* requested_addr, int flags, shmatted_memory_info_t* p_info) {
+
+  assert(p_info, "parameter error");
+
+  // init output struct.
+  p_info->addr = NULL;
+
+  // neither should we be here for EXTSHM=ON.
+  if (os::Aix::extshm()) {
+    ShouldNotReachHere();
+  }
+
+  // extract flags. sanity checks.
+  const bool wishaddr_or_fail =
+    flags & RESSHM_WISHADDR_OR_FAIL;
+  const bool try_16M_pages =
+    flags & RESSHM_TRY_16M_PAGES;
+  const bool f16M_pages_or_fail =
+    flags & RESSHM_16M_PAGES_OR_FAIL;
+
+  // first check: if a wish address is given and it is mandatory, but not aligned to segment boundary,
+  // shmat will fail anyway, so save some cycles by failing right away
+  if (requested_addr && ((uintptr_t)requested_addr % SIZE_256M == 0)) {
+    if (wishaddr_or_fail) {
+      return false;
+    } else {
+      requested_addr = NULL;
+    }
+  }
+
+  char* addr = NULL;
+
+  // Align size of shm up to the largest possible page size, to avoid errors later on when we try to change
+  // pagesize dynamically.
+  const size_t size = align_size_up(bytes, SIZE_16M);
+
+  // reserve the shared segment
+  int shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | S_IRUSR | S_IWUSR);
+  if (shmid == -1) {
+    warning("shmget(.., %lld, ..) failed (errno: %d).", size, errno);
+    return false;
+  }
+
+  // Important note:
+  // It is very important that we, upon leaving this function, do not leave a shm segment alive.
+  // We must right after attaching it remove it from the system. System V shm segments are global and
+  // survive the process.
+  // So, from here on: Do not assert. Do not return. Always do a "goto cleanup_shm".
+
+  // try forcing the page size
+  size_t pagesize = -1; // unknown so far
+
+  if (UseLargePages) {
+
+    struct shmid_ds shmbuf;
+    memset(&shmbuf, 0, sizeof(shmbuf));
+
+    // First, try to take from 16M page pool if...
+    if (os::Aix::can_use_16M_pages()  // we can ...
+        && Use16MPages                // we are not explicitly forbidden to do so (-XX:-Use16MPages)..
+        && try_16M_pages) {           // caller wants us to.
+
+      shmbuf.shm_pagesize = SIZE_16M;
+      if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
+        pagesize = SIZE_16M;
+      } else {
+        warning("Failed to allocate %d 16M pages. 16M page pool might be exhausted. (shmctl failed with %d)",
+                size / SIZE_16M, errno);
+        if (f16M_pages_or_fail) {
+          goto cleanup_shm;
+        }
+      }
+    }
+
+    // Nothing yet? Try setting 64K pages. Note that I never saw this fail, but in theory it might,
+    //  because the 64K page pool may also be exhausted.
+    if (pagesize == -1) {
+      shmbuf.shm_pagesize = SIZE_64K;
+      if (shmctl(shmid, SHM_PAGESIZE, &shmbuf) == 0) {
+        pagesize = SIZE_64K;
+      } else {
+        warning("Failed to allocate %d 64K pages. (shmctl failed with %d)",
+                size / SIZE_64K, errno);
+        // here I give up. leave page_size -1 - later, after attaching, we will query the
+        // real page size of the attached memory. (in theory, it may be something different
+        // from 4K if LDR_CNTRL SHM_PSIZE is set)
+      }
+    }
+  }
+
+  // sanity point
+  assert(pagesize == -1 || pagesize == SIZE_16M || pagesize == SIZE_64K, "wrong page size");
+
+  // Now attach the shared segment.
+  addr = (char*) shmat(shmid, requested_addr, 0);
+  if (addr == (char*)-1) {
+    // How to handle attach failure:
+    // If it failed for a specific wish address, tolerate this: in that case, if wish address was
+    // mandatory, fail, if not, retry anywhere.
+    // If it failed for any other reason, treat that as fatal error.
+    addr = NULL;
+    if (requested_addr) {
+      if (wishaddr_or_fail) {
+        goto cleanup_shm;
+      } else {
+        addr = (char*) shmat(shmid, NULL, 0);
+        if (addr == (char*)-1) { // fatal
+          addr = NULL;
+          warning("shmat failed (errno: %d)", errno);
+          goto cleanup_shm;
+        }
+      }
+    } else { // fatal
+      addr = NULL;
+      warning("shmat failed (errno: %d)", errno);
+      goto cleanup_shm;
+    }
+  }
+  
+  // sanity point
+  assert(addr && addr != (char*) -1, "wrong address");
+  
+  // after successful Attach remove the segment - right away.
+  if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
+    warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
+    guarantee(false, "failed to remove shared memory segment!");
+  }
+  shmid = -1;
+
+  // query the real page size. In case setting the page size did not work (see above), the system
+  // may have given us something other than 4K (LDR_CNTRL)
+  {
+    const size_t real_pagesize = os::Aix::query_pagesize(addr);
+    if (pagesize != -1) {
+      assert(pagesize == real_pagesize, "unexpected pagesize after shmat");
+    } else {
+      pagesize = real_pagesize;
+    }
+  }
+  
+  // Now register the reserved block with internal book keeping.
+  LOCK_SHMBK
+    const bool pinned = pagesize >= SIZE_16M ? true : false;
+    ShmBkShmatedBlock* const p_block = new ShmBkShmatedBlock(AddrRange(addr, size), pagesize, pinned);
+    assert(p_block, "");
+    shmbk_register(p_block);
+  UNLOCK_SHMBK
+
+cleanup_shm:
+
+  // if we have not done so yet, remove the shared memory segment. This is very important.
+  if (shmid != -1) {
+    if (::shmctl(shmid, IPC_RMID, NULL) == -1) {
+      warning("shmctl(%u, IPC_RMID) failed (%d)\n", shmid, errno);
+      guarantee(false, "failed to remove shared memory segment!");
+    }
+    shmid = -1;
+  }
+
+  // trace
+  if (!addr) {
+    if (requested_addr != NULL) {
+      warning("failed to shm-allocate 0x%llX bytes at wish address 0x%p.", size, requested_addr);
+    } else {
+      warning("failed to shm-allocate 0x%llX bytes at any address.", size);
+    }
+  }
+
+  // hand info to caller
+  if (addr) {
+    p_info->addr = addr;
+    p_info->pagesize = pagesize;
+    p_info->pinned = pagesize >= SIZE_16M ? true : false;
+  }
+
+  // sanity test:
+  if (requested_addr && addr && wishaddr_or_fail) {
+    guarantee(addr == requested_addr, "shmat error");
+  }
+
+  // just one more test to really make sure we have no dangling shm segments.
+  guarantee(shmid == -1, "dangling shm segments");
+
+  return addr ? true : false;
+
+} // end: reserve_shmatted_memory
+
+// Reserve memory using mmap. Behaves the same as reserve_shmatted_memory():
+// will return NULL in case of an error.
+static char* reserve_mmaped_memory(size_t bytes, char* requested_addr) {
+  
+  // if a wish address is given, but not aligned to 4K page boundary, mmap will fail.
+  if (requested_addr && ((uintptr_t)requested_addr % os::vm_page_size() != 0)) {
+    warning("Wish address 0x%p not aligned to page boundary.", requested_addr);
+    return NULL;
+  }
+  
+  const size_t size = align_size_up(bytes, SIZE_4K);
+  
+  // Note: MAP_SHARED (instead of MAP_PRIVATE) needed to be able to
+  //  msync(MS_INVALIDATE) (see os::uncommit_memory)
+  int flags = MAP_ANONYMOUS | MAP_SHARED;
+  
+  // MAP_FIXED is needed to enforce requested_addr - manpage is vague about what
+  // it means if wishaddress is given but MAP_FIXED is not set.
+  //
+  // Note however that this changes semantics in SPEC1170 mode insofar as MAP_FIXED
+  // clobbers the address range, which is probably not what the caller wants. That's
+  // why I assert here (again) that the SPEC1170 compat mode is off.
+  // If we want to be able to run under SPEC1170, we have to do some porting and
+  // testing.
+  if (requested_addr != NULL) {
+    assert(!os::Aix::xpg_sus_mode(), "SPEC1170 mode not allowed.");
+    flags |= MAP_FIXED;
+  }
+
+  char* addr = (char*)::mmap(requested_addr, size, PROT_READ|PROT_WRITE|PROT_EXEC, flags, -1, 0);
+  
+  if (addr == MAP_FAILED) {
+    // attach failed: tolerate for specific wish addresses. Not being able to attach
+    // anywhere is a fatal error.
+    if (requested_addr == NULL) {
+      // It's ok to fail here if the machine has not enough memory.
+      warning("mmap(NULL, 0x%llX, ..) failed (%d)", size, errno);
+    }
+    addr = NULL;
+    goto cleanup_mmap;
+  }
+
+  // If we did request a specific address and that address was not available, fail.
+  if (addr && requested_addr) {
+    guarantee(addr == requested_addr, "unexpected");
+  }
+  
+  // register this mmap'ed segment with book keeping
+  LOCK_SHMBK
+    ShmBkMappedBlock* const p_block = new ShmBkMappedBlock(AddrRange(addr, size));
+    assert(p_block, "");
+    shmbk_register(p_block);
+  UNLOCK_SHMBK
+
+cleanup_mmap:
+
+  if (addr) {
+    if (Verbose) {
+      fprintf(stderr, "mmap-allocated 0x%p .. 0x%p (0x%llX bytes)", addr, addr + bytes, bytes);
+    }
+  }
+  else {
+    if (requested_addr != NULL) {
+      warning("failed to mmap-allocate 0x%llX bytes at wish address 0x%p.", bytes, requested_addr);
+    } else {
+      warning("failed to mmap-allocate 0x%llX bytes at any address.", bytes);
+    }
+  }
+
+  return addr;
+
+} // end: reserve_mmaped_memory
+
+
+// Reserves and attaches a shared memory segment.
+// Will assert if a wish address is given and could not be obtained.
+char* os::reserve_memory(size_t bytes, char* requested_addr, size_t alignment_hint) {
+  return os::attempt_reserve_memory_at(bytes, requested_addr);
 }
 
 bool os::release_memory(char* addr, size_t size) {
-  Unimplemented();
-  return false;
+
+  // delegate to ShmBkBlock class which knows how to uncommit its memory.
+
+  bool rc = false;
+  LOCK_SHMBK
+    ShmBkBlock* const block = shmbk_find_by_containing_address(addr);
+    if (!block) {
+      fprintf(stderr, "invalid pointer: 0x%p.\n", addr);
+      shmbk_dump_info();
+      assert(false, "invalid pointer");
+      return false;
+    } else if (!block->isSameRange(addr, size)) {
+      // release only the same range. No partial release allowed.
+      // Soften the requirement a bit, because the user may think he owns a smaller size
+      // than the block is due to alignment etc.
+      if (block->base() != addr || block->size() < size) {
+        fprintf(stderr, "invalid range: 0x%p .. 0x%p.\n", addr, addr + size);
+        shmbk_dump_info();
+        assert(false, "invalid range");
+        return false;
+      }
+    }
+    rc = block->release();
+    assert(rc, "release failed");
+    // remove block from bookkeeping
+    shmbk_unregister(block);
+    delete block;
+  UNLOCK_SHMBK
+
+  if (!rc) {
+    warning("failed to release %lu bytes at 0x%p", size, addr);
+  }
+
+  return rc;
 }
 
-// static address highest_vm_reserved_address() {
-//   return _highest_vm_reserved_address;
-// }
-// 
-// static bool linux_mprotect(char* addr, size_t size, int prot) {
-//   // Linux wants the mprotect address argument to be page aligned.
-//   char* bottom = (char*)align_size_down((intptr_t)addr, os::Aix::page_size());
-// 
-//   // According to SUSv3, mprotect() should only be used with mappings
-//   // established by mmap(), and mmap() always maps whole pages. Unaligned
-//   // 'addr' likely indicates problem in the VM (e.g. trying to change
-//   // protection of malloc'ed or statically allocated memory). Check the
-//   // caller if you hit this assert.
-//   assert(addr == bottom, "sanity check");
-// 
-//   size = align_size_up(pointer_delta(addr, bottom, 1) + size, os::Aix::page_size());
-//   return ::mprotect(bottom, size, prot) == 0;
-// }
+static bool checked_mprotect(char* addr, size_t size, int prot) {
+
+  // Little problem here: if SPEC1170 behaviour is off, mprotect() on AIX will
+  // not tell me if protection failed when trying to protect an un-protectable range.
+  //
+  // This means if the memory was allocated using shmget/shmat, protection won't work
+  // but mprotect will still return 0:
+  //
+  // See http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/mprotect.htm
+
+  bool rc = ::mprotect(addr, size, prot) == 0 ? true : false;
+
+  if (!rc) {
+    const char* const s_errno = strerror(errno);
+    warning("mprotect(" PTR_FORMAT "-" PTR_FORMAT ", 0x%X) failed (%s).", addr, addr + size, prot, s_errno);
+    return false;
+  }
+
+  // mprotect success check
+  //
+  // mprotect said it changed the protection but can I believe it?
+  //
+  // To be sure I need to check the protection afterwards. Try to
+  // read from protected memory and check whether that causes a segfault.
+  //
+  if (!os::Aix::xpg_sus_mode()) {
+
+    if (CanUseSafeFetch32()) {
+
+      const bool read_protected =
+        (SafeFetch32((int*)addr, 0x12345678) == 0x12345678 &&
+         SafeFetch32((int*)addr, 0x76543210) == 0x76543210) ? true : false;
+
+      if (prot & PROT_READ) {
+        rc = !read_protected;
+      } else {
+        rc = read_protected;
+      }
+    }
+  }
+  if (!rc) {
+    assert(false, "mprotect failed.");
+  }
+  return rc;
+}
 
 // Set protections specified
-bool os::protect_memory(char* addr, size_t bytes, ProtType prot,
-                        bool is_committed) {
-  Unimplemented();
-  return false;
+bool os::protect_memory(char* addr, size_t size, ProtType prot, bool is_committed) {
+  unsigned int p = 0;
+  switch (prot) {
+  case MEM_PROT_NONE: p = PROT_NONE; break;
+  case MEM_PROT_READ: p = PROT_READ; break;
+  case MEM_PROT_RW:   p = PROT_READ|PROT_WRITE; break;
+  case MEM_PROT_RWX:  p = PROT_READ|PROT_WRITE|PROT_EXEC; break;
+  default:
+    ShouldNotReachHere();
+  }
+  // is_committed is unused.
+  return checked_mprotect(addr, size, p);
 }
 
 bool os::guard_memory(char* addr, size_t size) {
-  Unimplemented();
-  return false;
+  return checked_mprotect(addr, size, PROT_NONE);
 }
 
 bool os::unguard_memory(char* addr, size_t size) {
-  Unimplemented();
-  return false;
+  return checked_mprotect(addr, size, PROT_READ|PROT_WRITE|PROT_EXEC);
 }
 
-// bool os::Aix::hugetlbfs_sanity_check(bool warn, size_t page_size) {
-//   bool result = false;
-//   void *p = mmap (NULL, page_size, PROT_READ|PROT_WRITE,
-//                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
-//                   -1, 0);
-// 
-//   if (p != (void *) -1) {
-//     // We don't know if this really is a huge page or not.
-//     FILE *fp = fopen("/proc/self/maps", "r");
-//     if (fp) {
-//       while (!feof(fp)) {
-//         char chars[257];
-//         long x = 0;
-//         if (fgets(chars, sizeof(chars), fp)) {
-//           if (sscanf(chars, "%lx-%*x", &x) == 1
-//               && x == (long)p) {
-//             if (strstr (chars, "hugepage")) {
-//               result = true;
-//               break;
-//             }
-//           }
-//         }
-//       }
-//       fclose(fp);
-//     }
-//     munmap (p, page_size);
-//     if (result)
-//       return true;
-//   }
-// 
-//   if (warn) {
-//     warning("HugeTLBFS is not supported by the operating system.");
-//   }
-// 
-//   return result;
-// }
-// 
-// /*
-// * Set the coredump_filter bits to include largepages in core dump (bit 6)
-// *
-// * From the coredump_filter documentation:
-// *
-// * - (bit 0) anonymous private memory
-// * - (bit 1) anonymous shared memory
-// * - (bit 2) file-backed private memory
-// * - (bit 3) file-backed shared memory
-// * - (bit 4) ELF header pages in file-backed private memory areas (it is
-// *           effective only if the bit 2 is cleared)
-// * - (bit 5) hugetlb private memory
-// * - (bit 6) hugetlb shared memory
-// */
-// static void set_coredump_filter(void) {
-//   FILE *f;
-//   long cdm;
-// 
-//   if ((f = fopen("/proc/self/coredump_filter", "r+")) == NULL) {
-//     return;
-//   }
-// 
-//   if (fscanf(f, "%lx", &cdm) != 1) {
-//     fclose(f);
-//     return;
-//   }
-// 
-//   rewind(f);
-// 
-//   if ((cdm & LARGEPAGES_BIT) == 0) {
-//     cdm |= LARGEPAGES_BIT;
-//     fprintf(f, "%#lx", cdm);
-//   }
-// 
-//   fclose(f);
-// }
-
 // Large page support
 
 static size_t _large_page_size = 0;
 
-// void os::large_page_init() {
-//   if (!UseLargePages) {
-//     UseHugeTLBFS = false;
-//     UseSHM = false;
-//     return;
-//   }
-// 
-//   if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) {
-//     // If UseLargePages is specified on the command line try both methods,
-//     // if it's default, then try only HugeTLBFS.
-//     if (FLAG_IS_DEFAULT(UseLargePages)) {
-//       UseHugeTLBFS = true;
-//     } else {
-//       UseHugeTLBFS = UseSHM = true;
-//     }
-//   }
-// 
-//   if (LargePageSizeInBytes) {
-//     _large_page_size = LargePageSizeInBytes;
-//   } else {
-//     // large_page_size on Linux is used to round up heap size. x86 uses either
-//     // 2M or 4M page, depending on whether PAE (Physical Address Extensions)
-//     // mode is enabled. AMD64/EM64T uses 2M page in 64bit mode. IA64 can use
-//     // page as large as 256M.
-//     //
-//     // Here we try to figure out page size by parsing /proc/meminfo and looking
-//     // for a line with the following format:
-//     //    Hugepagesize:     2048 kB
-//     //
-//     // If we can't determine the value (e.g. /proc is not mounted, or the text
-//     // format has been changed), we'll use the largest page size supported by
-//     // the processor.
-// 
-// #ifndef ZERO
-//     _large_page_size = IA32_ONLY(4 * M) AMD64_ONLY(2 * M) IA64_ONLY(256 * M) SPARC_ONLY(4 * M)
-//                        ARM_ONLY(2 * M) PPC_ONLY(4 * M) PPC64_ONLY(4 * M);
-// #endif // ZERO
-// 
-//     FILE *fp = fopen("/proc/meminfo", "r");
-//     if (fp) {
-//       while (!feof(fp)) {
-//         int x = 0;
-//         char buf[16];
-//         if (fscanf(fp, "Hugepagesize: %d", &x) == 1) {
-//           if (x && fgets(buf, sizeof(buf), fp) && strcmp(buf, " kB\n") == 0) {
-//             _large_page_size = x * K;
-//             break;
-//           }
-//         } else {
-//           // skip to next line
-//           for (;;) {
-//             int ch = fgetc(fp);
-//             if (ch == EOF || ch == (int)'\n') break;
-//           }
-//         }
-//       }
-//       fclose(fp);
-//     }
-//   }
-// 
-//   // print a warning if any large page related flag is specified on command line
-//   bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
-// 
-//   const size_t default_page_size = (size_t)Linux::page_size();
-//   if (_large_page_size > default_page_size) {
-//     _page_sizes[0] = _large_page_size;
-//     _page_sizes[1] = default_page_size;
-//     _page_sizes[2] = 0;
-//   }
-//   UseHugeTLBFS = UseHugeTLBFS &&
-//                  Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);
-// 
-//   if (UseHugeTLBFS)
-//     UseSHM = false;
-// 
-//   UseLargePages = UseHugeTLBFS || UseSHM;
-// 
-//   set_coredump_filter();
-// }
-// 
-// #ifndef SHM_HUGETLB
-// #define SHM_HUGETLB 04000
-// #endif
+// Enable large page support if OS allows that.
+void os::large_page_init() {
+
+  // Note: os::Aix::query_multipage_support must run first.
+
+  if (!UseLargePages) {
+    return;
+  }
+
+  if (!Aix::can_use_64K_pages()) {
+    assert(!Aix::can_use_16M_pages(), "64K is a precondition for 16M.");
+    UseLargePages = false;
+    return;
+  }
+
+  if (!Aix::can_use_16M_pages() && Use16MPages) {
+    fprintf(stderr, "Cannot use 16M pages. Please ensure that there is a 16M page pool "
+            "and that the VM runs with CAP_BYPASS_RAC_VMM and CAP_PROPAGATE capabilities.");
+  }
+
+  //  Do not report 16M page alignment as part of os::_page_sizes if we are
+  //  explicitly forbidden from using 16M pages. Doing so would increase the
+  //  alignment the garbage collector calculates with, slightly increasing
+  //  heap usage. We should only pay for 16M alignment if we really want to
+  //  use 16M pages.
+  if (Use16MPages && Aix::can_use_16M_pages()) {
+    _large_page_size = SIZE_16M;
+    _page_sizes[0] = SIZE_16M;
+    _page_sizes[1] = SIZE_64K;
+    _page_sizes[2] = SIZE_4K;
+    _page_sizes[3] = 0;
+  } else if (Aix::can_use_64K_pages()) {
+    _large_page_size = SIZE_64K;
+    _page_sizes[0] = SIZE_64K;
+    _page_sizes[1] = SIZE_4K;
+    _page_sizes[2] = 0;
+  }
+
+  if (Verbose) {
+    fprintf(stderr, "Default large page size is 0x%llX.", _large_page_size);
+  }
+} // end: os::large_page_init()
 
 char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
   // "exec" is passed in but not used.  Creating the shared image for
@@ -2247,40 +2540,148 @@
   return _large_page_size;
 }
 
-// HugeTLBFS allows application to commit large page memory on demand;
-// with SysV SHM the entire memory region must be allocated as shared
-// memory.
 bool os::can_commit_large_page_memory() {
-  Unimplemented();
-  return false;
+  // Well, sadly we cannot commit anything at all (see comment in
+  // os::commit_memory) but we claim to so we can make use of large pages
+  return true;
 }
 
 bool os::can_execute_large_page_memory() {
-  Unimplemented();
-  return false;
+  // We can do that
+  return true;
 }
 
 // Reserve memory at an arbitrary address, only if that area is
 // available (and not reserved for something else).
-
 char* os::attempt_reserve_memory_at(size_t bytes, char* requested_addr) {
-  Unimplemented();
-  return NULL;
+
+  bool use_mmap = false;
+
+  // mmap: smaller graining, no large page support
+  // shm: large graining (256M), large page support, limited number of shm segments
+  //
+  // Prefer mmap wherever we either do not need large page support or have OS limits
+
+  if (!UseLargePages || bytes < SIZE_16M) {
+    use_mmap = true;
+  }
+
+  char* addr = NULL;
+  if (use_mmap) {
+    addr = reserve_mmaped_memory(bytes, requested_addr);
+  } else {
+    // shmat: wish address is mandatory, and do not try 16M pages here.
+    shmatted_memory_info_t info;
+    const int flags = RESSHM_WISHADDR_OR_FAIL;
+    if (reserve_shmatted_memory(bytes, requested_addr, flags, &info)) {
+      addr = info.addr;
+    }
+  }
+  
+  return addr;
 }
 
 size_t os::read(int fd, void *buf, unsigned int nBytes) {
   return ::read(fd, buf, nBytes);
 }
 
-// TODO-FIXME: reconcile Solaris' os::sleep with the linux variation.
-// Solaris uses poll(), linux uses park().
-// Poll() is likely a better choice, assuming that Thread.interrupt()
-// generates a SIGUSRx signal. Note that SIGUSR1 can interfere with
-// SIGSEGV, see 4355769.
+#define NANOSECS_PER_MILLISEC 1000000
 
 int os::sleep(Thread* thread, jlong millis, bool interruptible) {
-  Unimplemented();
-  return 0;
+  assert(thread == Thread::current(),  "thread consistency check");
+
+  // Prevent nasty overflow in deadline calculation
+  // by handling long sleeps similar to solaris or windows.
+  const jlong limit = INT_MAX;
+  int result;
+  while (millis > limit) {
+    if ((result = os::sleep(thread, limit, interruptible)) != OS_OK) {
+      return result;
+    }
+    millis -= limit;
+  }
+
+  ParkEvent * const slp = thread->_SleepEvent;
+  slp->reset();
+  OrderAccess::fence();
+
+  if (interruptible) {
+    jlong prevtime = javaTimeNanos();
+
+    // Prevent precision loss and too long sleeps
+    jlong deadline = prevtime + millis * NANOSECS_PER_MILLISEC;
+
+    for (;;) {
+      if (os::is_interrupted(thread, true)) {
+        return OS_INTRPT;
+      }
+
+      jlong newtime = javaTimeNanos();
+
+      assert(newtime >= prevtime, "time moving backwards");
+      /* Doing prevtime and newtime in microseconds doesn't help precision,
+         and trying to round up to avoid lost milliseconds can result in a
+         too-short delay. */
+      millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
+
+      if (millis <= 0) {
+        return OS_OK;
+      }
+
+      // Stop sleeping if we passed the deadline
+      if (newtime >= deadline) {
+        return OS_OK;
+      }
+
+      prevtime = newtime;
+
+      {
+        assert(thread->is_Java_thread(), "sanity check");
+        JavaThread *jt = (JavaThread *) thread;
+        ThreadBlockInVM tbivm(jt);
+        OSThreadWaitState osts(jt->osthread(), false /* not Object.wait() */);
+
+        jt->set_suspend_equivalent();
+
+        slp->park(millis);
+
+        // were we externally suspended while we were waiting?
+        jt->check_and_wait_while_suspended();
+      }
+    }
+  } else {
+    OSThreadWaitState osts(thread->osthread(), false /* not Object.wait() */);
+    jlong prevtime = javaTimeNanos();
+
+    // Prevent precision loss and too long sleeps
+    jlong deadline = prevtime + millis * NANOSECS_PER_MILLISEC;
+
+    for (;;) {
+      // It'd be nice to avoid the back-to-back javaTimeNanos() calls on
+      // the 1st iteration ...
+      jlong newtime = javaTimeNanos();
+
+      if (newtime - prevtime < 0) {
+        // time moving backwards, should only happen if no monotonic clock
+        // not a guarantee() because JVM should not abort on kernel/glibc bugs
+        // - HS14 Commented out as not implemented.
+        // - TODO Maybe we should implement it?
+        //assert(!Aix::supports_monotonic_clock(), "time moving backwards");
+      } else {
+        millis -= (newtime - prevtime) / NANOSECS_PER_MILLISEC;
+      }
+
+      if (millis <= 0) break;
+
+      if (newtime >= deadline) {
+        break;
+      }
+
+      prevtime = newtime;
+      slp->park(millis);
+    }
+    return OS_OK;
+  }
 }
 
 int os::naked_sleep() {
@@ -2353,32 +2754,37 @@
   60              // 11 CriticalPriority
 };
 
-// static int prio_init() {
-//   if (ThreadPriorityPolicy == 1) {
-//     // Only root can raise thread priority. Don't allow ThreadPriorityPolicy=1
-//     // if effective uid is not root. Perhaps, a more elegant way of doing
-//     // this is to test CAP_SYS_NICE capability, but that will require libcap.so
-//     if (geteuid() != 0) {
-//       if (!FLAG_IS_DEFAULT(ThreadPriorityPolicy)) {
-//         warning("-XX:ThreadPriorityPolicy requires root privilege on Linux");
-//       }
-//       ThreadPriorityPolicy = 0;
-//     }
-//   }
-//   if (UseCriticalJavaThreadPriority) {
-//     os::java_to_os_priority[MaxPriority] = os::java_to_os_priority[CriticalPriority];
-//   }
-//   return 0;
-// }
-
 OSReturn os::set_native_priority(Thread* thread, int newpri) {
-  Unimplemented();
-  return OS_ERR;
+  if (!UseThreadPriorities) return OS_OK;
+  pthread_t thr = thread->osthread()->pthread_id();
+  int policy = SCHED_OTHER;
+  struct sched_param param;
+  param.sched_priority = newpri;
+  int ret = pthread_setschedparam(thr, policy, &param);
+
+  if (Verbose) {
+    if (ret == 0) {
+      fprintf(stderr, "changed priority of thread %d to %d", (int)thr, newpri);
+    } else {
+      fprintf(stderr, "Could not change priority for thread %d to %d (error %d, %s)",
+              (int)thr, newpri, ret, strerror(ret));
+    }
+  }
+  return (ret == 0) ? OS_OK : OS_ERR;
 }
 
 OSReturn os::get_native_priority(const Thread* const thread, int *priority_ptr) {
-  Unimplemented();
-  return OS_ERR;
+  if (!UseThreadPriorities) {
+    *priority_ptr = java_to_os_priority[NormPriority];
+    return OS_OK;
+  }
+  pthread_t thr = thread->osthread()->pthread_id();
+  int policy = SCHED_OTHER;
+  struct sched_param param;
+  int ret = pthread_getschedparam(thr, &policy, &param);
+  if (ret == 0) *priority_ptr = param.sched_priority;
+
+  return (ret == 0) ? OS_OK : OS_ERR;
 }
 
 // Hint to the underlying OS that a task switch would not be good.
@@ -2447,7 +2853,6 @@
   // read current suspend action
   int action = osthread->sr.suspend_action();
   if (action == SR_SUSPEND) {
-#if 0
     // Allow suspending other threads as well.
     if (thread->is_Java_thread()) {
       JavaThread* jt = (JavaThread*) thread;
@@ -2457,8 +2862,6 @@
       // a readable window but doesn't subvert the stack walking invariants.
       jt->frame_anchor()->make_walkable(jt);
     }
-#endif
-    Unimplemented(); // Do we need this?
 
     suspend_save_context(osthread, siginfo, context);
 
@@ -2620,11 +3023,48 @@
 // handlers, unless invoked with the option "-XX:+AllowUserSignalHandlers".
 //
 extern "C" JNIEXPORT int
-JVM_handle_linux_signal(int signo, siginfo_t* siginfo,
-                        void* ucontext, int abort_if_unrecognized);
-
-void signalHandler(int sig, siginfo_t* info, void* uc) {
-  Unimplemented();
+JVM_handle_aix_signal(int signo, siginfo_t* siginfo, void* ucontext, int abort_if_unrecognized);
+
+// Set thread signal mask (for some reason on AIX sigthreadmask() seems
+// to be the thing to call; documentation is not terribly clear about whether
+// pthread_sigmask also works, and if it does, whether it does the same).
+bool set_thread_signal_mask(int how, const sigset_t* set, sigset_t* oset) {
+  const int rc =
+#ifdef _AIX
+    ::sigthreadmask(how, set, oset);
+#else
+    ::pthread_sigmask(how, set, oset);
+#endif
+  // return value semantics differ slightly for error case: 
+  // pthread_sigmask returns error number, sigthreadmask -1 and sets global errno
+  // (so, pthread_sigmask is more threadsafe for error handling)
+  // But success is always 0. 
+  return rc == 0 ? true : false;
+}
+
+// Function to unblock all signals which are, according
+// to POSIX, typical program error signals. If they happen while being blocked,
+// they typically will bring down the process immediately.
+bool unblock_program_error_signals() {
+  sigset_t set;
+  ::sigemptyset(&set);
+  ::sigaddset(&set, SIGILL);  
+  ::sigaddset(&set, SIGBUS);  
+  ::sigaddset(&set, SIGFPE);
+  ::sigaddset(&set, SIGSEGV);
+  return set_thread_signal_mask(SIG_UNBLOCK, &set, NULL);
+}
+
+// Renamed from 'signalHandler' to avoid collision with other shared libs.
+void javaSignalHandler(int sig, siginfo_t* info, void* uc) {
+  assert(info != NULL && uc != NULL, "it must be old kernel");
+
+  // Never leave program error signals blocked; 
+  // on all our platforms they would bring down the process immediately when 
+  // getting raised while being blocked.
+  unblock_program_error_signals();
+
+  JVM_handle_aix_signal(sig, info, uc, true);
 }
 
 
@@ -2698,136 +3138,111 @@
   sigflags[sig] = flags;
 }
 
-// void os::Aix::set_signal_handler(int sig, bool set_installed) {
-//   // Check for overwrite.
-//   struct sigaction oldAct;
-//   sigaction(sig, (struct sigaction*)NULL, &oldAct);
-// 
-//   void* oldhand = oldAct.sa_sigaction
-//                 ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
-//                 : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
-//   if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
-//       oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
-//       oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)signalHandler)) {
-//     if (AllowUserSignalHandlers || !set_installed) {
-//       // Do not overwrite; user takes responsibility to forward to us.
-//       return;
-//     } else if (UseSignalChaining) {
-//       // save the old handler in jvm
-//       save_preinstalled_handler(sig, oldAct);
-//       // libjsig also interposes the sigaction() call below and saves the
-//       // old sigaction on it own.
-//     } else {
-//       fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
-//                     "%#lx for signal %d.", (long)oldhand, sig));
-//     }
-//   }
-// 
-//   struct sigaction sigAct;
-//   sigfillset(&(sigAct.sa_mask));
-//   sigAct.sa_handler = SIG_DFL;
-//   if (!set_installed) {
-//     sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
-//   } else {
-//     sigAct.sa_sigaction = signalHandler;
-//     sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
-//   }
-//   // Save flags, which are set by ours
-//   assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
-//   sigflags[sig] = sigAct.sa_flags;
-// 
-//   int ret = sigaction(sig, &sigAct, &oldAct);
-//   assert(ret == 0, "check");
-// 
-//   void* oldhand2  = oldAct.sa_sigaction
-//                   ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
-//                   : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
-//   assert(oldhand2 == oldhand, "no concurrent signal handler installation");
-// }
-// 
-// // install signal handlers for signals that HotSpot needs to
-// // handle in order to support Java-level exception handling.
-// 
-// void os::Aix::install_signal_handlers() {
-//   if (!signal_handlers_are_installed) {
-//     signal_handlers_are_installed = true;
-// 
-//     // signal-chaining
-//     typedef void (*signal_setting_t)();
-//     signal_setting_t begin_signal_setting = NULL;
-//     signal_setting_t end_signal_setting = NULL;
-//     begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
-//                              dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
-//     if (begin_signal_setting != NULL) {
-//       end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
-//                              dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
-//       get_signal_action = CAST_TO_FN_PTR(get_signal_t,
-//                             dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
-//       libjsig_is_loaded = true;
-//       assert(UseSignalChaining, "should enable signal-chaining");
-//     }
-//     if (libjsig_is_loaded) {
-//       // Tell libjsig jvm is setting signal handlers
-//       (*begin_signal_setting)();
-//     }
-// 
-//     set_signal_handler(SIGSEGV, true);
-//     set_signal_handler(SIGPIPE, true);
-//     set_signal_handler(SIGBUS, true);
-//     set_signal_handler(SIGILL, true);
-//     set_signal_handler(SIGFPE, true);
-//     set_signal_handler(SIGXFSZ, true);
-// 
-//     if (libjsig_is_loaded) {
-//       // Tell libjsig jvm finishes setting signal handlers
-//       (*end_signal_setting)();
-//     }
-// 
-//     // We don't activate signal checker if libjsig is in place, we trust ourselves
-//     // and if UserSignalHandler is installed all bets are off.
-//     // Log that signal checking is off only if -verbose:jni is specified.
-//     if (CheckJNICalls) {
-//       if (libjsig_is_loaded) {
-//         if (PrintJNIResolving) {
-//           tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
-//         }
-//         check_signals = false;
-//       }
-//       if (AllowUserSignalHandlers) {
-//         if (PrintJNIResolving) {
-//           tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
-//         }
-//         check_signals = false;
-//       }
-//     }
-//   }
-// }
-// 
-// // This is the fastest way to get thread cpu time on Linux.
-// // Returns cpu time (user+sys) for any thread, not only for current.
-// // POSIX compliant clocks are implemented in the kernels 2.6.16+.
-// // It might work on 2.6.10+ with a special kernel/glibc patch.
-// // For reference, please, see IEEE Std 1003.1-2004:
-// //   http://www.unix.org/single_unix_specification
-// 
-// jlong os::Aix::fast_thread_cpu_time(clockid_t clockid) {
-//   struct timespec tp;
-//   int rc = os::Aix::clock_gettime(clockid, &tp);
-//   assert(rc == 0, "clock_gettime is expected to return 0 code");
-// 
-//   return (tp.tv_sec * NANOSECS_PER_SEC) + tp.tv_nsec;
-// }
-// 
-// /////
-// // glibc on Linux platform uses non-documented flag
-// // to indicate, that some special sort of signal
-// // trampoline is used.
-// // We will never set this flag, and we should
-// // ignore this flag in our diagnostic
-// #ifdef SIGNIFICANT_SIGNAL_MASK
-// #undef SIGNIFICANT_SIGNAL_MASK
-// #endif
-// #define SIGNIFICANT_SIGNAL_MASK (~0x04000000)
+void os::Aix::set_signal_handler(int sig, bool set_installed) {
+  // Check for overwrite.
+  struct sigaction oldAct;
+  sigaction(sig, (struct sigaction*)NULL, &oldAct);
+
+  void* oldhand = oldAct.sa_sigaction
+                ? CAST_FROM_FN_PTR(void*,  oldAct.sa_sigaction)
+        : CAST_FROM_FN_PTR(void*,  oldAct.sa_handler);
+  // Renamed 'signalHandler' to avoid collision with other shared libs.
+  if (oldhand != CAST_FROM_FN_PTR(void*, SIG_DFL) &&
+      oldhand != CAST_FROM_FN_PTR(void*, SIG_IGN) &&
+      oldhand != CAST_FROM_FN_PTR(void*, (sa_sigaction_t)javaSignalHandler)) {
+    if (AllowUserSignalHandlers || !set_installed) {
+      // Do not overwrite; user takes responsibility to forward to us.
+      return;
+    } else if (UseSignalChaining) {
+      // save the old handler in jvm
+      save_preinstalled_handler(sig, oldAct);
+      // libjsig also interposes the sigaction() call below and saves the
+      // old sigaction on its own.
+    } else {
+      fatal(err_msg("Encountered unexpected pre-existing sigaction handler "
+                    "%#lx for signal %d.", (long)oldhand, sig));
+    }
+  }
+
+  struct sigaction sigAct;
+  sigfillset(&(sigAct.sa_mask));
+  if (!set_installed) {
+    sigAct.sa_handler = SIG_DFL;
+    sigAct.sa_flags = SA_RESTART;
+  } else {
+    // Renamed 'signalHandler' to avoid collision with other shared libs.
+    sigAct.sa_sigaction = javaSignalHandler;
+    sigAct.sa_flags = SA_SIGINFO|SA_RESTART;
+  }
+  // Save the flags that we have set for this signal.
+  assert(sig > 0 && sig < MAXSIGNUM, "vm signal out of expected range");
+  sigflags[sig] = sigAct.sa_flags;
+
+  int ret = sigaction(sig, &sigAct, &oldAct);
+  assert(ret == 0, "check");
+
+  void* oldhand2  = oldAct.sa_sigaction
+                  ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction)
+                  : CAST_FROM_FN_PTR(void*, oldAct.sa_handler);
+  assert(oldhand2 == oldhand, "no concurrent signal handler installation");
+}
+
+// install signal handlers for signals that HotSpot needs to
+// handle in order to support Java-level exception handling.
+void os::Aix::install_signal_handlers() {
+  if (!signal_handlers_are_installed) {
+    signal_handlers_are_installed = true;
+
+    // signal-chaining
+    typedef void (*signal_setting_t)();
+    signal_setting_t begin_signal_setting = NULL;
+    signal_setting_t end_signal_setting = NULL;
+    begin_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
+                             dlsym(RTLD_DEFAULT, "JVM_begin_signal_setting"));
+    if (begin_signal_setting != NULL) {
+      end_signal_setting = CAST_TO_FN_PTR(signal_setting_t,
+                             dlsym(RTLD_DEFAULT, "JVM_end_signal_setting"));
+      get_signal_action = CAST_TO_FN_PTR(get_signal_t,
+                            dlsym(RTLD_DEFAULT, "JVM_get_signal_action"));
+      libjsig_is_loaded = true;
+      assert(UseSignalChaining, "should enable signal-chaining");
+    }
+    if (libjsig_is_loaded) {
+      // Tell libjsig jvm is setting signal handlers
+      (*begin_signal_setting)();
+    }
+
+    set_signal_handler(SIGSEGV, true);
+    set_signal_handler(SIGPIPE, true);
+    set_signal_handler(SIGBUS, true);
+    set_signal_handler(SIGILL, true);
+    set_signal_handler(SIGFPE, true);
+    set_signal_handler(SIGTRAP, true);
+    set_signal_handler(SIGXFSZ, true);
+    set_signal_handler(SIGDANGER, true);
+
+    if (libjsig_is_loaded) {
+      // Tell libjsig jvm finishes setting signal handlers
+      (*end_signal_setting)();
+    }
+
+    // We don't activate signal checker if libjsig is in place, we trust ourselves
+    // and if UserSignalHandler is installed all bets are off.
+    // Log that signal checking is off only if -verbose:jni is specified.
+    if (CheckJNICalls) {
+      if (libjsig_is_loaded) {
+        tty->print_cr("Info: libjsig is activated, all active signal checking is disabled");
+        check_signals = false;
+      }
+      if (AllowUserSignalHandlers) {
+        tty->print_cr("Info: AllowUserSignalHandlers is activated, all active signal checking is disabled");
+        check_signals = false;
+      }
+      // need to initialize check_signal_done
+      ::sigemptyset(&check_signal_done);
+    }
+  }
+}
 
 static const char* get_signal_handler_name(address handler,
                                            char* buf, int buflen) {
@@ -2905,7 +3320,6 @@
   char buf[O_BUFLEN];
   address jvmHandler = NULL;
 
-
   struct sigaction act;
   if (os_sigaction == NULL) {
     // only trust the default sigaction, in case it has been interposed
@@ -2915,9 +3329,6 @@
 
   os_sigaction(sig, (struct sigaction*)NULL, &act);
 
-
-  //act.sa_flags &= SIGNIFICANT_SIGNAL_MASK;
-
   address thisHandler = (act.sa_flags & SA_SIGINFO)
     ? CAST_FROM_FN_PTR(address, act.sa_sigaction)
     : CAST_FROM_FN_PTR(address, act.sa_handler) ;
@@ -2930,7 +3341,8 @@
   case SIGPIPE:
   case SIGILL:
   case SIGXFSZ:
-    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)signalHandler);
+    // Renamed 'signalHandler' to avoid collision with other shared libs.
+    jvmHandler = CAST_FROM_FN_PTR(address, (sa_sigaction_t)javaSignalHandler);
     break;
 
   case SHUTDOWN1_SIGNAL:
@@ -2973,8 +3385,6 @@
   }
 }
 
-// extern void report_error(char* file_name, int line_no, char* title, char* format, ...);
-
 extern bool signal_name(int signo, char* buf, size_t len);
 
 const char* os::exception_name(int exception_code, char* buf, size_t size) {
@@ -2989,37 +3399,259 @@
   }
 }
 
+// To install functions for atexit system call
+extern "C" {
+  static void perfMemory_exit_helper() {
+    perfMemory_exit();
+  }
+}
+
 // this is called _before_ the most of global arguments have been parsed
 void os::init(void) {
-  Unimplemented();
+  // This is basic, we want to know if that ever changes.
+  // (the shared memory boundary is supposed to be 256M aligned)
+  assert(SHMLBA == ((uint64_t)0x10000000ULL)/*256M*/, "unexpected");
+
+  // First off, we need to know whether we run on AIX or PASE, and
+  // the OS level we run on.
+  os::Aix::initialize_os_info();
+  
+  // Scan environment (SPEC1170 behaviour, etc)
+  os::Aix::scan_environment();
+
+  // Check which pages are supported by AIX.
+  os::Aix::query_multipage_support();
+
+  // Next, we need to initialize libo4 and libperfstat libraries.
+  if (os::Aix::on_pase()) {
+    os::Aix::initialize_libo4();
+  } else {
+    os::Aix::initialize_libperfstat();
+  }
+
+  // Reset the perfstat information provided by ODM.
+  if (os::Aix::on_aix()) {
+    libperfstat::perfstat_reset();
+  }
+
+  // Now initialize basic system properties. Note that for some of the values we
+  // need libperfstat etc.
+  os::Aix::initialize_system_info();
+
+  // Initialize large page support.
+  if (UseLargePages) {
+    os::large_page_init();
+    if (!UseLargePages) {
+      // initialize os::_page_sizes
+      _page_sizes[0] = Aix::page_size();
+      _page_sizes[1] = 0;
+      if (Verbose) {
+        fprintf(stderr, "Large Page initialization failed - UseLargePages=0.");
+      }
+    }
+  } else {
+    // initialize os::_page_sizes
+    _page_sizes[0] = Aix::page_size();
+    _page_sizes[1] = 0;
+  }
+
+  // debug trace
+  if (Verbose) {
+    fprintf(stderr, "os::vm_page_size 0x%llX\n", os::vm_page_size());
+    fprintf(stderr, "os::large_page_size 0x%llX\n", os::large_page_size());
+    fprintf(stderr, "os::_page_sizes = ( ");
+    for (int i = 0; _page_sizes[i]; i ++) {
+      fprintf(stderr, " %s ", describe_pagesize(_page_sizes[i]));
+    }
+    fprintf(stderr, ")\n");
+  }
+
+  _initial_pid = getpid();
+
+  clock_tics_per_sec = sysconf(_SC_CLK_TCK);
+
+  init_random(1234567);
+
+  ThreadCritical::initialize();
+
+  // Main_thread points to the aboriginal thread.
+  Aix::_main_thread = pthread_self();
+
+  initial_time_count = os::elapsed_counter();
+  pthread_mutex_init(&dl_mutex, NULL);
 }
 
-// // To install functions for atexit system call
-// extern "C" {
-//   static void perfMemory_exit_helper() {
-//     perfMemory_exit();
-//   }
-// }
-
 // this is called _after_ the global arguments have been parsed
 jint os::init_2(void) {
-  Unimplemented();
-  return 0;
+
+  if (Verbose) {
+    fprintf(stderr, "processor count: %d", os::_processor_count);
+    fprintf(stderr, "physical memory: %lu", Aix::_physical_memory);
+  }
+
+  // initially build up the loaded dll map
+  LoadedLibraries::reload();
+
+  const int page_size = Aix::page_size();
+  const int map_size = page_size;
+
+  address map_address = (address) MAP_FAILED;
+  const int prot  = PROT_READ;
+  const int flags = MAP_PRIVATE|MAP_ANONYMOUS;
+
+  // use optimized addresses for the polling page,
+  // e.g. map it to a special 32-bit address.
+  if (OptimizePollingPageLocation) {
+    // architecture-specific list of address wishes:
+    address address_wishes[] = {
+      // AIX: addresses lower than 0x30000000 don't seem to work on AIX.
+      // PPC64: all address wishes are non-negative 32 bit values where
+      // the lower 16 bits are all zero. we can load these addresses
+      // with a single ppc_lis instruction.
+      (address) 0x30000000, (address) 0x31000000,
+      (address) 0x32000000, (address) 0x33000000,
+      (address) 0x40000000, (address) 0x41000000,
+      (address) 0x42000000, (address) 0x43000000,
+      (address) 0x50000000, (address) 0x51000000,
+      (address) 0x52000000, (address) 0x53000000,
+      (address) 0x60000000, (address) 0x61000000,
+      (address) 0x62000000, (address) 0x63000000
+    };
+    int address_wishes_length = sizeof(address_wishes)/sizeof(address);
+
+    // iterate over the list of address wishes:
+    for (int i=0; i<address_wishes_length; i++) {
+      // try to map with current address wish.
+      // AIX: AIX needs MAP_FIXED if we provide an address and mmap will
+      // fail if the address is already mapped.
+      map_address = (address) ::mmap(address_wishes[i] - (ssize_t)page_size,
+                                     map_size, prot,
+                                     flags | MAP_FIXED,
+                                     -1, 0);
+      if (Verbose) {
+        fprintf(stderr, "SafePoint Polling  Page address: %p (wish) => %p",
+                address_wishes[i], map_address + (ssize_t)page_size);
+      }
+
+      if (map_address + (ssize_t)page_size == address_wishes[i]) {
+        // map succeeded and map_address is at wished address, exit loop.
+        break;
+      }
+
+      if (map_address != (address) MAP_FAILED) {
+        // map succeeded, but polling_page is not at wished address, unmap and continue.
+        ::munmap(map_address, map_size);
+        map_address = (address) MAP_FAILED;
+      }
+      // map failed, continue loop.
+    }
+  } // end OptimizePollingPageLocation
+
+  if (map_address == (address) MAP_FAILED) {
+    map_address = (address) ::mmap(NULL, map_size, prot, flags, -1, 0);
+  }
+  guarantee(map_address != MAP_FAILED, "os::init_2: failed to allocate polling page");
+  os::set_polling_page(map_address);
+
+  if (!UseMembar) {
+    address mem_serialize_page = (address) ::mmap(NULL, Aix::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+    guarantee(mem_serialize_page != NULL, "mmap Failed for memory serialize page");
+    os::set_memory_serialize_page(mem_serialize_page);
+
+#ifndef PRODUCT
+    if (Verbose && PrintMiscellaneous)
+      tty->print("[Memory Serialize  Page address: " INTPTR_FORMAT "]\n", (intptr_t)mem_serialize_page);
+#endif
+  }
+
+  // initialize suspend/resume support - must do this before signal_sets_init()
+  if (SR_initialize() != 0) {
+    perror("SR_initialize failed");
+    return JNI_ERR;
+  }
+
+  Aix::signal_sets_init();
+  Aix::install_signal_handlers();
+
+  // Check minimum allowable stack size for thread creation and to initialize
+  // the java system classes, including StackOverflowError - depends on page
+  // size.  Add a page for compiler2 recursion in main thread.
+  // Add in 2*BytesPerWord times page size to account for VM stack during
+  // class initialization depending on 32 or 64 bit VM.
+  os::Aix::min_stack_allowed = MAX2(os::Aix::min_stack_allowed,
+            (size_t)(StackYellowPages+StackRedPages+StackShadowPages+
+                    2*BytesPerWord COMPILER2_PRESENT(+1)) * Aix::page_size());
+
+  size_t threadStackSizeInBytes = ThreadStackSize * K;
+  if (threadStackSizeInBytes != 0 &&
+      threadStackSizeInBytes < os::Aix::min_stack_allowed) {
+        tty->print_cr("\nThe stack size specified is too small, "
+                      "Specify at least %dk",
+                      os::Aix::min_stack_allowed / K);
+        return JNI_ERR;
+  }
+
+  // Make the stack size a multiple of the page size so that
+  // the yellow/red zones can be guarded.
+  // note that this can be 0, if no default stacksize was set
+  JavaThread::set_stack_size_at_create(round_to(threadStackSizeInBytes, vm_page_size()));
+
+  Aix::libpthread_init();
+
+  if (MaxFDLimit) {
+    // set the number of file descriptors to max. print out error
+    // if getrlimit/setrlimit fails but continue regardless.
+    struct rlimit nbr_files;
+    int status = getrlimit(RLIMIT_NOFILE, &nbr_files);
+    if (status != 0) {
+      if (PrintMiscellaneous && (Verbose || WizardMode))
+        perror("os::init_2 getrlimit failed");
+    } else {
+      nbr_files.rlim_cur = nbr_files.rlim_max;
+      status = setrlimit(RLIMIT_NOFILE, &nbr_files);
+      if (status != 0) {
+        if (PrintMiscellaneous && (Verbose || WizardMode))
+          perror("os::init_2 setrlimit failed");
+      }
+    }
+  }
+
+  if (PerfAllowAtExitRegistration) {
+    // only register atexit functions if PerfAllowAtExitRegistration is set.
+    // atexit functions can be delayed until process exit time, which
+    // can be problematic for embedded VM situations. Embedded VMs should
+    // call DestroyJavaVM() to assure that VM resources are released.
+
+    // note: perfMemory_exit_helper atexit function may be removed in
+    // the future if the appropriate cleanup code can be added to the
+    // VM_Exit VMOperation's doit method.
+    if (atexit(perfMemory_exit_helper) != 0) {
+      warning("os::init_2 atexit(perfMemory_exit_helper) failed");
+    }
+  }
+
+  return JNI_OK;
+  
 }
 
 // this is called at the end of vm_initialization
 void os::init_3(void) {
-  Unimplemented();
+  return;
 }
 
 // Mark the polling page as unreadable
 void os::make_polling_page_unreadable(void) {
-  Unimplemented();
+  if(!guard_memory((char*)_polling_page, Aix::page_size())) {
+    fatal("Could not disable polling page");
+  }
 };
 
 // Mark the polling page as readable
 void os::make_polling_page_readable(void) {
-  Unimplemented();
+  // Changed according to os_linux.cpp.
+  if (!checked_mprotect((char *)_polling_page, Aix::page_size(), PROT_READ)) {
+    fatal(err_msg("Could not enable polling page at " PTR_FORMAT, _polling_page));
+  }
 };
 
 int os::active_processor_count() {
@@ -3313,18 +3945,6 @@
   return munmap(addr, bytes) == 0;
 }
 
-// static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time);
-// 
-// static clockid_t thread_cpu_clockid(Thread* thread) {
-//   pthread_t tid = thread->osthread()->pthread_id();
-//   clockid_t clockid;
-// 
-//   // Get thread clockid
-//   int rc = os::Aix::pthread_getcpuclockid(tid, &clockid);
-//   assert(rc == 0, "pthread_getcpuclockid is expected to return 0 code");
-//   return clockid;
-// }
-
 // current_thread_cpu_time(bool) and thread_cpu_time(Thread*, bool)
 // are used by JVM M&M and JVMTI to get user+sys or user CPU time
 // of a thread.
@@ -3333,18 +3953,23 @@
 // the fast estimate available on the platform.
 
 jlong os::current_thread_cpu_time() {
-  Unimplemented();
-  return 1;
+  // return user + sys since the cost is the same
+  const jlong n = os::thread_cpu_time(Thread::current(), true /* user + sys */);
+  assert(n >= 0, "negative CPU time");
+  return n;
 }
 
 jlong os::thread_cpu_time(Thread* thread) {
-  Unimplemented();
-  return 1;
+  // consistent with what current_thread_cpu_time() returns
+  const jlong n = os::thread_cpu_time(thread, true /* user + sys */);
+  assert(n >= 0, "negative CPU time");
+  return n;
 }
 
 jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
-  Unimplemented();
-  return 1;
+  const jlong n = os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
+  assert(n >= 0, "negative CPU time");
+  return n;
 }
 
 jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
@@ -3352,102 +3977,18 @@
   return 1;
 }
 
-//
-//  -1 on error.
-//
-
-// static jlong slow_thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
-//   static bool proc_pid_cpu_avail = true;
-//   static bool proc_task_unchecked = true;
-//   static const char *proc_stat_path = "/proc/%d/stat";
-//   pid_t  tid = thread->osthread()->thread_id();
-//   int i;
-//   char *s;
-//   char stat[2048];
-//   int statlen;
-//   char proc_name[64];
-//   int count;
-//   long sys_time, user_time;
-//   char string[64];
-//   char cdummy;
-//   int idummy;
-//   long ldummy;
-//   FILE *fp;
-// 
-//   // We first try accessing /proc/<pid>/cpu since this is faster to
-//   // process.  If this file is not present (linux kernels 2.5 and above)
-//   // then we open /proc/<pid>/stat.
-//   if ( proc_pid_cpu_avail ) {
-//     sprintf(proc_name, "/proc/%d/cpu", tid);
-//     fp =  fopen(proc_name, "r");
-//     if ( fp != NULL ) {
-//       count = fscanf( fp, "%s %lu %lu\n", string, &user_time, &sys_time);
-//       fclose(fp);
-//       if ( count != 3 ) return -1;
-// 
-//       if (user_sys_cpu_time) {
-//         return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec);
-//       } else {
-//         return (jlong)user_time * (1000000000 / clock_tics_per_sec);
-//       }
-//     }
-//     else proc_pid_cpu_avail = false;
-//   }
-// 
-//   // The /proc/<tid>/stat aggregates per-process usage on
-//   // new Linux kernels 2.6+ where NPTL is supported.
-//   // The /proc/self/task/<tid>/stat still has the per-thread usage.
-//   // See bug 6328462.
-//   // There can be no directory /proc/self/task on kernels 2.4 with NPTL
-//   // and possibly in some other cases, so we check its availability.
-//   if (proc_task_unchecked && os::Aix::is_NPTL()) {
-//     // This is executed only once
-//     proc_task_unchecked = false;
-//     fp = fopen("/proc/self/task", "r");
-//     if (fp != NULL) {
-//       proc_stat_path = "/proc/self/task/%d/stat";
-//       fclose(fp);
-//     }
-//   }
-// 
-//   sprintf(proc_name, proc_stat_path, tid);
-//   fp = fopen(proc_name, "r");
-//   if ( fp == NULL ) return -1;
-//   statlen = fread(stat, 1, 2047, fp);
-//   stat[statlen] = '\0';
-//   fclose(fp);
-// 
-//   // Skip pid and the command string. Note that we could be dealing with
-//   // weird command names, e.g. user could decide to rename java launcher
-//   // to "java 1.4.2 :)", then the stat file would look like
-//   //                1234 (java 1.4.2 :)) R ... ...
-//   // We don't really need to know the command string, just find the last
-//   // occurrence of ")" and then start parsing from there. See bug 4726580.
-//   s = strrchr(stat, ')');
-//   i = 0;
-//   if (s == NULL ) return -1;
-// 
-//   // Skip blank chars
-//   do s++; while (isspace(*s));
-// 
-//   count = sscanf(s,"%c %d %d %d %d %d %lu %lu %lu %lu %lu %lu %lu",
-//                  &cdummy, &idummy, &idummy, &idummy, &idummy, &idummy,
-//                  &ldummy, &ldummy, &ldummy, &ldummy, &ldummy,
-//                  &user_time, &sys_time);
-//   if ( count != 13 ) return -1;
-//   if (user_sys_cpu_time) {
-//     return ((jlong)sys_time + (jlong)user_time) * (1000000000 / clock_tics_per_sec);
-//   } else {
-//     return (jlong)user_time * (1000000000 / clock_tics_per_sec);
-//   }
-// }
-
 void os::current_thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
-  Unimplemented();
+  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
+  info_ptr->may_skip_backward = false;     // elapsed time not wall time
+  info_ptr->may_skip_forward = false;      // elapsed time not wall time
+  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
 }
 
 void os::thread_cpu_time_info(jvmtiTimerInfo *info_ptr) {
-  Unimplemented();
+  info_ptr->max_value = ALL_64_BITS;       // will not wrap in less than 64 bits
+  info_ptr->may_skip_backward = false;     // elapsed time not wall time
+  info_ptr->may_skip_forward = false;      // elapsed time not wall time
+  info_ptr->kind = JVMTI_TIMER_TOTAL_CPU;  // user+system time is returned
 }
 
 bool os::is_thread_cpu_time_supported() {
@@ -3455,11 +3996,35 @@
 }
 
 // System loadavg support.  Returns -1 if load average cannot be obtained.
-// Linux doesn't yet have a (official) notion of processor sets,
-// so just return the system wide load average.
-int os::loadavg(double loadavg[], int nelem) {
-  Unimplemented();
-  return 0;
+// For now just return the system wide load average (no processor sets).
+int os::loadavg(double values[], int nelem) {
+
+  // Implemented using libperfstat on AIX
+
+  guarantee(nelem >= 0 && nelem <= 3, "argument error");
+  guarantee(values, "argument error");
+
+  if (os::Aix::on_pase()) {
+    Unimplemented();
+  }
+  else  {
+    // AIX: use libperfstat
+    //
+    // See also:
+    // http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/perfstat_cputot.htm
+    // /usr/include/libperfstat.h:
+
+    // Use the already AIX version independent get_cpuinfo
+    os::Aix::cpuinfo_t ci;
+    if (os::Aix::get_cpuinfo(&ci)) {
+      for (int i = 0; i < nelem; i++) {
+        values[i] = ci.loadavg[i];
+      }
+    } else {
+      return -1;
+    }
+    return nelem;
+  }
 }
 
 void os::pause() {
@@ -3483,6 +4048,250 @@
   }
 }
 
+bool os::Aix::is_primordial_thread() {
+  if (pthread_self() == (pthread_t)1) {
+    return true;
+  } else {
+    return false;
+  }
+}
+
+// OS recognition (PASE/AIX, OS level). Call this before calling any of the
+// static functions Aix::on_pase() or Aix::os_version().
+void os::Aix::initialize_os_info() {
+
+  assert(_on_pase == -1 && _os_version == -1, "already called.");
+
+  struct utsname uts;
+  memset(&uts, 0, sizeof(uts));
+  strcpy(uts.sysname, "?");
+  if (::uname(&uts) == -1) {
+    fprintf(stderr, "uname failed (%d)", errno);
+    guarantee(0, "Could not determine whether we run on AIX or PASE");
+  } else {
+    if (Verbose) {
+      fprintf(stderr,"uname says: sysname \"%s\" version \"%s\" release \"%s\" "
+              "node \"%s\" machine \"%s\"\n",
+              uts.sysname, uts.version, uts.release, uts.nodename, uts.machine);
+    }
+    const int major = atoi(uts.version);
+    assert(major > 0, "invalid OS version");
+    const int minor = atoi(uts.release);
+    assert(minor > 0, "invalid OS release");
+    _os_version = (major << 8) | minor;
+    if (strcmp(uts.sysname, "OS400") == 0) {
+      Unimplemented();
+    } else if (strcmp(uts.sysname, "AIX") == 0) {
+      // We run on AIX. We do not support versions older than AIX 5.3.
+      _on_pase = 0;
+      if (_os_version < 0x0503) {
+        fprintf(stderr, "AIX release older than AIX 5.3 not supported.");
+        assert(false, "AIX release too old.");
+      } else {
+        if (Verbose) {
+          fprintf(stderr, "We run on AIX %d.%d\n", major, minor);
+        }
+      }
+    } else {
+      assert(false, "unknown OS");
+    }
+  }
+
+  guarantee(_on_pase != -1 && _os_version,
+        "Could not determine AIX/OS400 release");
+
+} // end: os::Aix::initialize_os_info()
+
+// Scan environment for important settings which might affect the VM.
+// Trace out settings. Warn about invalid settings and/or correct them.
+//
+// Must run after os::Aix::initialize_os_info().
+void os::Aix::scan_environment() {
+
+  char* p;
+  int rc;
+
+  // Warn explicitly if EXTSHM=ON is used. That switch changes how
+  // System V shared memory behaves. One effect is that the page size of
+  // shared memory cannot be changed dynamically, effectively preventing
+  // large pages from working.
+  // This switch was needed on AIX 32bit, but on AIX 64bit the general
+  // recommendation is (in OSS notes) to switch it off.
+  p = ::getenv("EXTSHM");
+  if (Verbose) {
+    fprintf(stderr, "EXTSHM=%s.", p ? p : "<unset>");
+  }
+  if (p && strcmp(p, "ON") == 0) {
+    fprintf(stderr, "Unsupported setting: EXTSHM=ON. Large Page support will be disabled.");
+    _extshm = 1;
+  } else {
+    _extshm = 0;
+  }
+
+  // SPEC1170 behaviour: will change the behaviour of a number of POSIX APIs.
+  // Not tested, not supported.
+  //
+  // Note that it might be worth the trouble to test and to require it, if only to
+  // get useful return codes for mprotect.
+  //
+  // Note: Setting XPG_SUS_ENV in the process is too late. Must be set earlier (before
+  // exec() ? before loading the libjvm ? ....)
+  p = ::getenv("XPG_SUS_ENV");
+  if (Verbose) {
+    fprintf(stderr, "XPG_SUS_ENV=%s.", p ? p : "<unset>");
+  }
+  if (p && strcmp(p, "ON") == 0) {
+    _xpg_sus_mode = 1;
+    fprintf(stderr, "Unsupported setting: XPG_SUS_ENV=ON");
+    // This is not supported. Worst of all, it changes behaviour of mmap MAP_FIXED to
+    // clobber address ranges. If we ever want to support that, we have to do some
+    // testing first.
+    guarantee(false, "XPG_SUS_ENV=ON not supported");
+  } else {
+    _xpg_sus_mode = 0;
+  }
+
+  // Switch off AIX internal (pthread) guard pages.  This has
+  // immediate effect for any pthread_create calls which follow.
+  p = ::getenv("AIXTHREAD_GUARDPAGES");
+  if (Verbose) {
+    fprintf(stderr, "AIXTHREAD_GUARDPAGES=%s.", p ? p : "<unset>");
+    fprintf(stderr, "setting AIXTHREAD_GUARDPAGES=0.");
+  }
+  rc = ::putenv("AIXTHREAD_GUARDPAGES=0");
+  guarantee(rc == 0, "");
+
+} // end: os::Aix::scan_environment()
+
+// PASE: initialize the libo4 library (AS400 PASE porting library).
+void os::Aix::initialize_libo4() {
+  Unimplemented();
+}
+
+// AIX: initialize the libperfstat library (we load this dynamically
+// because it is only available on AIX).
+void os::Aix::initialize_libperfstat() {
+
+  assert(os::Aix::on_aix(), "AIX only");
+
+  if (!libperfstat::init()) {
+    fprintf(stderr, "libperfstat initialization failed.");
+    assert(false, "libperfstat initialization failed");
+  } else {
+    if (Verbose) {
+      fprintf(stderr, "libperfstat initialized.");
+    }
+  }
+} // end: os::Aix::initialize_libperfstat
+
+/////////////////////////////////////////////////////////////////////////////
+// thread stack
+
+// function to query the current stack size using pthread_getthrds_np
+//
+// ! do not change anything here unless you know what you are doing !
+static void query_stack_dimensions(address* p_stack_base, size_t* p_stack_size) {
+
+  // This only works when invoked on a pthread. As we agreed not to use
+  // primordial threads anyway, this is asserted here.
+  guarantee(!os::Aix::is_primordial_thread(), "not allowed on the primordial thread");
+
+  // information about this api can be found (a) in the pthread.h header and
+  // (b) in http://publib.boulder.ibm.com/infocenter/pseries/v5r3/index.jsp?topic=/com.ibm.aix.basetechref/doc/basetrf1/pthread_getthrds_np.htm
+  //
+  // The use of this API to find out the current stack is kind of undefined.
+  // But after a lot of tries and asking IBM about it, I concluded that it is safe
+  // enough for cases where I let the pthread library create its stacks. For cases
+  // where I create an own stack and pass this to pthread_create, it seems not to
+  // work (the returned stack size in that case is 0).
+
+  pthread_t tid = pthread_self();
+  struct __pthrdsinfo pinfo;
+  char dummy[1]; // only needed to satisfy the API and to avoid an error return (presumably EINVAL) -- TODO confirm
+  int dummy_size = sizeof(dummy);
+  
+  memset(&pinfo, 0, sizeof(pinfo));
+
+  const int rc = pthread_getthrds_np (&tid, PTHRDSINFO_QUERY_ALL, &pinfo,
+                                      sizeof(pinfo), dummy, &dummy_size);
+  
+  if (rc != 0) {
+    fprintf(stderr, "pthread_getthrds_np failed (%d)", rc);
+    guarantee(0, "pthread_getthrds_np failed");
+  }
+  
+  guarantee(pinfo.__pi_stackend, "returned stack base invalid");
+
+  // the following can happen when invoking pthread_getthrds_np on a pthread running on a user provided stack
+  // (when handing down a stack to pthread create, see pthread_attr_setstackaddr).
+  // Not sure what to do here - I feel inclined to forbid this use case completely.
+  guarantee(pinfo.__pi_stacksize, "returned stack size invalid");
+
+  // On AIX, stacks are not necessarily page aligned so round the base and size accordingly
+  if (p_stack_base) {
+    (*p_stack_base) = (address) align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size());
+  }
+
+  if (p_stack_size) {
+    (*p_stack_size) = pinfo.__pi_stacksize - os::Aix::stack_page_size();
+  }
+
+#ifndef PRODUCT
+  if (Verbose) {
+    fprintf(stderr, 
+            "query_stack_dimensions() -> real stack_base=" INTPTR_FORMAT ", real stack_addr=" INTPTR_FORMAT 
+            ", real stack_size=" INTPTR_FORMAT 
+            ", stack_base=" INTPTR_FORMAT ", stack_size=" INTPTR_FORMAT "\n", 
+            (intptr_t)pinfo.__pi_stackend, (intptr_t)pinfo.__pi_stackaddr, pinfo.__pi_stacksize,
+            (intptr_t)align_size_up((intptr_t)pinfo.__pi_stackend, os::Aix::stack_page_size()), 
+            pinfo.__pi_stacksize - os::Aix::stack_page_size());
+  }
+#endif
+
+} // end query_stack_dimensions
+
+// get the current stack base from the OS (actually, the pthread library)
+address os::current_stack_base() {
+  address p;
+  query_stack_dimensions(&p, 0);
+  return p;
+}
+
+// get the current stack size from the OS (actually, the pthread library)
+size_t os::current_stack_size() {
+  size_t s;
+  query_stack_dimensions(0, &s);
+  return s;
+}
+
+// resolve an AIX function descriptor literal to a code pointer.
+// If the input is a valid code pointer to a text segment of a loaded module,
+//   it is returned unchanged.
+// If the input is a valid AIX function descriptor, it is resolved to the
+//   code entry point.
+// If the input is neither a valid function descriptor nor a valid code pointer,
+//   NULL is returned.
+static address resolve_function_descriptor_to_code_pointer(address p) {
+
+  const LoadedLibraryModule* lib = LoadedLibraries::find_for_text_address(p);
+  if (lib) {
+    // it's a real code pointer
+    return p;
+  } else {
+    lib = LoadedLibraries::find_for_data_address(p);
+    if (lib) {
+      // pointer to data segment, potential function descriptor
+      address code_entry = (address)(((FunctionDescriptor*)p)->entry());
+      if (LoadedLibraries::find_for_text_address(code_entry)) {
+        // It's a function descriptor
+        return code_entry;
+      }
+    }
+  }
+  return NULL;
+}
+
+
 
 // Refer to the comments in os_solaris.cpp park-unpark.
 //
@@ -3611,12 +4420,98 @@
 }
 
 int os::PlatformEvent::park(jlong millis) {
-  Unimplemented();
-  return 0;
+  guarantee (_nParked == 0, "invariant");
+
+  int v;
+  for (;;) {
+      v = _Event;
+      if (Atomic::cmpxchg (v-1, &_Event, v) == v) break;
+  }
+  guarantee (v >= 0, "invariant");
+  if (v != 0) return OS_OK;
+
+  // We do this the hard way, by blocking the thread.
+  // Consider enforcing a minimum timeout value.
+  struct timespec abst;
+  compute_abstime(&abst, millis);
+
+  int ret = OS_TIMEOUT;
+  int status = pthread_mutex_lock(_mutex);
+  assert_status(status == 0, status, "mutex_lock");
+  guarantee (_nParked == 0, "invariant");
+  ++_nParked;
+
+  // Object.wait(timo) will return because of
+  // (a) notification
+  // (b) timeout
+  // (c) thread.interrupt
+  //
+  // Thread.interrupt and object.notify{All} both call Event::set.
+  // That is, we treat thread.interrupt as a special case of notification.
+  // The underlying Solaris implementation, cond_timedwait, admits
+  // spurious/premature wakeups, but the JLS/JVM spec prevents the
+  // JVM from making those visible to Java code.  As such, we must
+  // filter out spurious wakeups.  We assume all ETIME returns are valid.
+  //
+  // TODO: properly differentiate simultaneous notify+interrupt.
+  // In that case, we should propagate the notify to another waiter.
+
+  while (_Event < 0) {
+    status = pthread_cond_timedwait(_cond, _mutex, &abst);
+    assert_status(status == 0 || status == ETIMEDOUT,
+          status, "cond_timedwait");
+    if (!FilterSpuriousWakeups) break;         // previous semantics
+    if (status == ETIMEDOUT) break;
+    // We consume and ignore EINTR and spurious wakeups.
+  }
+  --_nParked;
+  if (_Event >= 0) {
+     ret = OS_OK;
+  }
+  _Event = 0;
+  status = pthread_mutex_unlock(_mutex);
+  assert_status(status == 0, status, "mutex_unlock");
+  assert (_nParked == 0, "invariant");
+  return ret;
 }
 
 void os::PlatformEvent::unpark() {
-  Unimplemented();
+  int v, AnyWaiters;
+  for (;;) {
+    v = _Event;
+    if (v > 0) {
+      // The LD of _Event could have reordered or be satisfied
+      // by a read-aside from this processor's write buffer.
+      // To avoid problems execute a barrier and then
+      // ratify the value.
+      OrderAccess::fence();
+      if (_Event == v) return;
+      continue;
+    }
+    if (Atomic::cmpxchg (v+1, &_Event, v) == v) break;
+  }
+  if (v < 0) {
+    // Wait for the thread associated with the event to vacate
+    int status = pthread_mutex_lock(_mutex);
+    assert_status(status == 0, status, "mutex_lock");
+    AnyWaiters = _nParked;
+
+    if (AnyWaiters != 0) {
+      // We intentionally signal *after* dropping the lock
+      // to avoid a common class of futile wakeups.
+      status = pthread_cond_signal(_cond);
+      assert_status(status == 0, status, "cond_signal");
+    }
+    // Mutex should be locked for  pthread_cond_signal(_cond);
+    status = pthread_mutex_unlock(_mutex);
+    assert_status(status == 0, status, "mutex_unlock");
+  }
+
+  // Note that we signal() _after dropping the lock for "immortal" Events.
+  // This is safe and avoids a common class of  futile wakeups.  In rare
+  // circumstances this can cause a thread to return prematurely from
+  // cond_{timed}wait() but the spurious wakeup is benign and the victim will
+  // simply re-test the condition and re-park itself.
 }
 
 
@@ -3813,14 +4708,6 @@
 
 extern char** environ;
 
-// #ifndef __NR_fork
-// #define __NR_fork IA32_ONLY(2) IA64_ONLY(not defined) AMD64_ONLY(57)
-// #endif
-// 
-// #ifndef __NR_execve
-// #define __NR_execve IA32_ONLY(11) IA64_ONLY(1033) AMD64_ONLY(59)
-// #endif
-
 // Run the specified command in a separate process. Return its exit value,
 // or -1 on failure (e.g. can't fork a new process).
 // Unlike system(), this function can be called from signal handler. It
@@ -3832,91 +4719,38 @@
 
 // is_headless_jre()
 //
-// Test for the existence of xawt/libmawt.so or libawt_xawt.so
+// Test for the existence of libmawt in motif21 or xawt directories
 // in order to report if we are running in a headless jre
-//
-// Since JDK8 xawt/libmawt.so was moved into the same directory
-// as libawt.so, and renamed libawt_xawt.so
-
 bool os::is_headless_jre() {
-  Unimplemented();
+  struct stat statbuf;
+  char buf[MAXPATHLEN];
+  char libmawtpath[MAXPATHLEN];
+  const char *xawtstr  = "/xawt/libmawt.so";
+  const char *motifstr = "/motif21/libmawt.so";
+  char *p;
+
+  // Get path to libjvm.so
+  os::jvm_path(buf, sizeof(buf));
+
+  // Get rid of libjvm.so
+  p = strrchr(buf, '/');
+  if (p == NULL) return false;
+  else *p = '\0';
+
+  // Get rid of client or server
+  p = strrchr(buf, '/');
+  if (p == NULL) return false;
+  else *p = '\0';
+
+  // check xawt/libmawt.so
+  strcpy(libmawtpath, buf);
+  strcat(libmawtpath, xawtstr);
+  if (::stat(libmawtpath, &statbuf) == 0) return false;
+
+  // check motif21/libmawt.so
+  strcpy(libmawtpath, buf);
+  strcat(libmawtpath, motifstr);
+  if (::stat(libmawtpath, &statbuf) == 0) return false;
+
   return true;
 }
-
-
-// #ifdef JAVASE_EMBEDDED
-// //
-// // A thread to watch the '/dev/mem_notify' device, which will tell us when the OS is running low on memory.
-// //
-// MemNotifyThread* MemNotifyThread::_memnotify_thread = NULL;
-// 
-// // ctor
-// //
-// MemNotifyThread::MemNotifyThread(int fd): Thread() {
-//   assert(memnotify_thread() == NULL, "we can only allocate one MemNotifyThread");
-//   _fd = fd;
-// 
-//   if (os::create_thread(this, os::os_thread)) {
-//     _memnotify_thread = this;
-//     os::set_priority(this, NearMaxPriority);
-//     os::start_thread(this);
-//   }
-// }
-// 
-// // Where all the work gets done
-// //
-// void MemNotifyThread::run() {
-//   assert(this == memnotify_thread(), "expected the singleton MemNotifyThread");
-// 
-//   // Set up the select arguments
-//   fd_set rfds;
-//   if (_fd != -1) {
-//     FD_ZERO(&rfds);
-//     FD_SET(_fd, &rfds);
-//   }
-// 
-//   // Now wait for the mem_notify device to wake up
-//   while (1) {
-//     // Wait for the mem_notify device to signal us..
-//     int rc = select(_fd+1, _fd != -1 ? &rfds : NULL, NULL, NULL, NULL);
-//     if (rc == -1) {
-//       perror("select!\n");
-//       break;
-//     } else if (rc) {
-//       //ssize_t free_before = os::available_memory();
-//       //tty->print ("Notified: Free: %dK \n",os::available_memory()/1024);
-// 
-//       // The kernel is telling us there is not much memory left...
-//       // try to do something about that
-// 
-//       // If we are not already in a GC, try one.
-//       if (!Universe::heap()->is_gc_active()) {
-//         Universe::heap()->collect(GCCause::_allocation_failure);
-// 
-//         //ssize_t free_after = os::available_memory();
-//         //tty->print ("Post-Notify: Free: %dK\n",free_after/1024);
-//         //tty->print ("GC freed: %dK\n", (free_after - free_before)/1024);
-//       }
-//       // We might want to do something like the following if we find the GC's are not helping...
-//       // Universe::heap()->size_policy()->set_gc_time_limit_exceeded(true);
-//     }
-//   }
-// }
-// 
-// //
-// // See if the /dev/mem_notify device exists, and if so, start a thread to monitor it.
-// //
-// void MemNotifyThread::start() {
-//   int    fd;
-//   fd = open ("/dev/mem_notify", O_RDONLY, 0);
-//   if (fd < 0) {
-//       return;
-//   }
-// 
-//   if (memnotify_thread() == NULL) {
-//     new MemNotifyThread(fd);
-//   }
-// }
-// #endif // JAVASE_EMBEDDED
-// 
-
--- a/src/os/aix/vm/os_aix.hpp	Wed Sep 26 16:54:25 2012 +0200
+++ b/src/os/aix/vm/os_aix.hpp	Mon Oct 22 18:15:38 2012 +0200
@@ -34,6 +34,9 @@
   // For signal-chaining
   // highest so far (AIX 5.2) is SIGSAK (63)
 #define MAXSIGNUM 63
+  // length of strings included in the libperfstat structures
+#define IDENTIFIER_LENGTH 64
+
   static struct sigaction sigact[MAXSIGNUM]; // saved preinstalled sigactions
   static unsigned int sigs;             // mask of signals that have
                                         // preinstalled signal handlers
@@ -48,18 +51,103 @@
   // For signal flags diagnostics
   static int sigflags[MAXSIGNUM];
 
-
  protected:
 
   static julong _physical_memory;
   static pthread_t _main_thread;
   static Mutex* _createThread_lock;
   static int _page_size;
+  static int _logical_cpus;
+
+  // -1 = uninitialized, 0 = AIX, 1 = OS/400 (PASE)
+  static int _on_pase;
+
+  // -1 = uninitialized, otherwise 16 bit number:
+  //  lower 8 bit - minor version
+  //  higher 8 bit - major version
+  //  For AIX, e.g. 0x0601 for AIX 6.1
+  //  for OS/400 e.g. 0x0504 for OS/400 V5R4
+  static int _os_version;
+
+  // -1 = uninitialized, 
+  //  0 - SPEC1170 not requested (XPG_SUS_ENV is OFF or not set)
+  //  1 - SPEC1170 requested (XPG_SUS_ENV is ON) 
+  static int _xpg_sus_mode;
+
+  // -1 = uninitialized, 
+  //  0 - EXTSHM=OFF or not set
+  //  1 - EXTSHM=ON
+  static int _extshm;
+
+  // page sizes on AIX.
+  //  
+  //  AIX supports four different page sizes - 4K, 64K, 16MB, 16GB. The latter two 
+  //  (16M "large" and 16G "huge" pages, respectively) require special setup and are
+  //  normally not available. 
+  //
+  //  AIX supports multiple page sizes per process, for:
+  //  - Stack (of the primordial thread, so not relevant for us)
+  //  - Data - data, bss, heap, for us also pthread stacks
+  //  - Text - text code
+  //  - shared memory
+  //  
+  //  Default page sizes can be set via linker options (-bdatapsize, -bstacksize, ...)
+  //  and via environment variable LDR_CNTRL (DATAPSIZE, STACKPSIZE, ...)
+  //
+  //  For shared memory, page size can be set dynamically via shmctl(). Different shared memory
+  //  regions can have different page sizes.
+  //  
+  //  More information can be found at the IBM info center: 
+  //   http://publib.boulder.ibm.com/infocenter/aix/v6r1/index.jsp?topic=/com.ibm.aix.prftungd/doc/prftungd/multiple_page_size_app_support.htm
+  //
+  // -----
+  //  We want to support 4K and 64K and, if the machine is set up correctly, 16MB pages.
+  //
+
+  // page size of the stack of newly created pthreads
+  // (should be LDR_CNTRL DATAPSIZE because stack is allocated on heap by pthread lib)
+  static int _stack_page_size;
+
+  // default shm page size. Read: what page size shared memory will be backed
+  // with if no page size was set explicitly using shmctl(SHM_PAGESIZE). 
+  // Should be LDR_CNTRL SHMPSIZE.
+  static size_t _shm_default_page_size;
+
+  // True if sys V shm can be used with 64K pages dynamically.
+  // (via shmctl(.. SHM_PAGESIZE..). Should be true for AIX 53 and
+  // newer / PASE V6R1 and newer. (0 or 1, -1 if not initialized)
+  static int _can_use_64K_pages;
+  
+  // True if sys V shm can be used with 16M pages dynamically.
+  // (via shmctl(.. SHM_PAGESIZE..). Only true on AIX 5.3 and
+  // newer, if the system was set up to use 16M pages and the
+  // jvm has enough user rights. (0 or 1, -1 if not initialized)
+  static int _can_use_16M_pages;
 
   static julong available_memory();
   static julong physical_memory() { return _physical_memory; }
   static void initialize_system_info();
 
+  // OS recognitions (PASE/AIX, OS level) call this before calling any
+  // one of Aix::on_pase(), Aix::os_version().
+  static void initialize_os_info();
+
+  // Scan environment for important settings which might affect the
+  // VM. Trace out settings. Warn about invalid settings and/or
+  // correct them.
+  //
+  // Must run after os::Aix::initialize_os_info().
+  static void scan_environment();
+
+  // Retrieve information about multipage size support. Will initialize
+  // _page_size, _stack_page_size, _can_use_64K_pages/_can_use_16M_pages
+  static void query_multipage_support();
+
+  // Initialize libo4 (on PASE) and libperfstat (on AIX). Call this
+  // before relying on functions from either lib, e.g. Aix::get_meminfo().
+  static void initialize_libo4();
+  static void initialize_libperfstat();
+
   static bool supports_variable_stack_size();
 
  public:
@@ -72,15 +160,56 @@
   static Mutex* createThread_lock(void)                             { return _createThread_lock; }
   static void hotspot_sigmask(Thread* thread);
 
-  static int page_size(void)                                        { return _page_size; }
-  static void set_page_size(int val)                                { _page_size = val; }
+  // Given an address, returns the size of the page backing that address
+  static size_t query_pagesize(void* p);
+
+  // Return `true' if the calling thread is the primordial thread. The
+  // primordial thread is the thread which contains the main function,
+  // *not* necessarily the thread which initialized the VM by calling
+  // JNI_CreateJavaVM.
+  static bool is_primordial_thread(void);
+
+  static int page_size(void) {
+    assert(_page_size != -1, "not initialized");
+    return _page_size; 
+  }
+
+  // Accessor methods for stack page size which may be different from usual page size.
+  static int stack_page_size(void) {
+    assert(_stack_page_size != -1, "not initialized");
+    return _stack_page_size; 
+  }
+
+  // default shm page size. Read: what page size shared memory
+  // will be backed with if no page size was set explicitly using shmctl(SHM_PAGESIZE). 
+  // Should be LDR_CNTRL SHMPSIZE. 
+  static int shm_default_page_size(void) {
+    assert(_shm_default_page_size != -1, "not initialized");
+    return _shm_default_page_size; 
+  }
+
+  // Return true if sys V shm can be used with 64K pages dynamically
+  // (via shmctl(.. SHM_PAGESIZE..).
+  static bool can_use_64K_pages () {
+    assert(_can_use_64K_pages != -1,  "not initialized");
+    return _can_use_64K_pages == 1 ? true : false;
+  }
+  
+  // Return true if sys V shm can be used with 16M pages dynamically.
+  // (via shmctl(.. SHM_PAGESIZE..). 
+  static bool can_use_16M_pages () {
+    assert(_can_use_16M_pages != -1,  "not initialized");
+    return _can_use_16M_pages == 1 ? true : false;
+  }
 
   static address   ucontext_get_pc(ucontext_t* uc);
   static intptr_t* ucontext_get_sp(ucontext_t* uc);
   static intptr_t* ucontext_get_fp(ucontext_t* uc);
+  // Set PC into context. Needed for continuation after signal.
+  static void ucontext_set_pc(ucontext_t* uc, address pc);
 
   // This boolean allows users to forward their own non-matching signals
-  // to JVM_handle_linux_signal, harmlessly.
+  // to JVM_handle_aix_signal, harmlessly.
   static bool signal_handlers_are_installed;
 
   static int get_our_sigflags(int);
@@ -98,6 +227,9 @@
   static struct sigaction *get_chained_signal_action(int sig);
   static bool chained_handler(int sig, siginfo_t* siginfo, void* context);
 
+  // libpthread version string
+  static void libpthread_init();
+
   // Minimum stack size a thread can be created with (allowing
   // the VM to completely create the thread and enter user code)
   static size_t min_stack_allowed;
@@ -106,6 +238,89 @@
   static size_t default_stack_size(os::ThreadType thr_type);
   static size_t default_guard_size(os::ThreadType thr_type);
 
+  // Function returns true if we run on OS/400 (pase), false if we run
+  // on AIX.
+  static bool on_pase() { 
+    assert(_on_pase != -1, "not initialized");
+    return _on_pase ? true : false; 
+  }
+
+  // Function returns true if we run on AIX, false if we run on OS/400
+  // (pase).
+  static bool on_aix() { 
+    assert(_on_pase != -1, "not initialized");
+    return _on_pase ? false : true; 
+  }
+
+  // -1 = uninitialized, otherwise 16 bit number:
+  // lower 8 bit - minor version
+  // higher 8 bit - major version
+  // For AIX, e.g. 0x0601 for AIX 6.1
+  // for OS/400 e.g. 0x0504 for OS/400 V5R4
+  static int os_version () {
+    assert(_os_version != -1, "not initialized");
+    return _os_version;
+  }
+
+  // Convenience method: returns true if running on AIX 5.3 or older.
+  static bool on_aix_53_or_older() {
+    return on_aix() && os_version() <= 0x0503;
+  }
+
+  // Returns true if we run in SPEC1170 compliant mode (XPG_SUS_ENV=ON).
+  static bool xpg_sus_mode() {
+    assert(_xpg_sus_mode != -1, "not initialized");
+    return _xpg_sus_mode;
+  }
+
+  // Returns true if EXTSHM=ON.
+  static bool extshm() {
+    assert(_extshm != -1, "not initialized");
+    return _extshm;
+  }
+
+  // result struct for get_meminfo()
+  struct meminfo_t {
+
+    // Amount of virtual memory (in units of 4 KB pages)
+    unsigned long long virt_total; 
+
+    // Amount of real memory, in bytes
+    unsigned long long real_total;
+
+    // Amount of free real memory, in bytes
+    unsigned long long real_free;
+
+    // Total amount of paging space, in bytes
+    unsigned long long pgsp_total;
+
+    // Amount of free paging space, in bytes
+    unsigned long long pgsp_free;
+
+  };
+
+  // Result struct for get_cpuinfo()
+  struct cpuinfo_t {
+    char description[IDENTIFIER_LENGTH];  /* processor description (type/official name) */
+    u_longlong_t processorHZ;             /* processor speed in Hz */
+    int ncpus;                            /* number of active logical processors */
+    double loadavg[3];                    /* (1<<SBITS) times the average number of runnable processes during the last 1, 5 and 15 minutes.     */
+	                                  /* To calculate the load average, divide the numbers by (1<<SBITS). SBITS is defined in <sys/proc.h>. */
+    char version[20];                     /* processor version from _system_configuration (sys/systemcfg.h) */
+  };
+
+  // Functions to retrieve memory information on AIX, PASE. 
+  // (on AIX, using libperfstat, on PASE with libo4.so).
+  // Returns true if ok, false if error. 
+  static bool get_meminfo(meminfo_t* pmi);
+
+  // Function to retrieve cpu information on AIX
+  // (on AIX, using libperfstat)
+  // Returns true if ok, false if error.
+  static bool get_cpuinfo(cpuinfo_t* pci);
+
+  // suspend/resume support taken from Java6/Linux
+  // Original comment:
   // Linux suspend/resume support - this helper is a shadow of its former
   // self now that low-level suspension is barely used, and old workarounds
   // for LinuxThreads are no longer needed.
--- a/src/os/aix/vm/perfMemory_aix.cpp	Wed Sep 26 16:54:25 2012 +0200
+++ b/src/os/aix/vm/perfMemory_aix.cpp	Mon Oct 22 18:15:38 2012 +0200
@@ -636,7 +636,7 @@
     return -1;
   }
 
-  Unimplemented();
+  return fd;
 }
 
 // open the shared memory file for the given user and vmid. returns
@@ -738,7 +738,11 @@
 // release a named shared memory region
 //
 static void unmap_shared(char* addr, size_t bytes) {
-  Unimplemented();
+  // Do not rely on os::reserve_memory/os::release_memory to use mmap.
+  // Use os::reserve_memory/os::release_memory for PerfDisableSharedMem=1, mmap/munmap for PerfDisableSharedMem=0
+  if (::munmap(addr, bytes) == -1) {
+    warning("perfmemory: munmap failed (%d)\n", errno);
+  }
 }
 
 // create the PerfData memory region in shared memory.
--- a/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp	Wed Sep 26 16:54:25 2012 +0200
+++ b/src/os_cpu/aix_ppc/vm/os_aix_ppc.cpp	Mon Oct 22 18:15:38 2012 +0200
@@ -84,27 +84,41 @@
   return (char*) -1;
 }
 
+// OS specific thread initialization
+//
+// Calculate and store the limits of the memory stack.  
+//
 void os::initialize_thread() {
-  Unimplemented();
+  JavaThread* thread = (JavaThread *)Thread::current();
+  assert(thread != NULL,"Sanity check");
+  if ( !thread->is_Java_thread() ) return;
+
+  // Initialize the memory stack limit
+  address mem_stk_limit = thread->stack_yellow_zone_base() + 
+      (os::vm_page_size() * StackShadowPages);
+  thread->set_memory_stack_limit(mem_stk_limit);
 }
 
 // Frame information (pc, sp, fp) retrieved via ucontext 
 // always looks like a C-frame according to the frame
 // conventions in frame_ppc64.hpp.
 address os::Aix::ucontext_get_pc(ucontext_t * uc) {
-  Unimplemented();
-  return NULL;
+  return (address)uc->uc_mcontext.jmp_context.iar;
 }
 
 intptr_t* os::Aix::ucontext_get_sp(ucontext_t * uc) {
-  Unimplemented();
-  return NULL;
+  // gpr1 holds the stack pointer on aix
+  return (intptr_t*)uc->uc_mcontext.jmp_context.gpr[1/*REG_SP*/];
 }
 
 intptr_t* os::Aix::ucontext_get_fp(ucontext_t * uc) {
   return NULL;
 }
 
+void os::Aix::ucontext_set_pc(ucontext_t* uc, address new_pc) {
+  uc->uc_mcontext.jmp_context.iar = (uint64_t) new_pc;
+}
+
 ExtendedPC os::fetch_frame_from_context(void* ucVoid,
                                         intptr_t** ret_sp, intptr_t** ret_fp) {
 
@@ -158,11 +172,316 @@
 // Utility functions
 
 extern "C" JNIEXPORT int
-JVM_handle_aix_signal(int sig,
-                      siginfo_t* info,
-                      void* ucVoid,
-                      int abort_if_unrecognized) {
-  Unimplemented(); // TODO: PPC port
+JVM_handle_aix_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrecognized) {
+
+  ucontext_t* uc = (ucontext_t*) ucVoid;
+
+  Thread* t = ThreadLocalStorage::get_thread_slow();   // slow & steady
+
+  SignalHandlerMark shm(t);
+
+  // Note: it's not uncommon that JNI code uses signal/sigset to install
+  // then restore certain signal handler (e.g. to temporarily block SIGPIPE,
+  // or have a SIGILL handler when detecting CPU type). When that happens,
+  // JVM_handle_aix_signal() might be invoked with junk info/ucVoid. To 
+  // avoid unnecessary crash when libjsig is not preloaded, try handle signals 
+  // that do not require siginfo/ucontext first.
+
+  if (sig == SIGPIPE) {
+    if (os::Aix::chained_handler(sig, info, ucVoid)) {
+      return 1;
+    } else {
+      if (PrintMiscellaneous && (WizardMode || Verbose)) {
+        warning("Ignoring SIGPIPE - see bug 4229104");
+      }
+      return 1;
+    }
+  }
+
+  JavaThread* thread = NULL;
+  VMThread* vmthread = NULL;
+  if (os::Aix::signal_handlers_are_installed) {
+    if (t != NULL ){
+      if(t->is_Java_thread()) {
+        thread = (JavaThread*)t;
+      }
+      else if(t->is_VM_thread()){
+        vmthread = (VMThread *)t;
+      }
+    }
+  }
+
+  // decide if this trap can be handled by a stub
+  address stub = NULL;
+  
+  // retrieve program counter
+  address const pc = uc ? os::Aix::ucontext_get_pc(uc) : NULL;
+
+  // retrieve crash address
+  address const addr = info ? (const address) info->si_addr : NULL;
+  
+  // SafeFetch 32 handling:
+  // - make it work if _thread is null
+  // - make it use the standard os::...::ucontext_get/set_pc APIs
+  if (uc) {
+    address const pc = os::Aix::ucontext_get_pc(uc);
+    if (pc && StubRoutines::is_safefetch_fault(pc)) {
+       os::Aix::ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
+       return true; 
+    }
+  }
+
+  // Handle SIGDANGER right away. AIX would raise SIGDANGER whenever available swap
+  // space falls below 30%. This is only a chance for the process to gracefully abort.
+  // We can't hope to proceed after SIGDANGER since SIGKILL tailgates.
+  if (sig == SIGDANGER) {
+    goto report_and_die;
+  }
+  
+  if (info == NULL || uc == NULL || thread == NULL && vmthread == NULL) {
+    goto run_chained_handler;
+  }
+
+  // If we are a java thread...
+  if (thread != NULL) {
+
+    // Handle ALL stack overflow variations here
+    if (sig == SIGSEGV && (addr < thread->stack_base() &&
+                           addr >= thread->stack_base() - thread->stack_size())) {
+      // stack overflow
+      //
+      // if we are in a yellow zone and we are inside java, we disable the yellow zone and 
+      // throw a stack overflow exception.
+      // If we are in native code or VM C code, we report-and-die. The original coding tried
+      // to continue with yellow zone disabled, but that doesn't buy us much and prevents
+      // hs_err_pid files.
+      if (thread->in_stack_yellow_zone(addr)) {
+        thread->disable_stack_yellow_zone();
+        if (thread->thread_state() == _thread_in_Java) {
+          // Throw a stack overflow exception. 
+          // Guard pages will be reenabled while unwinding the stack.
+          stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::STACK_OVERFLOW);
+          goto run_stub;
+        } else {
+          // Thread was in the vm or native code.  Return and try to finish.
+          return 1;
+        }
+      } else if (thread->in_stack_red_zone(addr)) {
+        // Fatal red zone violation.  Disable the guard pages and fall through
+        // to handle_unexpected_exception way down below.
+        thread->disable_stack_red_zone();
+        tty->print_raw_cr("An irrecoverable stack overflow has occurred.");
+        goto report_and_die;
+      } else {
+        // this means a segv happened inside our stack, but not in
+        // the guarded zone. I'd like to know when this happens,
+        tty->print_raw_cr("SIGSEGV happened inside stack but outside yellow and red zone.");
+        goto report_and_die;
+      }
+      
+    } // end handle SIGSEGV inside stack boundaries
+
+    if (thread->thread_state() == _thread_in_Java) {
+
+      // Java thread running in Java code   
+
+      // the VM uses signals to handle uncommon situations
+      // like nullpointer exceptions and the such.
+      // Accessing a null ptr reference would lead to a signal which in turn would
+      // be recognized as a special, non-error situation by the signal handler. 
+      // The signal handler delegates control in those cases to the piece of coding
+      // which will throw the corresponding java exception.
+
+      // The following signals are used for communicating VM events:
+      //
+      // SIGILL: the compiler generates illegal opcodes 
+      //   at places where it wishes to interrupt the VM:
+      //   Safepoints, Unreachable Code, Entry points of Zombie methods, 
+      //    This results in a SIGILL with (*pc) == inserted illegal instruction.
+      //   
+      //   (so, SIGILLs with a pc inside the zero page are real errors)
+      //
+      // SIGTRAP: 
+      //   used for Zombie methods
+      //   used for ImplicitNullChecks:
+      //    (ImplicitNullChecks are done on aix before/after a load of a java reference
+      //    to generate a signal if the reference is Null.
+      //    On AIX, one can read from the zero page without getting an exception.
+      //     Therefore the jit generates null check traps before/after the loads)
+      //   used for IC (inline cache) misses.
+      //
+      // (Note: !UseSIGTRAP is used to prevent SIGTRAPS altogether, to facilitate debugging.
+      // In release code SIGTRAP is a good idea though, because it is easy to produce 
+      // with trap op codes)
+      //
+      // SIGSEGV: 
+      //   used for safe point polling:
+      //   To notify all threads that they have to reach a safe point, safe point polling is used:
+      //   All threads poll a certain mapped memory page. Normally, this page has read access.
+      //   If the VM wants to inform the threads about impending safe points, it puts this
+      //   page to read only ("poisons" the page), and the threads then reach a safe point.
+      //
+      //
+
+      if (sig == SIGILL && (pc < (address) 0x200)) {
+        if (Verbose)
+          tty->print_raw_cr("SIGILL happened inside zero page.");
+        goto report_and_die;          
+      }
+
+      // handle signal from NativeJump::patch_verified_entry()
+      if (( UseSIGTRAP && sig == SIGTRAP && nativeInstruction_at(pc)->is_sigtrap_zombie_not_entrant()) ||
+          (!UseSIGTRAP && sig == SIGILL  && nativeInstruction_at(pc)->is_sigill_zombie_not_entrant())) {
+        if (Verbose)
+          tty->print_cr("trap: zombie_not_entrant (%s)", (sig == SIGTRAP) ? "SIGTRAP" : "SIGILL");
+        stub = SharedRuntime::get_handle_wrong_method_stub();
+        goto run_stub;
+      }
+      
+      else if (sig == SIGSEGV && os::is_poll_address(addr)) {
+
+        if (Verbose) {
+          tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (SIGSEGV)", pc);
+	}
+	stub = SharedRuntime::get_poll_stub(pc);
+        goto run_stub;
+      }
+
+      // SIGTRAP-based ic miss check in compiled code
+      else if (sig == SIGTRAP && UseSIGTRAP &&
+               nativeInstruction_at(pc)->is_sigtrap_ic_miss_check()) {
+        if (Verbose) {
+          tty->print_cr("trap: ic_miss_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
+        }
+        stub = SharedRuntime::get_ic_miss_stub();
+        goto run_stub;
+      }
+      
+      // SIGTRAP-based implicit null check in compiled code (on AIX)
+      else if (sig == SIGTRAP && UseSIGTRAP && ImplicitNullChecks &&
+               nativeInstruction_at(pc)->is_sigtrap_null_check()) {
+        if (Verbose) {
+          tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
+        }
+        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
+        goto run_stub;
+      }
+
+      // We support SIGSEGV-based null checks on stores now
+      // SIGSEGV-based implicit null check in compiled code (on AIX)
+      else if (sig == SIGSEGV && ImplicitNullChecks &&
+               CodeCache::contains((void*) pc) &&
+               !MacroAssembler::needs_explicit_null_check((intptr_t) info->si_addr)) {
+        if (Verbose) {
+          tty->print_cr("trap: null_check at " INTPTR_FORMAT " (SIGSEGV)", pc);
+        }
+        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_NULL);
+      }
+
+      // SIGTRAP-based implicit range check in compiled code
+      else if (sig == SIGTRAP && ImplicitRangeChecks &&
+               nativeInstruction_at(pc)->is_sigtrap_range_check()) {
+        if (Verbose) {
+          tty->print_cr("trap: range_check at " INTPTR_FORMAT " (SIGTRAP)", pc);
+        }
+        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_RANGE_CHECK);
+        goto run_stub;
+      }
+
+      else if (sig == SIGFPE /* && info->si_code == FPE_INTDIV */) {
+        if (Verbose) {
+          tty->print_raw_cr("Fix SIGFPE handler, trying divide by zero handler.");
+        }
+        stub = SharedRuntime::continuation_for_implicit_exception(thread, pc, SharedRuntime::IMPLICIT_DIVIDE_BY_ZERO);
+        goto run_stub;
+      }
+      
+      else if (sig == SIGBUS) {
+        // BugId 4454115: A read from a MappedByteBuffer can fault
+        // here if the underlying file has been truncated.
+        // Do not crash the VM in such a case.
+        CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
+        nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL;
+        if (nm != NULL && nm->has_unsafe_access()) {
+          // We don't really need a stub here! Just set the pending exception and
+          // continue at the next instruction after the faulting read. Returning
+          // garbage from this read is ok.
+          thread->set_pending_unsafe_access_error();  
+          uc->uc_mcontext.jmp_context.iar = ((unsigned long)pc) + 4;
+          return 1;
+        }  
+      }    
+    }
+    
+    else { // thread->thread_state() != _thread_in_Java
+      // detect CPU features. This is only done at the very start of the VM. Later, the 
+      //  VM_Version::is_determine_features_test_running() flag should be false.
+
+      if (sig == SIGILL && VM_Version::is_determine_features_test_running()) {
+        // SIGILL must be caused by VM_Version::determine_features().
+        *(int *)pc = 0; // patch instruction to 0 to indicate that it causes a SIGILL, 
+                        // flushing of icache is not necessary.
+        stub = pc + 4;  // continue with next instruction.
+        goto run_stub;
+      }
+      else if (thread->thread_state() == _thread_in_vm &&
+               sig == SIGBUS && thread->doing_unsafe_access()) {
+        // We don't really need a stub here! Just set the pending exception and
+        // continue at the next instruction after the faulting read. Returning
+        // garbage from this read is ok.
+        thread->set_pending_unsafe_access_error();  
+        uc->uc_mcontext.jmp_context.iar = ((unsigned long)pc) + 4;
+        return 1;
+      }
+    }
+
+    // Check to see if we caught the safepoint code in the
+    // process of write protecting the memory serialization page.
+    // It write enables the page immediately after protecting it
+    // so we can just return to retry the write.
+    if ((sig == SIGSEGV) &&
+        os::is_memory_serialize_page(thread, addr)) {
+      // Synchronization problem in the pseudo memory barrier code (bug id 6546278)
+      // Block current thread until the memory serialize page permission restored.
+      os::block_on_serialize_page_trap();
+      return true;
+    }
+  }
+
+run_stub:
+
+  // one of the above code blocks initialized the stub, so we want to 
+  // delegate control to that stub
+  if (stub != NULL) {
+    // save all thread context in case we need to restore it
+    thread->set_saved_exception_pc(pc);
+    uc->uc_mcontext.jmp_context.iar = (unsigned long)stub;
+    return 1;
+  }
+
+run_chained_handler:
+  
+  // signal-chaining
+  if (os::Aix::chained_handler(sig, info, ucVoid)) {
+    return 1;
+  }
+  if (!abort_if_unrecognized) {
+    // caller wants another chance, so give it to him
+    return 0;
+  }
+
+report_and_die:
+
+  // Use sigthreadmask instead of sigprocmask on AIX and unmask current signal
+  sigset_t newset;
+  sigemptyset(&newset);
+  sigaddset(&newset, sig);
+  sigthreadmask(SIG_UNBLOCK, &newset, NULL);
+
+  VMError err(t, sig, pc, info, ucVoid);
+  err.report_and_die();
+
+  ShouldNotReachHere();
   return 0;
 }
 
@@ -199,18 +518,6 @@
   return 2 * page_size();
 }
 
-// Get the current stack base from the OS (actually, the pthread library).
-address os::current_stack_base() {
-  Unimplemented();
-  return NULL;
-}
-
-// Get the current stack size from the OS (actually, the pthread library).
-size_t os::current_stack_size() {
-  Unimplemented();
-  return NULL;
-}
-
 /////////////////////////////////////////////////////////////////////////////
 // helper functions for fatal error handler
 
--- a/src/share/vm/runtime/sharedRuntime.hpp	Wed Sep 26 16:54:25 2012 +0200
+++ b/src/share/vm/runtime/sharedRuntime.hpp	Mon Oct 22 18:15:38 2012 +0200
@@ -179,6 +179,7 @@
   enum ImplicitExceptionKind {
     IMPLICIT_NULL,
     IMPLICIT_DIVIDE_BY_ZERO,
+    IMPLICIT_RANGE_CHECK,
     STACK_OVERFLOW
   };
   static void    throw_AbstractMethodError(JavaThread* thread);
--- a/src/share/vm/runtime/stubRoutines.hpp	Wed Sep 26 16:54:25 2012 +0200
+++ b/src/share/vm/runtime/stubRoutines.hpp	Mon Oct 22 18:15:38 2012 +0200
@@ -426,14 +426,18 @@
 };
 
 #ifdef SAFEFETCH_STUBS
-static int SafeFetch32(int* adr, int errValue) {
-  return StubRoutines::SafeFetch32_stub()(adr, errValue);
-}
-static intptr_t SafeFetchN(intptr_t* adr, intptr_t errValue) {
-  return StubRoutines::SafeFetchN_stub()(adr, errValue);  
-}
+  static int SafeFetch32(int* adr, int errValue) {
+    return StubRoutines::SafeFetch32_stub()(adr, errValue);
+  }
+  static intptr_t SafeFetchN(intptr_t* adr, intptr_t errValue) {
+    return StubRoutines::SafeFetchN_stub()(adr, errValue);
+  }
+  static bool CanUseSafeFetch32() { return StubRoutines::SafeFetch32_stub() ? true : false; }
+  static bool CanUseSafeFetchN()  { return StubRoutines::SafeFetchN_stub() ? true : false; }
 #else
-// Declared as extern "C" in os.hpp.
+  // Declared as extern "C" in os.hpp.
+  static bool CanUseSafeFetch32() { return true; }
+  static bool CanUseSafeFetchN()  { return true; }
 #endif
 
 #endif // SHARE_VM_RUNTIME_STUBROUTINES_HPP
--- a/src/share/vm/utilities/globalDefinitions.hpp	Wed Sep 26 16:54:25 2012 +0200
+++ b/src/share/vm/utilities/globalDefinitions.hpp	Mon Oct 22 18:15:38 2012 +0200
@@ -26,7 +26,7 @@
 #define SHARE_VM_UTILITIES_GLOBALDEFINITIONS_HPP
 
 #ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS 1
+#define __STDC_FORMAT_MACROS
 #endif
 
 #ifdef TARGET_COMPILER_gcc
--- a/src/share/vm/utilities/globalDefinitions_xlc.hpp	Wed Sep 26 16:54:25 2012 +0200
+++ b/src/share/vm/utilities/globalDefinitions_xlc.hpp	Mon Oct 22 18:15:38 2012 +0200
@@ -181,4 +181,22 @@
 // offset_of as it is defined for gcc.
 #define offset_of(klass,field) (size_t)((intx)&(((klass*)16)->field) - 16)
 
+// Some constant sizes used throughout the AIX port
+#define SIZE_1K   ((uint64_t)         0x400ULL)
+#define SIZE_4K   ((uint64_t)        0x1000ULL)
+#define SIZE_64K  ((uint64_t)       0x10000ULL)
+#define SIZE_1M   ((uint64_t)      0x100000ULL)
+#define SIZE_4M   ((uint64_t)      0x400000ULL)
+#define SIZE_8M   ((uint64_t)      0x800000ULL)
+#define SIZE_16M  ((uint64_t)     0x1000000ULL)
+#define SIZE_256M ((uint64_t)    0x10000000ULL)
+#define SIZE_1G   ((uint64_t)    0x40000000ULL)
+#define SIZE_2G   ((uint64_t)    0x80000000ULL)
+#define SIZE_4G   ((uint64_t)   0x100000000ULL)
+#define SIZE_16G  ((uint64_t)   0x400000000ULL)
+#define SIZE_32G  ((uint64_t)   0x800000000ULL)
+#define SIZE_64G  ((uint64_t)  0x1000000000ULL)
+#define SIZE_1T   ((uint64_t) 0x10000000000ULL)
+
+
 #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_XLC_HPP