mirror of git://gcc.gnu.org/git/gcc.git
Hashtable synchronization for PowerPC.
libjava:
	Hashtable synchronization for PowerPC.
	* configure.in: Define SLOW_PTHREAD_SELF if configure.host set
	slow_pthread_self. Set up symlink for sysdeps directory.
	* configure: Rebuild.
	* configure.host: Document more shell variables. Set sysdeps_dir
	for most platforms. Set slow_pthread_self for i686. Set
	enable_hash_synchronization_default and slow_pthread_self for PowerPC.
	* posix-threads.cc (_Jv_ThreadSelf_out_of_line): Use release_set so
	that memory barrier is emitted where required.
	* prims.cc: 64-bit align static primitive class instances.
	* include/posix-threads.h (_Jv_ThreadSelf for SLOW_PTHREAD_SELF): Add
	read_barrier() to enforce ordering of reads.
	* sysdep/powerpc/locks.h: New file. Implementation of synchronization
	primitives for PowerPC.
	* sysdep/i386/locks.h: New file. Synchronization primitives for i386
	moved from natObject.cc.
	* sysdep/alpha/locks.h: Likewise.
	* sysdep/ia64/locks.h: Likewise.
	* sysdep/generic/locks.h: Likewise.
	* java/lang/natObject.cc: Move thread synchronization primitives to
	system-dependent headers.

gcc/java:
	* decl.c (java_init_decl_processing): Make sure class_type_node
	alignment is not less than 64 bits if hash synchronization is enabled.

boehm-gc:
	* include/gc_priv.h: Define ALIGN_DOUBLE on 32 bit targets if GCJ
	support is enabled, for hash synchronization.

From-SVN: r50523
parent 828c1ddccc
commit 4559716751
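Note: the new sysdep/powerpc/locks.h is added by this commit but its contents are not reproduced in the hunks below. For orientation only, here is a hypothetical sketch of how a 32-bit PowerPC implementation of the shared sysdep interface might look — compare_and_swap, release_set, compare_and_swap_release (the primitives removed from natObject.cc further down), plus the read_barrier() that include/posix-threads.h now calls. This is an illustrative sketch, not the committed file; the barriers are deliberately conservative (a full "sync" where a lighter barrier would typically be used in practice).

// Hypothetical sketch of a sysdep/powerpc/locks.h-style header (32-bit PowerPC).
// Interface follows the other sysdep headers; this is not the committed code.
typedef size_t obj_addr_t;	/* Integer type big enough for an object address. */

// Atomically replace *addr by new_val if it was initially equal to old.
// Acquire semantics: later memory operations may not move before it.
inline static bool
compare_and_swap (volatile obj_addr_t *addr, obj_addr_t old, obj_addr_t new_val)
{
  obj_addr_t prev;
  __asm__ __volatile__ ("0:	lwarx %0,0,%1\n"	// load and reserve
			"	cmpw %0,%2\n"		// still equal to old?
			"	bne- 1f\n"
			"	stwcx. %3,0,%1\n"	// store conditionally
			"	bne- 0b\n"		// lost reservation: retry
			"1:	isync\n"		// acquire barrier
			: "=&r" (prev)
			: "r" (addr), "r" (old), "r" (new_val)
			: "cr0", "memory");
  return prev == old;
}

// Set *addr to new_val with release semantics: prior loads and stores
// become visible before the new value does.
inline static void
release_set (volatile obj_addr_t *addr, obj_addr_t new_val)
{
  __asm__ __volatile__ ("sync" : : : "memory");	// release barrier
  *addr = new_val;
}

// Compare-and-swap with release semantics; conservatively implemented
// here as a full barrier followed by the acquire CAS above.
inline static bool
compare_and_swap_release (volatile obj_addr_t *addr, obj_addr_t old,
			  obj_addr_t new_val)
{
  __asm__ __volatile__ ("sync" : : : "memory");
  return compare_and_swap (addr, old, new_val);
}

// Keep reads after the barrier from being satisfied before reads that
// precede it.  "sync" is always safe; real code would likely use a
// lighter idiom.
inline static void
read_barrier ()
{
  __asm__ __volatile__ ("sync" : : : "memory");
}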
boehm-gc/ChangeLog:
@@ -1,3 +1,8 @@
+2002-03-09  Bryce McKinlay  <bryce@waitaki.otago.ac.nz>
+
+	* include/gc_priv.h: Define ALIGN_DOUBLE on 32 bit targets if GCJ
+	support is enabled, for hash synchronization.
+
 2002-02-24  Adam Megacz <adam@xwt.org>
 
 	* Makefile.am: Added win32_threads.c to sources list.
boehm-gc/include/gc_priv.h:
@@ -205,6 +205,12 @@ typedef char * ptr_t;	/* A generic pointer to which we can add */
 			/* odd numbered words to have mark bits. */
 #endif
 
+#if defined(GC_GCJ_SUPPORT) && ALIGNMENT < 8 && !defined(ALIGN_DOUBLE)
+  /* GCJ's Hashtable synchronization code requires 64-bit alignment. */
+# define ALIGN_DOUBLE
+#endif
+
 /* ALIGN_DOUBLE requires MERGE_SIZES at present. */
 # if defined(ALIGN_DOUBLE) && !defined(MERGE_SIZES)
 #   define MERGE_SIZES
gcc/java/ChangeLog:
@@ -1,3 +1,8 @@
+2002-03-09  Bryce McKinlay  <bryce@waitaki.otago.ac.nz>
+
+	* decl.c (java_init_decl_processing): Make sure class_type_node
+	alignment is not less than 64 bits if hash synchronization is enabled.
+
 2002-03-08  Per Bothner  <per@bothner.com>
 
 	* parse.y (java_complete_lhs): Check if patch_assignment
gcc/java/decl.c:
@@ -605,6 +605,7 @@ java_init_decl_processing ()
   one_elt_array_domain_type = build_index_type (integer_one_node);
   otable_type = build_array_type (integer_type_node,
 				  one_elt_array_domain_type);
+  TYPE_NONALIASED_COMPONENT (otable_type) = 1;
   otable_ptr_type = build_pointer_type (otable_type);
 
   method_symbol_type = make_node (RECORD_TYPE);
@@ -681,6 +682,10 @@ java_init_decl_processing ()
   FIELD_PRIVATE (t) = 1;
   push_super_field (class_type_node, object_type_node);
 
+  /* Hash synchronization requires at least double-word alignment. */
+  if (flag_hash_synchronization && POINTER_SIZE < 64)
+    TYPE_ALIGN (class_type_node) = 64;
+
   FINISH_RECORD (class_type_node);
   build_decl (TYPE_DECL, get_identifier ("Class"), class_type_node);
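The 64-bit minimum alignment here, the ALIGN_DOUBLE change in boehm-gc, and the aligned(8) attribute added in prims.cc all serve the same invariant: the hash-synchronization code keys lock state by object address and assumes the low bits of every object address are free for its own bookkeeping. The snippet below is a hypothetical illustration of that assumption only; the flag names and helpers are made up for the example, not taken from natObject.cc.

// Illustrative only: with 8-byte object alignment, a single word can
// carry an object address plus a few status flags in its low bits.
#include <stddef.h>
#include <assert.h>

typedef size_t obj_addr_t;

static const obj_addr_t LOCKED_BIT = 0x1;   // hypothetical flag names
static const obj_addr_t HEAVY_BIT  = 0x2;
static const obj_addr_t FLAG_MASK  = 0x7;   // low 3 bits reserved for flags

inline obj_addr_t
make_lock_word (void *obj, obj_addr_t flags)
{
  obj_addr_t addr = reinterpret_cast<obj_addr_t> (obj);
  assert ((addr & FLAG_MASK) == 0);	// holds only if objects are >= 8-byte aligned
  return addr | flags;
}

inline void *
lock_word_object (obj_addr_t word)
{
  return reinterpret_cast<void *> (word & ~FLAG_MASK);
}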
libjava/ChangeLog:
@@ -1,3 +1,27 @@
+2002-03-09  Bryce McKinlay  <bryce@waitaki.otago.ac.nz>
+
+	Hashtable synchronization for PowerPC.
+	* configure.in: Define SLOW_PTHREAD_SELF if configure.host set
+	slow_pthread_self. Set up symlink for sysdeps directory.
+	* configure: Rebuild.
+	* configure.host: Document more shell variables. Set sysdeps_dir
+	for most platforms. Set slow_pthread_self for i686. Set
+	enable_hash_synchronization_default and slow_pthread_self for PowerPC.
+	* posix-threads.cc (_Jv_ThreadSelf_out_of_line): Use release_set so
+	that memory barrier is emitted where required.
+	* prims.cc: 64-bit align static primitive class instances.
+	* include/posix-threads.h (_Jv_ThreadSelf for SLOW_PTHREAD_SELF): Add
+	read_barrier() to enforce ordering of reads.
+	* sysdep/powerpc/locks.h: New file. Implementation of synchronization
+	primitives for PowerPC.
+	* sysdep/i386/locks.h: New file. Synchronization primitives for i386
+	moved from natObject.cc.
+	* sysdep/alpha/locks.h: Likewise.
+	* sysdep/ia64/locks.h: Likewise.
+	* sysdep/generic/locks.h: Likewise.
+	* java/lang/natObject.cc: Move thread synchronization primitives to
+	system-dependent headers.
+
 2002-03-09  Adam Megacz <adam@xwt.org>
 
 	* java/io/natFileDescriptorWin32.cc (read): Return -1 if zero
(File diff suppressed because it is too large — this is the regenerated libjava/configure listed as "* configure: Rebuild." above.)
libjava/configure.host:
@@ -18,6 +18,14 @@
 # libgcj_cflags	       Special CFLAGS to use when building
 # libgcj_cxxflags      Special CXXFLAGS to use when building
 # libgcj_javaflags     Special JAVAFLAGS to use when building
+# libgcj_interpreter   If the bytecode interpreter supports this platform.
+# enable_java_net_default  If java.net native code should be enabled by
+#                      default.
+# enable_hash_synchronization_default  If hash synchronization should be
+#                      enabled by default.
+# sysdeps_dir          Directory containing system-dependent headers
+# slow_pthread_self    The synchronization code should try to avoid
+#                      pthread_self calls by caching thread IDs in a hashtable
 
 libgcj_flags=
 libgcj_cflags=
@@ -26,6 +34,8 @@ libgcj_javaflags=
 libgcj_interpreter=
 enable_java_net_default=yes
 enable_hash_synchronization_default=no
+sysdeps_dir=generic
+slow_pthread_self=
 
 case "${target_optspace}:${host}" in
   yes:*)
@@ -60,27 +70,35 @@ case "${host}" in
 	enable_getenv_properties_default=no
 	;;
   i686-*|i586-*|i486-*|i386-*)
+	sysdeps_dir=i386
 	libgcj_flags="${libgcj_flags} -ffloat-store"
 	libgcj_interpreter=yes
 	libgcj_cxxflags="-D__NO_MATH_INLINES"
 	libgcj_cflags="-D__NO_MATH_INLINES"
 	DIVIDESPEC=-fno-use-divide-subroutine
 	enable_hash_synchronization_default=yes
+	slow_pthread_self=yes
 	;;
   alpha*-*)
+	sysdeps_dir=alpha
 	libgcj_flags="${libgcj_flags} -mieee"
 	libgcj_interpreter=yes
 	enable_hash_synchronization_default=yes
 	;;
   powerpc*-linux*)
+	sysdeps_dir=powerpc
 	libgcj_interpreter=yes
+	enable_hash_synchronization_default=yes
+	slow_pthread_self=yes
 	;;
-  powerpc-apple-*)
+  powerpc*-darwin*)
+	sysdeps_dir=powerpc
 	libgcj_interpreter=no
 	;;
   sparc-*)
 	;;
   ia64-*)
+	sysdeps_dir=ia64
 	libgcj_flags="${libgcj_flags} -funwind-tables"
 	libgcj_interpreter=yes
 	enable_hash_synchronization_default=yes
libjava/configure.in:
@@ -76,6 +76,12 @@ if test -z "$enable_hash_synchronization"; then
    enable_hash_synchronization=$enable_hash_synchronization_default
 fi
 
+dnl configure.host sets slow_pthread_self if the synchronization code should
+dnl try to avoid pthread_self calls by caching thread IDs in a hashtable.
+if test "${slow_pthread_self}" = "yes"; then
+  AC_DEFINE(SLOW_PTHREAD_SELF)
+fi
+
 dnl See if the user has requested runtime debugging.
 LIBGCJDEBUG="false"
@@ -415,6 +421,8 @@ AC_SUBST(THREADDEPS)
 AC_SUBST(THREADOBJS)
 AC_SUBST(THREADSPEC)
 
+AC_LINK_FILES(sysdep/$sysdeps_dir, sysdep)
+
 HASH_SYNC_SPEC=
 # Hash synchronization is only useful with posix threads right now.
 if test "$enable_hash_synchronization" = yes && test "$THREADS" = "posix"; then
libjava/include/posix-threads.h:
@@ -221,14 +221,6 @@ _Jv_ThreadCurrent (void)
 // to threads.
 
 
-#ifdef __i386__
-
-#define SLOW_PTHREAD_SELF
-// Add a cache for pthread_self() if we don't have the thread
-// pointer in a register.
-
-#endif /* __i386__ */
-
 #ifdef __ia64__
 
 typedef size_t _Jv_ThreadId_t;
@@ -270,6 +262,8 @@ _Jv_ThreadSelf (void)
 
 #if defined(SLOW_PTHREAD_SELF)
 
+#include "sysdep/locks.h"
+
 typedef pthread_t _Jv_ThreadId_t;
 
 // E.g. on X86 Linux, pthread_self() is too slow for our purpose.
@@ -321,7 +315,7 @@ _Jv_ThreadSelf (void)
   unsigned h = SC_INDEX(sp);
   volatile self_cache_entry *sce = _Jv_self_cache + h;
   pthread_t candidate_self = sce -> self;  // Read must precede following one.
-  // Read barrier goes here, if needed.
+  read_barrier();
   if (sce -> high_sp_bits == sp >> LOG_THREAD_SPACING)
     {
       // The sce -> self value we read must be valid.  An intervening
libjava/java/lang/natObject.cc:
@@ -307,128 +307,7 @@ _Jv_MonitorExit (jobject obj)
 #include <unistd.h>	// for usleep, sysconf.
 #include <sched.h>	// for sched_yield.
 #include <gcj/javaprims.h>
+#include <sysdep/locks.h>
 
-typedef size_t obj_addr_t;	/* Integer type big enough for object	*/
-				/* address.				*/
-
-// The following should move to some standard place. Linux-threads
-// already defines roughly these, as do more recent versions of boehm-gc.
-// The problem is that neither exports them.
-
-#if defined(__GNUC__) && defined(__i386__)
-// Atomically replace *addr by new_val if it was initially equal to old.
-// Return true if the comparison succeeded.
-// Assumed to have acquire semantics, i.e. later memory operations
-// cannot execute before the compare_and_swap finishes.
-inline static bool
-compare_and_swap(volatile obj_addr_t *addr,
-		 obj_addr_t old,
-		 obj_addr_t new_val)
-{
-  char result;
-  __asm__ __volatile__("lock; cmpxchgl %2, %0; setz %1"
-		       : "+m"(*(addr)), "=q"(result)
-		       : "r" (new_val), "a"(old)
-		       : "memory");
-  return (bool) result;
-}
-
-// Set *addr to new_val with release semantics, i.e. making sure
-// that prior loads and stores complete before this
-// assignment.
-// On X86, the hardware shouldn't reorder reads and writes,
-// so we just have to convince gcc not to do it either.
-inline static void
-release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
-{
-  __asm__ __volatile__(" " : : : "memory");
-  *(addr) = new_val;
-}
-
-// Compare_and_swap with release semantics instead of acquire semantics.
-// On many architecture, the operation makes both guarantees, so the
-// implementation can be the same.
-inline static bool
-compare_and_swap_release(volatile obj_addr_t *addr,
-			 obj_addr_t old,
-			 obj_addr_t new_val)
-{
-  return compare_and_swap(addr, old, new_val);
-}
-#endif
-
-#if defined(__GNUC__) && defined(__ia64__) && SIZEOF_VOID_P == 8
-inline static bool
-compare_and_swap(volatile obj_addr_t *addr,
-		 obj_addr_t old,
-		 obj_addr_t new_val)
-{
-  unsigned long oldval;
-  __asm__ __volatile__("mov ar.ccv=%4 ;; cmpxchg8.acq %0=%1,%2,ar.ccv"
-		       : "=r"(oldval), "=m"(*addr)
-		       : "r"(new_val), "1"(*addr), "r"(old) : "memory");
-  return (oldval == old);
-}
-
-// The fact that *addr is volatile should cause the compiler to
-// automatically generate an st8.rel.
-inline static void
-release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
-{
-  __asm__ __volatile__(" " : : : "memory");
-  *(addr) = new_val;
-}
-
-inline static bool
-compare_and_swap_release(volatile obj_addr_t *addr,
-			 obj_addr_t old,
-			 obj_addr_t new_val)
-{
-  unsigned long oldval;
-  __asm__ __volatile__("mov ar.ccv=%4 ;; cmpxchg8.rel %0=%1,%2,ar.ccv"
-		       : "=r"(oldval), "=m"(*addr)
-		       : "r"(new_val), "1"(*addr), "r"(old) : "memory");
-  return (oldval == old);
-}
-#endif
-
-#if defined(__GNUC__) && defined(__alpha__)
-inline static bool
-compare_and_swap(volatile obj_addr_t *addr,
-		 obj_addr_t old,
-		 obj_addr_t new_val)
-{
-  unsigned long oldval;
-  char result;
-  __asm__ __volatile__(
-	"1:ldq_l %0, %1\n\t" \
-	"cmpeq %0, %5, %2\n\t" \
-	"beq %2, 2f\n\t" \
-	"mov %3, %0\n\t" \
-	"stq_c %0, %1\n\t" \
-	"bne %0, 2f\n\t" \
-	"br 1b\n\t" \
-	"2:mb"
-		       : "=&r"(oldval), "=m"(*addr), "=&r"(result)
-		       : "r" (new_val), "m"(*addr), "r"(old) : "memory");
-  return (bool) result;
-}
-
-inline static void
-release_set(volatile obj_addr_t *addr, obj_addr_t new_val)
-{
-  __asm__ __volatile__("mb" : : : "memory");
-  *(addr) = new_val;
-}
-
-inline static bool
-compare_and_swap_release(volatile obj_addr_t *addr,
-			 obj_addr_t old,
-			 obj_addr_t new_val)
-{
-  return compare_and_swap(addr, old, new_val);
-}
-#endif
-
 // Try to determine whether we are on a multiprocessor, i.e. whether
 // spinning may be profitable.
@@ -453,7 +332,6 @@ keep_live(obj_addr_t p)
   __asm__ __volatile__("" : : "rm"(p) : "memory");
 }
 
-
 // Each hash table entry holds a single preallocated "lightweight" lock.
 // In addition, it holds a chain of "heavyweight" locks.  Lightweight
 // locks do not support Object.wait(), and are converted to heavyweight
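As context for the primitives moved out above: the monitor code takes locks with compare_and_swap (acquire semantics) and drops them with release_set (release semantics). The following is a deliberately simplified, hypothetical sketch of that pairing — a bare spin lock — not the actual lightweight/heavyweight lock machinery in natObject.cc:

// Simplified, hypothetical use of the sysdep primitives: a one-word spin lock.
// Acquire uses compare_and_swap so the critical section cannot float above
// the lock; release uses release_set so writes inside the critical section
// are visible before the lock appears free.
#include <sched.h>		// for sched_yield
#include <sysdep/locks.h>	// compare_and_swap, release_set, obj_addr_t

static volatile obj_addr_t simple_lock;	// 0 = free, nonzero = owner id

static void
simple_acquire (obj_addr_t self_id)
{
  while (! compare_and_swap (&simple_lock, 0, self_id))
    sched_yield ();		// contended: give the owner a chance to run
}

static void
simple_release ()
{
  release_set (&simple_lock, 0);
}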
libjava/posix-threads.cc:
@@ -438,19 +438,17 @@ _Jv_ThreadWait (void)
 
 #if defined(SLOW_PTHREAD_SELF)
 
+#include "sysdep/locks.h"
+
 // Support for pthread_self() lookup cache.
 
 volatile self_cache_entry _Jv_self_cache[SELF_CACHE_SIZE];
 
 
 _Jv_ThreadId_t
 _Jv_ThreadSelf_out_of_line(volatile self_cache_entry *sce, size_t high_sp_bits)
 {
   pthread_t self = pthread_self();
-  // The ordering between the following writes matters.
-  // On Alpha, we probably need a memory barrier in the middle.
   sce -> high_sp_bits = high_sp_bits;
-  sce -> self = self;
+  release_set ((obj_addr_t *) &(sce -> self), self);
   return self;
 }
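The release_set here is the writer-side half of the ordering that the new read_barrier() in include/posix-threads.h enforces on the reader side: the writer publishes high_sp_bits before self, and the reader reads self before re-checking high_sp_bits. A hypothetical, condensed illustration of that publish/validate pattern follows; it assumes sysdep/locks.h supplies obj_addr_t, release_set and read_barrier, as the code above does, and it is not a drop-in replacement for the cache code itself.

// Illustrative publish/validate pattern: write field B after field A with
// release semantics; read them back in the opposite order with a read
// barrier in between, so a reader that sees a consistent A can trust the
// B it already read.
#include <stddef.h>
#include <sysdep/locks.h>	// obj_addr_t, release_set, read_barrier

struct cache_entry
{
  size_t key;			// written first  (cf. high_sp_bits)
  volatile obj_addr_t value;	// written second (cf. self)
};

static void
publish (cache_entry *e, size_t key, obj_addr_t value)
{
  e->key = key;
  release_set (&e->value, value);	// key is visible before value
}

static bool
lookup (cache_entry *e, size_t key, obj_addr_t *out)
{
  obj_addr_t candidate = e->value;	// must be read before key
  read_barrier ();			// keep the next read from moving earlier
  if (e->key != key)
    return false;			// entry was (re)written for another key
  *out = candidate;
  return true;
}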
libjava/prims.cc:
@@ -586,9 +586,10 @@ _Jv_NewMultiArray (jclass array_type, jint dimensions, ...)
 
 
+// Ensure 8-byte alignment, for hash synchronization.
 #define DECLARE_PRIM_TYPE(NAME) \
   _Jv_ArrayVTable _Jv_##NAME##VTable; \
-  java::lang::Class _Jv_##NAME##Class;
+  java::lang::Class _Jv_##NAME##Class __attribute__ ((aligned (8)));
 
 DECLARE_PRIM_TYPE(byte);
 DECLARE_PRIM_TYPE(short);