DBD-cubrid

 view release on metacpan or  search on metacpan

cci-src/src/base/porting.h  view on Meta::CPAN

      bool initialized;
      unsigned int waiting;
      CRITICAL_SECTION lock_waiting;
      enum
      {
	COND_SIGNAL = 0,
	COND_BROADCAST = 1,
	MAX_EVENTS = 2
      } EVENTS;
      HANDLE events[MAX_EVENTS];
      HANDLE broadcast_block_event;
    };
  } pthread_cond_t;


  /* Condition-variable attribute object.  No attributes are supported by this
   * Windows emulation; a bare HANDLE serves only as a placeholder type. */
  typedef HANDLE pthread_condattr_t;

/* pthread_cond_timedwait() reports expiry as ETIMEDOUT; map it onto the native
 * Windows wait-timeout status when the C runtime does not define it. */
#if !defined (ETIMEDOUT)
#define ETIMEDOUT WAIT_TIMEOUT
#endif
/* Static initializer: zero-fills the struct.  NOTE(review): presumably the
 * pthread_cond_* implementation lazily initializes such objects on first use
 * -- confirm in the corresponding .c file. */
#define PTHREAD_COND_INITIALIZER	{ NULL }

/* Visual Studio 2015+ (UCRT) already declares struct timespec unless time_t
 * support was explicitly disabled, so suppress the fallback definition. */
#if defined(_MSC_VER) && _MSC_VER >= 1900 && !defined(_CRT_NO_TIME_T)
#define _TIMESPEC_DEFINED
#endif				/* _MSC_VER && _MSC_VER >= 1900 && !_CRT_NO_TIME_T */
#if !defined(_TIMESPEC_DEFINED)
#define _TIMESPEC_DEFINED
  /* Fallback for toolchains without struct timespec.
   * NOTE(review): POSIX declares tv_sec as time_t and tv_nsec as long; the
   * int fields here limit tv_sec to 2038 on typical targets -- confirm this
   * layout is intentional before changing it (it may be an ABI contract). */
  struct timespec
  {
    int tv_sec;
    int tv_nsec;
  };
#endif				/* !_TIMESPEC_DEFINED */

  /* Global lock serializing lazy initialization of statically-initialized
   * mutexes (see port_win_mutex_init_and_lock below).  NOTE(review): exact
   * usage is in the implementation file -- confirm. */
  extern pthread_mutex_t css_Internal_mutex_for_mutex_initialize;

  int pthread_mutex_init (pthread_mutex_t * mutex, pthread_mutexattr_t * attr);
  int pthread_mutex_destroy (pthread_mutex_t * mutex);

  /* Helpers used by the inline lock functions below for mutexes that were
   * created with PTHREAD_MUTEX_INITIALIZER and not yet initialized:
   * they initialize the mutex, then lock (or try-lock) it. */
  void port_win_mutex_init_and_lock (pthread_mutex_t * mutex);
  int port_win_mutex_init_and_trylock (pthread_mutex_t * mutex);

  /* pthread_mutex_lock() emulation for Windows.
   * A mutex counts as initialized when its csp points at its own embedded
   * CRITICAL_SECTION and the watermark magic is set; otherwise it was created
   * via PTHREAD_MUTEX_INITIALIZER and must be initialized first.
   * Always returns 0, like a successful POSIX lock. */
  __inline int pthread_mutex_lock (pthread_mutex_t * mutex)
  {
    int is_initialized = (mutex->csp == &mutex->cs
			  && mutex->watermark == WATERMARK_MUTEX_INITIALIZED);

    if (!is_initialized)
      {
	/* Lazy path: initialize the statically-declared mutex, then lock. */
	port_win_mutex_init_and_lock (mutex);
	return 0;
      }

    EnterCriticalSection (mutex->csp);
    return 0;
  }

  /* pthread_mutex_unlock() emulation for Windows.  Always returns 0.
   * NOTE(review): the LockCount == -1 test assumes the pre-Vista
   * CRITICAL_SECTION layout, where -1 means "unlocked"; on Vista and later
   * the field is repurposed as a bit-field, so this check may not reliably
   * detect an unheld mutex -- confirm the supported OS range.  It is a
   * debug aid only: unlocking an unheld mutex asserts in debug builds and
   * is a silent no-op in release builds. */
  __inline int pthread_mutex_unlock (pthread_mutex_t * mutex)
  {
    if (mutex->csp->LockCount == -1)
      {
	/* this means unlock mutex which isn't locked */
	assert (0);
	return 0;
      }

    LeaveCriticalSection (mutex->csp);
    return 0;
  }

  /* pthread_mutex_trylock() emulation for Windows.
   * Returns 0 when the lock was acquired, EBUSY when it is already held.
   * Fix: the original had an unreachable trailing "return 0;" after an
   * if/else in which both branches return -- dead code removed. */
  __inline int pthread_mutex_trylock (pthread_mutex_t * mutex)
  {
    if (mutex->csp == &mutex->cs && mutex->watermark == WATERMARK_MUTEX_INITIALIZED)
      {
	if (TryEnterCriticalSection (mutex->csp))
	  {
	    if (mutex->csp->RecursionCount > 1)
	      {
		/* This thread already owned the critical section, so the
		 * try-enter succeeded recursively.  Undo that acquire and
		 * report busy, mimicking a non-recursive POSIX mutex. */
		LeaveCriticalSection (mutex->csp);
		return EBUSY;
	      }

	    return 0;
	  }

	return EBUSY;
      }

    /* Statically-initialized mutex: lazily initialize it, then try to lock. */
    return port_win_mutex_init_and_trylock (mutex);
  }

  /* Mutex attribute interface.  NOTE(review): which mutex types (e.g. a
   * recursive type passed to pthread_mutexattr_settype) are actually honored
   * is decided in the implementation file -- confirm before relying on it. */
  int pthread_mutexattr_init (pthread_mutexattr_t * attr);
  int pthread_mutexattr_settype (pthread_mutexattr_t * attr, int type);
  int pthread_mutexattr_destroy (pthread_mutexattr_t * attr);

  /* Condition variable interface.  Note that pthread_cond_timedwait() takes a
   * non-const timespec pointer here, unlike the POSIX prototype. */
  int pthread_cond_init (pthread_cond_t * cond, const pthread_condattr_t * attr);
  int pthread_cond_wait (pthread_cond_t * cond, pthread_mutex_t * mutex);
  int pthread_cond_timedwait (pthread_cond_t * cond, pthread_mutex_t * mutex, struct timespec *ts);
  int pthread_cond_destroy (pthread_cond_t * cond);
  int pthread_cond_signal (pthread_cond_t * cond);
  int pthread_cond_broadcast (pthread_cond_t * cond);



/* Thread and thread-local-storage data types (Windows emulation). */
  typedef HANDLE pthread_t;
  typedef int pthread_attr_t;
  typedef int pthread_key_t;

/* Thread start routines must return THREAD_RET_T and use the __stdcall
 * calling convention.  NOTE(review): this matches the signature required by
 * _beginthreadex -- confirm the implementation uses it. */
#define THREAD_RET_T unsigned int
#define THREAD_CALLING_CONVENTION __stdcall

  int pthread_create (pthread_t * thread, const pthread_attr_t * attr,
		      THREAD_RET_T (THREAD_CALLING_CONVENTION * start_routine) (void *), void *arg);
  void pthread_exit (void *ptr);
  pthread_t pthread_self (void);
  int pthread_join (pthread_t thread, void **value_ptr);

/* Thread attributes are not supported: init/destroy expand to 0 (success)
 * and ignore their argument. */
#define pthread_attr_init(dummy1)	0
#define pthread_attr_destroy(dummy1)	0

  /* Thread-local storage interface.  NOTE(review): native Win32 TLS has no
   * automatic destructor support -- verify how 'destructor' is handled by
   * the implementation. */
  int pthread_key_create (pthread_key_t * key, void (*destructor) (void *));
  int pthread_key_delete (pthread_key_t key);
  int pthread_setspecific (pthread_key_t key, const void *value);
  void *pthread_getspecific (pthread_key_t key);

#else				/* WINDOWS */

/* Non-Windows: pthread start routines have the ordinary POSIX signature. */
#define THREAD_RET_T void*
#define THREAD_CALLING_CONVENTION

#endif				/* WINDOWS */

/* COPYMEM: copy one object of 'type' from src to dst.
 * On Windows/x86 a direct assignment is used (unaligned access is permitted
 * by the hardware); elsewhere memcpy is used to stay safe on architectures
 * that trap on misaligned loads/stores. */
#if (defined (WINDOWS) || defined (X86))
#define COPYMEM(type,dst,src)   do {		\
  *((type *) (dst)) = *((type *) (src));  	\
}while(0)
#else				/* WINDOWS || X86 */
#define COPYMEM(type,dst,src)   do {		\
  memcpy((dst), (src), sizeof(type)); 		\
}while(0)
#endif				/* WINDOWS || X86 */

/*
 * Interfaces for atomic operations
 *
 * Developers should check HAVE_ATOMIC_BUILTINS before using atomic builtins,
 * as follows:
 *  #if defined (HAVE_ATOMIC_BUILTINS)
 *   ... code using atomic builtins ...
 *  #else
 *   ... legacy code, or code without atomic builtins ...
 *  #endif
 *
 * ATOMIC_TAS_xx (atomic test-and-set) writes new_val into *ptr and returns
 * the previous contents of *ptr.  ATOMIC_CAS_xx (atomic compare-and-swap)
 * returns true if the swap was done; the swap happens only when *ptr equals
 * cmp_val.  ATOMIC_INC_xx (atomic increment) returns the result of
 * *ptr + amount.
 *
 * On Windows there are two families of APIs providing atomic operations:
 * InterlockedXXX functions handle 32-bit values, while InterlockedXXX64
 * functions handle 64-bit values.  That is why two sets of macros are defined.
 */
#if defined (WINDOWS)

#define HAVE_ATOMIC_BUILTINS

/* InterlockedExchangeAdd returns the PREVIOUS value, so ATOMIC_INC_32 adds
 * 'amount' again to yield the new value, matching __sync_add_and_fetch in
 * the GCC branch below. */
#define ATOMIC_TAS_32(ptr, new_val) \
	InterlockedExchange(ptr, new_val)
#define ATOMIC_CAS_32(ptr, cmp_val, swap_val) \
	(InterlockedCompareExchange(ptr, swap_val, cmp_val) == (cmp_val))
#define ATOMIC_INC_32(ptr, amount) \
	(InterlockedExchangeAdd(ptr, amount) + (amount))
#define MEMORY_BARRIER() \
	MemoryBarrier()

#if defined (_WIN64)
#define ATOMIC_TAS_64(ptr, new_val) \
	InterlockedExchange64(ptr, new_val)
#define ATOMIC_CAS_64(ptr, cmp_val, swap_val) \
	(InterlockedCompareExchange64(ptr, swap_val, cmp_val) == (cmp_val))
#define ATOMIC_INC_64(ptr, amount) \
	(InterlockedExchangeAdd64(ptr, amount) + (amount))

/* Pointer-sized atomics: pointers are 64 bits wide on Win64. */
#define ATOMIC_TAS_ADDR(ptr, new_val) ATOMIC_TAS_64 ((long long volatile *) ptr, (long long) new_val)
#define ATOMIC_CAS_ADDR(ptr, cmp_val, swap_val) \
	(InterlockedCompareExchange64((long long volatile *) ptr, (long long) swap_val, (long long) cmp_val) \
         == (long long) cmp_val)

/* NOTE(review): plain accesses are used here, presumably relying on aligned
 * 64-bit loads/stores being atomic on x64 -- confirm alignment of callers. */
#define ATOMIC_LOAD_64(ptr) (*(ptr))
#define ATOMIC_STORE_64(ptr, val) (*(ptr) = val)
#else				/* _WIN64 */
/*
 * These functions are used on 32-bit Windows.  The InterlockedXXX64 functions
 * are provided by Windows Vista (client) / Windows Server 2003 or later, so
 * Windows XP 32-bit does not have them.  The functions below provide atomic
 * 64-bit operations on all Windows versions.
 */
  extern UINT64 win32_compare_exchange64 (UINT64 volatile *val_ptr, UINT64 swap_val, UINT64 cmp_val);
  extern UINT64 win32_exchange_add64 (UINT64 volatile *ptr, UINT64 amount);
  extern UINT64 win32_exchange64 (UINT64 volatile *ptr, UINT64 new_val);

#define ATOMIC_TAS_64(ptr, new_val) \
	win32_exchange64(ptr, new_val)
#define ATOMIC_CAS_64(ptr, cmp_val, swap_val) \
	(win32_compare_exchange64(ptr, swap_val, cmp_val) == (cmp_val))
#define ATOMIC_INC_64(ptr, amount) \
	(win32_exchange_add64(ptr, amount) + (amount))

/* Pointer-sized atomics: pointers are 32 bits here, so the 32-bit Interlocked
 * API suffices.  NOTE(review): the (long long) casts are narrowed back to
 * 'long' by the Interlocked prototypes -- harmless for 32-bit pointers, but
 * confirm this is intentional. */
#define ATOMIC_TAS_ADDR(ptr, new_val) ATOMIC_TAS_32 ((long volatile *) ptr, (long long) new_val)
#define ATOMIC_CAS_ADDR(ptr, cmp_val, swap_val) \
	(InterlockedCompareExchange((long volatile *) ptr, (long long) swap_val, (long long) cmp_val) \
         == (long long) (cmp_val))

/* A plain 64-bit access is not atomic on 32-bit hardware: implement load as
 * an atomic add of 0 and store as an atomic exchange. */
#define ATOMIC_LOAD_64(ptr) ATOMIC_INC_64 (ptr, 0)
#define ATOMIC_STORE_64(ptr, val) ATOMIC_TAS_64 (ptr, val)
#endif				/* _WIN64 */

#else				/* WINDOWS */

#if defined (HAVE_GCC_ATOMIC_BUILTINS)

#define HAVE_ATOMIC_BUILTINS

/* GCC legacy __sync builtins: __sync_lock_test_and_set returns the previous
 * value, __sync_bool_compare_and_swap returns true when the swap was done,
 * and __sync_add_and_fetch returns the new value -- matching the semantics
 * documented at the top of this section. */
#define ATOMIC_TAS_32(ptr, new_val) \
	__sync_lock_test_and_set(ptr, new_val)
#define ATOMIC_CAS_32(ptr, cmp_val, swap_val) \
	__sync_bool_compare_and_swap(ptr, cmp_val, swap_val)
#define ATOMIC_INC_32(ptr, amount) \
	__sync_add_and_fetch(ptr, amount)

#define ATOMIC_TAS_64(ptr, new_val) \
	__sync_lock_test_and_set(ptr, new_val)
#define ATOMIC_CAS_64(ptr, cmp_val, swap_val) \
	__sync_bool_compare_and_swap(ptr, cmp_val, swap_val)
#define ATOMIC_INC_64(ptr, amount) \
	__sync_add_and_fetch(ptr, amount)

/* The builtins are type-generic, so pointer-sized operands need no casts. */
#define ATOMIC_TAS_ADDR(ptr, new_val) \
        __sync_lock_test_and_set(ptr, new_val)
#define ATOMIC_CAS_ADDR(ptr, cmp_val, swap_val) \
	__sync_bool_compare_and_swap(ptr, cmp_val, swap_val)

/* NOTE(review): plain 64-bit accesses, presumably assuming a 64-bit target
 * where aligned loads/stores are atomic -- confirm for 32-bit builds. */
#define ATOMIC_LOAD_64(ptr) (*(ptr))
#define ATOMIC_STORE_64(ptr, val) (*(ptr) = val)

/* GCC versions earlier than 4.4 have a bug where __sync_synchronize emits no
 * fence on x86-64; work around it by also issuing an explicit mfence.
 * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=36793
 */
#if defined (X86) && defined (CUB_GCC_VERSION) && (CUB_GCC_VERSION < 40400)
#define MEMORY_BARRIER() \
  do { \
    asm volatile("mfence" ::: "memory"); \
    __sync_synchronize(); \
  } while (0)
#else
#define MEMORY_BARRIER() \
	__sync_synchronize()
#endif

#else				/* HAVE_GCC_ATOMIC_BUILTINS */
/*
 * No atomic-operation interfaces are currently provided for other operating
 * systems or compilers.
 */
#endif				/* HAVE_GCC_ATOMIC_BUILTINS */

#endif				/* WINDOWS */
#ifdef __cplusplus
}
#endif



( run in 1.378 second using v1.01-cache-2.11-cpan-39bf76dae61 )