gjdwebserver-overlay/sys-kernel/pinephone-pro-sources/files/5020_BMQ-and-PDS-io-schedul...

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index cc3ea8febc62..ab4c5a35b999 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -5299,6 +5299,12 @@
sa1100ir [NET]
See drivers/net/irda/sa1100_ir.c.
+ sched_timeslice=
+ [KNL] Time slice in ms for Project C BMQ/PDS scheduler.
+ Format: integer 2, 4
+ Default: 4
+ See Documentation/scheduler/sched-BMQ.txt
+
sched_verbose [KNL] Enables verbose scheduler debug messages.
schedstats= [KNL,X86] Enable or disable scheduled statistics.
diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
index ddccd1077462..e24781970a3d 100644
--- a/Documentation/admin-guide/sysctl/kernel.rst
+++ b/Documentation/admin-guide/sysctl/kernel.rst
@@ -1524,3 +1524,13 @@ is 10 seconds.
The softlockup threshold is (``2 * watchdog_thresh``). Setting this
tunable to zero will disable lockup detection altogether.
+
+yield_type:
+===========
+
+BMQ/PDS CPU scheduler only. This determines what type of yield a call
+to sched_yield() will perform.
+
+ 0 - No yield.
+ 1 - Deboost and requeue task. (default)
+ 2 - Set run queue skip task.
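A minimal user-space sketch (not part of this patch) of how the yield_type knob documented above could be inspected and changed at runtime. It assumes the sysctl is exposed as /proc/sys/kernel/yield_type, which is only present on a kernel running BMQ/PDS; writing requires root.

	/* Hedged example: read (and optionally set) the assumed
	 * /proc/sys/kernel/yield_type sysctl.  Build: cc -o yield_type yield_type.c
	 */
	#include <stdio.h>
	#include <stdlib.h>

	int main(int argc, char **argv)
	{
		FILE *f = fopen("/proc/sys/kernel/yield_type", "r");
		int val;

		if (!f) {
			perror("yield_type (BMQ/PDS kernel only?)");
			return 1;
		}
		if (fscanf(f, "%d", &val) == 1)
			printf("current yield_type: %d\n", val);
		fclose(f);

		if (argc > 1) {		/* writing needs root */
			FILE *w = fopen("/proc/sys/kernel/yield_type", "w");

			if (!w || fprintf(w, "%d\n", atoi(argv[1])) < 0)
				perror("write yield_type");
			if (w)
				fclose(w);
		}
		return 0;
	}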
diff --git a/Documentation/scheduler/sched-BMQ.txt b/Documentation/scheduler/sched-BMQ.txt
new file mode 100644
index 000000000000..05c84eec0f31
--- /dev/null
+++ b/Documentation/scheduler/sched-BMQ.txt
@@ -0,0 +1,110 @@
+ BitMap queue CPU Scheduler
+ --------------------------
+
+CONTENT
+========
+
+ Background
+ Design
+ Overview
+ Task policy
+ Priority management
+ BitMap Queue
+ CPU Assignment and Migration
+
+
+Background
+==========
+
+The BitMap Queue CPU scheduler, referred to as BMQ from here on, is an
+evolution of the earlier Priority and Deadline based Skiplist multiple queue
+scheduler (PDS), and is inspired by the Zircon scheduler. Its goal is to keep
+the scheduler code simple while staying efficient and scalable for interactive
+workloads such as desktop use, movie playback and gaming.
+
+Design
+======
+
+Overview
+--------
+
+BMQ uses a per-CPU run queue design: each (logical) CPU has its own run queue
+and is responsible for scheduling the tasks that are put into it.
+
+The run queue is a set of priority queues. In terms of data structure these
+are FIFO queues for non-rt tasks and a priority queue for rt tasks; see
+BitMap Queue below for details. BMQ is optimized for non-rt tasks, because
+most applications are non-rt tasks. Whether a queue is FIFO or priority
+ordered, each queue is an ordered list of runnable tasks awaiting execution
+and the data structures are the same. When it is time for a new task to run,
+the scheduler simply looks for the lowest numbered queue that contains a task
+and runs the first task from the head of that queue. The per-CPU idle task is
+also kept in the run queue, so the scheduler can always find a task to run
+from its run queue.
+
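The lookup described above can be illustrated with a small self-contained sketch (not part of the kernel code or this patch): a bitmap marks which priority queues are non-empty, and picking the next task is a find-first-set over that bitmap followed by taking the head of the corresponding FIFO list. All names here (demo_rq, demo_pick, NR_PRIO) are made up for the illustration; the real implementation appears later in this diff in kernel/sched/alt_core.c.

	/* Illustrative user-space sketch of a bitmap-indexed FIFO pick. */
	#include <stdio.h>
	#include <string.h>

	#define NR_PRIO 64

	struct demo_rq {
		unsigned long long bitmap;	/* bit n set => queue n non-empty */
		const char *heads[NR_PRIO];	/* stand-in for the FIFO lists    */
	};

	static void demo_enqueue(struct demo_rq *rq, int prio, const char *name)
	{
		rq->heads[prio] = name;		/* real code: list_add_tail()     */
		rq->bitmap |= 1ULL << prio;
	}

	static const char *demo_pick(const struct demo_rq *rq)
	{
		/* lowest set bit = highest-priority non-empty queue */
		int prio = __builtin_ctzll(rq->bitmap);

		return rq->heads[prio];
	}

	int main(void)
	{
		struct demo_rq rq;

		memset(&rq, 0, sizeof(rq));
		demo_enqueue(&rq, NR_PRIO - 1, "idle");	/* idle task is always queued */
		demo_enqueue(&rq, 20, "editor");
		printf("next task: %s\n", demo_pick(&rq));	/* -> editor */
		return 0;
	}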
+Each task is assigned the same time slice (default 4 ms) when it is picked to
+start running. A task is reinserted at the end of the appropriate priority
+queue when it uses up its whole time slice. When the scheduler selects a new
+task from the priority queue it sets the CPU's preemption timer for the
+remainder of the previous time slice. When that timer fires the scheduler
+stops executing that task, selects another task and starts over again.
+
+If a task blocks waiting for a shared resource then it's taken out of its
+priority queue and is placed in a wait queue for the shared resource. When it
+is unblocked it will be reinserted in the appropriate priority queue of an
+eligible CPU.
+
+Task policy
+-----------
+
+BMQ supports the DEADLINE, FIFO, RR, NORMAL, BATCH and IDLE task policies,
+like the mainline CFS scheduler. However, BMQ is heavily optimized for non-rt
+tasks, that is, tasks with the NORMAL/BATCH/IDLE policies. Below are the
+implementation details for each policy.
+
+DEADLINE
+ It is squashed into a priority 0 FIFO task.
+
+FIFO/RR
+ All RT tasks share one single priority queue in the BMQ run queue design.
+The complexity of the insert operation is O(n). BMQ is not designed for
+systems that mainly run rt policy tasks.
+
+NORMAL/BATCH/IDLE
+ BATCH and IDLE tasks are treated as the same policy. They compete for CPU
+with NORMAL policy tasks, but they simply never get boosted. To control the
+priority of NORMAL/BATCH/IDLE tasks, simply use nice levels.
+
+ISO
+ ISO policy is not supported in BMQ. Please use a nice level -20 NORMAL
+policy task instead.
+
+Priority management
+-------------------
+
+RT tasks have priorities from 0 to 99. For non-rt tasks, two different
+factors are used to determine the effective priority of a task; the effective
+priority is what determines which queue the task will be in.
+
+The first factor is simply the task's static priority, which is derived from
+the task's nice level: [-20, 19] from userland's point of view and [0, 39]
+internally.
+
+The second factor is the priority boost. This is a value bounded between
+[-MAX_PRIORITY_ADJ, MAX_PRIORITY_ADJ] used to offset the base priority; it is
+modified in the following cases:
+
+*When a thread has used up its entire time slice, always deboost it by
+increasing its boost value by one.
+*When a thread gives up CPU control (voluntarily or not) to reschedule, and
+its switch-in time (the time since it was last switched in and ran) is below
+the threshold based on its priority boost, boost it by decreasing its boost
+value by one, but it is capped at 0 (it won't go negative).
+
+The intent in this system is to ensure that interactive threads are serviced
+quickly. These are usually the threads that interact directly with the user
+and cause user-perceivable latency. These threads usually do little work and
+spend most of their time blocked awaiting another user event. So they get the
+priority boost from unblocking while background threads that do most of the
+processing receive the priority penalty for using their entire timeslice.
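To make the two factors above concrete, here is a hedged user-space sketch (not part of the patch) of how a nice level and a boost value combine. It assumes the simplified mapping described in the text (nice [-20, 19] to static priority [0, 39]) and the BMQ bound MAX_PRIORITY_ADJ = 7 introduced later in this diff; demo_effective_prio() is a made-up name, and the real index arithmetic lives in alt_core.c and bmq.h.

	/* Hedged illustration: static priority (nice + 20) offset by the boost,
	 * clamped to the normal-priority range [0, 39].
	 */
	#include <stdio.h>

	#define MAX_PRIORITY_ADJ 7

	static int demo_effective_prio(int nice, int boost)
	{
		int prio = (nice + 20) + boost;	/* static prio + boost */

		if (prio < 0)			/* keep it inside [0, 39] */
			prio = 0;
		if (prio > 39)
			prio = 39;
		return prio;
	}

	int main(void)
	{
		/* an interactive task at nice 0 with a full boost vs. a CPU hog */
		printf("boosted  : %d\n", demo_effective_prio(0, -MAX_PRIORITY_ADJ));
		printf("deboosted: %d\n", demo_effective_prio(0, MAX_PRIORITY_ADJ));
		return 0;
	}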
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 8dfa36a99c74..46397c606e01 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -479,7 +479,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
seq_puts(m, "0 0 0\n");
else
seq_printf(m, "%llu %llu %lu\n",
- (unsigned long long)task->se.sum_exec_runtime,
+ (unsigned long long)tsk_seruntime(task),
(unsigned long long)task->sched_info.run_delay,
task->sched_info.pcount);
diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
index 8874f681b056..59eb72bf7d5f 100644
--- a/include/asm-generic/resource.h
+++ b/include/asm-generic/resource.h
@@ -23,7 +23,7 @@
[RLIMIT_LOCKS] = { RLIM_INFINITY, RLIM_INFINITY }, \
[RLIMIT_SIGPENDING] = { 0, 0 }, \
[RLIMIT_MSGQUEUE] = { MQ_BYTES_MAX, MQ_BYTES_MAX }, \
- [RLIMIT_NICE] = { 0, 0 }, \
+ [RLIMIT_NICE] = { 30, 30 }, \
[RLIMIT_RTPRIO] = { 0, 0 }, \
[RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \
}
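The RLIMIT_NICE change above raises the default limit from 0 to 30, which (per setrlimit(2)) lets an unprivileged process raise its priority to nice 20 - 30 = -10. A small runnable check follows; it is not part of the patch and assumes the default limit has not been overridden by PAM or systemd. getrlimit() and setpriority() are standard APIs.

	/* Hedged demonstration: on a kernel with this patch (and default limits),
	 * the setpriority() call below succeeds; on a stock kernel with
	 * RLIMIT_NICE = 0 it fails with EACCES.
	 */
	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/resource.h>

	int main(void)
	{
		struct rlimit rl;

		if (getrlimit(RLIMIT_NICE, &rl) == 0)
			printf("RLIMIT_NICE soft=%llu hard=%llu\n",
			       (unsigned long long)rl.rlim_cur,
			       (unsigned long long)rl.rlim_max);

		errno = 0;
		if (setpriority(PRIO_PROCESS, 0, -10) == 0)
			printf("nice -10 granted (limit >= 30)\n");
		else
			printf("nice -10 refused: %s\n", strerror(errno));
		return 0;
	}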
diff --git a/include/linux/sched.h b/include/linux/sched.h
index c46f3a63b758..7c65e6317d97 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -751,8 +751,14 @@ struct task_struct {
unsigned int ptrace;
#ifdef CONFIG_SMP
- int on_cpu;
struct __call_single_node wake_entry;
+#endif
+#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT)
+ int on_cpu;
+#endif
+
+#ifdef CONFIG_SMP
+#ifndef CONFIG_SCHED_ALT
unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee;
@@ -766,6 +772,7 @@ struct task_struct {
*/
int recent_used_cpu;
int wake_cpu;
+#endif /* !CONFIG_SCHED_ALT */
#endif
int on_rq;
@@ -774,6 +781,20 @@ struct task_struct {
int normal_prio;
unsigned int rt_priority;
+#ifdef CONFIG_SCHED_ALT
+ u64 last_ran;
+ s64 time_slice;
+ int sq_idx;
+ struct list_head sq_node;
+#ifdef CONFIG_SCHED_BMQ
+ int boost_prio;
+#endif /* CONFIG_SCHED_BMQ */
+#ifdef CONFIG_SCHED_PDS
+ u64 deadline;
+#endif /* CONFIG_SCHED_PDS */
+ /* sched_clock time spent running */
+ u64 sched_time;
+#else /* !CONFIG_SCHED_ALT */
struct sched_entity se;
struct sched_rt_entity rt;
struct sched_dl_entity dl;
@@ -784,6 +805,7 @@ struct task_struct {
unsigned long core_cookie;
unsigned int core_occupation;
#endif
+#endif /* !CONFIG_SCHED_ALT */
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
@@ -1517,6 +1539,15 @@ struct task_struct {
*/
};
+#ifdef CONFIG_SCHED_ALT
+#define tsk_seruntime(t) ((t)->sched_time)
+/* replace the uncertain rt_timeout with 0UL */
+#define tsk_rttimeout(t) (0UL)
+#else /* CFS */
+#define tsk_seruntime(t) ((t)->se.sum_exec_runtime)
+#define tsk_rttimeout(t) ((t)->rt.timeout)
+#endif /* !CONFIG_SCHED_ALT */
+
static inline struct pid *task_pid(struct task_struct *task)
{
return task->thread_pid;
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
index 7c83d4d5a971..fa30f98cb2be 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -1,5 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifdef CONFIG_SCHED_ALT
+
+static inline int dl_task(struct task_struct *p)
+{
+ return 0;
+}
+
+#ifdef CONFIG_SCHED_BMQ
+#define __tsk_deadline(p) (0UL)
+#endif
+
+#ifdef CONFIG_SCHED_PDS
+#define __tsk_deadline(p) ((((u64) ((p)->prio))<<56) | (p)->deadline)
+#endif
+
+#else
+
+#define __tsk_deadline(p) ((p)->dl.deadline)
+
/*
* SCHED_DEADLINE tasks has negative priorities, reflecting
* the fact that any of them has higher prio than RT and
@@ -21,6 +40,7 @@ static inline int dl_task(struct task_struct *p)
{
return dl_prio(p->prio);
}
+#endif /* CONFIG_SCHED_ALT */
static inline bool dl_time_before(u64 a, u64 b)
{
diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
index ab83d85e1183..6af9ae681116 100644
--- a/include/linux/sched/prio.h
+++ b/include/linux/sched/prio.h
@@ -18,6 +18,32 @@
#define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
#define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
+#ifdef CONFIG_SCHED_ALT
+
+/* Undefine MAX_PRIO and DEFAULT_PRIO */
+#undef MAX_PRIO
+#undef DEFAULT_PRIO
+
+/* +/- priority levels from the base priority */
+#ifdef CONFIG_SCHED_BMQ
+#define MAX_PRIORITY_ADJ (7)
+
+#define MIN_NORMAL_PRIO (MAX_RT_PRIO)
+#define MAX_PRIO (MIN_NORMAL_PRIO + NICE_WIDTH)
+#define DEFAULT_PRIO (MIN_NORMAL_PRIO + NICE_WIDTH / 2)
+#endif
+
+#ifdef CONFIG_SCHED_PDS
+#define MAX_PRIORITY_ADJ (0)
+
+#define MIN_NORMAL_PRIO (128)
+#define NORMAL_PRIO_NUM (64)
+#define MAX_PRIO (MIN_NORMAL_PRIO + NORMAL_PRIO_NUM)
+#define DEFAULT_PRIO (MAX_PRIO - NICE_WIDTH / 2)
+#endif
+
+#endif /* CONFIG_SCHED_ALT */
+
/*
* Convert user-nice values [ -20 ... 0 ... 19 ]
* to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
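For reference, a hedged arithmetic check (not part of the patch) of the priority ranges these definitions produce; it assumes the mainline values MAX_RT_PRIO = 100 and NICE_WIDTH = 40, which are not visible in this hunk.

	/* Hedged check of the derived BMQ/PDS normal-priority ranges. */
	#include <stdio.h>

	#define MAX_RT_PRIO 100		/* assumed mainline value */
	#define NICE_WIDTH  40		/* assumed mainline value */

	int main(void)
	{
		/* BMQ: MIN_NORMAL_PRIO = MAX_RT_PRIO */
		printf("BMQ: normal prio %d..%d, default %d\n",
		       MAX_RT_PRIO, MAX_RT_PRIO + NICE_WIDTH - 1,
		       MAX_RT_PRIO + NICE_WIDTH / 2);	/* 100..139, 120 */

		/* PDS: MIN_NORMAL_PRIO = 128, NORMAL_PRIO_NUM = 64 */
		printf("PDS: normal prio %d..%d, default %d\n",
		       128, 128 + 64 - 1,
		       (128 + 64) - NICE_WIDTH / 2);	/* 128..191, 172 */
		return 0;
	}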
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index e5af028c08b4..0a7565d0d3cf 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
if (policy == SCHED_FIFO || policy == SCHED_RR)
return true;
+#ifndef CONFIG_SCHED_ALT
if (policy == SCHED_DEADLINE)
return true;
+#endif
return false;
}
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 56cffe42abbc..e020fc572b22 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -233,7 +233,8 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
#endif /* !CONFIG_SMP */
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) && \
+ !defined(CONFIG_SCHED_ALT)
extern void rebuild_sched_domains_energy(void);
#else
static inline void rebuild_sched_domains_energy(void)
diff --git a/init/Kconfig b/init/Kconfig
index c7900e8975f1..d2b593e3807d 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -812,6 +812,7 @@ menu "Scheduler features"
config UCLAMP_TASK
bool "Enable utilization clamping for RT/FAIR tasks"
depends on CPU_FREQ_GOV_SCHEDUTIL
+ depends on !SCHED_ALT
help
This feature enables the scheduler to track the clamped utilization
of each CPU based on RUNNABLE tasks scheduled on that CPU.
@@ -858,6 +859,35 @@ config UCLAMP_BUCKETS_COUNT
If in doubt, use the default value.
+menuconfig SCHED_ALT
+ bool "Alternative CPU Schedulers"
+ default y
+ help
+ This feature enables the alternative CPU schedulers.
+
+if SCHED_ALT
+
+choice
+ prompt "Alternative CPU Scheduler"
+ default SCHED_BMQ
+
+config SCHED_BMQ
+ bool "BMQ CPU scheduler"
+ help
+ The BitMap Queue CPU scheduler for excellent interactivity and
+ responsiveness on the desktop and solid scalability on normal
+ hardware and commodity servers.
+
+config SCHED_PDS
+ bool "PDS CPU scheduler"
+ help
+ The Priority and Deadline based Skip list multiple queue CPU
+ Scheduler.
+
+endchoice
+
+endif
+
endmenu
#
@@ -911,6 +941,7 @@ config NUMA_BALANCING
depends on ARCH_SUPPORTS_NUMA_BALANCING
depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
depends on SMP && NUMA && MIGRATION && !PREEMPT_RT
+ depends on !SCHED_ALT
help
This option adds support for automatic NUMA aware memory/task placement.
The mechanism is quite primitive and is based on migrating memory when
@@ -1003,6 +1034,7 @@ config FAIR_GROUP_SCHED
depends on CGROUP_SCHED
default CGROUP_SCHED
+if !SCHED_ALT
config CFS_BANDWIDTH
bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
depends on FAIR_GROUP_SCHED
@@ -1025,6 +1057,7 @@ config RT_GROUP_SCHED
realtime bandwidth for them.
See Documentation/scheduler/sched-rt-group.rst for more information.
+endif #!SCHED_ALT
endif #CGROUP_SCHED
config UCLAMP_TASK_GROUP
@@ -1268,6 +1301,7 @@ config CHECKPOINT_RESTORE
config SCHED_AUTOGROUP
bool "Automatic process group scheduling"
+ depends on !SCHED_ALT
select CGROUPS
select CGROUP_SCHED
select FAIR_GROUP_SCHED
diff --git a/init/init_task.c b/init/init_task.c
index 73cc8f03511a..2d0bad762895 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -75,9 +75,15 @@ struct task_struct init_task
.stack = init_stack,
.usage = REFCOUNT_INIT(2),
.flags = PF_KTHREAD,
+#ifdef CONFIG_SCHED_ALT
+ .prio = DEFAULT_PRIO + MAX_PRIORITY_ADJ,
+ .static_prio = DEFAULT_PRIO,
+ .normal_prio = DEFAULT_PRIO + MAX_PRIORITY_ADJ,
+#else
.prio = MAX_PRIO - 20,
.static_prio = MAX_PRIO - 20,
.normal_prio = MAX_PRIO - 20,
+#endif
.policy = SCHED_NORMAL,
.cpus_ptr = &init_task.cpus_mask,
.user_cpus_ptr = NULL,
@@ -88,6 +94,17 @@ struct task_struct init_task
.restart_block = {
.fn = do_no_restart_syscall,
},
+#ifdef CONFIG_SCHED_ALT
+ .sq_node = LIST_HEAD_INIT(init_task.sq_node),
+#ifdef CONFIG_SCHED_BMQ
+ .boost_prio = 0,
+ .sq_idx = 15,
+#endif
+#ifdef CONFIG_SCHED_PDS
+ .deadline = 0,
+#endif
+ .time_slice = HZ,
+#else
.se = {
.group_node = LIST_HEAD_INIT(init_task.se.group_node),
},
@@ -95,6 +112,7 @@ struct task_struct init_task
.run_list = LIST_HEAD_INIT(init_task.rt.run_list),
.time_slice = RR_TIMESLICE,
},
+#endif
.tasks = LIST_HEAD_INIT(init_task.tasks),
#ifdef CONFIG_SMP
.pushable_tasks = PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO),
diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt
index c2f1fd95a821..41654679b1b2 100644
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -117,7 +117,7 @@ config PREEMPT_DYNAMIC
config SCHED_CORE
bool "Core Scheduling for SMT"
- depends on SCHED_SMT
+ depends on SCHED_SMT && !SCHED_ALT
help
This option permits Core Scheduling, a means of coordinated task
selection across SMT siblings. When enabled -- see
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 71a418858a5e..7e3016873db1 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -704,7 +704,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
return ret;
}
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_ALT)
/*
* Helper routine for generate_sched_domains().
* Do cpusets a, b have overlapping effective cpus_allowed masks?
@@ -1100,7 +1100,7 @@ static void rebuild_sched_domains_locked(void)
/* Have scheduler rebuild the domains */
partition_and_rebuild_sched_domains(ndoms, doms, attr);
}
-#else /* !CONFIG_SMP */
+#else /* !CONFIG_SMP || CONFIG_SCHED_ALT */
static void rebuild_sched_domains_locked(void)
{
}
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 164ed9ef77a3..c974a84b056f 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -150,7 +150,7 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
*/
t1 = tsk->sched_info.pcount;
t2 = tsk->sched_info.run_delay;
- t3 = tsk->se.sum_exec_runtime;
+ t3 = tsk_seruntime(tsk);
d->cpu_count += t1;
diff --git a/kernel/exit.c b/kernel/exit.c
index 64c938ce36fe..a353f7ef5392 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -124,7 +124,7 @@ static void __exit_signal(struct task_struct *tsk)
sig->curr_target = next_thread(tsk);
}
- add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
+ add_device_randomness((const void*) &tsk_seruntime(tsk),
sizeof(unsigned long long));
/*
@@ -145,7 +145,7 @@ static void __exit_signal(struct task_struct *tsk)
sig->inblock += task_io_get_inblock(tsk);
sig->oublock += task_io_get_oublock(tsk);
task_io_accounting_add(&sig->ioac, &tsk->ioac);
- sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
+ sig->sum_sched_runtime += tsk_seruntime(tsk);
sig->nr_threads--;
__unhash_process(tsk, group_dead);
write_sequnlock(&sig->stats_lock);
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 7779ee8abc2a..5b9893cdfb1b 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -300,21 +300,25 @@ static __always_inline void
waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task)
{
waiter->prio = __waiter_prio(task);
- waiter->deadline = task->dl.deadline;
+ waiter->deadline = __tsk_deadline(task);
}
/*
* Only use with rt_mutex_waiter_{less,equal}()
*/
#define task_to_waiter(p) \
- &(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline }
+ &(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = __tsk_deadline(p) }
static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
struct rt_mutex_waiter *right)
{
+#ifdef CONFIG_SCHED_PDS
+ return (left->deadline < right->deadline);
+#else
if (left->prio < right->prio)
return 1;
+#ifndef CONFIG_SCHED_BMQ
/*
* If both waiters have dl_prio(), we check the deadlines of the
* associated tasks.
@@ -323,16 +327,22 @@ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left,
*/
if (dl_prio(left->prio))
return dl_time_before(left->deadline, right->deadline);
+#endif
return 0;
+#endif
}
static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
struct rt_mutex_waiter *right)
{
+#ifdef CONFIG_SCHED_PDS
+ return (left->deadline == right->deadline);
+#else
if (left->prio != right->prio)
return 0;
+#ifndef CONFIG_SCHED_BMQ
/*
* If both waiters have dl_prio(), we check the deadlines of the
* associated tasks.
@@ -341,8 +351,10 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
*/
if (dl_prio(left->prio))
return left->deadline == right->deadline;
+#endif
return 1;
+#endif
}
static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter,
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 976092b7bd45..31d587c16ec1 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -28,7 +28,12 @@ endif
# These compilation units have roughly the same size and complexity - so their
# build parallelizes well and finishes roughly at once:
#
+ifdef CONFIG_SCHED_ALT
+obj-y += alt_core.o
+obj-$(CONFIG_SCHED_DEBUG) += alt_debug.o
+else
obj-y += core.o
obj-y += fair.o
+endif
obj-y += build_policy.o
obj-y += build_utility.o
diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c
new file mode 100644
index 000000000000..d0ab41c4d9ad
--- /dev/null
+++ b/kernel/sched/alt_core.c
@@ -0,0 +1,7807 @@
+/*
+ * kernel/sched/alt_core.c
+ *
+ * Core alternative kernel scheduler code and related syscalls
+ *
+ * Copyright (C) 1991-2002 Linus Torvalds
+ *
+ * 2009-08-13 Brainfuck deadline scheduling policy by Con Kolivas deletes
+ * a whole lot of those previous things.
+ * 2017-09-06 Priority and Deadline based Skip list multiple queue kernel
+ * scheduler by Alfred Chen.
+ * 2019-02-20 BMQ(BitMap Queue) kernel scheduler by Alfred Chen.
+ */
+#include <linux/sched/cputime.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/isolation.h>
+#include <linux/sched/loadavg.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/nohz.h>
+#include <linux/sched/stat.h>
+#include <linux/sched/wake_q.h>
+
+#include <linux/blkdev.h>
+#include <linux/context_tracking.h>
+#include <linux/cpuset.h>
+#include <linux/delayacct.h>
+#include <linux/init_task.h>
+#include <linux/kcov.h>
+#include <linux/kprobes.h>
+#include <linux/profile.h>
+#include <linux/nmi.h>
+#include <linux/scs.h>
+
+#include <uapi/linux/sched/types.h>
+
+#include <asm/switch_to.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/sched.h>
+#undef CREATE_TRACE_POINTS
+
+#include "sched.h"
+
+#include "pelt.h"
+
+#include "../../fs/io-wq.h"
+#include "../smpboot.h"
+
+/*
+ * Export tracepoints that act as a bare tracehook (ie: have no trace event
+ * associated with them) to allow external modules to probe them.
+ */
+EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp);
+
+#ifdef CONFIG_SCHED_DEBUG
+#define sched_feat(x) (1)
+/*
+ * Print a warning if need_resched is set for the given duration (if
+ * LATENCY_WARN is enabled).
+ *
+ * If sysctl_resched_latency_warn_once is set, only one warning will be shown
+ * per boot.
+ */
+__read_mostly int sysctl_resched_latency_warn_ms = 100;
+__read_mostly int sysctl_resched_latency_warn_once = 1;
+#else
+#define sched_feat(x) (0)
+#endif /* CONFIG_SCHED_DEBUG */
+
+#define ALT_SCHED_VERSION "v5.19-r0"
+
+/* rt_prio(prio) defined in include/linux/sched/rt.h */
+#define rt_task(p) rt_prio((p)->prio)
+#define rt_policy(policy) ((policy) == SCHED_FIFO || (policy) == SCHED_RR)
+#define task_has_rt_policy(p) (rt_policy((p)->policy))
+
+#define STOP_PRIO (MAX_RT_PRIO - 1)
+
+/* Default time slice is 4 ms, can be set via kernel parameter "sched_timeslice" */
+u64 sched_timeslice_ns __read_mostly = (4 << 20);
+
+static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx);
+
+#ifdef CONFIG_SCHED_BMQ
+#include "bmq.h"
+#endif
+#ifdef CONFIG_SCHED_PDS
+#include "pds.h"
+#endif
+
+static int __init sched_timeslice(char *str)
+{
+ int timeslice_ms;
+
+ get_option(&str, &timeslice_ms);
+ if (2 != timeslice_ms)
+ timeslice_ms = 4;
+ sched_timeslice_ns = timeslice_ms << 20;
+ sched_timeslice_imp(timeslice_ms);
+
+ return 0;
+}
+early_param("sched_timeslice", sched_timeslice);
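As the parser above shows, 2 is the only accepted alternative: booting with sched_timeslice=2 selects the 2 ms slice and any other value falls back to 4 ms. Because the value is stored as timeslice_ms << 20 nanoseconds, the effective slice is roughly 2.1 ms or 4.2 ms.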
+
+/* Reschedule if less than this much time is left (in ns, roughly 100 μs) */
+#define RESCHED_NS (100 << 10)
+
+/**
+ * sched_yield_type - Choose what sort of yield sched_yield will perform.
+ * 0: No yield.
+ * 1: Deboost and requeue task. (default)
+ * 2: Set rq skip task.
+ */
+int sched_yield_type __read_mostly = 1;
+
+#ifdef CONFIG_SMP
+static cpumask_t sched_rq_pending_mask ____cacheline_aligned_in_smp;
+
+DEFINE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
+DEFINE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
+DEFINE_PER_CPU(cpumask_t *, sched_cpu_topo_end_mask);
+
+#ifdef CONFIG_SCHED_SMT
+DEFINE_STATIC_KEY_FALSE(sched_smt_present);
+EXPORT_SYMBOL_GPL(sched_smt_present);
+#endif
+
+/*
+ * Keep a unique ID per domain (we use the first CPUs number in the cpumask of
+ * the domain), this allows us to quickly tell if two cpus are in the same cache
+ * domain, see cpus_share_cache().
+ */
+DEFINE_PER_CPU(int, sd_llc_id);
+#endif /* CONFIG_SMP */
+
+static DEFINE_MUTEX(sched_hotcpu_mutex);
+
+DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+
+#ifndef prepare_arch_switch
+# define prepare_arch_switch(next) do { } while (0)
+#endif
+#ifndef finish_arch_post_lock_switch
+# define finish_arch_post_lock_switch() do { } while (0)
+#endif
+
+#ifdef CONFIG_SCHED_SMT
+static cpumask_t sched_sg_idle_mask ____cacheline_aligned_in_smp;
+#endif
+static cpumask_t sched_rq_watermark[SCHED_QUEUE_BITS] ____cacheline_aligned_in_smp;
+
+/* sched_queue related functions */
+static inline void sched_queue_init(struct sched_queue *q)
+{
+ int i;
+
+ bitmap_zero(q->bitmap, SCHED_QUEUE_BITS);
+ for(i = 0; i < SCHED_BITS; i++)
+ INIT_LIST_HEAD(&q->heads[i]);
+}
+
+/*
+ * Init the idle task and put it into the rq's queue structure
+ * IMPORTANT: may be called multiple times for a single cpu
+ */
+static inline void sched_queue_init_idle(struct sched_queue *q,
+ struct task_struct *idle)
+{
+ idle->sq_idx = IDLE_TASK_SCHED_PRIO;
+ INIT_LIST_HEAD(&q->heads[idle->sq_idx]);
+ list_add(&idle->sq_node, &q->heads[idle->sq_idx]);
+}
+
+/* water mark related functions */
+static inline void update_sched_rq_watermark(struct rq *rq)
+{
+ unsigned long watermark = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
+ unsigned long last_wm = rq->watermark;
+ unsigned long i;
+ int cpu;
+
+ if (watermark == last_wm)
+ return;
+
+ rq->watermark = watermark;
+ cpu = cpu_of(rq);
+ if (watermark < last_wm) {
+ for (i = last_wm; i > watermark; i--)
+ cpumask_clear_cpu(cpu, sched_rq_watermark + SCHED_QUEUE_BITS - i);
+#ifdef CONFIG_SCHED_SMT
+ if (static_branch_likely(&sched_smt_present) &&
+ IDLE_TASK_SCHED_PRIO == last_wm)
+ cpumask_andnot(&sched_sg_idle_mask,
+ &sched_sg_idle_mask, cpu_smt_mask(cpu));
+#endif
+ return;
+ }
+ /* last_wm < watermark */
+ for (i = watermark; i > last_wm; i--)
+ cpumask_set_cpu(cpu, sched_rq_watermark + SCHED_QUEUE_BITS - i);
+#ifdef CONFIG_SCHED_SMT
+ if (static_branch_likely(&sched_smt_present) &&
+ IDLE_TASK_SCHED_PRIO == watermark) {
+ cpumask_t tmp;
+
+ cpumask_and(&tmp, cpu_smt_mask(cpu), sched_rq_watermark);
+ if (cpumask_equal(&tmp, cpu_smt_mask(cpu)))
+ cpumask_or(&sched_sg_idle_mask,
+ &sched_sg_idle_mask, cpu_smt_mask(cpu));
+ }
+#endif
+}
+
+/*
+ * This routine assumes that the idle task is always in the queue
+ */
+static inline struct task_struct *sched_rq_first_task(struct rq *rq)
+{
+ unsigned long idx = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
+ const struct list_head *head = &rq->queue.heads[sched_prio2idx(idx, rq)];
+
+ return list_first_entry(head, struct task_struct, sq_node);
+}
+
+static inline struct task_struct *
+sched_rq_next_task(struct task_struct *p, struct rq *rq)
+{
+ unsigned long idx = p->sq_idx;
+ struct list_head *head = &rq->queue.heads[idx];
+
+ if (list_is_last(&p->sq_node, head)) {
+ idx = find_next_bit(rq->queue.bitmap, SCHED_QUEUE_BITS,
+ sched_idx2prio(idx, rq) + 1);
+ head = &rq->queue.heads[sched_prio2idx(idx, rq)];
+
+ return list_first_entry(head, struct task_struct, sq_node);
+ }
+
+ return list_next_entry(p, sq_node);
+}
+
+static inline struct task_struct *rq_runnable_task(struct rq *rq)
+{
+ struct task_struct *next = sched_rq_first_task(rq);
+
+ if (unlikely(next == rq->skip))
+ next = sched_rq_next_task(next, rq);
+
+ return next;
+}
+
+/*
+ * Serialization rules:
+ *
+ * Lock order:
+ *
+ * p->pi_lock
+ * rq->lock
+ * hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls)
+ *
+ * rq1->lock
+ * rq2->lock where: rq1 < rq2
+ *
+ * Regular state:
+ *
+ * Normal scheduling state is serialized by rq->lock. __schedule() takes the
+ * local CPU's rq->lock, it optionally removes the task from the runqueue and
+ * always looks at the local rq data structures to find the most eligible task
+ * to run next.
+ *
+ * Task enqueue is also under rq->lock, possibly taken from another CPU.
+ * Wakeups from another LLC domain might use an IPI to transfer the enqueue to
+ * the local CPU to avoid bouncing the runqueue state around [ see
+ * ttwu_queue_wakelist() ]
+ *
+ * Task wakeup, specifically wakeups that involve migration, are horribly
+ * complicated to avoid having to take two rq->locks.
+ *
+ * Special state:
+ *
+ * System-calls and anything external will use task_rq_lock() which acquires
+ * both p->pi_lock and rq->lock. As a consequence the state they change is
+ * stable while holding either lock:
+ *
+ * - sched_setaffinity()/
+ * set_cpus_allowed_ptr(): p->cpus_ptr, p->nr_cpus_allowed
+ * - set_user_nice(): p->se.load, p->*prio
+ * - __sched_setscheduler(): p->sched_class, p->policy, p->*prio,
+ * p->se.load, p->rt_priority,
+ * p->dl.dl_{runtime, deadline, period, flags, bw, density}
+ * - sched_setnuma(): p->numa_preferred_nid
+ * - sched_move_task()/
+ * cpu_cgroup_fork(): p->sched_task_group
+ * - uclamp_update_active() p->uclamp*
+ *
+ * p->state <- TASK_*:
+ *
+ * is changed locklessly using set_current_state(), __set_current_state() or
+ * set_special_state(), see their respective comments, or by
+ * try_to_wake_up(). This latter uses p->pi_lock to serialize against
+ * concurrent self.
+ *
+ * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }:
+ *
+ * is set by activate_task() and cleared by deactivate_task(), under
+ * rq->lock. Non-zero indicates the task is runnable, the special
+ * ON_RQ_MIGRATING state is used for migration without holding both
+ * rq->locks. It indicates task_cpu() is not stable, see task_rq_lock().
+ *
+ * p->on_cpu <- { 0, 1 }:
+ *
+ * is set by prepare_task() and cleared by finish_task() such that it will be
+ * set before p is scheduled-in and cleared after p is scheduled-out, both
+ * under rq->lock. Non-zero indicates the task is running on its CPU.
+ *
+ * [ The astute reader will observe that it is possible for two tasks on one
+ * CPU to have ->on_cpu = 1 at the same time. ]
+ *
+ * task_cpu(p): is changed by set_task_cpu(), the rules are:
+ *
+ * - Don't call set_task_cpu() on a blocked task:
+ *
+ * We don't care what CPU we're not running on, this simplifies hotplug,
+ * the CPU assignment of blocked tasks isn't required to be valid.
+ *
+ * - for try_to_wake_up(), called under p->pi_lock:
+ *
+ * This allows try_to_wake_up() to only take one rq->lock, see its comment.
+ *
+ * - for migration called under rq->lock:
+ * [ see task_on_rq_migrating() in task_rq_lock() ]
+ *
+ * o move_queued_task()
+ * o detach_task()
+ *
+ * - for migration called under double_rq_lock():
+ *
+ * o __migrate_swap_task()
+ * o push_rt_task() / pull_rt_task()
+ * o push_dl_task() / pull_dl_task()
+ * o dl_task_offline_migration()
+ *
+ */
+
+/*
+ * Context: p->pi_lock
+ */
+static inline struct rq
+*__task_access_lock(struct task_struct *p, raw_spinlock_t **plock)
+{
+ struct rq *rq;
+ for (;;) {
+ rq = task_rq(p);
+ if (p->on_cpu || task_on_rq_queued(p)) {
+ raw_spin_lock(&rq->lock);
+ if (likely((p->on_cpu || task_on_rq_queued(p))
+ && rq == task_rq(p))) {
+ *plock = &rq->lock;
+ return rq;
+ }
+ raw_spin_unlock(&rq->lock);
+ } else if (task_on_rq_migrating(p)) {
+ do {
+ cpu_relax();
+ } while (unlikely(task_on_rq_migrating(p)));
+ } else {
+ *plock = NULL;
+ return rq;
+ }
+ }
+}
+
+static inline void
+__task_access_unlock(struct task_struct *p, raw_spinlock_t *lock)
+{
+ if (NULL != lock)
+ raw_spin_unlock(lock);
+}
+
+static inline struct rq
+*task_access_lock_irqsave(struct task_struct *p, raw_spinlock_t **plock,
+ unsigned long *flags)
+{
+ struct rq *rq;
+ for (;;) {
+ rq = task_rq(p);
+ if (p->on_cpu || task_on_rq_queued(p)) {
+ raw_spin_lock_irqsave(&rq->lock, *flags);
+ if (likely((p->on_cpu || task_on_rq_queued(p))
+ && rq == task_rq(p))) {
+ *plock = &rq->lock;
+ return rq;
+ }
+ raw_spin_unlock_irqrestore(&rq->lock, *flags);
+ } else if (task_on_rq_migrating(p)) {
+ do {
+ cpu_relax();
+ } while (unlikely(task_on_rq_migrating(p)));
+ } else {
+ raw_spin_lock_irqsave(&p->pi_lock, *flags);
+ if (likely(!p->on_cpu && !p->on_rq &&
+ rq == task_rq(p))) {
+ *plock = &p->pi_lock;
+ return rq;
+ }
+ raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+ }
+ }
+}
+
+static inline void
+task_access_unlock_irqrestore(struct task_struct *p, raw_spinlock_t *lock,
+ unsigned long *flags)
+{
+ raw_spin_unlock_irqrestore(lock, *flags);
+}
+
+/*
+ * __task_rq_lock - lock the rq @p resides on.
+ */
+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
+ __acquires(rq->lock)
+{
+ struct rq *rq;
+
+ lockdep_assert_held(&p->pi_lock);
+
+ for (;;) {
+ rq = task_rq(p);
+ raw_spin_lock(&rq->lock);
+ if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+ return rq;
+ raw_spin_unlock(&rq->lock);
+
+ while (unlikely(task_on_rq_migrating(p)))
+ cpu_relax();
+ }
+}
+
+/*
+ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
+ */
+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
+ __acquires(p->pi_lock)
+ __acquires(rq->lock)
+{
+ struct rq *rq;
+
+ for (;;) {
+ raw_spin_lock_irqsave(&p->pi_lock, rf->flags);
+ rq = task_rq(p);
+ raw_spin_lock(&rq->lock);
+ /*
+ * move_queued_task() task_rq_lock()
+ *
+ * ACQUIRE (rq->lock)
+ * [S] ->on_rq = MIGRATING [L] rq = task_rq()
+ * WMB (__set_task_cpu()) ACQUIRE (rq->lock);
+ * [S] ->cpu = new_cpu [L] task_rq()
+ * [L] ->on_rq
+ * RELEASE (rq->lock)
+ *
+ * If we observe the old CPU in task_rq_lock(), the acquire of
+ * the old rq->lock will fully serialize against the stores.
+ *
+ * If we observe the new CPU in task_rq_lock(), the address
+ * dependency headed by '[L] rq = task_rq()' and the acquire
+ * will pair with the WMB to ensure we then also see migrating.
+ */
+ if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
+ return rq;
+ }
+ raw_spin_unlock(&rq->lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
+
+ while (unlikely(task_on_rq_migrating(p)))
+ cpu_relax();
+ }
+}
+
+static inline void
+rq_lock_irqsave(struct rq *rq, struct rq_flags *rf)
+ __acquires(rq->lock)
+{
+ raw_spin_lock_irqsave(&rq->lock, rf->flags);
+}
+
+static inline void
+rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf)
+ __releases(rq->lock)
+{
+ raw_spin_unlock_irqrestore(&rq->lock, rf->flags);
+}
+
+void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
+{
+ raw_spinlock_t *lock;
+
+ /* Matches synchronize_rcu() in __sched_core_enable() */
+ preempt_disable();
+
+ for (;;) {
+ lock = __rq_lockp(rq);
+ raw_spin_lock_nested(lock, subclass);
+ if (likely(lock == __rq_lockp(rq))) {
+ /* preempt_count *MUST* be > 1 */
+ preempt_enable_no_resched();
+ return;
+ }
+ raw_spin_unlock(lock);
+ }
+}
+
+void raw_spin_rq_unlock(struct rq *rq)
+{
+ raw_spin_unlock(rq_lockp(rq));
+}
+
+/*
+ * RQ-clock updating methods:
+ */
+
+static void update_rq_clock_task(struct rq *rq, s64 delta)
+{
+/*
+ * In theory, the compiler should just see 0 here, and optimize out the call
+ * to sched_rt_avg_update. But I don't trust it...
+ */
+ s64 __maybe_unused steal = 0, irq_delta = 0;
+
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
+
+ /*
+ * Since irq_time is only updated on {soft,}irq_exit, we might run into
+ * this case when a previous update_rq_clock() happened inside a
+ * {soft,}irq region.
+ *
+ * When this happens, we stop ->clock_task and only update the
+ * prev_irq_time stamp to account for the part that fit, so that a next
+ * update will consume the rest. This ensures ->clock_task is
+ * monotonic.
+ *
+ * It does however cause some slight miss-attribution of {soft,}irq
+ * time, a more accurate solution would be to update the irq_time using
+ * the current rq->clock timestamp, except that would require using
+ * atomic ops.
+ */
+ if (irq_delta > delta)
+ irq_delta = delta;
+
+ rq->prev_irq_time += irq_delta;
+ delta -= irq_delta;
+#endif
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+ if (static_key_false((&paravirt_steal_rq_enabled))) {
+ steal = paravirt_steal_clock(cpu_of(rq));
+ steal -= rq->prev_steal_time_rq;
+
+ if (unlikely(steal > delta))
+ steal = delta;
+
+ rq->prev_steal_time_rq += steal;
+ delta -= steal;
+ }
+#endif
+
+ rq->clock_task += delta;
+
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
+ if ((irq_delta + steal))
+ update_irq_load_avg(rq, irq_delta + steal);
+#endif
+}
+
+static inline void update_rq_clock(struct rq *rq)
+{
+ s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
+
+ if (unlikely(delta <= 0))
+ return;
+ rq->clock += delta;
+ update_rq_time_edge(rq);
+ update_rq_clock_task(rq, delta);
+}
+
+/*
+ * RQ Load update routine
+ */
+#define RQ_LOAD_HISTORY_BITS (sizeof(s32) * 8ULL)
+#define RQ_UTIL_SHIFT (8)
+#define RQ_LOAD_HISTORY_TO_UTIL(l) (((l) >> (RQ_LOAD_HISTORY_BITS - 1 - RQ_UTIL_SHIFT)) & 0xff)
+
+#define LOAD_BLOCK(t) ((t) >> 17)
+#define LOAD_HALF_BLOCK(t) ((t) >> 16)
+#define BLOCK_MASK(t) ((t) & ((0x01 << 18) - 1))
+#define LOAD_BLOCK_BIT(b) (1UL << (RQ_LOAD_HISTORY_BITS - 1 - (b)))
+#define CURRENT_LOAD_BIT LOAD_BLOCK_BIT(0)
+
+static inline void rq_load_update(struct rq *rq)
+{
+ u64 time = rq->clock;
+ u64 delta = min(LOAD_BLOCK(time) - LOAD_BLOCK(rq->load_stamp),
+ RQ_LOAD_HISTORY_BITS - 1);
+ u64 prev = !!(rq->load_history & CURRENT_LOAD_BIT);
+ u64 curr = !!rq->nr_running;
+
+ if (delta) {
+ rq->load_history = rq->load_history >> delta;
+
+ if (delta < RQ_UTIL_SHIFT) {
+ rq->load_block += (~BLOCK_MASK(rq->load_stamp)) * prev;
+ if (!!LOAD_HALF_BLOCK(rq->load_block) ^ curr)
+ rq->load_history ^= LOAD_BLOCK_BIT(delta);
+ }
+
+ rq->load_block = BLOCK_MASK(time) * prev;
+ } else {
+ rq->load_block += (time - rq->load_stamp) * prev;
+ }
+ if (prev ^ curr)
+ rq->load_history ^= CURRENT_LOAD_BIT;
+ rq->load_stamp = time;
+}
+
+unsigned long rq_load_util(struct rq *rq, unsigned long max)
+{
+ return RQ_LOAD_HISTORY_TO_UTIL(rq->load_history) * (max >> RQ_UTIL_SHIFT);
+}
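A hedged user-space rendering (not part of the patch) of the decoding done by rq_load_util() above: each bit of the 32-bit busy/idle history covers one 2^17 ns (about 131 µs) block of rq clock time, and eight bits of the history are scaled against the supplied max. HISTORY_TO_UTIL mirrors RQ_LOAD_HISTORY_TO_UTIL, and max = 1024 is just a capacity-style stand-in.

	/* Hedged check of the history->util scaling: eight history bits are
	 * extracted and multiplied by max >> 8.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define HISTORY_BITS 32
	#define UTIL_SHIFT   8
	#define HISTORY_TO_UTIL(l) \
		(((l) >> (HISTORY_BITS - 1 - UTIL_SHIFT)) & 0xff)

	int main(void)
	{
		uint32_t all_busy  = 0x7fffffffu;	/* every tracked block busy */
		uint32_t half_busy = 0x78000000u;	/* 4 of the 8 sampled bits  */
							/* (bits 30..23) are set    */
		unsigned long max = 1024;		/* capacity-style scale     */

		printf("all busy : %lu/%lu\n",
		       HISTORY_TO_UTIL(all_busy) * (max >> UTIL_SHIFT), max);	/* 1020 */
		printf("half busy: %lu/%lu\n",
		       HISTORY_TO_UTIL(half_busy) * (max >> UTIL_SHIFT), max);	/* 960 */
		return 0;
	}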
+
+#ifdef CONFIG_SMP
+unsigned long sched_cpu_util(int cpu, unsigned long max)
+{
+ return rq_load_util(cpu_rq(cpu), max);
+}
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_CPU_FREQ
+/**
+ * cpufreq_update_util - Take a note about CPU utilization changes.
+ * @rq: Runqueue to carry out the update for.
+ * @flags: Update reason flags.
+ *
+ * This function is called by the scheduler on the CPU whose utilization is
+ * being updated.
+ *
+ * It can only be called from RCU-sched read-side critical sections.
+ *
+ * The way cpufreq is currently arranged requires it to evaluate the CPU
+ * performance state (frequency/voltage) on a regular basis to prevent it from
+ * being stuck in a completely inadequate performance level for too long.
+ * That is not guaranteed to happen if the updates are only triggered from CFS
+ * and DL, though, because they may not be coming in if only RT tasks are
+ * active all the time (or there are RT tasks only).
+ *
+ * As a workaround for that issue, this function is called periodically by the
+ * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
+ * but that really is a band-aid. Going forward it should be replaced with
+ * solutions targeted more specifically at RT tasks.
+ */
+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
+{
+ struct update_util_data *data;
+
+#ifdef CONFIG_SMP
+ rq_load_update(rq);
+#endif
+ data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
+ cpu_of(rq)));
+ if (data)
+ data->func(data, rq_clock(rq), flags);
+}
+#else
+static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
+{
+#ifdef CONFIG_SMP
+ rq_load_update(rq);
+#endif
+}
+#endif /* CONFIG_CPU_FREQ */
+
+#ifdef CONFIG_NO_HZ_FULL
+/*
+ * Tick may be needed by tasks in the runqueue depending on their policy and
+ * requirements. If tick is needed, lets send the target an IPI to kick it out
+ * of nohz mode if necessary.
+ */
+static inline void sched_update_tick_dependency(struct rq *rq)
+{
+ int cpu = cpu_of(rq);
+
+ if (!tick_nohz_full_cpu(cpu))
+ return;
+
+ if (rq->nr_running < 2)
+ tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED);
+ else
+ tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED);
+}
+#else /* !CONFIG_NO_HZ_FULL */
+static inline void sched_update_tick_dependency(struct rq *rq) { }
+#endif
+
+bool sched_task_on_rq(struct task_struct *p)
+{
+ return task_on_rq_queued(p);
+}
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long ip = 0;
+ unsigned int state;
+
+ if (!p || p == current)
+ return 0;
+
+ /* Only get wchan if task is blocked and we can keep it that way. */
+ raw_spin_lock_irq(&p->pi_lock);
+ state = READ_ONCE(p->__state);
+ smp_rmb(); /* see try_to_wake_up() */
+ if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq)
+ ip = __get_wchan(p);
+ raw_spin_unlock_irq(&p->pi_lock);
+
+ return ip;
+}
+
+/*
+ * Add/Remove/Requeue task to/from the runqueue routines
+ * Context: rq->lock
+ */
+#define __SCHED_DEQUEUE_TASK(p, rq, flags) \
+ psi_dequeue(p, flags & DEQUEUE_SLEEP); \
+ sched_info_dequeue(rq, p); \
+ \
+ list_del(&p->sq_node); \
+ if (list_empty(&rq->queue.heads[p->sq_idx])) \
+ clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
+
+#define __SCHED_ENQUEUE_TASK(p, rq, flags) \
+ sched_info_enqueue(rq, p); \
+ psi_enqueue(p, flags); \
+ \
+ p->sq_idx = task_sched_prio_idx(p, rq); \
+ list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]); \
+ set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
+
+static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags)
+{
+ lockdep_assert_held(&rq->lock);
+
+ /*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
+ WARN_ONCE(task_rq(p) != rq, "sched: dequeue task reside on cpu%d from cpu%d\n",
+ task_cpu(p), cpu_of(rq));
+
+ __SCHED_DEQUEUE_TASK(p, rq, flags);
+ --rq->nr_running;
+#ifdef CONFIG_SMP
+ if (1 == rq->nr_running)
+ cpumask_clear_cpu(cpu_of(rq), &sched_rq_pending_mask);
+#endif
+
+ sched_update_tick_dependency(rq);
+}
+
+static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags)
+{
+ lockdep_assert_held(&rq->lock);
+
+ /*printk(KERN_INFO "sched: enqueue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
+ WARN_ONCE(task_rq(p) != rq, "sched: enqueue task reside on cpu%d to cpu%d\n",
+ task_cpu(p), cpu_of(rq));
+
+ __SCHED_ENQUEUE_TASK(p, rq, flags);
+ update_sched_rq_watermark(rq);
+ ++rq->nr_running;
+#ifdef CONFIG_SMP
+ if (2 == rq->nr_running)
+ cpumask_set_cpu(cpu_of(rq), &sched_rq_pending_mask);
+#endif
+
+ sched_update_tick_dependency(rq);
+}
+
+static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx)
+{
+ lockdep_assert_held(&rq->lock);
+ /*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/
+ WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task reside on cpu%d\n",
+ cpu_of(rq), task_cpu(p));
+
+ list_del(&p->sq_node);
+ list_add_tail(&p->sq_node, &rq->queue.heads[idx]);
+ if (idx != p->sq_idx) {
+ if (list_empty(&rq->queue.heads[p->sq_idx]))
+ clear_bit(sched_idx2prio(p->sq_idx, rq),
+ rq->queue.bitmap);
+ p->sq_idx = idx;
+ set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap);
+ update_sched_rq_watermark(rq);
+ }
+}
+
+/*
+ * cmpxchg based fetch_or, macro so it works for different integer types
+ */
+#define fetch_or(ptr, mask) \
+ ({ \
+ typeof(ptr) _ptr = (ptr); \
+ typeof(mask) _mask = (mask); \
+ typeof(*_ptr) _old, _val = *_ptr; \
+ \
+ for (;;) { \
+ _old = cmpxchg(_ptr, _val, _val | _mask); \
+ if (_old == _val) \
+ break; \
+ _val = _old; \
+ } \
+ _old; \
+})
+
+#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG)
+/*
+ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
+ * this avoids any races wrt polling state changes and thereby avoids
+ * spurious IPIs.
+ */
+static bool set_nr_and_not_polling(struct task_struct *p)
+{
+ struct thread_info *ti = task_thread_info(p);
+ return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
+}
+
+/*
+ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set.
+ *
+ * If this returns true, then the idle task promises to call
+ * sched_ttwu_pending() and reschedule soon.
+ */
+static bool set_nr_if_polling(struct task_struct *p)
+{
+ struct thread_info *ti = task_thread_info(p);
+ typeof(ti->flags) old, val = READ_ONCE(ti->flags);
+
+ for (;;) {
+ if (!(val & _TIF_POLLING_NRFLAG))
+ return false;
+ if (val & _TIF_NEED_RESCHED)
+ return true;
+ old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED);
+ if (old == val)
+ break;
+ val = old;
+ }
+ return true;
+}
+
+#else
+static bool set_nr_and_not_polling(struct task_struct *p)
+{
+ set_tsk_need_resched(p);
+ return true;
+}
+
+#ifdef CONFIG_SMP
+static bool set_nr_if_polling(struct task_struct *p)
+{
+ return false;
+}
+#endif
+#endif
+
+static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
+{
+ struct wake_q_node *node = &task->wake_q;
+
+ /*
+ * Atomically grab the task, if ->wake_q is !nil already it means
+ * it's already queued (either by us or someone else) and will get the
+ * wakeup due to that.
+ *
+ * In order to ensure that a pending wakeup will observe our pending
+ * state, even in the failed case, an explicit smp_mb() must be used.
+ */
+ smp_mb__before_atomic();
+ if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL)))
+ return false;
+
+ /*
+ * The head is context local, there can be no concurrency.
+ */
+ *head->lastp = node;
+ head->lastp = &node->next;
+ return true;
+}
+
+/**
+ * wake_q_add() - queue a wakeup for 'later' waking.
+ * @head: the wake_q_head to add @task to
+ * @task: the task to queue for 'later' wakeup
+ *
+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
+ * instantly.
+ *
+ * This function must be used as-if it were wake_up_process(); IOW the task
+ * must be ready to be woken at this location.
+ */
+void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+{
+ if (__wake_q_add(head, task))
+ get_task_struct(task);
+}
+
+/**
+ * wake_q_add_safe() - safely queue a wakeup for 'later' waking.
+ * @head: the wake_q_head to add @task to
+ * @task: the task to queue for 'later' wakeup
+ *
+ * Queue a task for later wakeup, most likely by the wake_up_q() call in the
+ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come
+ * instantly.
+ *
+ * This function must be used as-if it were wake_up_process(); IOW the task
+ * must be ready to be woken at this location.
+ *
+ * This function is essentially a task-safe equivalent to wake_q_add(). Callers
+ * that already hold reference to @task can call the 'safe' version and trust
+ * wake_q to do the right thing depending whether or not the @task is already
+ * queued for wakeup.
+ */
+void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
+{
+ if (!__wake_q_add(head, task))
+ put_task_struct(task);
+}
+
+void wake_up_q(struct wake_q_head *head)
+{
+ struct wake_q_node *node = head->first;
+
+ while (node != WAKE_Q_TAIL) {
+ struct task_struct *task;
+
+ task = container_of(node, struct task_struct, wake_q);
+ /* task can safely be re-inserted now: */
+ node = node->next;
+ task->wake_q.next = NULL;
+
+ /*
+ * wake_up_process() executes a full barrier, which pairs with
+ * the queueing in wake_q_add() so as not to miss wakeups.
+ */
+ wake_up_process(task);
+ put_task_struct(task);
+ }
+}
+
+/*
+ * resched_curr - mark rq's current task 'to be rescheduled now'.
+ *
+ * On UP this means the setting of the need_resched flag, on SMP it
+ * might also involve a cross-CPU call to trigger the scheduler on
+ * the target CPU.
+ */
+void resched_curr(struct rq *rq)
+{
+ struct task_struct *curr = rq->curr;
+ int cpu;
+
+ lockdep_assert_held(&rq->lock);
+
+ if (test_tsk_need_resched(curr))
+ return;
+
+ cpu = cpu_of(rq);
+ if (cpu == smp_processor_id()) {
+ set_tsk_need_resched(curr);
+ set_preempt_need_resched();
+ return;
+ }
+
+ if (set_nr_and_not_polling(curr))
+ smp_send_reschedule(cpu);
+ else
+ trace_sched_wake_idle_without_ipi(cpu);
+}
+
+void resched_cpu(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ if (cpu_online(cpu) || cpu == smp_processor_id())
+ resched_curr(cpu_rq(cpu));
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_NO_HZ_COMMON
+void nohz_balance_enter_idle(int cpu) {}
+
+void select_nohz_load_balancer(int stop_tick) {}
+
+void set_cpu_sd_state_idle(void) {}
+
+/*
+ * In the semi idle case, use the nearest busy CPU for migrating timers
+ * from an idle CPU. This is good for power-savings.
+ *
+ * We don't do similar optimization for completely idle system, as
+ * selecting an idle CPU will add more delays to the timers than intended
+ * (as that CPU's timer base may not be uptodate wrt jiffies etc).
+ */
+int get_nohz_timer_target(void)
+{
+ int i, cpu = smp_processor_id(), default_cpu = -1;
+ struct cpumask *mask;
+ const struct cpumask *hk_mask;
+
+ if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) {
+ if (!idle_cpu(cpu))
+ return cpu;
+ default_cpu = cpu;
+ }
+
+ hk_mask = housekeeping_cpumask(HK_TYPE_TIMER);
+
+ for (mask = per_cpu(sched_cpu_topo_masks, cpu) + 1;
+ mask < per_cpu(sched_cpu_topo_end_mask, cpu); mask++)
+ for_each_cpu_and(i, mask, hk_mask)
+ if (!idle_cpu(i))
+ return i;
+
+ if (default_cpu == -1)
+ default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER);
+ cpu = default_cpu;
+
+ return cpu;
+}
+
+/*
+ * When add_timer_on() enqueues a timer into the timer wheel of an
+ * idle CPU then this timer might expire before the next timer event
+ * which is scheduled to wake up that CPU. In case of a completely
+ * idle system the next event might even be infinite time into the
+ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
+ * leaves the inner idle loop so the newly added timer is taken into
+ * account when the CPU goes back to idle and evaluates the timer
+ * wheel for the next timer event.
+ */
+static inline void wake_up_idle_cpu(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ if (cpu == smp_processor_id())
+ return;
+
+ if (set_nr_and_not_polling(rq->idle))
+ smp_send_reschedule(cpu);
+ else
+ trace_sched_wake_idle_without_ipi(cpu);
+}
+
+static inline bool wake_up_full_nohz_cpu(int cpu)
+{
+ /*
+ * We just need the target to call irq_exit() and re-evaluate
+ * the next tick. The nohz full kick at least implies that.
+ * If needed we can still optimize that later with an
+ * empty IRQ.
+ */
+ if (cpu_is_offline(cpu))
+ return true; /* Don't try to wake offline CPUs. */
+ if (tick_nohz_full_cpu(cpu)) {
+ if (cpu != smp_processor_id() ||
+ tick_nohz_tick_stopped())
+ tick_nohz_full_kick_cpu(cpu);
+ return true;
+ }
+
+ return false;
+}
+
+void wake_up_nohz_cpu(int cpu)
+{
+ if (!wake_up_full_nohz_cpu(cpu))
+ wake_up_idle_cpu(cpu);
+}
+
+static void nohz_csd_func(void *info)
+{
+ struct rq *rq = info;
+ int cpu = cpu_of(rq);
+ unsigned int flags;
+
+ /*
+ * Release the rq::nohz_csd.
+ */
+ flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu));
+ WARN_ON(!(flags & NOHZ_KICK_MASK));
+
+ rq->idle_balance = idle_cpu(cpu);
+ if (rq->idle_balance && !need_resched()) {
+ rq->nohz_idle_balance = flags;
+ raise_softirq_irqoff(SCHED_SOFTIRQ);
+ }
+}
+
+#endif /* CONFIG_NO_HZ_COMMON */
+#endif /* CONFIG_SMP */
+
+static inline void check_preempt_curr(struct rq *rq)
+{
+ if (sched_rq_first_task(rq) != rq->curr)
+ resched_curr(rq);
+}
+
+#ifdef CONFIG_SCHED_HRTICK
+/*
+ * Use HR-timers to deliver accurate preemption points.
+ */
+
+static void hrtick_clear(struct rq *rq)
+{
+ if (hrtimer_active(&rq->hrtick_timer))
+ hrtimer_cancel(&rq->hrtick_timer);
+}
+
+/*
+ * High-resolution timer tick.
+ * Runs from hardirq context with interrupts disabled.
+ */
+static enum hrtimer_restart hrtick(struct hrtimer *timer)
+{
+ struct rq *rq = container_of(timer, struct rq, hrtick_timer);
+
+ WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
+
+ raw_spin_lock(&rq->lock);
+ resched_curr(rq);
+ raw_spin_unlock(&rq->lock);
+
+ return HRTIMER_NORESTART;
+}
+
+/*
+ * Use hrtick when:
+ * - enabled by features
+ * - hrtimer is actually high res
+ */
+static inline int hrtick_enabled(struct rq *rq)
+{
+ /**
+ * Alt schedule FW doesn't support sched_feat yet
+ if (!sched_feat(HRTICK))
+ return 0;
+ */
+ if (!cpu_active(cpu_of(rq)))
+ return 0;
+ return hrtimer_is_hres_active(&rq->hrtick_timer);
+}
+
+#ifdef CONFIG_SMP
+
+static void __hrtick_restart(struct rq *rq)
+{
+ struct hrtimer *timer = &rq->hrtick_timer;
+ ktime_t time = rq->hrtick_time;
+
+ hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD);
+}
+
+/*
+ * called from hardirq (IPI) context
+ */
+static void __hrtick_start(void *arg)
+{
+ struct rq *rq = arg;
+
+ raw_spin_lock(&rq->lock);
+ __hrtick_restart(rq);
+ raw_spin_unlock(&rq->lock);
+}
+
+/*
+ * Called to set the hrtick timer state.
+ *
+ * called with rq->lock held and irqs disabled
+ */
+void hrtick_start(struct rq *rq, u64 delay)
+{
+ struct hrtimer *timer = &rq->hrtick_timer;
+ s64 delta;
+
+ /*
+ * Don't schedule slices shorter than 10000ns, that just
+ * doesn't make sense and can cause timer DoS.
+ */
+ delta = max_t(s64, delay, 10000LL);
+
+ rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta);
+
+ if (rq == this_rq())
+ __hrtick_restart(rq);
+ else
+ smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
+}
+
+#else
+/*
+ * Called to set the hrtick timer state.
+ *
+ * called with rq->lock held and irqs disabled
+ */
+void hrtick_start(struct rq *rq, u64 delay)
+{
+ /*
+ * Don't schedule slices shorter than 10000ns, that just
+ * doesn't make sense. Rely on vruntime for fairness.
+ */
+ delay = max_t(u64, delay, 10000LL);
+ hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
+ HRTIMER_MODE_REL_PINNED_HARD);
+}
+#endif /* CONFIG_SMP */
+
+static void hrtick_rq_init(struct rq *rq)
+{
+#ifdef CONFIG_SMP
+ INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq);
+#endif
+
+ hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
+ rq->hrtick_timer.function = hrtick;
+}
+#else /* CONFIG_SCHED_HRTICK */
+static inline int hrtick_enabled(struct rq *rq)
+{
+ return 0;
+}
+
+static inline void hrtick_clear(struct rq *rq)
+{
+}
+
+static inline void hrtick_rq_init(struct rq *rq)
+{
+}
+#endif /* CONFIG_SCHED_HRTICK */
+
+static inline int __normal_prio(int policy, int rt_prio, int static_prio)
+{
+ return rt_policy(policy) ? (MAX_RT_PRIO - 1 - rt_prio) :
+ static_prio + MAX_PRIORITY_ADJ;
+}
+
+/*
+ * Calculate the expected normal priority: i.e. priority
+ * without taking RT-inheritance into account. Might be
+ * boosted by interactivity modifiers. Changes upon fork,
+ * setprio syscalls, and whenever the interactivity
+ * estimator recalculates.
+ */
+static inline int normal_prio(struct task_struct *p)
+{
+ return __normal_prio(p->policy, p->rt_priority, p->static_prio);
+}
+
+/*
+ * Calculate the current priority, i.e. the priority
+ * taken into account by the scheduler. This value might
+ * be boosted by RT tasks as it will be RT if the task got
+ * RT-boosted. If not then it returns p->normal_prio.
+ */
+static int effective_prio(struct task_struct *p)
+{
+ p->normal_prio = normal_prio(p);
+ /*
+ * If we are RT tasks or we were boosted to RT priority,
+ * keep the priority unchanged. Otherwise, update priority
+ * to the normal priority:
+ */
+ if (!rt_prio(p->prio))
+ return p->normal_prio;
+ return p->prio;
+}
+
+/*
+ * activate_task - move a task to the runqueue.
+ *
+ * Context: rq->lock
+ */
+static void activate_task(struct task_struct *p, struct rq *rq)
+{
+ enqueue_task(p, rq, ENQUEUE_WAKEUP);
+ p->on_rq = TASK_ON_RQ_QUEUED;
+
+ /*
+ * If in_iowait is set, the code below may not trigger any cpufreq
+ * utilization updates, so do it here explicitly with the IOWAIT flag
+ * passed.
+ */
+ cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT * p->in_iowait);
+}
+
+/*
+ * deactivate_task - remove a task from the runqueue.
+ *
+ * Context: rq->lock
+ */
+static inline void deactivate_task(struct task_struct *p, struct rq *rq)
+{
+ dequeue_task(p, rq, DEQUEUE_SLEEP);
+ p->on_rq = 0;
+ cpufreq_update_util(rq, 0);
+}
+
+static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+#ifdef CONFIG_SMP
+ /*
+ * After ->cpu is set up to a new value, task_access_lock(p, ...) can be
+ * successfully executed on another CPU. We must ensure that updates of
+ * per-task data have been completed by this moment.
+ */
+ smp_wmb();
+
+ WRITE_ONCE(task_thread_info(p)->cpu, cpu);
+#endif
+}
+
+static inline bool is_migration_disabled(struct task_struct *p)
+{
+#ifdef CONFIG_SMP
+ return p->migration_disabled;
+#else
+ return false;
+#endif
+}
+
+#define SCA_CHECK 0x01
+#define SCA_USER 0x08
+
+#ifdef CONFIG_SMP
+
+void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
+{
+#ifdef CONFIG_SCHED_DEBUG
+ unsigned int state = READ_ONCE(p->__state);
+
+ /*
+ * We should never call set_task_cpu() on a blocked task,
+ * ttwu() will sort out the placement.
+ */
+ WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq);
+
+#ifdef CONFIG_LOCKDEP
+ /*
+ * The caller should hold either p->pi_lock or rq->lock, when changing
+ * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
+ *
+ * sched_move_task() holds both and thus holding either pins the cgroup,
+ * see task_group().
+ */
+ WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
+ lockdep_is_held(&task_rq(p)->lock)));
+#endif
+ /*
+ * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
+ */
+ WARN_ON_ONCE(!cpu_online(new_cpu));
+
+ WARN_ON_ONCE(is_migration_disabled(p));
+#endif
+ if (task_cpu(p) == new_cpu)
+ return;
+ trace_sched_migrate_task(p, new_cpu);
+ rseq_migrate(p);
+ perf_event_task_migrate(p);
+
+ __set_task_cpu(p, new_cpu);
+}
+
+#define MDF_FORCE_ENABLED 0x80
+
+static void
+__do_set_cpus_ptr(struct task_struct *p, const struct cpumask *new_mask)
+{
+ /*
+ * This here violates the locking rules for affinity, since we're only
+ * supposed to change these variables while holding both rq->lock and
+ * p->pi_lock.
+ *
+ * HOWEVER, it magically works, because ttwu() is the only code that
+ * accesses these variables under p->pi_lock and only does so after
+ * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule()
+ * before finish_task().
+ *
+ * XXX do further audits, this smells like something putrid.
+ */
+ SCHED_WARN_ON(!p->on_cpu);
+ p->cpus_ptr = new_mask;
+}
+
+void migrate_disable(void)
+{
+ struct task_struct *p = current;
+ int cpu;
+
+ if (p->migration_disabled) {
+ p->migration_disabled++;
+ return;
+ }
+
+ preempt_disable();
+ cpu = smp_processor_id();
+ if (cpumask_test_cpu(cpu, &p->cpus_mask)) {
+ cpu_rq(cpu)->nr_pinned++;
+ p->migration_disabled = 1;
+ p->migration_flags &= ~MDF_FORCE_ENABLED;
+
+ /*
+ * Violates locking rules! see comment in __do_set_cpus_ptr().
+ */
+ if (p->cpus_ptr == &p->cpus_mask)
+ __do_set_cpus_ptr(p, cpumask_of(cpu));
+ }
+ preempt_enable();
+}
+EXPORT_SYMBOL_GPL(migrate_disable);
+
+void migrate_enable(void)
+{
+ struct task_struct *p = current;
+
+ if (0 == p->migration_disabled)
+ return;
+
+ if (p->migration_disabled > 1) {
+ p->migration_disabled--;
+ return;
+ }
+
+ if (WARN_ON_ONCE(!p->migration_disabled))
+ return;
+
+ /*
+ * Ensure stop_task runs either before or after this, and that
+ * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
+ */
+ preempt_disable();
+ /*
+ * Assumption: current should be running on allowed cpu
+ */
+ WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &p->cpus_mask));
+ if (p->cpus_ptr != &p->cpus_mask)
+ __do_set_cpus_ptr(p, &p->cpus_mask);
+ /*
+ * Mustn't clear migration_disabled() until cpus_ptr points back at the
+ * regular cpus_mask, otherwise things that race (eg.
+ * select_fallback_rq) get confused.
+ */
+ barrier();
+ p->migration_disabled = 0;
+ this_rq()->nr_pinned--;
+ preempt_enable();
+}
+EXPORT_SYMBOL_GPL(migrate_enable);
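+
+/*
+ * Usage sketch (illustrative): the pair above lets a section of code stay on
+ * the current CPU while remaining preemptible, e.g.
+ *
+ *	migrate_disable();
+ *	...			// may be preempted, but not migrated
+ *	migrate_enable();
+ *
+ * Calls nest; only the outermost migrate_enable() restores p->cpus_ptr and
+ * drops rq->nr_pinned.
+ */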
+
+static inline bool rq_has_pinned_tasks(struct rq *rq)
+{
+ return rq->nr_pinned;
+}
+
+/*
+ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
+ * __set_cpus_allowed_ptr() and select_fallback_rq().
+ */
+static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
+{
+ /* When not in the task's cpumask, no point in looking further. */
+ if (!cpumask_test_cpu(cpu, p->cpus_ptr))
+ return false;
+
+ /* migrate_disabled() must be allowed to finish. */
+ if (is_migration_disabled(p))
+ return cpu_online(cpu);
+
+	/*
+	 * Non-kernel threads are not allowed during either the online or
+	 * the offline transition; they need an active CPU.
+	 */
+ if (!(p->flags & PF_KTHREAD))
+ return cpu_active(cpu) && task_cpu_possible(cpu, p);
+
+ /* KTHREAD_IS_PER_CPU is always allowed. */
+ if (kthread_is_per_cpu(p))
+ return cpu_online(cpu);
+
+ /* Regular kernel threads don't get to stay during offline. */
+ if (cpu_dying(cpu))
+ return false;
+
+ /* But are allowed during online. */
+ return cpu_online(cpu);
+}
+
+/*
+ * This is how migration works:
+ *
+ * 1) we invoke migration_cpu_stop() on the target CPU using
+ * stop_one_cpu().
+ * 2) stopper starts to run (implicitly forcing the migrated thread
+ * off the CPU)
+ * 3) it checks whether the migrated task is still in the wrong runqueue.
+ * 4) if it's in the wrong runqueue then the migration thread removes
+ * it and puts it into the right queue.
+ * 5) stopper completes and stop_one_cpu() returns and the migration
+ * is done.
+ */
+
+/*
+ * move_queued_task - move a queued task to new rq.
+ *
+ * Returns (locked) new rq. Old rq's lock is released.
+ */
+static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int
+ new_cpu)
+{
+ lockdep_assert_held(&rq->lock);
+
+ WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
+ dequeue_task(p, rq, 0);
+ update_sched_rq_watermark(rq);
+ set_task_cpu(p, new_cpu);
+ raw_spin_unlock(&rq->lock);
+
+ rq = cpu_rq(new_cpu);
+
+ raw_spin_lock(&rq->lock);
+ BUG_ON(task_cpu(p) != new_cpu);
+ sched_task_sanity_check(p, rq);
+ enqueue_task(p, rq, 0);
+ p->on_rq = TASK_ON_RQ_QUEUED;
+ check_preempt_curr(rq);
+
+ return rq;
+}
+
+struct migration_arg {
+ struct task_struct *task;
+ int dest_cpu;
+};
+
+/*
+ * Move (not current) task off this CPU, onto the destination CPU. We're doing
+ * this because either it can't run here any more (set_cpus_allowed()
+ * away from this CPU, or CPU going down), or because we're
+ * attempting to rebalance this task on exec (sched_exec).
+ *
+ * So we race with normal scheduler movements, but that's OK, as long
+ * as the task is no longer on this CPU.
+ */
+static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int
+ dest_cpu)
+{
+ /* Affinity changed (again). */
+ if (!is_cpu_allowed(p, dest_cpu))
+ return rq;
+
+ update_rq_clock(rq);
+ return move_queued_task(rq, p, dest_cpu);
+}
+
+/*
+ * migration_cpu_stop - this will be executed by a highprio stopper thread
+ * and performs thread migration by bumping the thread off the CPU and
+ * then 'pushing' it onto another runqueue.
+ */
+static int migration_cpu_stop(void *data)
+{
+ struct migration_arg *arg = data;
+ struct task_struct *p = arg->task;
+ struct rq *rq = this_rq();
+ unsigned long flags;
+
+ /*
+ * The original target CPU might have gone down and we might
+ * be on another CPU but it doesn't matter.
+ */
+ local_irq_save(flags);
+ /*
+ * We need to explicitly wake pending tasks before running
+ * __migrate_task() such that we will not miss enforcing cpus_ptr
+ * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
+ */
+ flush_smp_call_function_queue();
+
+ raw_spin_lock(&p->pi_lock);
+ raw_spin_lock(&rq->lock);
+ /*
+ * If task_rq(p) != rq, it cannot be migrated here, because we're
+ * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because
+ * we're holding p->pi_lock.
+ */
+ if (task_rq(p) == rq && task_on_rq_queued(p))
+ rq = __migrate_task(rq, p, arg->dest_cpu);
+ raw_spin_unlock(&rq->lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+ return 0;
+}
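+
+/*
+ * Illustrative caller side of the stopper protocol above (this is what
+ * affine_move_task() below does when the task is currently running):
+ *
+ *	struct migration_arg arg = { .task = p, .dest_cpu = dest_cpu };
+ *
+ *	stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
+ */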
+
+static inline void
+set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
+{
+ cpumask_copy(&p->cpus_mask, new_mask);
+ p->nr_cpus_allowed = cpumask_weight(new_mask);
+}
+
+static void
+__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+ lockdep_assert_held(&p->pi_lock);
+ set_cpus_allowed_common(p, new_mask);
+}
+
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+ __do_set_cpus_allowed(p, new_mask);
+}
+
+int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src,
+ int node)
+{
+ if (!src->user_cpus_ptr)
+ return 0;
+
+ dst->user_cpus_ptr = kmalloc_node(cpumask_size(), GFP_KERNEL, node);
+ if (!dst->user_cpus_ptr)
+ return -ENOMEM;
+
+ cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr);
+ return 0;
+}
+
+static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p)
+{
+ struct cpumask *user_mask = NULL;
+
+ swap(p->user_cpus_ptr, user_mask);
+
+ return user_mask;
+}
+
+void release_user_cpus_ptr(struct task_struct *p)
+{
+ kfree(clear_user_cpus_ptr(p));
+}
+
+#endif
+
+/**
+ * task_curr - is this task currently executing on a CPU?
+ * @p: the task in question.
+ *
+ * Return: 1 if the task is currently executing. 0 otherwise.
+ */
+inline int task_curr(const struct task_struct *p)
+{
+ return cpu_curr(task_cpu(p)) == p;
+}
+
+#ifdef CONFIG_SMP
+/*
+ * wait_task_inactive - wait for a thread to unschedule.
+ *
+ * If @match_state is nonzero, it's the @p->state value just checked and
+ * not expected to change. If it changes, i.e. @p might have woken up,
+ * then return zero. When we succeed in waiting for @p to be off its CPU,
+ * we return a positive number (its total switch count). If a second call
+ * a short while later returns the same number, the caller can be sure that
+ * @p has remained unscheduled the whole time.
+ *
+ * The caller must ensure that the task *will* unschedule sometime soon,
+ * else this function might spin for a *long* time. This function can't
+ * be called with interrupts off, or it may introduce deadlock with
+ * smp_call_function() if an IPI is sent by the same process we are
+ * waiting to become inactive.
+ */
+unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
+{
+ unsigned long flags;
+ bool running, on_rq;
+ unsigned long ncsw;
+ struct rq *rq;
+ raw_spinlock_t *lock;
+
+ for (;;) {
+ rq = task_rq(p);
+
+ /*
+ * If the task is actively running on another CPU
+ * still, just relax and busy-wait without holding
+ * any locks.
+ *
+ * NOTE! Since we don't hold any locks, it's not
+ * even sure that "rq" stays as the right runqueue!
+ * But we don't care, since this will return false
+ * if the runqueue has changed and p is actually now
+ * running somewhere else!
+ */
+ while (task_running(p) && p == rq->curr) {
+ if (match_state && unlikely(READ_ONCE(p->__state) != match_state))
+ return 0;
+ cpu_relax();
+ }
+
+ /*
+ * Ok, time to look more closely! We need the rq
+ * lock now, to be *sure*. If we're wrong, we'll
+ * just go back and repeat.
+ */
+ task_access_lock_irqsave(p, &lock, &flags);
+ trace_sched_wait_task(p);
+ running = task_running(p);
+ on_rq = p->on_rq;
+ ncsw = 0;
+ if (!match_state || READ_ONCE(p->__state) == match_state)
+ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
+ task_access_unlock_irqrestore(p, lock, &flags);
+
+ /*
+ * If it changed from the expected state, bail out now.
+ */
+ if (unlikely(!ncsw))
+ break;
+
+ /*
+ * Was it really running after all now that we
+ * checked with the proper locks actually held?
+ *
+ * Oops. Go back and try again..
+ */
+ if (unlikely(running)) {
+ cpu_relax();
+ continue;
+ }
+
+ /*
+ * It's not enough that it's not actively running,
+ * it must be off the runqueue _entirely_, and not
+ * preempted!
+ *
+ * So if it was still runnable (but just not actively
+ * running right now), it's preempted, and we should
+ * yield - it could be a while.
+ */
+ if (unlikely(on_rq)) {
+ ktime_t to = NSEC_PER_SEC / HZ;
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD);
+ continue;
+ }
+
+ /*
+ * Ahh, all good. It wasn't running, and it wasn't
+ * runnable, which means that it will never become
+ * running in the future either. We're all done!
+ */
+ break;
+ }
+
+ return ncsw;
+}
+
+/***
+ * kick_process - kick a running thread to enter/exit the kernel
+ * @p: the to-be-kicked thread
+ *
+ * Cause a process which is running on another CPU to enter
+ * kernel-mode, without any delay. (to get signals handled.)
+ *
+ * NOTE: this function doesn't have to take the runqueue lock,
+ * because all it wants to ensure is that the remote task enters
+ * the kernel. If the IPI races and the task has been migrated
+ * to another CPU then no harm is done and the purpose has been
+ * achieved as well.
+ */
+void kick_process(struct task_struct *p)
+{
+ int cpu;
+
+ preempt_disable();
+ cpu = task_cpu(p);
+ if ((cpu != smp_processor_id()) && task_curr(p))
+ smp_send_reschedule(cpu);
+ preempt_enable();
+}
+EXPORT_SYMBOL_GPL(kick_process);
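+
+/*
+ * Example (illustrative): signal delivery relies on this pattern so that a
+ * task running on another CPU notices TIF_SIGPENDING promptly:
+ *
+ *	set_tsk_thread_flag(p, TIF_SIGPENDING);
+ *	kick_process(p);
+ */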
+
+/*
+ * ->cpus_ptr is protected by both rq->lock and p->pi_lock
+ *
+ * A few notes on cpu_active vs cpu_online:
+ *
+ * - cpu_active must be a subset of cpu_online
+ *
+ * - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
+ * see __set_cpus_allowed_ptr(). At this point the newly online
+ * CPU isn't yet part of the sched domains, and balancing will not
+ * see it.
+ *
+ * - on cpu-down we clear cpu_active() to mask the sched domains and
+ *   prevent the load balancer from placing new tasks on the to-be-removed
+ * CPU. Existing tasks will remain running there and will be taken
+ * off.
+ *
+ * This means that fallback selection must not select !active CPUs.
+ * And can assume that any active CPU must be online. Conversely
+ * select_task_rq() below may allow selection of !active CPUs in order
+ * to satisfy the above rules.
+ */
+static int select_fallback_rq(int cpu, struct task_struct *p)
+{
+ int nid = cpu_to_node(cpu);
+ const struct cpumask *nodemask = NULL;
+ enum { cpuset, possible, fail } state = cpuset;
+ int dest_cpu;
+
+ /*
+ * If the node that the CPU is on has been offlined, cpu_to_node()
+ * will return -1. There is no CPU on the node, and we should
+	 * select a CPU on another node.
+ */
+ if (nid != -1) {
+ nodemask = cpumask_of_node(nid);
+
+ /* Look for allowed, online CPU in same node. */
+ for_each_cpu(dest_cpu, nodemask) {
+ if (is_cpu_allowed(p, dest_cpu))
+ return dest_cpu;
+ }
+ }
+
+ for (;;) {
+ /* Any allowed, online CPU? */
+ for_each_cpu(dest_cpu, p->cpus_ptr) {
+ if (!is_cpu_allowed(p, dest_cpu))
+ continue;
+ goto out;
+ }
+
+ /* No more Mr. Nice Guy. */
+ switch (state) {
+ case cpuset:
+ if (cpuset_cpus_allowed_fallback(p)) {
+ state = possible;
+ break;
+ }
+ fallthrough;
+ case possible:
+ /*
+ * XXX When called from select_task_rq() we only
+ * hold p->pi_lock and again violate locking order.
+ *
+ * More yuck to audit.
+ */
+ do_set_cpus_allowed(p, task_cpu_possible_mask(p));
+ state = fail;
+ break;
+
+ case fail:
+ BUG();
+ break;
+ }
+ }
+
+out:
+ if (state != cpuset) {
+ /*
+ * Don't tell them about moving exiting tasks or
+ * kernel threads (both mm NULL), since they never
+		 * leave the kernel.
+ */
+ if (p->mm && printk_ratelimit()) {
+ printk_deferred("process %d (%s) no longer affine to cpu%d\n",
+ task_pid_nr(p), p->comm, cpu);
+ }
+ }
+
+ return dest_cpu;
+}
+
+static inline int select_task_rq(struct task_struct *p)
+{
+ cpumask_t chk_mask, tmp;
+
+ if (unlikely(!cpumask_and(&chk_mask, p->cpus_ptr, cpu_active_mask)))
+ return select_fallback_rq(task_cpu(p), p);
+
+ if (
+#ifdef CONFIG_SCHED_SMT
+ cpumask_and(&tmp, &chk_mask, &sched_sg_idle_mask) ||
+#endif
+ cpumask_and(&tmp, &chk_mask, sched_rq_watermark) ||
+ cpumask_and(&tmp, &chk_mask,
+ sched_rq_watermark + SCHED_QUEUE_BITS - 1 - task_sched_prio(p)))
+ return best_mask_cpu(task_cpu(p), &tmp);
+
+ return best_mask_cpu(task_cpu(p), &chk_mask);
+}
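+
+/*
+ * Reading of the selection order above (illustrative): prefer a CPU whose
+ * SMT sibling group is fully idle (CONFIG_SCHED_SMT), then any idle CPU in
+ * sched_rq_watermark, then any CPU whose watermark shows nothing queued at
+ * a higher priority than @p; otherwise fall back to the nearest CPU in the
+ * allowed mask via best_mask_cpu().
+ */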
+
+void sched_set_stop_task(int cpu, struct task_struct *stop)
+{
+ static struct lock_class_key stop_pi_lock;
+ struct sched_param stop_param = { .sched_priority = STOP_PRIO };
+ struct sched_param start_param = { .sched_priority = 0 };
+ struct task_struct *old_stop = cpu_rq(cpu)->stop;
+
+ if (stop) {
+ /*
+		 * Make it appear like a SCHED_FIFO task; it's something
+ * userspace knows about and won't get confused about.
+ *
+ * Also, it will make PI more or less work without too
+ * much confusion -- but then, stop work should not
+ * rely on PI working anyway.
+ */
+ sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
+
+ /*
+ * The PI code calls rt_mutex_setprio() with ->pi_lock held to
+ * adjust the effective priority of a task. As a result,
+ * rt_mutex_setprio() can trigger (RT) balancing operations,
+ * which can then trigger wakeups of the stop thread to push
+ * around the current task.
+ *
+ * The stop task itself will never be part of the PI-chain, it
+ * never blocks, therefore that ->pi_lock recursion is safe.
+ * Tell lockdep about this by placing the stop->pi_lock in its
+ * own class.
+ */
+ lockdep_set_class(&stop->pi_lock, &stop_pi_lock);
+ }
+
+ cpu_rq(cpu)->stop = stop;
+
+ if (old_stop) {
+ /*
+ * Reset it back to a normal scheduling policy so that
+ * it can die in pieces.
+ */
+ sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param);
+ }
+}
+
+static int affine_move_task(struct rq *rq, struct task_struct *p, int dest_cpu,
+ raw_spinlock_t *lock, unsigned long irq_flags)
+{
+ /* Can the task run on the task's current CPU? If so, we're done */
+ if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
+ if (p->migration_disabled) {
+ if (likely(p->cpus_ptr != &p->cpus_mask))
+ __do_set_cpus_ptr(p, &p->cpus_mask);
+ p->migration_disabled = 0;
+ p->migration_flags |= MDF_FORCE_ENABLED;
+ /* When p is migrate_disabled, rq->lock should be held */
+ rq->nr_pinned--;
+ }
+
+ if (task_running(p) || READ_ONCE(p->__state) == TASK_WAKING) {
+ struct migration_arg arg = { p, dest_cpu };
+
+ /* Need help from migration thread: drop lock and wait. */
+ __task_access_unlock(p, lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
+ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg);
+ return 0;
+ }
+ if (task_on_rq_queued(p)) {
+ /*
+ * OK, since we're going to drop the lock immediately
+ * afterwards anyway.
+ */
+ update_rq_clock(rq);
+ rq = move_queued_task(rq, p, dest_cpu);
+ lock = &rq->lock;
+ }
+ }
+ __task_access_unlock(p, lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
+ return 0;
+}
+
+static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
+ const struct cpumask *new_mask,
+ u32 flags,
+ struct rq *rq,
+ raw_spinlock_t *lock,
+ unsigned long irq_flags)
+{
+ const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p);
+ const struct cpumask *cpu_valid_mask = cpu_active_mask;
+ bool kthread = p->flags & PF_KTHREAD;
+ struct cpumask *user_mask = NULL;
+ int dest_cpu;
+ int ret = 0;
+
+ if (kthread || is_migration_disabled(p)) {
+ /*
+ * Kernel threads are allowed on online && !active CPUs,
+ * however, during cpu-hot-unplug, even these might get pushed
+ * away if not KTHREAD_IS_PER_CPU.
+ *
+ * Specifically, migration_disabled() tasks must not fail the
+ * cpumask_any_and_distribute() pick below, esp. so on
+ * SCA_MIGRATE_ENABLE, otherwise we'll not call
+ * set_cpus_allowed_common() and actually reset p->cpus_ptr.
+ */
+ cpu_valid_mask = cpu_online_mask;
+ }
+
+ if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * Must re-check here, to close a race against __kthread_bind(),
+ * sched_setaffinity() is not guaranteed to observe the flag.
+ */
+ if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (cpumask_equal(&p->cpus_mask, new_mask))
+ goto out;
+
+ dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
+ if (dest_cpu >= nr_cpu_ids) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ __do_set_cpus_allowed(p, new_mask);
+
+ if (flags & SCA_USER)
+ user_mask = clear_user_cpus_ptr(p);
+
+ ret = affine_move_task(rq, p, dest_cpu, lock, irq_flags);
+
+ kfree(user_mask);
+
+ return ret;
+
+out:
+ __task_access_unlock(p, lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
+
+ return ret;
+}
+
+/*
+ * Change a given task's CPU affinity. Migrate the thread to a
+ * proper CPU and schedule it away if the CPU it's executing on
+ * is removed from the allowed bitmask.
+ *
+ * NOTE: the caller must have a valid reference to the task, the
+ * task must not exit() & deallocate itself prematurely. The
+ * call is not atomic; no spinlocks may be held.
+ */
+static int __set_cpus_allowed_ptr(struct task_struct *p,
+ const struct cpumask *new_mask, u32 flags)
+{
+ unsigned long irq_flags;
+ struct rq *rq;
+ raw_spinlock_t *lock;
+
+ raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
+ rq = __task_access_lock(p, &lock);
+
+ return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, lock, irq_flags);
+}
+
+int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+{
+ return __set_cpus_allowed_ptr(p, new_mask, 0);
+}
+EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
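+
+/*
+ * Usage sketch (illustrative), e.g. pinning a kthread the caller holds a
+ * reference on to a single CPU:
+ *
+ *	set_cpus_allowed_ptr(p, cpumask_of(cpu));
+ */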
+
+/*
+ * Change a given task's CPU affinity to the intersection of its current
+ * affinity mask and @subset_mask, writing the resulting mask to @new_mask
+ * and pointing @p->user_cpus_ptr to a copy of the old mask.
+ * If the resulting mask is empty, leave the affinity unchanged and return
+ * -EINVAL.
+ */
+static int restrict_cpus_allowed_ptr(struct task_struct *p,
+ struct cpumask *new_mask,
+ const struct cpumask *subset_mask)
+{
+ struct cpumask *user_mask = NULL;
+ unsigned long irq_flags;
+ raw_spinlock_t *lock;
+ struct rq *rq;
+ int err;
+
+ if (!p->user_cpus_ptr) {
+ user_mask = kmalloc(cpumask_size(), GFP_KERNEL);
+ if (!user_mask)
+ return -ENOMEM;
+ }
+
+ raw_spin_lock_irqsave(&p->pi_lock, irq_flags);
+ rq = __task_access_lock(p, &lock);
+
+ if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) {
+ err = -EINVAL;
+ goto err_unlock;
+ }
+
+ /*
+ * We're about to butcher the task affinity, so keep track of what
+ * the user asked for in case we're able to restore it later on.
+ */
+ if (user_mask) {
+ cpumask_copy(user_mask, p->cpus_ptr);
+ p->user_cpus_ptr = user_mask;
+ }
+
+ /*return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf);*/
+ return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, lock, irq_flags);
+
+err_unlock:
+ __task_access_unlock(p, lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags);
+ kfree(user_mask);
+ return err;
+}
+
+/*
+ * Restrict the CPU affinity of task @p so that it is a subset of
+ * task_cpu_possible_mask() and point @p->user_cpus_ptr to a copy of the
+ * old affinity mask. If the resulting mask is empty, we warn and walk
+ * up the cpuset hierarchy until we find a suitable mask.
+ */
+void force_compatible_cpus_allowed_ptr(struct task_struct *p)
+{
+ cpumask_var_t new_mask;
+ const struct cpumask *override_mask = task_cpu_possible_mask(p);
+
+ alloc_cpumask_var(&new_mask, GFP_KERNEL);
+
+ /*
+ * __migrate_task() can fail silently in the face of concurrent
+ * offlining of the chosen destination CPU, so take the hotplug
+ * lock to ensure that the migration succeeds.
+ */
+ cpus_read_lock();
+ if (!cpumask_available(new_mask))
+ goto out_set_mask;
+
+ if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask))
+ goto out_free_mask;
+
+ /*
+ * We failed to find a valid subset of the affinity mask for the
+ * task, so override it based on its cpuset hierarchy.
+ */
+ cpuset_cpus_allowed(p, new_mask);
+ override_mask = new_mask;
+
+out_set_mask:
+ if (printk_ratelimit()) {
+ printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
+ task_pid_nr(p), p->comm,
+ cpumask_pr_args(override_mask));
+ }
+
+ WARN_ON(set_cpus_allowed_ptr(p, override_mask));
+out_free_mask:
+ cpus_read_unlock();
+ free_cpumask_var(new_mask);
+}
+
+static int
+__sched_setaffinity(struct task_struct *p, const struct cpumask *mask);
+
+/*
+ * Restore the affinity of a task @p which was previously restricted by a
+ * call to force_compatible_cpus_allowed_ptr(). This will clear (and free)
+ * @p->user_cpus_ptr.
+ *
+ * It is the caller's responsibility to serialise this with any calls to
+ * force_compatible_cpus_allowed_ptr(@p).
+ */
+void relax_compatible_cpus_allowed_ptr(struct task_struct *p)
+{
+ struct cpumask *user_mask = p->user_cpus_ptr;
+ unsigned long flags;
+
+ /*
+ * Try to restore the old affinity mask. If this fails, then
+ * we free the mask explicitly to avoid it being inherited across
+ * a subsequent fork().
+ */
+ if (!user_mask || !__sched_setaffinity(p, user_mask))
+ return;
+
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
+ user_mask = clear_user_cpus_ptr(p);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+ kfree(user_mask);
+}
+
+#else /* CONFIG_SMP */
+
+static inline int select_task_rq(struct task_struct *p)
+{
+ return 0;
+}
+
+static inline int
+__set_cpus_allowed_ptr(struct task_struct *p,
+ const struct cpumask *new_mask, u32 flags)
+{
+ return set_cpus_allowed_ptr(p, new_mask);
+}
+
+static inline bool rq_has_pinned_tasks(struct rq *rq)
+{
+ return false;
+}
+
+#endif /* !CONFIG_SMP */
+
+static void
+ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
+{
+ struct rq *rq;
+
+ if (!schedstat_enabled())
+ return;
+
+ rq = this_rq();
+
+#ifdef CONFIG_SMP
+ if (cpu == rq->cpu) {
+ __schedstat_inc(rq->ttwu_local);
+ __schedstat_inc(p->stats.nr_wakeups_local);
+ } else {
+ /** Alt schedule FW ToDo:
+ * How to do ttwu_wake_remote
+ */
+ }
+#endif /* CONFIG_SMP */
+
+ __schedstat_inc(rq->ttwu_count);
+ __schedstat_inc(p->stats.nr_wakeups);
+}
+
+/*
+ * Mark the task runnable and perform wakeup-preemption.
+ */
+static inline void
+ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
+{
+ check_preempt_curr(rq);
+ WRITE_ONCE(p->__state, TASK_RUNNING);
+ trace_sched_wakeup(p);
+}
+
+static inline void
+ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags)
+{
+ if (p->sched_contributes_to_load)
+ rq->nr_uninterruptible--;
+
+ if (
+#ifdef CONFIG_SMP
+ !(wake_flags & WF_MIGRATED) &&
+#endif
+ p->in_iowait) {
+ delayacct_blkio_end(p);
+ atomic_dec(&task_rq(p)->nr_iowait);
+ }
+
+ activate_task(p, rq);
+ ttwu_do_wakeup(rq, p, 0);
+}
+
+/*
+ * Consider @p being inside a wait loop:
+ *
+ * for (;;) {
+ * set_current_state(TASK_UNINTERRUPTIBLE);
+ *
+ * if (CONDITION)
+ * break;
+ *
+ * schedule();
+ * }
+ * __set_current_state(TASK_RUNNING);
+ *
+ * and a wakeup happening between set_current_state() and schedule(). In this
+ * case @p is still runnable, so all that needs doing is to change p->state
+ * back to TASK_RUNNING in an atomic manner.
+ *
+ * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq
+ * then schedule() must still happen and p->state can be changed to
+ * TASK_RUNNING. Otherwise we lost the race, schedule() has happened, and we
+ * need to do a full wakeup with enqueue.
+ *
+ * Returns: %true when the wakeup is done,
+ * %false otherwise.
+ */
+static int ttwu_runnable(struct task_struct *p, int wake_flags)
+{
+ struct rq *rq;
+ raw_spinlock_t *lock;
+ int ret = 0;
+
+ rq = __task_access_lock(p, &lock);
+ if (task_on_rq_queued(p)) {
+ /* check_preempt_curr() may use rq clock */
+ update_rq_clock(rq);
+ ttwu_do_wakeup(rq, p, wake_flags);
+ ret = 1;
+ }
+ __task_access_unlock(p, lock);
+
+ return ret;
+}
+
+#ifdef CONFIG_SMP
+void sched_ttwu_pending(void *arg)
+{
+ struct llist_node *llist = arg;
+ struct rq *rq = this_rq();
+ struct task_struct *p, *t;
+ struct rq_flags rf;
+
+ if (!llist)
+ return;
+
+ /*
+	 * rq::ttwu_pending is a racy indication of outstanding wakeups.
+	 * Races are such that false negatives are possible, since they
+	 * are shorter lived than false positives would be.
+ */
+ WRITE_ONCE(rq->ttwu_pending, 0);
+
+ rq_lock_irqsave(rq, &rf);
+ update_rq_clock(rq);
+
+ llist_for_each_entry_safe(p, t, llist, wake_entry.llist) {
+ if (WARN_ON_ONCE(p->on_cpu))
+ smp_cond_load_acquire(&p->on_cpu, !VAL);
+
+ if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq)))
+ set_task_cpu(p, cpu_of(rq));
+
+ ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0);
+ }
+
+ rq_unlock_irqrestore(rq, &rf);
+}
+
+void send_call_function_single_ipi(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ if (!set_nr_if_polling(rq->idle))
+ arch_send_call_function_single_ipi(cpu);
+ else
+ trace_sched_wake_idle_without_ipi(cpu);
+}
+
+/*
+ * Queue a task on the target CPU's wake_list and wake the CPU via IPI if
+ * necessary. The wakee CPU on receipt of the IPI will queue the task
+ * via sched_ttwu_pending() for activation so the wakee incurs the cost
+ * of the wakeup instead of the waker.
+ */
+static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
+
+ WRITE_ONCE(rq->ttwu_pending, 1);
+ __smp_call_single_queue(cpu, &p->wake_entry.llist);
+}
+
+static inline bool ttwu_queue_cond(int cpu, int wake_flags)
+{
+ /*
+ * Do not complicate things with the async wake_list while the CPU is
+ * in hotplug state.
+ */
+ if (!cpu_active(cpu))
+ return false;
+
+ /*
+ * If the CPU does not share cache, then queue the task on the
+	 * remote rq's wakelist to avoid accessing remote data.
+ */
+ if (!cpus_share_cache(smp_processor_id(), cpu))
+ return true;
+
+ /*
+	 * If the task is descheduling and is the only running task on the
+	 * CPU, then use the wakelist to offload the task activation to
+ * the soon-to-be-idle CPU as the current CPU is likely busy.
+ * nr_running is checked to avoid unnecessary task stacking.
+ */
+ if ((wake_flags & WF_ON_CPU) && cpu_rq(cpu)->nr_running <= 1)
+ return true;
+
+ return false;
+}
+
+static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
+{
+ if (__is_defined(ALT_SCHED_TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) {
+ if (WARN_ON_ONCE(cpu == smp_processor_id()))
+ return false;
+
+ sched_clock_cpu(cpu); /* Sync clocks across CPUs */
+ __ttwu_queue_wakelist(p, cpu, wake_flags);
+ return true;
+ }
+
+ return false;
+}
+
+void wake_up_if_idle(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
+ rcu_read_lock();
+
+ if (!is_idle_task(rcu_dereference(rq->curr)))
+ goto out;
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ if (is_idle_task(rq->curr))
+ resched_curr(rq);
+ /* Else CPU is not idle, do nothing here */
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+out:
+ rcu_read_unlock();
+}
+
+bool cpus_share_cache(int this_cpu, int that_cpu)
+{
+ if (this_cpu == that_cpu)
+ return true;
+
+ return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
+}
+#else /* !CONFIG_SMP */
+
+static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
+{
+ return false;
+}
+
+#endif /* CONFIG_SMP */
+
+static inline void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ if (ttwu_queue_wakelist(p, cpu, wake_flags))
+ return;
+
+ raw_spin_lock(&rq->lock);
+ update_rq_clock(rq);
+ ttwu_do_activate(rq, p, wake_flags);
+ raw_spin_unlock(&rq->lock);
+}
+
+/*
+ * Invoked from try_to_wake_up() to check whether the task can be woken up.
+ *
+ * The caller holds p::pi_lock if p != current or has preemption
+ * disabled when p == current.
+ *
+ * The rules of PREEMPT_RT saved_state:
+ *
+ * The related locking code always holds p::pi_lock when updating
+ * p::saved_state, which means the code is fully serialized in both cases.
+ *
+ * The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. No other
+ * bits are set, which allows all wakeup scenarios to be distinguished.
+ */
+static __always_inline
+bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) {
+ WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) &&
+ state != TASK_RTLOCK_WAIT);
+ }
+
+ if (READ_ONCE(p->__state) & state) {
+ *success = 1;
+ return true;
+ }
+
+#ifdef CONFIG_PREEMPT_RT
+ /*
+ * Saved state preserves the task state across blocking on
+ * an RT lock. If the state matches, set p::saved_state to
+ * TASK_RUNNING, but do not wake the task because it waits
+ * for a lock wakeup. Also indicate success because from
+ * the regular waker's point of view this has succeeded.
+ *
+ * After acquiring the lock the task will restore p::__state
+ * from p::saved_state which ensures that the regular
+ * wakeup is not lost. The restore will also set
+ * p::saved_state to TASK_RUNNING so any further tests will
+ * not result in false positives vs. @success
+ */
+ if (p->saved_state & state) {
+ p->saved_state = TASK_RUNNING;
+ *success = 1;
+ }
+#endif
+ return false;
+}
+
+/*
+ * Notes on Program-Order guarantees on SMP systems.
+ *
+ * MIGRATION
+ *
+ * The basic program-order guarantee on SMP systems is that when a task [t]
+ * migrates, all its activity on its old CPU [c0] happens-before any subsequent
+ * execution on its new CPU [c1].
+ *
+ * For migration (of runnable tasks) this is provided by the following means:
+ *
+ * A) UNLOCK of the rq(c0)->lock scheduling out task t
+ * B) migration for t is required to synchronize *both* rq(c0)->lock and
+ * rq(c1)->lock (if not at the same time, then in that order).
+ * C) LOCK of the rq(c1)->lock scheduling in task
+ *
+ * Transitivity guarantees that B happens after A and C after B.
+ * Note: we only require RCpc transitivity.
+ * Note: the CPU doing B need not be c0 or c1
+ *
+ * Example:
+ *
+ * CPU0 CPU1 CPU2
+ *
+ * LOCK rq(0)->lock
+ * sched-out X
+ * sched-in Y
+ * UNLOCK rq(0)->lock
+ *
+ * LOCK rq(0)->lock // orders against CPU0
+ * dequeue X
+ * UNLOCK rq(0)->lock
+ *
+ * LOCK rq(1)->lock
+ * enqueue X
+ * UNLOCK rq(1)->lock
+ *
+ * LOCK rq(1)->lock // orders against CPU2
+ * sched-out Z
+ * sched-in X
+ * UNLOCK rq(1)->lock
+ *
+ *
+ * BLOCKING -- aka. SLEEP + WAKEUP
+ *
+ * For blocking we (obviously) need to provide the same guarantee as for
+ * migration. However the means are completely different as there is no lock
+ * chain to provide order. Instead we do:
+ *
+ * 1) smp_store_release(X->on_cpu, 0) -- finish_task()
+ * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up()
+ *
+ * Example:
+ *
+ * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule)
+ *
+ * LOCK rq(0)->lock LOCK X->pi_lock
+ * dequeue X
+ * sched-out X
+ * smp_store_release(X->on_cpu, 0);
+ *
+ * smp_cond_load_acquire(&X->on_cpu, !VAL);
+ * X->state = WAKING
+ * set_task_cpu(X,2)
+ *
+ * LOCK rq(2)->lock
+ * enqueue X
+ * X->state = RUNNING
+ * UNLOCK rq(2)->lock
+ *
+ * LOCK rq(2)->lock // orders against CPU1
+ * sched-out Z
+ * sched-in X
+ * UNLOCK rq(2)->lock
+ *
+ * UNLOCK X->pi_lock
+ * UNLOCK rq(0)->lock
+ *
+ *
+ * However; for wakeups there is a second guarantee we must provide, namely we
+ * must observe the state that led to our wakeup. That is, not only must our
+ * task observe its own prior state, it must also observe the stores prior to
+ * its wakeup.
+ *
+ * This means that any means of doing remote wakeups must order the CPU doing
+ * the wakeup against the CPU the task is going to end up running on. This,
+ * however, is already required for the regular Program-Order guarantee above,
+ * since the waking CPU is the one issuing the ACQUIRE (smp_cond_load_acquire).
+ *
+ */
+
+/**
+ * try_to_wake_up - wake up a thread
+ * @p: the thread to be awakened
+ * @state: the mask of task states that can be woken
+ * @wake_flags: wake modifier flags (WF_*)
+ *
+ * Conceptually does:
+ *
+ * If (@state & @p->state) @p->state = TASK_RUNNING.
+ *
+ * If the task was not queued/runnable, also place it back on a runqueue.
+ *
+ * This function is atomic against schedule() which would dequeue the task.
+ *
+ * It issues a full memory barrier before accessing @p->state, see the comment
+ * with set_current_state().
+ *
+ * Uses p->pi_lock to serialize against concurrent wake-ups.
+ *
+ * Relies on p->pi_lock stabilizing:
+ * - p->sched_class
+ * - p->cpus_ptr
+ * - p->sched_task_group
+ * in order to do migration, see its use of select_task_rq()/set_task_cpu().
+ *
+ * Tries really hard to only take one task_rq(p)->lock for performance.
+ * Takes rq->lock in:
+ * - ttwu_runnable() -- old rq, unavoidable, see comment there;
+ * - ttwu_queue() -- new rq, for enqueue of the task;
+ * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us.
+ *
+ * As a consequence we race really badly with just about everything. See the
+ * many memory barriers and their comments for details.
+ *
+ * Return: %true if @p->state changes (an actual wakeup was done),
+ * %false otherwise.
+ */
+static int try_to_wake_up(struct task_struct *p, unsigned int state,
+ int wake_flags)
+{
+ unsigned long flags;
+ int cpu, success = 0;
+
+ preempt_disable();
+ if (p == current) {
+ /*
+ * We're waking current, this means 'p->on_rq' and 'task_cpu(p)
+ * == smp_processor_id()'. Together this means we can special
+ * case the whole 'p->on_rq && ttwu_runnable()' case below
+ * without taking any locks.
+ *
+ * In particular:
+ * - we rely on Program-Order guarantees for all the ordering,
+ * - we're serialized against set_special_state() by virtue of
+ * it disabling IRQs (this allows not taking ->pi_lock).
+ */
+ if (!ttwu_state_match(p, state, &success))
+ goto out;
+
+ trace_sched_waking(p);
+ WRITE_ONCE(p->__state, TASK_RUNNING);
+ trace_sched_wakeup(p);
+ goto out;
+ }
+
+ /*
+ * If we are going to wake up a thread waiting for CONDITION we
+ * need to ensure that CONDITION=1 done by the caller can not be
+ * reordered with p->state check below. This pairs with smp_store_mb()
+ * in set_current_state() that the waiting thread does.
+ */
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
+ smp_mb__after_spinlock();
+ if (!ttwu_state_match(p, state, &success))
+ goto unlock;
+
+ trace_sched_waking(p);
+
+ /*
+ * Ensure we load p->on_rq _after_ p->state, otherwise it would
+ * be possible to, falsely, observe p->on_rq == 0 and get stuck
+ * in smp_cond_load_acquire() below.
+ *
+ * sched_ttwu_pending() try_to_wake_up()
+ * STORE p->on_rq = 1 LOAD p->state
+ * UNLOCK rq->lock
+ *
+ * __schedule() (switch to task 'p')
+ * LOCK rq->lock smp_rmb();
+ * smp_mb__after_spinlock();
+ * UNLOCK rq->lock
+ *
+ * [task p]
+ * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq
+ *
+ * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
+ * __schedule(). See the comment for smp_mb__after_spinlock().
+ *
+	 * A similar smp_rmb() lives in task_call_func().
+ */
+ smp_rmb();
+ if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
+ goto unlock;
+
+#ifdef CONFIG_SMP
+ /*
+ * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
+ * possible to, falsely, observe p->on_cpu == 0.
+ *
+ * One must be running (->on_cpu == 1) in order to remove oneself
+ * from the runqueue.
+ *
+ * __schedule() (switch to task 'p') try_to_wake_up()
+ * STORE p->on_cpu = 1 LOAD p->on_rq
+ * UNLOCK rq->lock
+ *
+ * __schedule() (put 'p' to sleep)
+ * LOCK rq->lock smp_rmb();
+ * smp_mb__after_spinlock();
+ * STORE p->on_rq = 0 LOAD p->on_cpu
+ *
+ * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in
+ * __schedule(). See the comment for smp_mb__after_spinlock().
+ *
+ * Form a control-dep-acquire with p->on_rq == 0 above, to ensure
+ * schedule()'s deactivate_task() has 'happened' and p will no longer
+	 * care about its own p->state. See the comment in __schedule().
+ */
+ smp_acquire__after_ctrl_dep();
+
+ /*
+ * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq
+ * == 0), which means we need to do an enqueue, change p->state to
+ * TASK_WAKING such that we can unlock p->pi_lock before doing the
+ * enqueue, such as ttwu_queue_wakelist().
+ */
+ WRITE_ONCE(p->__state, TASK_WAKING);
+
+ /*
+ * If the owning (remote) CPU is still in the middle of schedule() with
+ * this task as prev, considering queueing p on the remote CPUs wake_list
+ * which potentially sends an IPI instead of spinning on p->on_cpu to
+ * let the waker make forward progress. This is safe because IRQs are
+ * disabled and the IPI will deliver after on_cpu is cleared.
+ *
+ * Ensure we load task_cpu(p) after p->on_cpu:
+ *
+ * set_task_cpu(p, cpu);
+ * STORE p->cpu = @cpu
+ * __schedule() (switch to task 'p')
+ * LOCK rq->lock
+ * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu)
+ * STORE p->on_cpu = 1 LOAD p->cpu
+ *
+ * to ensure we observe the correct CPU on which the task is currently
+ * scheduling.
+ */
+ if (smp_load_acquire(&p->on_cpu) &&
+ ttwu_queue_wakelist(p, task_cpu(p), wake_flags | WF_ON_CPU))
+ goto unlock;
+
+ /*
+ * If the owning (remote) CPU is still in the middle of schedule() with
+ * this task as prev, wait until it's done referencing the task.
+ *
+ * Pairs with the smp_store_release() in finish_task().
+ *
+ * This ensures that tasks getting woken will be fully ordered against
+ * their previous state and preserve Program Order.
+ */
+ smp_cond_load_acquire(&p->on_cpu, !VAL);
+
+ sched_task_ttwu(p);
+
+ cpu = select_task_rq(p);
+
+ if (cpu != task_cpu(p)) {
+ if (p->in_iowait) {
+ delayacct_blkio_end(p);
+ atomic_dec(&task_rq(p)->nr_iowait);
+ }
+
+ wake_flags |= WF_MIGRATED;
+ psi_ttwu_dequeue(p);
+ set_task_cpu(p, cpu);
+ }
+#else
+ cpu = task_cpu(p);
+#endif /* CONFIG_SMP */
+
+ ttwu_queue(p, cpu, wake_flags);
+unlock:
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+out:
+ if (success)
+ ttwu_stat(p, task_cpu(p), wake_flags);
+ preempt_enable();
+
+ return success;
+}
+
+/**
+ * task_call_func - Invoke a function on task in fixed state
+ * @p: Process for which the function is to be invoked, can be @current.
+ * @func: Function to invoke.
+ * @arg: Argument to function.
+ *
+ * Fix the task in its current state by avoiding wakeups and/or rq operations,
+ * and call @func(@arg) on it. This function can use ->on_rq and task_curr()
+ * to work out what the state is, if required. Given that @func can be invoked
+ * with a runqueue lock held, it had better be quite lightweight.
+ *
+ * Returns:
+ * Whatever @func returns
+ */
+int task_call_func(struct task_struct *p, task_call_f func, void *arg)
+{
+ struct rq *rq = NULL;
+ unsigned int state;
+ struct rq_flags rf;
+ int ret;
+
+ raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
+
+ state = READ_ONCE(p->__state);
+
+ /*
+ * Ensure we load p->on_rq after p->__state, otherwise it would be
+ * possible to, falsely, observe p->on_rq == 0.
+ *
+ * See try_to_wake_up() for a longer comment.
+ */
+ smp_rmb();
+
+ /*
+	 * Since p->pi_lock blocks try_to_wake_up(), we don't need rq->lock when
+ * the task is blocked. Make sure to check @state since ttwu() can drop
+ * locks at the end, see ttwu_queue_wakelist().
+ */
+ if (state == TASK_RUNNING || state == TASK_WAKING || p->on_rq)
+ rq = __task_rq_lock(p, &rf);
+
+ /*
+ * At this point the task is pinned; either:
+ * - blocked and we're holding off wakeups (pi->lock)
+ * - woken, and we're holding off enqueue (rq->lock)
+ * - queued, and we're holding off schedule (rq->lock)
+ * - running, and we're holding off de-schedule (rq->lock)
+ *
+ * The called function (@func) can use: task_curr(), p->on_rq and
+ * p->__state to differentiate between these states.
+ */
+ ret = func(p, arg);
+
+ if (rq)
+ __task_rq_unlock(rq, &rf);
+
+ raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
+ return ret;
+}
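+
+/*
+ * Illustrative shape of a @func callback, with read_state() standing in for
+ * a caller-supplied helper; it runs with the task pinned as described above,
+ * possibly under rq->lock, so it must stay lightweight:
+ *
+ *	static int read_state(struct task_struct *p, void *arg)
+ *	{
+ *		*(unsigned int *)arg = READ_ONCE(p->__state);
+ *		return 0;
+ *	}
+ *
+ *	unsigned int state;
+ *	task_call_func(p, read_state, &state);
+ */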
+
+/**
+ * wake_up_process - Wake up a specific process
+ * @p: The process to be woken up.
+ *
+ * Attempt to wake up the nominated process and move it to the set of runnable
+ * processes.
+ *
+ * Return: 1 if the process was woken up, 0 if it was already running.
+ *
+ * This function executes a full memory barrier before accessing the task state.
+ */
+int wake_up_process(struct task_struct *p)
+{
+ return try_to_wake_up(p, TASK_NORMAL, 0);
+}
+EXPORT_SYMBOL(wake_up_process);
+
+int wake_up_state(struct task_struct *p, unsigned int state)
+{
+ return try_to_wake_up(p, state, 0);
+}
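+
+/*
+ * Illustrative waker side of the wait-loop pattern documented above
+ * ttwu_runnable(): publish the condition first, then wake:
+ *
+ *	CONDITION = 1;
+ *	wake_up_process(p);
+ *
+ * wake_up_process() issues a full barrier before reading p->state, so the
+ * store to CONDITION cannot be reordered past the state check.
+ */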
+
+/*
+ * Perform scheduler related setup for a newly forked process p.
+ * p is forked by current.
+ *
+ * __sched_fork() is basic setup used by init_idle() too:
+ */
+static inline void __sched_fork(unsigned long clone_flags, struct task_struct *p)
+{
+ p->on_rq = 0;
+ p->on_cpu = 0;
+ p->utime = 0;
+ p->stime = 0;
+ p->sched_time = 0;
+
+#ifdef CONFIG_SCHEDSTATS
+ /* Even if schedstat is disabled, there should not be garbage */
+ memset(&p->stats, 0, sizeof(p->stats));
+#endif
+
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+ INIT_HLIST_HEAD(&p->preempt_notifiers);
+#endif
+
+#ifdef CONFIG_COMPACTION
+ p->capture_control = NULL;
+#endif
+#ifdef CONFIG_SMP
+ p->wake_entry.u_flags = CSD_TYPE_TTWU;
+#endif
+}
+
+/*
+ * fork()/clone()-time setup:
+ */
+int sched_fork(unsigned long clone_flags, struct task_struct *p)
+{
+ __sched_fork(clone_flags, p);
+ /*
+ * We mark the process as NEW here. This guarantees that
+ * nobody will actually run it, and a signal or other external
+ * event cannot wake it up and insert it on the runqueue either.
+ */
+ p->__state = TASK_NEW;
+
+ /*
+ * Make sure we do not leak PI boosting priority to the child.
+ */
+ p->prio = current->normal_prio;
+
+ /*
+ * Revert to default priority/policy on fork if requested.
+ */
+ if (unlikely(p->sched_reset_on_fork)) {
+ if (task_has_rt_policy(p)) {
+ p->policy = SCHED_NORMAL;
+ p->static_prio = NICE_TO_PRIO(0);
+ p->rt_priority = 0;
+ } else if (PRIO_TO_NICE(p->static_prio) < 0)
+ p->static_prio = NICE_TO_PRIO(0);
+
+ p->prio = p->normal_prio = p->static_prio;
+
+ /*
+ * We don't need the reset flag anymore after the fork. It has
+ * fulfilled its duty:
+ */
+ p->sched_reset_on_fork = 0;
+ }
+
+#ifdef CONFIG_SCHED_INFO
+ if (unlikely(sched_info_on()))
+ memset(&p->sched_info, 0, sizeof(p->sched_info));
+#endif
+ init_task_preempt_count(p);
+
+ return 0;
+}
+
+void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs)
+{
+ unsigned long flags;
+ struct rq *rq;
+
+ /*
+ * Because we're not yet on the pid-hash, p->pi_lock isn't strictly
+ * required yet, but lockdep gets upset if rules are violated.
+ */
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
+ /*
+ * Share the timeslice between parent and child, thus the
+ * total amount of pending timeslices in the system doesn't change,
+ * resulting in more scheduling fairness.
+ */
+ rq = this_rq();
+ raw_spin_lock(&rq->lock);
+
+ rq->curr->time_slice /= 2;
+ p->time_slice = rq->curr->time_slice;
+#ifdef CONFIG_SCHED_HRTICK
+ hrtick_start(rq, rq->curr->time_slice);
+#endif
+
+ if (p->time_slice < RESCHED_NS) {
+ p->time_slice = sched_timeslice_ns;
+ resched_curr(rq);
+ }
+ sched_task_fork(p, rq);
+ raw_spin_unlock(&rq->lock);
+
+ rseq_migrate(p);
+ /*
+ * We're setting the CPU for the first time, we don't migrate,
+ * so use __set_task_cpu().
+ */
+ __set_task_cpu(p, smp_processor_id());
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+}
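+
+/*
+ * Worked example (illustrative, 4ms default slice): a parent forking with
+ * 3ms of its slice left keeps 1.5ms and the child starts with the same
+ * 1.5ms; only if the halved slice drops below RESCHED_NS does the child get
+ * a full sched_timeslice_ns and the parent get marked for rescheduling.
+ */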
+
+void sched_post_fork(struct task_struct *p)
+{
+}
+
+#ifdef CONFIG_SCHEDSTATS
+
+DEFINE_STATIC_KEY_FALSE(sched_schedstats);
+
+static void set_schedstats(bool enabled)
+{
+ if (enabled)
+ static_branch_enable(&sched_schedstats);
+ else
+ static_branch_disable(&sched_schedstats);
+}
+
+void force_schedstat_enabled(void)
+{
+ if (!schedstat_enabled()) {
+ pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n");
+ static_branch_enable(&sched_schedstats);
+ }
+}
+
+static int __init setup_schedstats(char *str)
+{
+ int ret = 0;
+ if (!str)
+ goto out;
+
+ if (!strcmp(str, "enable")) {
+ set_schedstats(true);
+ ret = 1;
+ } else if (!strcmp(str, "disable")) {
+ set_schedstats(false);
+ ret = 1;
+ }
+out:
+ if (!ret)
+ pr_warn("Unable to parse schedstats=\n");
+
+ return ret;
+}
+__setup("schedstats=", setup_schedstats);
+
+#ifdef CONFIG_PROC_SYSCTL
+static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ struct ctl_table t;
+ int err;
+ int state = static_branch_likely(&sched_schedstats);
+
+ if (write && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ t = *table;
+ t.data = &state;
+ err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
+ if (err < 0)
+ return err;
+ if (write)
+ set_schedstats(state);
+ return err;
+}
+
+static struct ctl_table sched_core_sysctls[] = {
+ {
+ .procname = "sched_schedstats",
+ .data = NULL,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sysctl_schedstats,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
+ },
+ {}
+};
+static int __init sched_core_sysctl_init(void)
+{
+ register_sysctl_init("kernel", sched_core_sysctls);
+ return 0;
+}
+late_initcall(sched_core_sysctl_init);
+#endif /* CONFIG_PROC_SYSCTL */
+#endif /* CONFIG_SCHEDSTATS */
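+
+/*
+ * Example (illustrative): schedstats can be enabled at boot with
+ * "schedstats=enable" or toggled at runtime through
+ * /proc/sys/kernel/sched_schedstats.
+ */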
+
+/*
+ * wake_up_new_task - wake up a newly created task for the first time.
+ *
+ * This function will do some initial scheduler statistics housekeeping
+ * that must be done for every newly created context, then puts the task
+ * on the runqueue and wakes it.
+ */
+void wake_up_new_task(struct task_struct *p)
+{
+ unsigned long flags;
+ struct rq *rq;
+
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
+ WRITE_ONCE(p->__state, TASK_RUNNING);
+ rq = cpu_rq(select_task_rq(p));
+#ifdef CONFIG_SMP
+ rseq_migrate(p);
+ /*
+ * Fork balancing, do it here and not earlier because:
+ * - cpus_ptr can change in the fork path
+ * - any previously selected CPU might disappear through hotplug
+ *
+ * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
+ * as we're not fully set-up yet.
+ */
+ __set_task_cpu(p, cpu_of(rq));
+#endif
+
+ raw_spin_lock(&rq->lock);
+ update_rq_clock(rq);
+
+ activate_task(p, rq);
+ trace_sched_wakeup_new(p);
+ check_preempt_curr(rq);
+
+ raw_spin_unlock(&rq->lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+}
+
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+
+static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key);
+
+void preempt_notifier_inc(void)
+{
+ static_branch_inc(&preempt_notifier_key);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_inc);
+
+void preempt_notifier_dec(void)
+{
+ static_branch_dec(&preempt_notifier_key);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_dec);
+
+/**
+ * preempt_notifier_register - tell me when current is being preempted & rescheduled
+ * @notifier: notifier struct to register
+ */
+void preempt_notifier_register(struct preempt_notifier *notifier)
+{
+ if (!static_branch_unlikely(&preempt_notifier_key))
+ WARN(1, "registering preempt_notifier while notifiers disabled\n");
+
+ hlist_add_head(&notifier->link, &current->preempt_notifiers);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_register);
+
+/**
+ * preempt_notifier_unregister - no longer interested in preemption notifications
+ * @notifier: notifier struct to unregister
+ *
+ * This is *not* safe to call from within a preemption notifier.
+ */
+void preempt_notifier_unregister(struct preempt_notifier *notifier)
+{
+ hlist_del(&notifier->link);
+}
+EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
+
+static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
+{
+ struct preempt_notifier *notifier;
+
+ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
+ notifier->ops->sched_in(notifier, raw_smp_processor_id());
+}
+
+static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
+{
+ if (static_branch_unlikely(&preempt_notifier_key))
+ __fire_sched_in_preempt_notifiers(curr);
+}
+
+static void
+__fire_sched_out_preempt_notifiers(struct task_struct *curr,
+ struct task_struct *next)
+{
+ struct preempt_notifier *notifier;
+
+ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link)
+ notifier->ops->sched_out(notifier, next);
+}
+
+static __always_inline void
+fire_sched_out_preempt_notifiers(struct task_struct *curr,
+ struct task_struct *next)
+{
+ if (static_branch_unlikely(&preempt_notifier_key))
+ __fire_sched_out_preempt_notifiers(curr, next);
+}
+
+#else /* !CONFIG_PREEMPT_NOTIFIERS */
+
+static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
+{
+}
+
+static inline void
+fire_sched_out_preempt_notifiers(struct task_struct *curr,
+ struct task_struct *next)
+{
+}
+
+#endif /* CONFIG_PREEMPT_NOTIFIERS */
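+
+/*
+ * Usage sketch (illustrative) for the notifier API above, with my_sched_in()
+ * and my_sched_out() standing in for the user's callbacks, taking
+ * (notifier, cpu) and (notifier, next) respectively:
+ *
+ *	static struct preempt_ops my_preempt_ops = {
+ *		.sched_in  = my_sched_in,
+ *		.sched_out = my_sched_out,
+ *	};
+ *
+ *	preempt_notifier_inc();
+ *	preempt_notifier_init(&notifier, &my_preempt_ops);
+ *	preempt_notifier_register(&notifier);
+ */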
+
+static inline void prepare_task(struct task_struct *next)
+{
+ /*
+	 * Claim the task as running; we do this before switching to it
+ * such that any running task will have this set.
+ *
+ * See the ttwu() WF_ON_CPU case and its ordering comment.
+ */
+ WRITE_ONCE(next->on_cpu, 1);
+}
+
+static inline void finish_task(struct task_struct *prev)
+{
+#ifdef CONFIG_SMP
+ /*
+ * This must be the very last reference to @prev from this CPU. After
+ * p->on_cpu is cleared, the task can be moved to a different CPU. We
+ * must ensure this doesn't happen until the switch is completely
+ * finished.
+ *
+ * In particular, the load of prev->state in finish_task_switch() must
+ * happen before this.
+ *
+ * Pairs with the smp_cond_load_acquire() in try_to_wake_up().
+ */
+ smp_store_release(&prev->on_cpu, 0);
+#else
+ prev->on_cpu = 0;
+#endif
+}
+
+#ifdef CONFIG_SMP
+
+static void do_balance_callbacks(struct rq *rq, struct callback_head *head)
+{
+ void (*func)(struct rq *rq);
+ struct callback_head *next;
+
+ lockdep_assert_held(&rq->lock);
+
+ while (head) {
+ func = (void (*)(struct rq *))head->func;
+ next = head->next;
+ head->next = NULL;
+ head = next;
+
+ func(rq);
+ }
+}
+
+static void balance_push(struct rq *rq);
+
+/*
+ * balance_push_callback is a right abuse of the callback interface and plays
+ * by significantly different rules.
+ *
+ * Where the normal balance_callback's purpose is to be run in the same context
+ * that queued it (only later, when it's safe to drop rq->lock again),
+ * balance_push_callback is specifically targeted at __schedule().
+ *
+ * This abuse is tolerated because it places all the unlikely/odd cases behind
+ * a single test, namely: rq->balance_callback == NULL.
+ */
+struct callback_head balance_push_callback = {
+ .next = NULL,
+ .func = (void (*)(struct callback_head *))balance_push,
+};
+
+static inline struct callback_head *
+__splice_balance_callbacks(struct rq *rq, bool split)
+{
+ struct callback_head *head = rq->balance_callback;
+
+ if (likely(!head))
+ return NULL;
+
+ lockdep_assert_rq_held(rq);
+ /*
+ * Must not take balance_push_callback off the list when
+ * splice_balance_callbacks() and balance_callbacks() are not
+ * in the same rq->lock section.
+ *
+ * In that case it would be possible for __schedule() to interleave
+ * and observe the list empty.
+ */
+ if (split && head == &balance_push_callback)
+ head = NULL;
+ else
+ rq->balance_callback = NULL;
+
+ return head;
+}
+
+static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
+{
+ return __splice_balance_callbacks(rq, true);
+}
+
+static void __balance_callbacks(struct rq *rq)
+{
+ do_balance_callbacks(rq, __splice_balance_callbacks(rq, false));
+}
+
+static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
+{
+ unsigned long flags;
+
+ if (unlikely(head)) {
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ do_balance_callbacks(rq, head);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ }
+}
+
+#else
+
+static inline void __balance_callbacks(struct rq *rq)
+{
+}
+
+static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
+{
+ return NULL;
+}
+
+static inline void balance_callbacks(struct rq *rq, struct callback_head *head)
+{
+}
+
+#endif
+
+static inline void
+prepare_lock_switch(struct rq *rq, struct task_struct *next)
+{
+ /*
+ * Since the runqueue lock will be released by the next
+ * task (which is an invalid locking op but in the case
+	 * of the scheduler it's an obvious special-case), we
+ * do an early lockdep release here:
+ */
+ spin_release(&rq->lock.dep_map, _THIS_IP_);
+#ifdef CONFIG_DEBUG_SPINLOCK
+ /* this is a valid case when another task releases the spinlock */
+ rq->lock.owner = next;
+#endif
+}
+
+static inline void finish_lock_switch(struct rq *rq)
+{
+ /*
+ * If we are tracking spinlock dependencies then we have to
+ * fix up the runqueue lock - which gets 'carried over' from
+ * prev into current:
+ */
+ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
+ __balance_callbacks(rq);
+ raw_spin_unlock_irq(&rq->lock);
+}
+
+/*
+ * NOP if the arch has not defined these:
+ */
+
+#ifndef prepare_arch_switch
+# define prepare_arch_switch(next) do { } while (0)
+#endif
+
+#ifndef finish_arch_post_lock_switch
+# define finish_arch_post_lock_switch() do { } while (0)
+#endif
+
+static inline void kmap_local_sched_out(void)
+{
+#ifdef CONFIG_KMAP_LOCAL
+ if (unlikely(current->kmap_ctrl.idx))
+ __kmap_local_sched_out();
+#endif
+}
+
+static inline void kmap_local_sched_in(void)
+{
+#ifdef CONFIG_KMAP_LOCAL
+ if (unlikely(current->kmap_ctrl.idx))
+ __kmap_local_sched_in();
+#endif
+}
+
+/**
+ * prepare_task_switch - prepare to switch tasks
+ * @rq: the runqueue preparing to switch
+ * @next: the task we are going to switch to.
+ *
+ * This is called with the rq lock held and interrupts off. It must
+ * be paired with a subsequent finish_task_switch after the context
+ * switch.
+ *
+ * prepare_task_switch sets up locking and calls architecture specific
+ * hooks.
+ */
+static inline void
+prepare_task_switch(struct rq *rq, struct task_struct *prev,
+ struct task_struct *next)
+{
+ kcov_prepare_switch(prev);
+ sched_info_switch(rq, prev, next);
+ perf_event_task_sched_out(prev, next);
+ rseq_preempt(prev);
+ fire_sched_out_preempt_notifiers(prev, next);
+ kmap_local_sched_out();
+ prepare_task(next);
+ prepare_arch_switch(next);
+}
+
+/**
+ * finish_task_switch - clean up after a task-switch
+ * @rq: runqueue associated with task-switch
+ * @prev: the thread we just switched away from.
+ *
+ * finish_task_switch must be called after the context switch, paired
+ * with a prepare_task_switch call before the context switch.
+ * finish_task_switch will reconcile locking set up by prepare_task_switch,
+ * and do any other architecture-specific cleanup actions.
+ *
+ * Note that we may have delayed dropping an mm in context_switch(). If
+ * so, we finish that here outside of the runqueue lock. (Doing it
+ * with the lock held can cause deadlocks; see schedule() for
+ * details.)
+ *
+ * The context switch has flipped the stack from under us and restored the
+ * local variables which were saved when this task called schedule() in the
+ * past. prev == current is still correct but we need to recalculate this_rq
+ * because prev may have moved to another CPU.
+ */
+static struct rq *finish_task_switch(struct task_struct *prev)
+ __releases(rq->lock)
+{
+ struct rq *rq = this_rq();
+ struct mm_struct *mm = rq->prev_mm;
+ unsigned int prev_state;
+
+ /*
+ * The previous task will have left us with a preempt_count of 2
+ * because it left us after:
+ *
+ * schedule()
+ * preempt_disable(); // 1
+ * __schedule()
+ * raw_spin_lock_irq(&rq->lock) // 2
+ *
+ * Also, see FORK_PREEMPT_COUNT.
+ */
+ if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
+ "corrupted preempt_count: %s/%d/0x%x\n",
+ current->comm, current->pid, preempt_count()))
+ preempt_count_set(FORK_PREEMPT_COUNT);
+
+ rq->prev_mm = NULL;
+
+ /*
+ * A task struct has one reference for the use as "current".
+ * If a task dies, then it sets TASK_DEAD in tsk->state and calls
+ * schedule one last time. The schedule call will never return, and
+ * the scheduled task must drop that reference.
+ *
+ * We must observe prev->state before clearing prev->on_cpu (in
+ * finish_task), otherwise a concurrent wakeup can get prev
+ * running on another CPU and we could race with its RUNNING -> DEAD
+ * transition, resulting in a double drop.
+ */
+ prev_state = READ_ONCE(prev->__state);
+ vtime_task_switch(prev);
+ perf_event_task_sched_in(prev, current);
+ finish_task(prev);
+ tick_nohz_task_switch();
+ finish_lock_switch(rq);
+ finish_arch_post_lock_switch();
+ kcov_finish_switch(current);
+ /*
+ * kmap_local_sched_out() is invoked with rq::lock held and
+ * interrupts disabled. There is no requirement for that, but the
+ * sched out code does not have an interrupt enabled section.
+ * Restoring the maps on sched in does not require interrupts being
+ * disabled either.
+ */
+ kmap_local_sched_in();
+
+ fire_sched_in_preempt_notifiers(current);
+ /*
+ * When switching through a kernel thread, the loop in
+ * membarrier_{private,global}_expedited() may have observed that
+ * kernel thread and not issued an IPI. It is therefore possible to
+ * schedule between user->kernel->user threads without passing through
+ * switch_mm(). Membarrier requires a barrier after storing to
+ * rq->curr, before returning to userspace, so provide them here:
+ *
+ * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly
+ * provided by mmdrop(),
+ * - a sync_core for SYNC_CORE.
+ */
+ if (mm) {
+ membarrier_mm_sync_core_before_usermode(mm);
+ mmdrop_sched(mm);
+ }
+ if (unlikely(prev_state == TASK_DEAD)) {
+ /* Task is done with its stack. */
+ put_task_stack(prev);
+
+ put_task_struct_rcu_user(prev);
+ }
+
+ return rq;
+}
+
+/**
+ * schedule_tail - first thing a freshly forked thread must call.
+ * @prev: the thread we just switched away from.
+ */
+asmlinkage __visible void schedule_tail(struct task_struct *prev)
+ __releases(rq->lock)
+{
+ /*
+ * New tasks start with FORK_PREEMPT_COUNT, see there and
+ * finish_task_switch() for details.
+ *
+ * finish_task_switch() will drop rq->lock() and lower preempt_count
+ * and the preempt_enable() will end up enabling preemption (on
+ * PREEMPT_COUNT kernels).
+ */
+
+ finish_task_switch(prev);
+ preempt_enable();
+
+ if (current->set_child_tid)
+ put_user(task_pid_vnr(current), current->set_child_tid);
+
+ calculate_sigpending();
+}
+
+/*
+ * context_switch - switch to the new MM and the new thread's register state.
+ */
+static __always_inline struct rq *
+context_switch(struct rq *rq, struct task_struct *prev,
+ struct task_struct *next)
+{
+ prepare_task_switch(rq, prev, next);
+
+ /*
+ * For paravirt, this is coupled with an exit in switch_to to
+ * combine the page table reload and the switch backend into
+ * one hypercall.
+ */
+ arch_start_context_switch(prev);
+
+ /*
+ * kernel -> kernel lazy + transfer active
+ * user -> kernel lazy + mmgrab() active
+ *
+ * kernel -> user switch + mmdrop() active
+ * user -> user switch
+ */
+ if (!next->mm) { // to kernel
+ enter_lazy_tlb(prev->active_mm, next);
+
+ next->active_mm = prev->active_mm;
+ if (prev->mm) // from user
+ mmgrab(prev->active_mm);
+ else
+ prev->active_mm = NULL;
+ } else { // to user
+ membarrier_switch_mm(rq, prev->active_mm, next->mm);
+ /*
+ * sys_membarrier() requires an smp_mb() between setting
+ * rq->curr / membarrier_switch_mm() and returning to userspace.
+ *
+ * The below provides this either through switch_mm(), or in
+ * case 'prev->active_mm == next->mm' through
+ * finish_task_switch()'s mmdrop().
+ */
+ switch_mm_irqs_off(prev->active_mm, next->mm, next);
+
+ if (!prev->mm) { // from kernel
+ /* will mmdrop() in finish_task_switch(). */
+ rq->prev_mm = prev->active_mm;
+ prev->active_mm = NULL;
+ }
+ }
+
+ prepare_lock_switch(rq, next);
+
+ /* Here we just switch the register state and the stack. */
+ switch_to(prev, next, prev);
+ barrier();
+
+ return finish_task_switch(prev);
+}
+
+/*
+ * nr_running, nr_uninterruptible and nr_context_switches:
+ *
+ * externally visible scheduler statistics: current number of runnable
+ * threads, total number of context switches performed since bootup.
+ */
+unsigned int nr_running(void)
+{
+ unsigned int i, sum = 0;
+
+ for_each_online_cpu(i)
+ sum += cpu_rq(i)->nr_running;
+
+ return sum;
+}
+
+/*
+ * Check if only the current task is running on the CPU.
+ *
+ * Caution: this function does not check that the caller has disabled
+ * preemption, thus the result might have a time-of-check-to-time-of-use
+ * race. The caller is responsible to use it correctly, for example:
+ *
+ * - from a non-preemptible section (of course)
+ *
+ * - from a thread that is bound to a single CPU
+ *
+ * - in a loop with very short iterations (e.g. a polling loop)
+ */
+bool single_task_running(void)
+{
+ return raw_rq()->nr_running == 1;
+}
+EXPORT_SYMBOL(single_task_running);
+
+unsigned long long nr_context_switches(void)
+{
+ int i;
+ unsigned long long sum = 0;
+
+ for_each_possible_cpu(i)
+ sum += cpu_rq(i)->nr_switches;
+
+ return sum;
+}
+
+/*
+ * Consumers of these two interfaces, like for example the cpuidle menu
+ * governor, are using nonsensical data: they prefer a shallow idle state for
+ * a CPU that has IO-wait, even though the waiting task might not even end up
+ * running on that CPU when it does become runnable.
+ */
+
+unsigned int nr_iowait_cpu(int cpu)
+{
+ return atomic_read(&cpu_rq(cpu)->nr_iowait);
+}
+
+/*
+ * IO-wait accounting, and how it's mostly bollocks (on SMP).
+ *
+ * The idea behind IO-wait accounting is to account the idle time that we could
+ * have spent running if it were not for IO. That is, if we were to improve the
+ * storage performance, we'd have a proportional reduction in IO-wait time.
+ *
+ * This all works nicely on UP, where, when a task blocks on IO, we account
+ * idle time as IO-wait, because if the storage were faster, it could've been
+ * running and we'd not be idle.
+ *
+ * This has been extended to SMP, by doing the same for each CPU. This however
+ * is broken.
+ *
+ * Imagine for instance the case where two tasks block on one CPU, only the one
+ * CPU will have IO-wait accounted, while the other has regular idle. Even
+ * though, if the storage were faster, both could've run at the same time,
+ * utilising both CPUs.
+ *
+ * This means that, when looking globally, the current IO-wait accounting on
+ * SMP is a lower bound, by reason of under accounting.
+ *
+ * Worse, since the numbers are provided per CPU, they are sometimes
+ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
+ * associated with any one particular CPU, it can wake to another CPU than it
+ * blocked on. This means the per CPU IO-wait number is meaningless.
+ *
+ * Task CPU affinities can make all that even more 'interesting'.
+ */
+
+unsigned int nr_iowait(void)
+{
+ unsigned int i, sum = 0;
+
+ for_each_possible_cpu(i)
+ sum += nr_iowait_cpu(i);
+
+ return sum;
+}
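+
+/*
+ * Illustrative userspace sketch (not prescribed by this patch): one place
+ * this accounting surfaces is the "iowait" column of /proc/stat, which per
+ * the comment above should be read as a lower bound rather than an exact
+ * per-CPU figure. The sketch reads the aggregate "cpu" line only.
+ *
+ *	#include <stdio.h>
+ *
+ *	int main(void)
+ *	{
+ *		unsigned long long usr, nice_ticks, sys, idle, iowait;
+ *		FILE *f = fopen("/proc/stat", "r");
+ *
+ *		if (!f)
+ *			return 1;
+ *		// aggregate line: cpu user nice system idle iowait ...
+ *		if (fscanf(f, "cpu %llu %llu %llu %llu %llu",
+ *			   &usr, &nice_ticks, &sys, &idle, &iowait) == 5)
+ *			printf("idle=%llu iowait=%llu (ticks)\n", idle, iowait);
+ *		fclose(f);
+ *		return 0;
+ *	}
+ */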
+
+#ifdef CONFIG_SMP
+
+/*
+ * sched_exec - execve() is a valuable balancing opportunity, because at
+ * this point the task has the smallest effective memory and cache
+ * footprint.
+ */
+void sched_exec(void)
+{
+}
+
+#endif
+
+DEFINE_PER_CPU(struct kernel_stat, kstat);
+DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
+
+EXPORT_PER_CPU_SYMBOL(kstat);
+EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
+
+static inline void update_curr(struct rq *rq, struct task_struct *p)
+{
+ s64 ns = rq->clock_task - p->last_ran;
+
+ p->sched_time += ns;
+ cgroup_account_cputime(p, ns);
+ account_group_exec_runtime(p, ns);
+
+ p->time_slice -= ns;
+ p->last_ran = rq->clock_task;
+}
+
+/*
+ * Return accounted runtime for the task.
+ * If the task is currently running, the returned value also includes the
+ * pending runtime that has not been accounted yet.
+ */
+unsigned long long task_sched_runtime(struct task_struct *p)
+{
+ unsigned long flags;
+ struct rq *rq;
+ raw_spinlock_t *lock;
+ u64 ns;
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
+ /*
+ * 64-bit doesn't need locks to atomically read a 64-bit value.
+ * So we have an optimization chance when the task's delta_exec is 0.
+ * Reading ->on_cpu is racy, but this is ok.
+ *
+ * If we race with it leaving CPU, we'll take a lock. So we're correct.
+ * If we race with it entering CPU, unaccounted time is 0. This is
+ * indistinguishable from the read occurring a few cycles earlier.
+ * If we see ->on_cpu without ->on_rq, the task is leaving, and has
+ * been accounted, so we're correct here as well.
+ */
+ if (!p->on_cpu || !task_on_rq_queued(p))
+ return tsk_seruntime(p);
+#endif
+
+ rq = task_access_lock_irqsave(p, &lock, &flags);
+ /*
+ * Must be ->curr _and_ ->on_rq. If dequeued, we would
+ * project cycles that may never be accounted to this
+ * thread, breaking clock_gettime().
+ */
+ if (p == rq->curr && task_on_rq_queued(p)) {
+ update_rq_clock(rq);
+ update_curr(rq, p);
+ }
+ ns = tsk_seruntime(p);
+ task_access_unlock_irqrestore(p, lock, &flags);
+
+ return ns;
+}
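+
+/*
+ * Minimal userspace sketch: the thread/process CPU clocks of clock_gettime()
+ * (CLOCK_THREAD_CPUTIME_ID / CLOCK_PROCESS_CPUTIME_ID) are ultimately served
+ * from this accounting via the posix CPU timer code, which is why the comment
+ * above is careful not to project cycles that may never be accounted.
+ *
+ *	#include <stdio.h>
+ *	#include <time.h>
+ *
+ *	int main(void)
+ *	{
+ *		struct timespec ts;
+ *		volatile unsigned long i, x = 0;
+ *
+ *		for (i = 0; i < 100000000UL; i++)	// burn some CPU
+ *			x += i;
+ *		if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0)
+ *			printf("thread cputime: %ld.%09ld s\n",
+ *			       (long)ts.tv_sec, ts.tv_nsec);
+ *		return 0;
+ *	}
+ */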
+
+/* This manages tasks that have run out of timeslice during a scheduler_tick */
+static inline void scheduler_task_tick(struct rq *rq)
+{
+ struct task_struct *p = rq->curr;
+
+ if (is_idle_task(p))
+ return;
+
+ update_curr(rq, p);
+ cpufreq_update_util(rq, 0);
+
+ /*
+ * Tasks with less than RESCHED_NS of time slice left will be
+ * rescheduled.
+ */
+ if (p->time_slice >= RESCHED_NS)
+ return;
+ set_tsk_need_resched(p);
+ set_preempt_need_resched();
+}
+
+#ifdef CONFIG_SCHED_DEBUG
+static u64 cpu_resched_latency(struct rq *rq)
+{
+ int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms);
+ u64 resched_latency, now = rq_clock(rq);
+ static bool warned_once;
+
+ if (sysctl_resched_latency_warn_once && warned_once)
+ return 0;
+
+ if (!need_resched() || !latency_warn_ms)
+ return 0;
+
+ if (system_state == SYSTEM_BOOTING)
+ return 0;
+
+ if (!rq->last_seen_need_resched_ns) {
+ rq->last_seen_need_resched_ns = now;
+ rq->ticks_without_resched = 0;
+ return 0;
+ }
+
+ rq->ticks_without_resched++;
+ resched_latency = now - rq->last_seen_need_resched_ns;
+ if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC)
+ return 0;
+
+ warned_once = true;
+
+ return resched_latency;
+}
+
+static int __init setup_resched_latency_warn_ms(char *str)
+{
+ long val;
+
+ if ((kstrtol(str, 0, &val))) {
+ pr_warn("Unable to set resched_latency_warn_ms\n");
+ return 1;
+ }
+
+ sysctl_resched_latency_warn_ms = val;
+ return 1;
+}
+__setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms);
+#else
+static inline u64 cpu_resched_latency(struct rq *rq) { return 0; }
+#endif /* CONFIG_SCHED_DEBUG */
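+
+/*
+ * Example usage (illustrative): booting with "resched_latency_warn_ms=100"
+ * on the kernel command line sets the warning threshold parsed above to
+ * 100ms; the warning itself only fires when the LATENCY_WARN scheduler
+ * feature is enabled (see scheduler_tick() below).
+ */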
+
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
+ */
+void scheduler_tick(void)
+{
+ int cpu __maybe_unused = smp_processor_id();
+ struct rq *rq = cpu_rq(cpu);
+ u64 resched_latency;
+
+ arch_scale_freq_tick();
+ sched_clock_tick();
+
+ raw_spin_lock(&rq->lock);
+ update_rq_clock(rq);
+
+ scheduler_task_tick(rq);
+ if (sched_feat(LATENCY_WARN))
+ resched_latency = cpu_resched_latency(rq);
+ calc_global_load_tick(rq);
+
+ rq->last_tick = rq->clock;
+ raw_spin_unlock(&rq->lock);
+
+ if (sched_feat(LATENCY_WARN) && resched_latency)
+ resched_latency_warn(cpu, resched_latency);
+
+ perf_event_task_tick();
+}
+
+#ifdef CONFIG_SCHED_SMT
+static inline int sg_balance_cpu_stop(void *data)
+{
+ struct rq *rq = this_rq();
+ struct task_struct *p = data;
+ cpumask_t tmp;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ raw_spin_lock(&p->pi_lock);
+ raw_spin_lock(&rq->lock);
+
+ rq->active_balance = 0;
+ /* _something_ may have changed the task, double check again */
+ if (task_on_rq_queued(p) && task_rq(p) == rq &&
+ cpumask_and(&tmp, p->cpus_ptr, &sched_sg_idle_mask) &&
+ !is_migration_disabled(p)) {
+ int cpu = cpu_of(rq);
+ int dcpu = __best_mask_cpu(&tmp, per_cpu(sched_cpu_llc_mask, cpu));
+ rq = move_queued_task(rq, p, dcpu);
+ }
+
+ raw_spin_unlock(&rq->lock);
+ raw_spin_unlock(&p->pi_lock);
+
+ local_irq_restore(flags);
+
+ return 0;
+}
+
+/* sg_balance_trigger - trigger sibling group balance for @cpu */
+static inline int sg_balance_trigger(const int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+ struct task_struct *curr;
+ int res;
+
+ if (!raw_spin_trylock_irqsave(&rq->lock, flags))
+ return 0;
+ curr = rq->curr;
+ res = (!is_idle_task(curr)) && (1 == rq->nr_running) &&\
+ cpumask_intersects(curr->cpus_ptr, &sched_sg_idle_mask) &&\
+ !is_migration_disabled(curr) && (!rq->active_balance);
+
+ if (res)
+ rq->active_balance = 1;
+
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+ if (res)
+ stop_one_cpu_nowait(cpu, sg_balance_cpu_stop, curr,
+ &rq->active_balance_work);
+ return res;
+}
+
+/*
+ * sg_balance - sibling group balance check for run queue @rq
+ */
+static inline void sg_balance(struct rq *rq)
+{
+ cpumask_t chk;
+ int cpu = cpu_of(rq);
+
+ /* exit when cpu is offline */
+ if (unlikely(!rq->online))
+ return;
+
+ /*
+ * Only a cpu in the sibling idle group will do the checking and then
+ * find potential cpus to which the currently running task can be migrated
+ */
+ if (cpumask_test_cpu(cpu, &sched_sg_idle_mask) &&
+ cpumask_andnot(&chk, cpu_online_mask, sched_rq_watermark) &&
+ cpumask_andnot(&chk, &chk, &sched_rq_pending_mask)) {
+ int i;
+
+ for_each_cpu_wrap(i, &chk, cpu) {
+ if (cpumask_subset(cpu_smt_mask(i), &chk) &&
+ sg_balance_trigger(i))
+ return;
+ }
+ }
+}
+#endif /* CONFIG_SCHED_SMT */
+
+#ifdef CONFIG_NO_HZ_FULL
+
+struct tick_work {
+ int cpu;
+ atomic_t state;
+ struct delayed_work work;
+};
+/* Values for ->state, see diagram below. */
+#define TICK_SCHED_REMOTE_OFFLINE 0
+#define TICK_SCHED_REMOTE_OFFLINING 1
+#define TICK_SCHED_REMOTE_RUNNING 2
+
+/*
+ * State diagram for ->state:
+ *
+ *
+ * TICK_SCHED_REMOTE_OFFLINE
+ * | ^
+ * | |
+ * | | sched_tick_remote()
+ * | |
+ * | |
+ * +--TICK_SCHED_REMOTE_OFFLINING
+ * | ^
+ * | |
+ * sched_tick_start() | | sched_tick_stop()
+ * | |
+ * V |
+ * TICK_SCHED_REMOTE_RUNNING
+ *
+ *
+ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
+ * and sched_tick_start() are happy to leave the state in RUNNING.
+ */
+
+static struct tick_work __percpu *tick_work_cpu;
+
+static void sched_tick_remote(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tick_work *twork = container_of(dwork, struct tick_work, work);
+ int cpu = twork->cpu;
+ struct rq *rq = cpu_rq(cpu);
+ struct task_struct *curr;
+ unsigned long flags;
+ u64 delta;
+ int os;
+
+ /*
+ * Handle the tick only if it appears the remote CPU is running in full
+ * dynticks mode. The check is racy by nature, but missing a tick or
+ * having one too much is no big deal because the scheduler tick updates
+ * statistics and checks timeslices in a time-independent way, regardless
+ * of when exactly it is running.
+ */
+ if (!tick_nohz_tick_stopped_cpu(cpu))
+ goto out_requeue;
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ curr = rq->curr;
+ if (cpu_is_offline(cpu))
+ goto out_unlock;
+
+ update_rq_clock(rq);
+ if (!is_idle_task(curr)) {
+ /*
+ * Make sure the next tick runs within a reasonable
+ * amount of time.
+ */
+ delta = rq_clock_task(rq) - curr->last_ran;
+ WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+ }
+ scheduler_task_tick(rq);
+
+ calc_load_nohz_remote(rq);
+out_unlock:
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+out_requeue:
+ /*
+ * Run the remote tick once per second (1Hz). This arbitrary
+ * frequency is large enough to avoid overload but short enough
+ * to keep scheduler internal stats reasonably up to date. But
+ * first update state to reflect hotplug activity if required.
+ */
+ os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
+ WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
+ if (os == TICK_SCHED_REMOTE_RUNNING)
+ queue_delayed_work(system_unbound_wq, dwork, HZ);
+}
+
+static void sched_tick_start(int cpu)
+{
+ int os;
+ struct tick_work *twork;
+
+ if (housekeeping_cpu(cpu, HK_TYPE_TICK))
+ return;
+
+ WARN_ON_ONCE(!tick_work_cpu);
+
+ twork = per_cpu_ptr(tick_work_cpu, cpu);
+ os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
+ WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
+ if (os == TICK_SCHED_REMOTE_OFFLINE) {
+ twork->cpu = cpu;
+ INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
+ queue_delayed_work(system_unbound_wq, &twork->work, HZ);
+ }
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void sched_tick_stop(int cpu)
+{
+ struct tick_work *twork;
+
+ if (housekeeping_cpu(cpu, HK_TYPE_TICK))
+ return;
+
+ WARN_ON_ONCE(!tick_work_cpu);
+
+ twork = per_cpu_ptr(tick_work_cpu, cpu);
+ cancel_delayed_work_sync(&twork->work);
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+int __init sched_tick_offload_init(void)
+{
+ tick_work_cpu = alloc_percpu(struct tick_work);
+ BUG_ON(!tick_work_cpu);
+ return 0;
+}
+
+#else /* !CONFIG_NO_HZ_FULL */
+static inline void sched_tick_start(int cpu) { }
+static inline void sched_tick_stop(int cpu) { }
+#endif
+
+#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
+ defined(CONFIG_PREEMPT_TRACER))
+/*
+ * If the value passed in is equal to the current preempt count
+ * then we just disabled preemption. Start timing the latency.
+ */
+static inline void preempt_latency_start(int val)
+{
+ if (preempt_count() == val) {
+ unsigned long ip = get_lock_parent_ip();
+#ifdef CONFIG_DEBUG_PREEMPT
+ current->preempt_disable_ip = ip;
+#endif
+ trace_preempt_off(CALLER_ADDR0, ip);
+ }
+}
+
+void preempt_count_add(int val)
+{
+#ifdef CONFIG_DEBUG_PREEMPT
+ /*
+ * Underflow?
+ */
+ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
+ return;
+#endif
+ __preempt_count_add(val);
+#ifdef CONFIG_DEBUG_PREEMPT
+ /*
+ * Spinlock count overflowing soon?
+ */
+ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
+ PREEMPT_MASK - 10);
+#endif
+ preempt_latency_start(val);
+}
+EXPORT_SYMBOL(preempt_count_add);
+NOKPROBE_SYMBOL(preempt_count_add);
+
+/*
+ * If the value passed in equals the current preempt count
+ * then we just enabled preemption. Stop timing the latency.
+ */
+static inline void preempt_latency_stop(int val)
+{
+ if (preempt_count() == val)
+ trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
+}
+
+void preempt_count_sub(int val)
+{
+#ifdef CONFIG_DEBUG_PREEMPT
+ /*
+ * Underflow?
+ */
+ if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
+ return;
+ /*
+ * Is the spinlock portion underflowing?
+ */
+ if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
+ !(preempt_count() & PREEMPT_MASK)))
+ return;
+#endif
+
+ preempt_latency_stop(val);
+ __preempt_count_sub(val);
+}
+EXPORT_SYMBOL(preempt_count_sub);
+NOKPROBE_SYMBOL(preempt_count_sub);
+
+#else
+static inline void preempt_latency_start(int val) { }
+static inline void preempt_latency_stop(int val) { }
+#endif
+
+static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
+{
+#ifdef CONFIG_DEBUG_PREEMPT
+ return p->preempt_disable_ip;
+#else
+ return 0;
+#endif
+}
+
+/*
+ * Print scheduling while atomic bug:
+ */
+static noinline void __schedule_bug(struct task_struct *prev)
+{
+ /* Save this before calling printk(), since that will clobber it */
+ unsigned long preempt_disable_ip = get_preempt_disable_ip(current);
+
+ if (oops_in_progress)
+ return;
+
+ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
+ prev->comm, prev->pid, preempt_count());
+
+ debug_show_held_locks(prev);
+ print_modules();
+ if (irqs_disabled())
+ print_irqtrace_events(prev);
+ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)
+ && in_atomic_preempt_off()) {
+ pr_err("Preemption disabled at:");
+ print_ip_sym(KERN_ERR, preempt_disable_ip);
+ }
+ if (panic_on_warn)
+ panic("scheduling while atomic\n");
+
+ dump_stack();
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+}
+
+/*
+ * Various schedule()-time debugging checks and statistics:
+ */
+static inline void schedule_debug(struct task_struct *prev, bool preempt)
+{
+#ifdef CONFIG_SCHED_STACK_END_CHECK
+ if (task_stack_end_corrupted(prev))
+ panic("corrupted stack end detected inside scheduler\n");
+
+ if (task_scs_end_corrupted(prev))
+ panic("corrupted shadow stack detected inside scheduler\n");
+#endif
+
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) {
+ printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
+ prev->comm, prev->pid, prev->non_block_count);
+ dump_stack();
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+ }
+#endif
+
+ if (unlikely(in_atomic_preempt_off())) {
+ __schedule_bug(prev);
+ preempt_count_set(PREEMPT_DISABLED);
+ }
+ rcu_sleep_check();
+ SCHED_WARN_ON(ct_state() == CONTEXT_USER);
+
+ profile_hit(SCHED_PROFILING, __builtin_return_address(0));
+
+ schedstat_inc(this_rq()->sched_count);
+}
+
+/*
+ * Compile time debug macro
+ * #define ALT_SCHED_DEBUG
+ */
+
+#ifdef ALT_SCHED_DEBUG
+void alt_sched_debug(void)
+{
+ printk(KERN_INFO "sched: pending: 0x%04lx, idle: 0x%04lx, sg_idle: 0x%04lx\n",
+ sched_rq_pending_mask.bits[0],
+ sched_rq_watermark[0].bits[0],
+ sched_sg_idle_mask.bits[0]);
+}
+#else
+inline void alt_sched_debug(void) {}
+#endif
+
+#ifdef CONFIG_SMP
+
+#define SCHED_RQ_NR_MIGRATION (32U)
+/*
+ * Migrate pending tasks in @rq to @dest_cpu
+ * Will try to migrate at most the minimum of half of @rq's nr_running tasks
+ * and SCHED_RQ_NR_MIGRATION tasks to @dest_cpu
+ */
+static inline int
+migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, const int dest_cpu)
+{
+ struct task_struct *p, *skip = rq->curr;
+ int nr_migrated = 0;
+ int nr_tries = min(rq->nr_running / 2, SCHED_RQ_NR_MIGRATION);
+
+ while (skip != rq->idle && nr_tries &&
+ (p = sched_rq_next_task(skip, rq)) != rq->idle) {
+ skip = sched_rq_next_task(p, rq);
+ if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) {
+ __SCHED_DEQUEUE_TASK(p, rq, 0);
+ set_task_cpu(p, dest_cpu);
+ sched_task_sanity_check(p, dest_rq);
+ __SCHED_ENQUEUE_TASK(p, dest_rq, 0);
+ nr_migrated++;
+ }
+ nr_tries--;
+ }
+
+ return nr_migrated;
+}
+
+static inline int take_other_rq_tasks(struct rq *rq, int cpu)
+{
+ struct cpumask *topo_mask, *end_mask;
+
+ if (unlikely(!rq->online))
+ return 0;
+
+ if (cpumask_empty(&sched_rq_pending_mask))
+ return 0;
+
+ topo_mask = per_cpu(sched_cpu_topo_masks, cpu) + 1;
+ end_mask = per_cpu(sched_cpu_topo_end_mask, cpu);
+ do {
+ int i;
+ for_each_cpu_and(i, &sched_rq_pending_mask, topo_mask) {
+ int nr_migrated;
+ struct rq *src_rq;
+
+ src_rq = cpu_rq(i);
+ if (!do_raw_spin_trylock(&src_rq->lock))
+ continue;
+ spin_acquire(&src_rq->lock.dep_map,
+ SINGLE_DEPTH_NESTING, 1, _RET_IP_);
+
+ if ((nr_migrated = migrate_pending_tasks(src_rq, rq, cpu))) {
+ src_rq->nr_running -= nr_migrated;
+ if (src_rq->nr_running < 2)
+ cpumask_clear_cpu(i, &sched_rq_pending_mask);
+
+ rq->nr_running += nr_migrated;
+ if (rq->nr_running > 1)
+ cpumask_set_cpu(cpu, &sched_rq_pending_mask);
+
+ cpufreq_update_util(rq, 0);
+
+ spin_release(&src_rq->lock.dep_map, _RET_IP_);
+ do_raw_spin_unlock(&src_rq->lock);
+
+ return 1;
+ }
+
+ spin_release(&src_rq->lock.dep_map, _RET_IP_);
+ do_raw_spin_unlock(&src_rq->lock);
+ }
+ } while (++topo_mask < end_mask);
+
+ return 0;
+}
+#endif
+
+/*
+ * Timeslices below RESCHED_NS are considered as good as expired as there's no
+ * point rescheduling when there's so little time left.
+ */
+static inline void check_curr(struct task_struct *p, struct rq *rq)
+{
+ if (unlikely(rq->idle == p))
+ return;
+
+ update_curr(rq, p);
+
+ if (p->time_slice < RESCHED_NS)
+ time_slice_expired(p, rq);
+}
+
+static inline struct task_struct *
+choose_next_task(struct rq *rq, int cpu, struct task_struct *prev)
+{
+ struct task_struct *next;
+
+ if (unlikely(rq->skip)) {
+ next = rq_runnable_task(rq);
+ if (next == rq->idle) {
+#ifdef CONFIG_SMP
+ if (!take_other_rq_tasks(rq, cpu)) {
+#endif
+ rq->skip = NULL;
+ schedstat_inc(rq->sched_goidle);
+ return next;
+#ifdef CONFIG_SMP
+ }
+ next = rq_runnable_task(rq);
+#endif
+ }
+ rq->skip = NULL;
+#ifdef CONFIG_HIGH_RES_TIMERS
+ hrtick_start(rq, next->time_slice);
+#endif
+ return next;
+ }
+
+ next = sched_rq_first_task(rq);
+ if (next == rq->idle) {
+#ifdef CONFIG_SMP
+ if (!take_other_rq_tasks(rq, cpu)) {
+#endif
+ schedstat_inc(rq->sched_goidle);
+ /*printk(KERN_INFO "sched: choose_next_task(%d) idle %px\n", cpu, next);*/
+ return next;
+#ifdef CONFIG_SMP
+ }
+ next = sched_rq_first_task(rq);
+#endif
+ }
+#ifdef CONFIG_HIGH_RES_TIMERS
+ hrtick_start(rq, next->time_slice);
+#endif
+ /*printk(KERN_INFO "sched: choose_next_task(%d) next %px\n", cpu,
+ * next);*/
+ return next;
+}
+
+/*
+ * Constants for the sched_mode argument of __schedule().
+ *
+ * The mode argument allows RT enabled kernels to differentiate a
+ * preemption from blocking on an 'sleeping' spin/rwlock. Note that
+ * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to
+ * optimize the AND operation out and just check for zero.
+ */
+#define SM_NONE 0x0
+#define SM_PREEMPT 0x1
+#define SM_RTLOCK_WAIT 0x2
+
+#ifndef CONFIG_PREEMPT_RT
+# define SM_MASK_PREEMPT (~0U)
+#else
+# define SM_MASK_PREEMPT SM_PREEMPT
+#endif
+
+/*
+ * schedule() is the main scheduler function.
+ *
+ * The main means of driving the scheduler and thus entering this function are:
+ *
+ * 1. Explicit blocking: mutex, semaphore, waitqueue, etc.
+ *
+ * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
+ * paths. For example, see arch/x86/entry_64.S.
+ *
+ * To drive preemption between tasks, the scheduler sets the flag in timer
+ * interrupt handler scheduler_tick().
+ *
+ * 3. Wakeups don't really cause entry into schedule(). They add a
+ * task to the run-queue and that's it.
+ *
+ * Now, if the new task added to the run-queue preempts the current
+ * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
+ * called on the nearest possible occasion:
+ *
+ * - If the kernel is preemptible (CONFIG_PREEMPTION=y):
+ *
+ * - in syscall or exception context, at the next outmost
+ * preempt_enable(). (this might be as soon as the wake_up()'s
+ * spin_unlock()!)
+ *
+ * - in IRQ context, return from interrupt-handler to
+ * preemptible context
+ *
+ * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
+ * then at the next:
+ *
+ * - cond_resched() call
+ * - explicit schedule() call
+ * - return from syscall or exception to user-space
+ * - return from interrupt-handler to user-space
+ *
+ * WARNING: must be called with preemption disabled!
+ */
+static void __sched notrace __schedule(unsigned int sched_mode)
+{
+ struct task_struct *prev, *next;
+ unsigned long *switch_count;
+ unsigned long prev_state;
+ struct rq *rq;
+ int cpu;
+ int deactivated = 0;
+
+ cpu = smp_processor_id();
+ rq = cpu_rq(cpu);
+ prev = rq->curr;
+
+ schedule_debug(prev, !!sched_mode);
+
+ /* Bypass the sched_feat(HRTICK) check, which Alt schedule FW doesn't support */
+ hrtick_clear(rq);
+
+ local_irq_disable();
+ rcu_note_context_switch(!!sched_mode);
+
+ /*
+ * Make sure that signal_pending_state()->signal_pending() below
+ * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
+ * done by the caller to avoid the race with signal_wake_up():
+ *
+ * __set_current_state(@state) signal_wake_up()
+ * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING)
+ * wake_up_state(p, state)
+ * LOCK rq->lock LOCK p->pi_state
+ * smp_mb__after_spinlock() smp_mb__after_spinlock()
+ * if (signal_pending_state()) if (p->state & @state)
+ *
+ * Also, the membarrier system call requires a full memory barrier
+ * after coming from user-space, before storing to rq->curr.
+ */
+ raw_spin_lock(&rq->lock);
+ smp_mb__after_spinlock();
+
+ update_rq_clock(rq);
+
+ switch_count = &prev->nivcsw;
+ /*
+ * We must load prev->state once (task_struct::state is volatile), such
+ * that we form a control dependency vs deactivate_task() below.
+ */
+ prev_state = READ_ONCE(prev->__state);
+ if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) {
+ if (signal_pending_state(prev_state, prev)) {
+ WRITE_ONCE(prev->__state, TASK_RUNNING);
+ } else {
+ prev->sched_contributes_to_load =
+ (prev_state & TASK_UNINTERRUPTIBLE) &&
+ !(prev_state & TASK_NOLOAD) &&
+ !(prev->flags & PF_FROZEN);
+
+ if (prev->sched_contributes_to_load)
+ rq->nr_uninterruptible++;
+
+ /*
+ * __schedule() ttwu()
+ * prev_state = prev->state; if (p->on_rq && ...)
+ * if (prev_state) goto out;
+ * p->on_rq = 0; smp_acquire__after_ctrl_dep();
+ * p->state = TASK_WAKING
+ *
+ * Where __schedule() and ttwu() have matching control dependencies.
+ *
+ * After this, schedule() must not care about p->state any more.
+ */
+ sched_task_deactivate(prev, rq);
+ deactivate_task(prev, rq);
+ deactivated = 1;
+
+ if (prev->in_iowait) {
+ atomic_inc(&rq->nr_iowait);
+ delayacct_blkio_start();
+ }
+ }
+ switch_count = &prev->nvcsw;
+ }
+
+ check_curr(prev, rq);
+
+ next = choose_next_task(rq, cpu, prev);
+ clear_tsk_need_resched(prev);
+ clear_preempt_need_resched();
+#ifdef CONFIG_SCHED_DEBUG
+ rq->last_seen_need_resched_ns = 0;
+#endif
+
+ if (likely(prev != next)) {
+ if (deactivated)
+ update_sched_rq_watermark(rq);
+ next->last_ran = rq->clock_task;
+ rq->last_ts_switch = rq->clock;
+
+ rq->nr_switches++;
+ /*
+ * RCU users of rcu_dereference(rq->curr) may not see
+ * changes to task_struct made by pick_next_task().
+ */
+ RCU_INIT_POINTER(rq->curr, next);
+ /*
+ * The membarrier system call requires each architecture
+ * to have a full memory barrier after updating
+ * rq->curr, before returning to user-space.
+ *
+ * Here are the schemes providing that barrier on the
+ * various architectures:
+ * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC.
+ * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC.
+ * - finish_lock_switch() for weakly-ordered
+ * architectures where spin_unlock is a full barrier,
+ * - switch_to() for arm64 (weakly-ordered, spin_unlock
+ * is a RELEASE barrier),
+ */
+ ++*switch_count;
+
+ psi_sched_switch(prev, next, !task_on_rq_queued(prev));
+
+ trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state);
+
+ /* Also unlocks the rq: */
+ rq = context_switch(rq, prev, next);
+ } else {
+ __balance_callbacks(rq);
+ raw_spin_unlock_irq(&rq->lock);
+ }
+
+#ifdef CONFIG_SCHED_SMT
+ sg_balance(rq);
+#endif
+}
+
+void __noreturn do_task_dead(void)
+{
+ /* Causes final put_task_struct in finish_task_switch(): */
+ set_special_state(TASK_DEAD);
+
+ /* Tell freezer to ignore us: */
+ current->flags |= PF_NOFREEZE;
+
+ __schedule(SM_NONE);
+ BUG();
+
+ /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
+ for (;;)
+ cpu_relax();
+}
+
+static inline void sched_submit_work(struct task_struct *tsk)
+{
+ unsigned int task_flags;
+
+ if (task_is_running(tsk))
+ return;
+
+ task_flags = tsk->flags;
+ /*
+ * If a worker goes to sleep, notify and ask workqueue whether it
+ * wants to wake up a task to maintain concurrency.
+ */
+ if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
+ if (task_flags & PF_WQ_WORKER)
+ wq_worker_sleeping(tsk);
+ else
+ io_wq_worker_sleeping(tsk);
+ }
+
+ if (tsk_is_pi_blocked(tsk))
+ return;
+
+ /*
+ * If we are going to sleep and we have plugged IO queued,
+ * make sure to submit it to avoid deadlocks.
+ */
+ blk_flush_plug(tsk->plug, true);
+}
+
+static void sched_update_worker(struct task_struct *tsk)
+{
+ if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) {
+ if (tsk->flags & PF_WQ_WORKER)
+ wq_worker_running(tsk);
+ else
+ io_wq_worker_running(tsk);
+ }
+}
+
+asmlinkage __visible void __sched schedule(void)
+{
+ struct task_struct *tsk = current;
+
+ sched_submit_work(tsk);
+ do {
+ preempt_disable();
+ __schedule(SM_NONE);
+ sched_preempt_enable_no_resched();
+ } while (need_resched());
+ sched_update_worker(tsk);
+}
+EXPORT_SYMBOL(schedule);
+
+/*
+ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted
+ * state (have scheduled out non-voluntarily) by making sure that all
+ * tasks have either left the run queue or have gone into user space.
+ * As idle tasks do not do either, they must not ever be preempted
+ * (schedule out non-voluntarily).
+ *
+ * schedule_idle() is similar to schedule_preempt_disabled() except that it
+ * never enables preemption because it does not call sched_submit_work().
+ */
+void __sched schedule_idle(void)
+{
+ /*
+ * As this skips calling sched_submit_work(), which the idle task does
+ * regardless because that function is a nop when the task is in a
+ * TASK_RUNNING state, make sure this isn't used someplace that the
+ * current task can be in any other state. Note, idle is always in the
+ * TASK_RUNNING state.
+ */
+ WARN_ON_ONCE(current->__state);
+ do {
+ __schedule(SM_NONE);
+ } while (need_resched());
+}
+
+#if defined(CONFIG_CONTEXT_TRACKING) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK)
+asmlinkage __visible void __sched schedule_user(void)
+{
+ /*
+ * If we come here after a random call to set_need_resched(),
+ * or we have been woken up remotely but the IPI has not yet arrived,
+ * we haven't yet exited the RCU idle mode. Do it here manually until
+ * we find a better solution.
+ *
+ * NB: There are buggy callers of this function. Ideally we
+ * should warn if prev_state != CONTEXT_USER, but that will trigger
+ * too frequently to make sense yet.
+ */
+ enum ctx_state prev_state = exception_enter();
+ schedule();
+ exception_exit(prev_state);
+}
+#endif
+
+/**
+ * schedule_preempt_disabled - called with preemption disabled
+ *
+ * Returns with preemption disabled. Note: preempt_count must be 1
+ */
+void __sched schedule_preempt_disabled(void)
+{
+ sched_preempt_enable_no_resched();
+ schedule();
+ preempt_disable();
+}
+
+#ifdef CONFIG_PREEMPT_RT
+void __sched notrace schedule_rtlock(void)
+{
+ do {
+ preempt_disable();
+ __schedule(SM_RTLOCK_WAIT);
+ sched_preempt_enable_no_resched();
+ } while (need_resched());
+}
+NOKPROBE_SYMBOL(schedule_rtlock);
+#endif
+
+static void __sched notrace preempt_schedule_common(void)
+{
+ do {
+ /*
+ * Because the function tracer can trace preempt_count_sub()
+ * and it also uses preempt_enable/disable_notrace(), if
+ * NEED_RESCHED is set, the preempt_enable_notrace() called
+ * by the function tracer will call this function again and
+ * cause infinite recursion.
+ *
+ * Preemption must be disabled here before the function
+ * tracer can trace. Break up preempt_disable() into two
+ * calls. One to disable preemption without fear of being
+ * traced. The other to still record the preemption latency,
+ * which can also be traced by the function tracer.
+ */
+ preempt_disable_notrace();
+ preempt_latency_start(1);
+ __schedule(SM_PREEMPT);
+ preempt_latency_stop(1);
+ preempt_enable_no_resched_notrace();
+
+ /*
+ * Check again in case we missed a preemption opportunity
+ * between schedule and now.
+ */
+ } while (need_resched());
+}
+
+#ifdef CONFIG_PREEMPTION
+/*
+ * This is the entry point to schedule() from in-kernel preemption
+ * off of preempt_enable.
+ */
+asmlinkage __visible void __sched notrace preempt_schedule(void)
+{
+ /*
+ * If there is a non-zero preempt_count or interrupts are disabled,
+ * we do not want to preempt the current task. Just return..
+ */
+ if (likely(!preemptible()))
+ return;
+
+ preempt_schedule_common();
+}
+NOKPROBE_SYMBOL(preempt_schedule);
+EXPORT_SYMBOL(preempt_schedule);
+
+#ifdef CONFIG_PREEMPT_DYNAMIC
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+#ifndef preempt_schedule_dynamic_enabled
+#define preempt_schedule_dynamic_enabled preempt_schedule
+#define preempt_schedule_dynamic_disabled NULL
+#endif
+DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled);
+EXPORT_STATIC_CALL_TRAMP(preempt_schedule);
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule);
+void __sched notrace dynamic_preempt_schedule(void)
+{
+ if (!static_branch_unlikely(&sk_dynamic_preempt_schedule))
+ return;
+ preempt_schedule();
+}
+NOKPROBE_SYMBOL(dynamic_preempt_schedule);
+EXPORT_SYMBOL(dynamic_preempt_schedule);
+#endif
+#endif
+
+/**
+ * preempt_schedule_notrace - preempt_schedule called by tracing
+ *
+ * The tracing infrastructure uses preempt_enable_notrace to prevent
+ * recursion and tracing preempt enabling caused by the tracing
+ * infrastructure itself. But as tracing can happen in areas coming
+ * from userspace or just about to enter userspace, a preempt enable
+ * can occur before user_exit() is called. This will cause the scheduler
+ * to be called when the system is still in usermode.
+ *
+ * To prevent this, the preempt_enable_notrace will use this function
+ * instead of preempt_schedule() to exit user context if needed before
+ * calling the scheduler.
+ */
+asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
+{
+ enum ctx_state prev_ctx;
+
+ if (likely(!preemptible()))
+ return;
+
+ do {
+ /*
+ * Because the function tracer can trace preempt_count_sub()
+ * and it also uses preempt_enable/disable_notrace(), if
+ * NEED_RESCHED is set, the preempt_enable_notrace() called
+ * by the function tracer will call this function again and
+ * cause infinite recursion.
+ *
+ * Preemption must be disabled here before the function
+ * tracer can trace. Break up preempt_disable() into two
+ * calls. One to disable preemption without fear of being
+ * traced. The other to still record the preemption latency,
+ * which can also be traced by the function tracer.
+ */
+ preempt_disable_notrace();
+ preempt_latency_start(1);
+ /*
+ * Needs preempt disabled in case user_exit() is traced
+ * and the tracer calls preempt_enable_notrace() causing
+ * an infinite recursion.
+ */
+ prev_ctx = exception_enter();
+ __schedule(SM_PREEMPT);
+ exception_exit(prev_ctx);
+
+ preempt_latency_stop(1);
+ preempt_enable_no_resched_notrace();
+ } while (need_resched());
+}
+EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
+
+#ifdef CONFIG_PREEMPT_DYNAMIC
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+#ifndef preempt_schedule_notrace_dynamic_enabled
+#define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace
+#define preempt_schedule_notrace_dynamic_disabled NULL
+#endif
+DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled);
+EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace);
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace);
+void __sched notrace dynamic_preempt_schedule_notrace(void)
+{
+ if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace))
+ return;
+ preempt_schedule_notrace();
+}
+NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace);
+EXPORT_SYMBOL(dynamic_preempt_schedule_notrace);
+#endif
+#endif
+
+#endif /* CONFIG_PREEMPTION */
+
+/*
+ * This is the entry point to schedule() from kernel preemption
+ * off of irq context.
+ * Note that this is called and returns with irqs disabled. This will
+ * protect us against recursive calling from irq.
+ */
+asmlinkage __visible void __sched preempt_schedule_irq(void)
+{
+ enum ctx_state prev_state;
+
+ /* Catch callers which need to be fixed */
+ BUG_ON(preempt_count() || !irqs_disabled());
+
+ prev_state = exception_enter();
+
+ do {
+ preempt_disable();
+ local_irq_enable();
+ __schedule(SM_PREEMPT);
+ local_irq_disable();
+ sched_preempt_enable_no_resched();
+ } while (need_resched());
+
+ exception_exit(prev_state);
+}
+
+int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags,
+ void *key)
+{
+ WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC);
+ return try_to_wake_up(curr->private, mode, wake_flags);
+}
+EXPORT_SYMBOL(default_wake_function);
+
+static inline void check_task_changed(struct task_struct *p, struct rq *rq)
+{
+ int idx;
+
+ /* Trigger resched if task sched_prio has been modified. */
+ if (task_on_rq_queued(p) && (idx = task_sched_prio_idx(p, rq)) != p->sq_idx) {
+ requeue_task(p, rq, idx);
+ check_preempt_curr(rq);
+ }
+}
+
+static void __setscheduler_prio(struct task_struct *p, int prio)
+{
+ p->prio = prio;
+}
+
+#ifdef CONFIG_RT_MUTEXES
+
+static inline int __rt_effective_prio(struct task_struct *pi_task, int prio)
+{
+ if (pi_task)
+ prio = min(prio, pi_task->prio);
+
+ return prio;
+}
+
+static inline int rt_effective_prio(struct task_struct *p, int prio)
+{
+ struct task_struct *pi_task = rt_mutex_get_top_task(p);
+
+ return __rt_effective_prio(pi_task, prio);
+}
+
+/*
+ * rt_mutex_setprio - set the current priority of a task
+ * @p: task to boost
+ * @pi_task: donor task
+ *
+ * This function changes the 'effective' priority of a task. It does
+ * not touch ->normal_prio like __setscheduler().
+ *
+ * Used by the rt_mutex code to implement priority inheritance
+ * logic. Call site only calls if the priority of the task changed.
+ */
+void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
+{
+ int prio;
+ struct rq *rq;
+ raw_spinlock_t *lock;
+
+ /* XXX used to be waiter->prio, not waiter->task->prio */
+ prio = __rt_effective_prio(pi_task, p->normal_prio);
+
+ /*
+ * If nothing changed; bail early.
+ */
+ if (p->pi_top_task == pi_task && prio == p->prio)
+ return;
+
+ rq = __task_access_lock(p, &lock);
+ /*
+ * Set under pi_lock && rq->lock, such that the value can be used under
+ * either lock.
+ *
+ * Note that there is loads of trickery required to make this pointer cache work
+ * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to
+ * ensure a task is de-boosted (pi_task is set to NULL) before the
+ * task is allowed to run again (and can exit). This ensures the pointer
+ * points to a blocked task -- which guarantees the task is present.
+ */
+ p->pi_top_task = pi_task;
+
+ /*
+ * For FIFO/RR we only need to set prio, if that matches we're done.
+ */
+ if (prio == p->prio)
+ goto out_unlock;
+
+ /*
+ * Idle task boosting is a no-no in general. There is one
+ * exception, when PREEMPT_RT and NOHZ is active:
+ *
+ * The idle task calls get_next_timer_interrupt() and holds
+ * the timer wheel base->lock on the CPU and another CPU wants
+ * to access the timer (probably to cancel it). We can safely
+ * ignore the boosting request, as the idle CPU runs this code
+ * with interrupts disabled and will complete the lock
+ * protected section without being interrupted. So there is no
+ * real need to boost.
+ */
+ if (unlikely(p == rq->idle)) {
+ WARN_ON(p != rq->curr);
+ WARN_ON(p->pi_blocked_on);
+ goto out_unlock;
+ }
+
+ trace_sched_pi_setprio(p, pi_task);
+
+ __setscheduler_prio(p, prio);
+
+ check_task_changed(p, rq);
+out_unlock:
+ /* Avoid rq from going away on us: */
+ preempt_disable();
+
+ __balance_callbacks(rq);
+ __task_access_unlock(p, lock);
+
+ preempt_enable();
+}
+#else
+static inline int rt_effective_prio(struct task_struct *p, int prio)
+{
+ return prio;
+}
+#endif
+
+void set_user_nice(struct task_struct *p, long nice)
+{
+ unsigned long flags;
+ struct rq *rq;
+ raw_spinlock_t *lock;
+
+ if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
+ return;
+ /*
+ * We have to be careful, if called from sys_setpriority(),
+ * the task might be in the middle of scheduling on another CPU.
+ */
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
+ rq = __task_access_lock(p, &lock);
+
+ p->static_prio = NICE_TO_PRIO(nice);
+ /*
+ * The RT priorities are set via sched_setscheduler(), but we still
+ * allow the 'normal' nice value to be set - but as expected
+ * it won't have any effect on scheduling until the task is
+ * SCHED_NORMAL/SCHED_BATCH again:
+ */
+ if (task_has_rt_policy(p))
+ goto out_unlock;
+
+ p->prio = effective_prio(p);
+
+ check_task_changed(p, rq);
+out_unlock:
+ __task_access_unlock(p, lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+}
+EXPORT_SYMBOL(set_user_nice);
+
+/*
+ * can_nice - check if a task can reduce its nice value
+ * @p: task
+ * @nice: nice value
+ */
+int can_nice(const struct task_struct *p, const int nice)
+{
+ /* Convert nice value [19,-20] to rlimit style value [1,40] */
+ int nice_rlim = nice_to_rlimit(nice);
+
+ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
+ capable(CAP_SYS_NICE));
+}
+
+#ifdef __ARCH_WANT_SYS_NICE
+
+/*
+ * sys_nice - change the priority of the current process.
+ * @increment: priority increment
+ *
+ * sys_setpriority is a more generic, but much slower function that
+ * does similar things.
+ */
+SYSCALL_DEFINE1(nice, int, increment)
+{
+ long nice, retval;
+
+ /*
+ * Setpriority might change our priority at the same moment.
+ * We don't have to worry. Conceptually one call occurs first
+ * and we have a single winner.
+ */
+
+ increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
+ nice = task_nice(current) + increment;
+
+ nice = clamp_val(nice, MIN_NICE, MAX_NICE);
+ if (increment < 0 && !can_nice(current, nice))
+ return -EPERM;
+
+ retval = security_task_setnice(current, nice);
+ if (retval)
+ return retval;
+
+ set_user_nice(current, nice);
+ return 0;
+}
+
+#endif
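+
+/*
+ * Illustrative userspace sketch of the rlimit check in can_nice() above:
+ * RLIMIT_NICE holds the limit in the 1..40 "rlimit style", so the lowest
+ * nice value an unprivileged task may request is 20 - rlim_cur, and a
+ * nice(-5) beyond that fails with EPERM (the -5 is just an example value).
+ *
+ *	#include <errno.h>
+ *	#include <stdio.h>
+ *	#include <sys/resource.h>
+ *	#include <unistd.h>
+ *
+ *	int main(void)
+ *	{
+ *		struct rlimit rl;
+ *		int ret;
+ *
+ *		if (getrlimit(RLIMIT_NICE, &rl) == 0)
+ *			printf("lowest requestable nice: %d\n",
+ *			       20 - (int)rl.rlim_cur);
+ *
+ *		errno = 0;
+ *		ret = nice(-5);		// raising priority may need privilege
+ *		if (ret == -1 && errno)
+ *			perror("nice(-5)");
+ *		else
+ *			printf("new nice value: %d\n", ret);
+ *		return 0;
+ *	}
+ */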
+
+/**
+ * task_prio - return the priority value of a given task.
+ * @p: the task in question.
+ *
+ * Return: The priority value as seen by users in /proc.
+ *
+ * sched policy return value kernel prio user prio/nice
+ *
+ * (BMQ)normal, batch, idle[0 ... 53] [100 ... 139] 0/[-20 ... 19]/[-7 ... 7]
+ * (PDS)normal, batch, idle[0 ... 39] 100 0/[-20 ... 19]
+ * fifo, rr [-1 ... -100] [99 ... 0] [0 ... 99]
+ */
+int task_prio(const struct task_struct *p)
+{
+ return (p->prio < MAX_RT_PRIO) ? p->prio - MAX_RT_PRIO :
+ task_sched_prio_normal(p, task_rq(p));
+}
+
+/**
+ * idle_cpu - is a given CPU idle currently?
+ * @cpu: the processor in question.
+ *
+ * Return: 1 if the CPU is currently idle. 0 otherwise.
+ */
+int idle_cpu(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ if (rq->curr != rq->idle)
+ return 0;
+
+ if (rq->nr_running)
+ return 0;
+
+#ifdef CONFIG_SMP
+ if (rq->ttwu_pending)
+ return 0;
+#endif
+
+ return 1;
+}
+
+/**
+ * idle_task - return the idle task for a given CPU.
+ * @cpu: the processor in question.
+ *
+ * Return: The idle task for the cpu @cpu.
+ */
+struct task_struct *idle_task(int cpu)
+{
+ return cpu_rq(cpu)->idle;
+}
+
+/**
+ * find_process_by_pid - find a process with a matching PID value.
+ * @pid: the pid in question.
+ *
+ * The task of @pid, if found. %NULL otherwise.
+ */
+static inline struct task_struct *find_process_by_pid(pid_t pid)
+{
+ return pid ? find_task_by_vpid(pid) : current;
+}
+
+/*
+ * sched_setparam() passes in -1 for its policy, to let the functions
+ * it calls know not to change it.
+ */
+#define SETPARAM_POLICY -1
+
+static void __setscheduler_params(struct task_struct *p,
+ const struct sched_attr *attr)
+{
+ int policy = attr->sched_policy;
+
+ if (policy == SETPARAM_POLICY)
+ policy = p->policy;
+
+ p->policy = policy;
+
+ /*
+ * allow the normal nice value to be set, but it will not have any
+ * effect on scheduling until the task is SCHED_NORMAL/
+ * SCHED_BATCH
+ */
+ p->static_prio = NICE_TO_PRIO(attr->sched_nice);
+
+ /*
+ * __sched_setscheduler() ensures attr->sched_priority == 0 when
+ * !rt_policy. Always setting this ensures that things like
+ * getparam()/getattr() don't report silly values for !rt tasks.
+ */
+ p->rt_priority = attr->sched_priority;
+ p->normal_prio = normal_prio(p);
+}
+
+/*
+ * Check that the target process has a UID that matches the current process's.
+ */
+static bool check_same_owner(struct task_struct *p)
+{
+ const struct cred *cred = current_cred(), *pcred;
+ bool match;
+
+ rcu_read_lock();
+ pcred = __task_cred(p);
+ match = (uid_eq(cred->euid, pcred->euid) ||
+ uid_eq(cred->euid, pcred->uid));
+ rcu_read_unlock();
+ return match;
+}
+
+static int __sched_setscheduler(struct task_struct *p,
+ const struct sched_attr *attr,
+ bool user, bool pi)
+{
+ const struct sched_attr dl_squash_attr = {
+ .size = sizeof(struct sched_attr),
+ .sched_policy = SCHED_FIFO,
+ .sched_nice = 0,
+ .sched_priority = 99,
+ };
+ int oldpolicy = -1, policy = attr->sched_policy;
+ int retval, newprio;
+ struct callback_head *head;
+ unsigned long flags;
+ struct rq *rq;
+ int reset_on_fork;
+ raw_spinlock_t *lock;
+
+ /* The pi code expects interrupts enabled */
+ BUG_ON(pi && in_interrupt());
+
+ /*
+ * Alt schedule FW supports SCHED_DEADLINE by squashing it into SCHED_FIFO (see dl_squash_attr above)
+ */
+ if (unlikely(SCHED_DEADLINE == policy)) {
+ attr = &dl_squash_attr;
+ policy = attr->sched_policy;
+ }
+recheck:
+ /* Double check policy once rq lock held */
+ if (policy < 0) {
+ reset_on_fork = p->sched_reset_on_fork;
+ policy = oldpolicy = p->policy;
+ } else {
+ reset_on_fork = !!(attr->sched_flags & SCHED_RESET_ON_FORK);
+
+ if (policy > SCHED_IDLE)
+ return -EINVAL;
+ }
+
+ if (attr->sched_flags & ~(SCHED_FLAG_ALL))
+ return -EINVAL;
+
+ /*
+ * Valid priorities for SCHED_FIFO and SCHED_RR are
+ * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL and
+ * SCHED_BATCH and SCHED_IDLE is 0.
+ */
+ if (attr->sched_priority < 0 ||
+ (p->mm && attr->sched_priority > MAX_RT_PRIO - 1) ||
+ (!p->mm && attr->sched_priority > MAX_RT_PRIO - 1))
+ return -EINVAL;
+ if ((SCHED_RR == policy || SCHED_FIFO == policy) !=
+ (attr->sched_priority != 0))
+ return -EINVAL;
+
+ /*
+ * Allow unprivileged RT tasks to decrease priority:
+ */
+ if (user && !capable(CAP_SYS_NICE)) {
+ if (SCHED_FIFO == policy || SCHED_RR == policy) {
+ unsigned long rlim_rtprio =
+ task_rlimit(p, RLIMIT_RTPRIO);
+
+ /* Can't set/change the rt policy */
+ if (policy != p->policy && !rlim_rtprio)
+ return -EPERM;
+
+ /* Can't increase priority */
+ if (attr->sched_priority > p->rt_priority &&
+ attr->sched_priority > rlim_rtprio)
+ return -EPERM;
+ }
+
+ /* Can't change other user's priorities */
+ if (!check_same_owner(p))
+ return -EPERM;
+
+ /* Normal users shall not reset the sched_reset_on_fork flag */
+ if (p->sched_reset_on_fork && !reset_on_fork)
+ return -EPERM;
+ }
+
+ if (user) {
+ retval = security_task_setscheduler(p);
+ if (retval)
+ return retval;
+ }
+
+ if (pi)
+ cpuset_read_lock();
+
+ /*
+ * Make sure no PI-waiters arrive (or leave) while we are
+ * changing the priority of the task:
+ */
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
+
+ /*
+ * To be able to change p->policy safely, task_access_lock()
+ * must be called.
+ * If task_access_lock() is used here:
+ * For a task p which is not running, reading rq->stop is
+ * racy but acceptable as ->stop doesn't change much.
+ * An enhancement can be made to read rq->stop safely.
+ */
+ rq = __task_access_lock(p, &lock);
+
+ /*
+ * Changing the policy of the stop threads is a very bad idea
+ */
+ if (p == rq->stop) {
+ retval = -EINVAL;
+ goto unlock;
+ }
+
+ /*
+ * If not changing anything there's no need to proceed further:
+ */
+ if (unlikely(policy == p->policy)) {
+ if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
+ goto change;
+ if (!rt_policy(policy) &&
+ NICE_TO_PRIO(attr->sched_nice) != p->static_prio)
+ goto change;
+
+ p->sched_reset_on_fork = reset_on_fork;
+ retval = 0;
+ goto unlock;
+ }
+change:
+
+ /* Re-check policy now with rq lock held */
+ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
+ policy = oldpolicy = -1;
+ __task_access_unlock(p, lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ if (pi)
+ cpuset_read_unlock();
+ goto recheck;
+ }
+
+ p->sched_reset_on_fork = reset_on_fork;
+
+ newprio = __normal_prio(policy, attr->sched_priority, NICE_TO_PRIO(attr->sched_nice));
+ if (pi) {
+ /*
+ * Take priority boosted tasks into account. If the new
+ * effective priority is unchanged, we just store the new
+ * normal parameters and do not touch the scheduler class and
+ * the runqueue. This will be done when the task deboosts
+ * itself.
+ */
+ newprio = rt_effective_prio(p, newprio);
+ }
+
+ if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) {
+ __setscheduler_params(p, attr);
+ __setscheduler_prio(p, newprio);
+ }
+
+ check_task_changed(p, rq);
+
+ /* Avoid rq from going away on us: */
+ preempt_disable();
+ head = splice_balance_callbacks(rq);
+ __task_access_unlock(p, lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+ if (pi) {
+ cpuset_read_unlock();
+ rt_mutex_adjust_pi(p);
+ }
+
+ /* Run balance callbacks after we've adjusted the PI chain: */
+ balance_callbacks(rq, head);
+ preempt_enable();
+
+ return 0;
+
+unlock:
+ __task_access_unlock(p, lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ if (pi)
+ cpuset_read_unlock();
+ return retval;
+}
+
+static int _sched_setscheduler(struct task_struct *p, int policy,
+ const struct sched_param *param, bool check)
+{
+ struct sched_attr attr = {
+ .sched_policy = policy,
+ .sched_priority = param->sched_priority,
+ .sched_nice = PRIO_TO_NICE(p->static_prio),
+ };
+
+ /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
+ if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
+ attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
+ policy &= ~SCHED_RESET_ON_FORK;
+ attr.sched_policy = policy;
+ }
+
+ return __sched_setscheduler(p, &attr, check, true);
+}
+
+/**
+ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
+ * @p: the task in question.
+ * @policy: new policy.
+ * @param: structure containing the new RT priority.
+ *
+ * Use sched_set_fifo(), read its comment.
+ *
+ * Return: 0 on success. An error code otherwise.
+ *
+ * NOTE that the task may be already dead.
+ */
+int sched_setscheduler(struct task_struct *p, int policy,
+ const struct sched_param *param)
+{
+ return _sched_setscheduler(p, policy, param, true);
+}
+
+int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
+{
+ return __sched_setscheduler(p, attr, true, true);
+}
+
+int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
+{
+ return __sched_setscheduler(p, attr, false, true);
+}
+EXPORT_SYMBOL_GPL(sched_setattr_nocheck);
+
+/**
+ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
+ * @p: the task in question.
+ * @policy: new policy.
+ * @param: structure containing the new RT priority.
+ *
+ * Just like sched_setscheduler, only don't bother checking if the
+ * current context has permission. For example, this is needed in
+ * stop_machine(): we create temporary high priority worker threads,
+ * but our caller might not have that capability.
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
+int sched_setscheduler_nocheck(struct task_struct *p, int policy,
+ const struct sched_param *param)
+{
+ return _sched_setscheduler(p, policy, param, false);
+}
+
+/*
+ * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
+ * incapable of resource management, which is the one thing an OS really should
+ * be doing.
+ *
+ * This is of course the reason it is limited to privileged users only.
+ *
+ * Worse still; it is fundamentally impossible to compose static priority
+ * workloads. You cannot take two correctly working static prio workloads
+ * and smash them together and still expect them to work.
+ *
+ * For this reason 'all' FIFO tasks the kernel creates are basically at:
+ *
+ * MAX_RT_PRIO / 2
+ *
+ * The administrator _MUST_ configure the system, the kernel simply doesn't
+ * know enough information to make a sensible choice.
+ */
+void sched_set_fifo(struct task_struct *p)
+{
+ struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 };
+ WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
+}
+EXPORT_SYMBOL_GPL(sched_set_fifo);
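+
+/*
+ * Illustrative userspace counterpart: the administrator-side configuration
+ * the comment above asks for is typically done with sched_setscheduler(2)
+ * (or chrt(1)). The priority of 50 below is an arbitrary example value.
+ *
+ *	#include <sched.h>
+ *	#include <stdio.h>
+ *
+ *	int main(void)
+ *	{
+ *		struct sched_param sp = { .sched_priority = 50 };
+ *
+ *		// pid 0 == calling thread; needs CAP_SYS_NICE or RLIMIT_RTPRIO
+ *		if (sched_setscheduler(0, SCHED_FIFO, &sp))
+ *			perror("sched_setscheduler");
+ *		return 0;
+ *	}
+ */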
+
+/*
+ * For when you don't much care about FIFO, but want to be above SCHED_NORMAL.
+ */
+void sched_set_fifo_low(struct task_struct *p)
+{
+ struct sched_param sp = { .sched_priority = 1 };
+ WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0);
+}
+EXPORT_SYMBOL_GPL(sched_set_fifo_low);
+
+void sched_set_normal(struct task_struct *p, int nice)
+{
+ struct sched_attr attr = {
+ .sched_policy = SCHED_NORMAL,
+ .sched_nice = nice,
+ };
+ WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0);
+}
+EXPORT_SYMBOL_GPL(sched_set_normal);
+
+static int
+do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+{
+ struct sched_param lparam;
+ struct task_struct *p;
+ int retval;
+
+ if (!param || pid < 0)
+ return -EINVAL;
+ if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
+ return -EFAULT;
+
+ rcu_read_lock();
+ retval = -ESRCH;
+ p = find_process_by_pid(pid);
+ if (likely(p))
+ get_task_struct(p);
+ rcu_read_unlock();
+
+ if (likely(p)) {
+ retval = sched_setscheduler(p, policy, &lparam);
+ put_task_struct(p);
+ }
+
+ return retval;
+}
+
+/*
+ * Mimics kernel/events/core.c perf_copy_attr().
+ */
+static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
+{
+ u32 size;
+ int ret;
+
+ /* Zero the full structure, so that a short copy will be nice: */
+ memset(attr, 0, sizeof(*attr));
+
+ ret = get_user(size, &uattr->size);
+ if (ret)
+ return ret;
+
+ /* ABI compatibility quirk: */
+ if (!size)
+ size = SCHED_ATTR_SIZE_VER0;
+
+ if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
+ goto err_size;
+
+ ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
+ if (ret) {
+ if (ret == -E2BIG)
+ goto err_size;
+ return ret;
+ }
+
+ /*
+ * XXX: Do we want to be lenient like existing syscalls; or do we want
+ * to be strict and return an error on out-of-bounds values?
+ */
+ attr->sched_nice = clamp(attr->sched_nice, -20, 19);
+
+ /* sched/core.c uses zero here but we already know ret is zero */
+ return 0;
+
+err_size:
+ put_user(sizeof(*attr), &uattr->size);
+ return -E2BIG;
+}
+
+/**
+ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
+ * @pid: the pid in question.
+ * @policy: new policy.
+ * @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
+SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
+{
+ if (policy < 0)
+ return -EINVAL;
+
+ return do_sched_setscheduler(pid, policy, param);
+}
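+/*
+ * Userspace counterpart of the syscall above (illustrative sketch).
+ * Switching the calling thread to SCHED_FIFO requires CAP_SYS_NICE or a
+ * suitable RLIMIT_RTPRIO:
+ *
+ *     #include <sched.h>
+ *     #include <stdio.h>
+ *
+ *     struct sched_param sp = { .sched_priority = 10 };
+ *
+ *     if (sched_setscheduler(0, SCHED_FIFO, &sp))
+ *             perror("sched_setscheduler");
+ *
+ * Passing pid 0 means "the calling thread"; any priority in the range
+ * reported by sched_get_priority_min()/max() for SCHED_FIFO is accepted.
+ */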
+
+/**
+ * sys_sched_setparam - set/change the RT priority of a thread
+ * @pid: the pid in question.
+ * @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
+SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
+{
+ return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
+}
+
+/**
+ * sys_sched_setattr - same as above, but with extended sched_attr
+ * @pid: the pid in question.
+ * @uattr: structure containing the extended parameters.
+ * @flags: for future extension.
+ */
+SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
+ unsigned int, flags)
+{
+ struct sched_attr attr;
+ struct task_struct *p;
+ int retval;
+
+ if (!uattr || pid < 0 || flags)
+ return -EINVAL;
+
+ retval = sched_copy_attr(uattr, &attr);
+ if (retval)
+ return retval;
+
+ if ((int)attr.sched_policy < 0)
+ return -EINVAL;
+
+ rcu_read_lock();
+ retval = -ESRCH;
+ p = find_process_by_pid(pid);
+ if (likely(p))
+ get_task_struct(p);
+ rcu_read_unlock();
+
+ if (likely(p)) {
+ retval = sched_setattr(p, &attr);
+ put_task_struct(p);
+ }
+
+ return retval;
+}
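+/*
+ * Illustrative userspace use of the extended interface (there is no
+ * traditional glibc wrapper, so the raw syscall is used; error handling
+ * omitted). This sets SCHED_NORMAL at nice 5 and asks for the setting to be
+ * reset in children:
+ *
+ *     #include <linux/sched.h>
+ *     #include <linux/sched/types.h>
+ *     #include <sys/syscall.h>
+ *     #include <unistd.h>
+ *
+ *     struct sched_attr attr = {
+ *             .size         = sizeof(attr),
+ *             .sched_policy = SCHED_NORMAL,
+ *             .sched_nice   = 5,
+ *             .sched_flags  = SCHED_FLAG_RESET_ON_FORK,
+ *     };
+ *
+ *     syscall(SYS_sched_setattr, 0, &attr, 0);
+ *
+ * The trailing 0 is the 'flags' argument checked above, which currently must
+ * be zero.
+ */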
+
+/**
+ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
+ * @pid: the pid in question.
+ *
+ * Return: On success, the policy of the thread. Otherwise, a negative error
+ * code.
+ */
+SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
+{
+ struct task_struct *p;
+ int retval = -EINVAL;
+
+ if (pid < 0)
+ goto out_nounlock;
+
+ retval = -ESRCH;
+ rcu_read_lock();
+ p = find_process_by_pid(pid);
+ if (p) {
+ retval = security_task_getscheduler(p);
+ if (!retval)
+ retval = p->policy;
+ }
+ rcu_read_unlock();
+
+out_nounlock:
+ return retval;
+}
+
+/**
+ * sys_sched_getparam - get the RT priority of a thread
+ * @pid: the pid in question.
+ * @param: structure containing the RT priority.
+ *
+ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
+ * code.
+ */
+SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
+{
+ struct sched_param lp = { .sched_priority = 0 };
+ struct task_struct *p;
+ int retval = -EINVAL;
+
+ if (!param || pid < 0)
+ goto out_nounlock;
+
+ rcu_read_lock();
+ p = find_process_by_pid(pid);
+ retval = -ESRCH;
+ if (!p)
+ goto out_unlock;
+
+ retval = security_task_getscheduler(p);
+ if (retval)
+ goto out_unlock;
+
+ if (task_has_rt_policy(p))
+ lp.sched_priority = p->rt_priority;
+ rcu_read_unlock();
+
+ /*
+ * This one might sleep, we cannot do it with a spinlock held ...
+ */
+ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
+
+out_nounlock:
+ return retval;
+
+out_unlock:
+ rcu_read_unlock();
+ return retval;
+}
+
+/*
+ * Copy the kernel size attribute structure (which might be larger
+ * than what user-space knows about) to user-space.
+ *
+ * Note that all cases are valid: user-space buffer can be larger or
+ * smaller than the kernel-space buffer. The usual case is that both
+ * have the same size.
+ */
+static int
+sched_attr_copy_to_user(struct sched_attr __user *uattr,
+ struct sched_attr *kattr,
+ unsigned int usize)
+{
+ unsigned int ksize = sizeof(*kattr);
+
+ if (!access_ok(uattr, usize))
+ return -EFAULT;
+
+ /*
+ * sched_getattr() ABI forwards and backwards compatibility:
+ *
+ * If usize == ksize then we just copy everything to user-space and all is good.
+ *
+ * If usize < ksize then we only copy as much as user-space has space for,
+ * this keeps ABI compatibility as well. We skip the rest.
+ *
+ * If usize > ksize then user-space is using a newer version of the ABI,
+ * which part the kernel doesn't know about. Just ignore it - tooling can
+ * detect the kernel's knowledge of attributes from the attr->size value
+ * which is set to ksize in this case.
+ */
+ kattr->size = min(usize, ksize);
+
+ if (copy_to_user(uattr, kattr, kattr->size))
+ return -EFAULT;
+
+ return 0;
+}
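+/*
+ * Userspace view of the size negotiation above (illustrative sketch; glibc
+ * traditionally provides no sched_getattr() wrapper, so the raw syscall is
+ * used):
+ *
+ *     #include <linux/sched/types.h>
+ *     #include <stdio.h>
+ *     #include <sys/syscall.h>
+ *     #include <unistd.h>
+ *
+ *     struct sched_attr attr = { 0 };
+ *
+ *     if (syscall(SYS_sched_getattr, 0, &attr, sizeof(attr), 0) == 0)
+ *             printf("policy %u, kernel filled %u bytes\n",
+ *                    attr.sched_policy, attr.size);
+ *
+ * An older binary passing a smaller usize gets a consistent prefix of the
+ * structure; a newer binary passing a larger usize finds attr.size set to
+ * the size the kernel actually knows about.
+ */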
+
+/**
+ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
+ * @pid: the pid in question.
+ * @uattr: structure containing the extended parameters.
+ * @usize: sizeof(attr) for fwd/bwd comp.
+ * @flags: for future extension.
+ */
+SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
+ unsigned int, usize, unsigned int, flags)
+{
+ struct sched_attr kattr = { };
+ struct task_struct *p;
+ int retval;
+
+ if (!uattr || pid < 0 || usize > PAGE_SIZE ||
+ usize < SCHED_ATTR_SIZE_VER0 || flags)
+ return -EINVAL;
+
+ rcu_read_lock();
+ p = find_process_by_pid(pid);
+ retval = -ESRCH;
+ if (!p)
+ goto out_unlock;
+
+ retval = security_task_getscheduler(p);
+ if (retval)
+ goto out_unlock;
+
+ kattr.sched_policy = p->policy;
+ if (p->sched_reset_on_fork)
+ kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
+ if (task_has_rt_policy(p))
+ kattr.sched_priority = p->rt_priority;
+ else
+ kattr.sched_nice = task_nice(p);
+ kattr.sched_flags &= SCHED_FLAG_ALL;
+
+#ifdef CONFIG_UCLAMP_TASK
+ kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
+ kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
+#endif
+
+ rcu_read_unlock();
+
+ return sched_attr_copy_to_user(uattr, &kattr, usize);
+
+out_unlock:
+ rcu_read_unlock();
+ return retval;
+}
+
+static int
+__sched_setaffinity(struct task_struct *p, const struct cpumask *mask)
+{
+ int retval;
+ cpumask_var_t cpus_allowed, new_mask;
+
+ if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
+ return -ENOMEM;
+
+ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto out_free_cpus_allowed;
+ }
+
+ cpuset_cpus_allowed(p, cpus_allowed);
+ cpumask_and(new_mask, mask, cpus_allowed);
+again:
+ retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER);
+ if (retval)
+ goto out_free_new_mask;
+
+ cpuset_cpus_allowed(p, cpus_allowed);
+ if (!cpumask_subset(new_mask, cpus_allowed)) {
+ /*
+ * We must have raced with a concurrent cpuset
+ * update. Just reset the cpus_allowed to the
+ * cpuset's cpus_allowed
+ */
+ cpumask_copy(new_mask, cpus_allowed);
+ goto again;
+ }
+
+out_free_new_mask:
+ free_cpumask_var(new_mask);
+out_free_cpus_allowed:
+ free_cpumask_var(cpus_allowed);
+ return retval;
+}
+
+long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+{
+ struct task_struct *p;
+ int retval;
+
+ rcu_read_lock();
+
+ p = find_process_by_pid(pid);
+ if (!p) {
+ rcu_read_unlock();
+ return -ESRCH;
+ }
+
+ /* Prevent p going away */
+ get_task_struct(p);
+ rcu_read_unlock();
+
+ if (p->flags & PF_NO_SETAFFINITY) {
+ retval = -EINVAL;
+ goto out_put_task;
+ }
+
+ if (!check_same_owner(p)) {
+ rcu_read_lock();
+ if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
+ rcu_read_unlock();
+ retval = -EPERM;
+ goto out_put_task;
+ }
+ rcu_read_unlock();
+ }
+
+ retval = security_task_setscheduler(p);
+ if (retval)
+ goto out_put_task;
+
+ retval = __sched_setaffinity(p, in_mask);
+out_put_task:
+ put_task_struct(p);
+ return retval;
+}
+
+static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
+ struct cpumask *new_mask)
+{
+ if (len < cpumask_size())
+ cpumask_clear(new_mask);
+ else if (len > cpumask_size())
+ len = cpumask_size();
+
+ return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
+}
+
+/**
+ * sys_sched_setaffinity - set the CPU affinity of a process
+ * @pid: pid of the process
+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
+ * @user_mask_ptr: user-space pointer to the new CPU mask
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
+SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
+ unsigned long __user *, user_mask_ptr)
+{
+ cpumask_var_t new_mask;
+ int retval;
+
+ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
+ if (retval == 0)
+ retval = sched_setaffinity(pid, new_mask);
+ free_cpumask_var(new_mask);
+ return retval;
+}
+
+long sched_getaffinity(pid_t pid, cpumask_t *mask)
+{
+ struct task_struct *p;
+ raw_spinlock_t *lock;
+ unsigned long flags;
+ int retval;
+
+ rcu_read_lock();
+
+ retval = -ESRCH;
+ p = find_process_by_pid(pid);
+ if (!p)
+ goto out_unlock;
+
+ retval = security_task_getscheduler(p);
+ if (retval)
+ goto out_unlock;
+
+ task_access_lock_irqsave(p, &lock, &flags);
+ cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
+ task_access_unlock_irqrestore(p, lock, &flags);
+
+out_unlock:
+ rcu_read_unlock();
+
+ return retval;
+}
+
+/**
+ * sys_sched_getaffinity - get the CPU affinity of a process
+ * @pid: pid of the process
+ * @len: length in bytes of the bitmask pointed to by user_mask_ptr
+ * @user_mask_ptr: user-space pointer to hold the current CPU mask
+ *
+ * Return: size of CPU mask copied to user_mask_ptr on success. An
+ * error code otherwise.
+ */
+SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
+ unsigned long __user *, user_mask_ptr)
+{
+ int ret;
+ cpumask_var_t mask;
+
+ if ((len * BITS_PER_BYTE) < nr_cpu_ids)
+ return -EINVAL;
+ if (len & (sizeof(unsigned long)-1))
+ return -EINVAL;
+
+ if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ ret = sched_getaffinity(pid, mask);
+ if (ret == 0) {
+ unsigned int retlen = min_t(size_t, len, cpumask_size());
+
+ if (copy_to_user(user_mask_ptr, mask, retlen))
+ ret = -EFAULT;
+ else
+ ret = retlen;
+ }
+ free_cpumask_var(mask);
+
+ return ret;
+}
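+/*
+ * Note for userspace (illustrative): the raw syscall returns the number of
+ * bytes written to the mask (retlen above), while the glibc wrapper hides
+ * that detail and returns 0 on success:
+ *
+ *     #define _GNU_SOURCE
+ *     #include <sched.h>
+ *     #include <stdio.h>
+ *
+ *     cpu_set_t set;
+ *
+ *     if (sched_getaffinity(0, sizeof(set), &set) == 0)
+ *             printf("%d CPUs allowed\n", CPU_COUNT(&set));
+ *
+ * The length passed to the raw syscall must be a multiple of
+ * sizeof(unsigned long) and cover at least nr_cpu_ids bits, otherwise the
+ * checks at the top of the handler return -EINVAL.
+ */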
+
+static void do_sched_yield(void)
+{
+ struct rq *rq;
+ struct rq_flags rf;
+
+ if (!sched_yield_type)
+ return;
+
+ rq = this_rq_lock_irq(&rf);
+
+ schedstat_inc(rq->yld_count);
+
+ if (1 == sched_yield_type) {
+ if (!rt_task(current))
+ do_sched_yield_type_1(current, rq);
+ } else if (2 == sched_yield_type) {
+ if (rq->nr_running > 1)
+ rq->skip = current;
+ }
+
+ preempt_disable();
+ raw_spin_unlock_irq(&rq->lock);
+ sched_preempt_enable_no_resched();
+
+ schedule();
+}
+
+/**
+ * sys_sched_yield - yield the current processor to other threads.
+ *
+ * This function yields the current CPU to other tasks. If there are no
+ * other threads running on this CPU then this function will return.
+ *
+ * Return: 0.
+ */
+SYSCALL_DEFINE0(sched_yield)
+{
+ do_sched_yield();
+ return 0;
+}
+
+#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC)
+int __sched __cond_resched(void)
+{
+ if (should_resched(0)) {
+ preempt_schedule_common();
+ return 1;
+ }
+ /*
+ * In preemptible kernels, ->rcu_read_lock_nesting tells the tick
+ * whether the current CPU is in an RCU read-side critical section,
+ * so the tick can report quiescent states even for CPUs looping
+ * in kernel context. In contrast, in non-preemptible kernels,
+ * RCU readers leave no in-memory hints, which means that CPU-bound
+ * processes executing in kernel context might never report an
+ * RCU quiescent state. Therefore, the following code causes
+ * cond_resched() to report a quiescent state, but only when RCU
+ * is in urgent need of one.
+ */
+#ifndef CONFIG_PREEMPT_RCU
+ rcu_all_qs();
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(__cond_resched);
+#endif
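+/*
+ * Typical use of cond_resched() (illustrative; my_list, item and
+ * process_one() are made-up names): a long-running loop in process context
+ * offers an explicit preemption point per iteration so that non-preemptible
+ * and voluntary-preemption kernels do not hog the CPU:
+ *
+ *     list_for_each_entry(item, &my_list, node) {
+ *             process_one(item);
+ *             cond_resched();
+ *     }
+ *
+ * Under CONFIG_PREEMPT_DYNAMIC this resolves through a static call/key and
+ * becomes a no-op when full preemption is selected (see the mapping table
+ * below).
+ */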
+
+#ifdef CONFIG_PREEMPT_DYNAMIC
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+#define cond_resched_dynamic_enabled __cond_resched
+#define cond_resched_dynamic_disabled ((void *)&__static_call_return0)
+DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched);
+EXPORT_STATIC_CALL_TRAMP(cond_resched);
+
+#define might_resched_dynamic_enabled __cond_resched
+#define might_resched_dynamic_disabled ((void *)&__static_call_return0)
+DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched);
+EXPORT_STATIC_CALL_TRAMP(might_resched);
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched);
+int __sched dynamic_cond_resched(void)
+{
+ if (!static_branch_unlikely(&sk_dynamic_cond_resched))
+ return 0;
+ return __cond_resched();
+}
+EXPORT_SYMBOL(dynamic_cond_resched);
+
+static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched);
+int __sched dynamic_might_resched(void)
+{
+ if (!static_branch_unlikely(&sk_dynamic_might_resched))
+ return 0;
+ return __cond_resched();
+}
+EXPORT_SYMBOL(dynamic_might_resched);
+#endif
+#endif
+
+/*
+ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
+ * call schedule, and on return reacquire the lock.
+ *
+ * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
+ * operations here to prevent schedule() from being called twice (once via
+ * spin_unlock(), once by hand).
+ */
+int __cond_resched_lock(spinlock_t *lock)
+{
+ int resched = should_resched(PREEMPT_LOCK_OFFSET);
+ int ret = 0;
+
+ lockdep_assert_held(lock);
+
+ if (spin_needbreak(lock) || resched) {
+ spin_unlock(lock);
+ if (!_cond_resched())
+ cpu_relax();
+ ret = 1;
+ spin_lock(lock);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(__cond_resched_lock);
+
+int __cond_resched_rwlock_read(rwlock_t *lock)
+{
+ int resched = should_resched(PREEMPT_LOCK_OFFSET);
+ int ret = 0;
+
+ lockdep_assert_held_read(lock);
+
+ if (rwlock_needbreak(lock) || resched) {
+ read_unlock(lock);
+ if (!_cond_resched())
+ cpu_relax();
+ ret = 1;
+ read_lock(lock);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(__cond_resched_rwlock_read);
+
+int __cond_resched_rwlock_write(rwlock_t *lock)
+{
+ int resched = should_resched(PREEMPT_LOCK_OFFSET);
+ int ret = 0;
+
+ lockdep_assert_held_write(lock);
+
+ if (rwlock_needbreak(lock) || resched) {
+ write_unlock(lock);
+ if (!_cond_resched())
+ cpu_relax();
+ ret = 1;
+ write_lock(lock);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(__cond_resched_rwlock_write);
+
+#ifdef CONFIG_PREEMPT_DYNAMIC
+
+#ifdef CONFIG_GENERIC_ENTRY
+#include <linux/entry-common.h>
+#endif
+
+/*
+ * SC:cond_resched
+ * SC:might_resched
+ * SC:preempt_schedule
+ * SC:preempt_schedule_notrace
+ * SC:irqentry_exit_cond_resched
+ *
+ *
+ * NONE:
+ * cond_resched <- __cond_resched
+ * might_resched <- RET0
+ * preempt_schedule <- NOP
+ * preempt_schedule_notrace <- NOP
+ * irqentry_exit_cond_resched <- NOP
+ *
+ * VOLUNTARY:
+ * cond_resched <- __cond_resched
+ * might_resched <- __cond_resched
+ * preempt_schedule <- NOP
+ * preempt_schedule_notrace <- NOP
+ * irqentry_exit_cond_resched <- NOP
+ *
+ * FULL:
+ * cond_resched <- RET0
+ * might_resched <- RET0
+ * preempt_schedule <- preempt_schedule
+ * preempt_schedule_notrace <- preempt_schedule_notrace
+ * irqentry_exit_cond_resched <- irqentry_exit_cond_resched
+ */
+
+enum {
+ preempt_dynamic_undefined = -1,
+ preempt_dynamic_none,
+ preempt_dynamic_voluntary,
+ preempt_dynamic_full,
+};
+
+int preempt_dynamic_mode = preempt_dynamic_undefined;
+
+int sched_dynamic_mode(const char *str)
+{
+ if (!strcmp(str, "none"))
+ return preempt_dynamic_none;
+
+ if (!strcmp(str, "voluntary"))
+ return preempt_dynamic_voluntary;
+
+ if (!strcmp(str, "full"))
+ return preempt_dynamic_full;
+
+ return -EINVAL;
+}
+
+#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL)
+#define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled)
+#define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled)
+#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+#define preempt_dynamic_enable(f) static_key_enable(&sk_dynamic_##f.key)
+#define preempt_dynamic_disable(f) static_key_disable(&sk_dynamic_##f.key)
+#else
+#error "Unsupported PREEMPT_DYNAMIC mechanism"
+#endif
+
+void sched_dynamic_update(int mode)
+{
+ /*
+ * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in
+ * the ZERO state, which is invalid.
+ */
+ preempt_dynamic_enable(cond_resched);
+ preempt_dynamic_enable(might_resched);
+ preempt_dynamic_enable(preempt_schedule);
+ preempt_dynamic_enable(preempt_schedule_notrace);
+ preempt_dynamic_enable(irqentry_exit_cond_resched);
+
+ switch (mode) {
+ case preempt_dynamic_none:
+ preempt_dynamic_enable(cond_resched);
+ preempt_dynamic_disable(might_resched);
+ preempt_dynamic_disable(preempt_schedule);
+ preempt_dynamic_disable(preempt_schedule_notrace);
+ preempt_dynamic_disable(irqentry_exit_cond_resched);
+ pr_info("Dynamic Preempt: none\n");
+ break;
+
+ case preempt_dynamic_voluntary:
+ preempt_dynamic_enable(cond_resched);
+ preempt_dynamic_enable(might_resched);
+ preempt_dynamic_disable(preempt_schedule);
+ preempt_dynamic_disable(preempt_schedule_notrace);
+ preempt_dynamic_disable(irqentry_exit_cond_resched);
+ pr_info("Dynamic Preempt: voluntary\n");
+ break;
+
+ case preempt_dynamic_full:
+ preempt_dynamic_disable(cond_resched);
+ preempt_dynamic_disable(might_resched);
+ preempt_dynamic_enable(preempt_schedule);
+ preempt_dynamic_enable(preempt_schedule_notrace);
+ preempt_dynamic_enable(irqentry_exit_cond_resched);
+ pr_info("Dynamic Preempt: full\n");
+ break;
+ }
+
+ preempt_dynamic_mode = mode;
+}
+
+static int __init setup_preempt_mode(char *str)
+{
+ int mode = sched_dynamic_mode(str);
+ if (mode < 0) {
+ pr_warn("Dynamic Preempt: unsupported mode: %s\n", str);
+ return 0;
+ }
+
+ sched_dynamic_update(mode);
+ return 1;
+}
+__setup("preempt=", setup_preempt_mode);
+
+static void __init preempt_dynamic_init(void)
+{
+ if (preempt_dynamic_mode == preempt_dynamic_undefined) {
+ if (IS_ENABLED(CONFIG_PREEMPT_NONE)) {
+ sched_dynamic_update(preempt_dynamic_none);
+ } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) {
+ sched_dynamic_update(preempt_dynamic_voluntary);
+ } else {
+ /* Default static call setting, nothing to do */
+ WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT));
+ preempt_dynamic_mode = preempt_dynamic_full;
+ pr_info("Dynamic Preempt: full\n");
+ }
+ }
+}
+
+#define PREEMPT_MODEL_ACCESSOR(mode) \
+ bool preempt_model_##mode(void) \
+ { \
+ WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \
+ return preempt_dynamic_mode == preempt_dynamic_##mode; \
+ } \
+ EXPORT_SYMBOL_GPL(preempt_model_##mode)
+
+PREEMPT_MODEL_ACCESSOR(none);
+PREEMPT_MODEL_ACCESSOR(voluntary);
+PREEMPT_MODEL_ACCESSOR(full);
+
+#else /* !CONFIG_PREEMPT_DYNAMIC */
+
+static inline void preempt_dynamic_init(void) { }
+
+#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */
+
+/**
+ * yield - yield the current processor to other threads.
+ *
+ * Do not ever use this function, there's a 99% chance you're doing it wrong.
+ *
+ * The scheduler is at all times free to pick the calling task as the most
+ * eligible task to run, if removing the yield() call from your code breaks
+ * it, it's already broken.
+ *
+ * Typical broken usage is:
+ *
+ * while (!event)
+ * yield();
+ *
+ * where one assumes that yield() will let 'the other' process run that will
+ * make event true. If the current task is a SCHED_FIFO task that will never
+ * happen. Never use yield() as a progress guarantee!!
+ *
+ * If you want to use yield() to wait for something, use wait_event().
+ * If you want to use yield() to be 'nice' for others, use cond_resched().
+ * If you still want to use yield(), do not!
+ */
+void __sched yield(void)
+{
+ set_current_state(TASK_RUNNING);
+ do_sched_yield();
+}
+EXPORT_SYMBOL(yield);
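+/*
+ * The usual replacement for the broken yield() loop described above is an
+ * explicit wait/wake pair (illustrative; my_wq and my_event are made-up
+ * names):
+ *
+ *     DECLARE_WAIT_QUEUE_HEAD(my_wq);
+ *     bool my_event;
+ *
+ *     waiter:    wait_event(my_wq, my_event);
+ *
+ *     producer:  my_event = true;
+ *                wake_up(&my_wq);
+ *
+ * The waiter sleeps instead of spinning in yield(), so this also works when
+ * the waiter runs at a higher (e.g. SCHED_FIFO) priority than the producer.
+ */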
+
+/**
+ * yield_to - yield the current processor to another thread in
+ * your thread group, or accelerate that thread toward the
+ * processor it's on.
+ * @p: target task
+ * @preempt: whether task preemption is allowed or not
+ *
+ * It's the caller's job to ensure that the target task struct
+ * can't go away on us before we can do any checks.
+ *
+ * In Alt schedule FW, yield_to is not supported.
+ *
+ * Return:
+ * true (>0) if we indeed boosted the target task.
+ * false (0) if we failed to boost the target.
+ * -ESRCH if there's no task to yield to.
+ */
+int __sched yield_to(struct task_struct *p, bool preempt)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(yield_to);
+
+int io_schedule_prepare(void)
+{
+ int old_iowait = current->in_iowait;
+
+ current->in_iowait = 1;
+ blk_flush_plug(current->plug, true);
+ return old_iowait;
+}
+
+void io_schedule_finish(int token)
+{
+ current->in_iowait = token;
+}
+
+/*
+ * This task is about to go to sleep on IO. Increment rq->nr_iowait so
+ * that process accounting knows that this is a task in IO wait state.
+ *
+ * But don't do that if it is a deliberate, throttling IO wait (this task
+ * has set its backing_dev_info: the queue against which it should throttle)
+ */
+
+long __sched io_schedule_timeout(long timeout)
+{
+ int token;
+ long ret;
+
+ token = io_schedule_prepare();
+ ret = schedule_timeout(timeout);
+ io_schedule_finish(token);
+
+ return ret;
+}
+EXPORT_SYMBOL(io_schedule_timeout);
+
+void __sched io_schedule(void)
+{
+ int token;
+
+ token = io_schedule_prepare();
+ schedule();
+ io_schedule_finish(token);
+}
+EXPORT_SYMBOL(io_schedule);
+
+/**
+ * sys_sched_get_priority_max - return maximum RT priority.
+ * @policy: scheduling class.
+ *
+ * Return: On success, this syscall returns the maximum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
+ */
+SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
+{
+ int ret = -EINVAL;
+
+ switch (policy) {
+ case SCHED_FIFO:
+ case SCHED_RR:
+ ret = MAX_RT_PRIO - 1;
+ break;
+ case SCHED_NORMAL:
+ case SCHED_BATCH:
+ case SCHED_IDLE:
+ ret = 0;
+ break;
+ }
+ return ret;
+}
+
+/**
+ * sys_sched_get_priority_min - return minimum RT priority.
+ * @policy: scheduling class.
+ *
+ * Return: On success, this syscall returns the minimum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
+ */
+SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
+{
+ int ret = -EINVAL;
+
+ switch (policy) {
+ case SCHED_FIFO:
+ case SCHED_RR:
+ ret = 1;
+ break;
+ case SCHED_NORMAL:
+ case SCHED_BATCH:
+ case SCHED_IDLE:
+ ret = 0;
+ break;
+ }
+ return ret;
+}
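+/*
+ * Userspace can query the valid sched_priority range before calling
+ * sched_setscheduler() (illustrative sketch using the POSIX wrappers):
+ *
+ *     #include <sched.h>
+ *
+ *     int fifo_max = sched_get_priority_max(SCHED_FIFO);
+ *     int fifo_min = sched_get_priority_min(SCHED_FIFO);
+ *
+ * which report 99 (MAX_RT_PRIO - 1) and 1 respectively. For SCHED_NORMAL,
+ * SCHED_BATCH and SCHED_IDLE both calls return 0, since those policies do
+ * not use rt_priority at all.
+ */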
+
+static int sched_rr_get_interval(pid_t pid, struct timespec64 *t)
+{
+ struct task_struct *p;
+ int retval;
+
+ alt_sched_debug();
+
+ if (pid < 0)
+ return -EINVAL;
+
+ retval = -ESRCH;
+ rcu_read_lock();
+ p = find_process_by_pid(pid);
+ if (!p)
+ goto out_unlock;
+
+ retval = security_task_getscheduler(p);
+ if (retval)
+ goto out_unlock;
+ rcu_read_unlock();
+
+ *t = ns_to_timespec64(sched_timeslice_ns);
+ return 0;
+
+out_unlock:
+ rcu_read_unlock();
+ return retval;
+}
+
+/**
+ * sys_sched_rr_get_interval - return the default timeslice of a process.
+ * @pid: pid of the process.
+ * @interval: userspace pointer to the timeslice value.
+ *
+ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
+ * an error code.
+ */
+SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
+ struct __kernel_timespec __user *, interval)
+{
+ struct timespec64 t;
+ int retval = sched_rr_get_interval(pid, &t);
+
+ if (retval == 0)
+ retval = put_timespec64(&t, interval);
+
+ return retval;
+}
+
+#ifdef CONFIG_COMPAT_32BIT_TIME
+SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid,
+ struct old_timespec32 __user *, interval)
+{
+ struct timespec64 t;
+ int retval = sched_rr_get_interval(pid, &t);
+
+ if (retval == 0)
+ retval = put_old_timespec32(&t, interval);
+ return retval;
+}
+#endif
+
+void sched_show_task(struct task_struct *p)
+{
+ unsigned long free = 0;
+ int ppid;
+
+ if (!try_get_task_stack(p))
+ return;
+
+ pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
+
+ if (task_is_running(p))
+ pr_cont(" running task ");
+#ifdef CONFIG_DEBUG_STACK_USAGE
+ free = stack_not_used(p);
+#endif
+ ppid = 0;
+ rcu_read_lock();
+ if (pid_alive(p))
+ ppid = task_pid_nr(rcu_dereference(p->real_parent));
+ rcu_read_unlock();
+ pr_cont(" stack:%5lu pid:%5d ppid:%6d flags:0x%08lx\n",
+ free, task_pid_nr(p), ppid,
+ read_task_thread_flags(p));
+
+ print_worker_info(KERN_INFO, p);
+ print_stop_info(KERN_INFO, p);
+ show_stack(p, NULL, KERN_INFO);
+ put_task_stack(p);
+}
+EXPORT_SYMBOL_GPL(sched_show_task);
+
+static inline bool
+state_filter_match(unsigned long state_filter, struct task_struct *p)
+{
+ unsigned int state = READ_ONCE(p->__state);
+
+ /* no filter, everything matches */
+ if (!state_filter)
+ return true;
+
+ /* filter, but doesn't match */
+ if (!(state & state_filter))
+ return false;
+
+ /*
+ * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows
+ * TASK_KILLABLE).
+ */
+ if (state_filter == TASK_UNINTERRUPTIBLE && state == TASK_IDLE)
+ return false;
+
+ return true;
+}
+
+
+void show_state_filter(unsigned int state_filter)
+{
+ struct task_struct *g, *p;
+
+ rcu_read_lock();
+ for_each_process_thread(g, p) {
+ /*
+ * reset the NMI-timeout, listing all files on a slow
+ * console might take a lot of time:
+ * Also, reset softlockup watchdogs on all CPUs, because
+ * another CPU might be blocked waiting for us to process
+ * an IPI.
+ */
+ touch_nmi_watchdog();
+ touch_all_softlockup_watchdogs();
+ if (state_filter_match(state_filter, p))
+ sched_show_task(p);
+ }
+
+#ifdef CONFIG_SCHED_DEBUG
+ /* TODO: Alt schedule FW should support this
+ if (!state_filter)
+ sysrq_sched_debug_show();
+ */
+#endif
+ rcu_read_unlock();
+ /*
+ * Only show locks if all tasks are dumped:
+ */
+ if (!state_filter)
+ debug_show_all_locks();
+}
+
+void dump_cpu_task(int cpu)
+{
+ pr_info("Task dump for CPU %d:\n", cpu);
+ sched_show_task(cpu_curr(cpu));
+}
+
+/**
+ * init_idle - set up an idle thread for a given CPU
+ * @idle: task in question
+ * @cpu: CPU the idle task belongs to
+ *
+ * NOTE: this function does not set the idle thread's NEED_RESCHED
+ * flag, to make booting more robust.
+ */
+void __init init_idle(struct task_struct *idle, int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
+ __sched_fork(0, idle);
+
+ raw_spin_lock_irqsave(&idle->pi_lock, flags);
+ raw_spin_lock(&rq->lock);
+ update_rq_clock(rq);
+
+ idle->last_ran = rq->clock_task;
+ idle->__state = TASK_RUNNING;
+ /*
+ * PF_KTHREAD should already be set at this point; regardless, make it
+ * look like a proper per-CPU kthread.
+ */
+ idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY;
+ kthread_set_per_cpu(idle, cpu);
+
+ sched_queue_init_idle(&rq->queue, idle);
+
+#ifdef CONFIG_SMP
+ /*
+ * It's possible that init_idle() gets called multiple times on a task,
+ * in that case do_set_cpus_allowed() will not do the right thing.
+ *
+ * And since this is boot we can forgo the serialisation.
+ */
+ set_cpus_allowed_common(idle, cpumask_of(cpu));
+#endif
+
+ /* Silence PROVE_RCU */
+ rcu_read_lock();
+ __set_task_cpu(idle, cpu);
+ rcu_read_unlock();
+
+ rq->idle = idle;
+ rcu_assign_pointer(rq->curr, idle);
+ idle->on_cpu = 1;
+
+ raw_spin_unlock(&rq->lock);
+ raw_spin_unlock_irqrestore(&idle->pi_lock, flags);
+
+ /* Set the preempt count _outside_ the spinlocks! */
+ init_idle_preempt_count(idle, cpu);
+
+ ftrace_graph_init_idle_task(idle, cpu);
+ vtime_init_idle(idle, cpu);
+#ifdef CONFIG_SMP
+ sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
+#endif
+}
+
+#ifdef CONFIG_SMP
+
+int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur,
+ const struct cpumask __maybe_unused *trial)
+{
+ return 1;
+}
+
+int task_can_attach(struct task_struct *p,
+ const struct cpumask *cs_cpus_allowed)
+{
+ int ret = 0;
+
+ /*
+ * Kthreads which disallow setaffinity shouldn't be moved
+ * to a new cpuset; we don't want to change their CPU
+ * affinity and isolating such threads by their set of
+ * allowed nodes is unnecessary. Thus, cpusets are not
+ * applicable for such threads. This prevents checking for
+ * success of set_cpus_allowed_ptr() on all attached tasks
+ * before cpus_mask may be changed.
+ */
+ if (p->flags & PF_NO_SETAFFINITY)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+bool sched_smp_initialized __read_mostly;
+
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * Ensures that the idle task is using init_mm right before its CPU goes
+ * offline.
+ */
+void idle_task_exit(void)
+{
+ struct mm_struct *mm = current->active_mm;
+
+ BUG_ON(current != this_rq()->idle);
+
+ if (mm != &init_mm) {
+ switch_mm(mm, &init_mm, current);
+ finish_arch_post_lock_switch();
+ }
+
+ /* finish_cpu(), as ran on the BP, will clean up the active_mm state */
+}
+
+static int __balance_push_cpu_stop(void *arg)
+{
+ struct task_struct *p = arg;
+ struct rq *rq = this_rq();
+ struct rq_flags rf;
+ int cpu;
+
+ raw_spin_lock_irq(&p->pi_lock);
+ rq_lock(rq, &rf);
+
+ update_rq_clock(rq);
+
+ if (task_rq(p) == rq && task_on_rq_queued(p)) {
+ cpu = select_fallback_rq(rq->cpu, p);
+ rq = __migrate_task(rq, p, cpu);
+ }
+
+ rq_unlock(rq, &rf);
+ raw_spin_unlock_irq(&p->pi_lock);
+
+ put_task_struct(p);
+
+ return 0;
+}
+
+static DEFINE_PER_CPU(struct cpu_stop_work, push_work);
+
+/*
+ * This is enabled below SCHED_AP_ACTIVE, i.e. when !cpu_active(), but it only
+ * takes effect while the hotplug direction is down (the CPU is going offline).
+ */
+static void balance_push(struct rq *rq)
+{
+ struct task_struct *push_task = rq->curr;
+
+ lockdep_assert_held(&rq->lock);
+
+ /*
+ * Ensure the thing is persistent until balance_push_set(.on = false);
+ */
+ rq->balance_callback = &balance_push_callback;
+
+ /*
+ * Only active while going offline and when invoked on the outgoing
+ * CPU.
+ */
+ if (!cpu_dying(rq->cpu) || rq != this_rq())
+ return;
+
+ /*
+ * Both the cpu-hotplug and stop task are in this case and are
+ * required to complete the hotplug process.
+ */
+ if (kthread_is_per_cpu(push_task) ||
+ is_migration_disabled(push_task)) {
+
+ /*
+ * If this is the idle task on the outgoing CPU try to wake
+ * up the hotplug control thread which might wait for the
+ * last task to vanish. The rcuwait_active() check is
+ * accurate here because the waiter is pinned on this CPU
+ * and can't obviously be running in parallel.
+ *
+ * On RT kernels this also has to check whether there are
+ * pinned and scheduled out tasks on the runqueue. They
+ * need to leave the migrate disabled section first.
+ */
+ if (!rq->nr_running && !rq_has_pinned_tasks(rq) &&
+ rcuwait_active(&rq->hotplug_wait)) {
+ raw_spin_unlock(&rq->lock);
+ rcuwait_wake_up(&rq->hotplug_wait);
+ raw_spin_lock(&rq->lock);
+ }
+ return;
+ }
+
+ get_task_struct(push_task);
+ /*
+ * Temporarily drop rq->lock such that we can wake-up the stop task.
+ * Both preemption and IRQs are still disabled.
+ */
+ raw_spin_unlock(&rq->lock);
+ stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
+ this_cpu_ptr(&push_work));
+ /*
+ * At this point need_resched() is true and we'll take the loop in
+ * schedule(). The next pick is obviously going to be the stop task
+ * which kthread_is_per_cpu() and will push this task away.
+ */
+ raw_spin_lock(&rq->lock);
+}
+
+static void balance_push_set(int cpu, bool on)
+{
+ struct rq *rq = cpu_rq(cpu);
+ struct rq_flags rf;
+
+ rq_lock_irqsave(rq, &rf);
+ if (on) {
+ WARN_ON_ONCE(rq->balance_callback);
+ rq->balance_callback = &balance_push_callback;
+ } else if (rq->balance_callback == &balance_push_callback) {
+ rq->balance_callback = NULL;
+ }
+ rq_unlock_irqrestore(rq, &rf);
+}
+
+/*
+ * Invoked from a CPU's hotplug control thread after the CPU has been marked
+ * inactive. All tasks which are not per CPU kernel threads are either
+ * pushed off this CPU now via balance_push() or placed on a different CPU
+ * during wakeup. Wait until the CPU is quiescent.
+ */
+static void balance_hotplug_wait(void)
+{
+ struct rq *rq = this_rq();
+
+ rcuwait_wait_event(&rq->hotplug_wait,
+ rq->nr_running == 1 && !rq_has_pinned_tasks(rq),
+ TASK_UNINTERRUPTIBLE);
+}
+
+#else
+
+static void balance_push(struct rq *rq)
+{
+}
+
+static void balance_push_set(int cpu, bool on)
+{
+}
+
+static inline void balance_hotplug_wait(void)
+{
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static void set_rq_offline(struct rq *rq)
+{
+ if (rq->online)
+ rq->online = false;
+}
+
+static void set_rq_online(struct rq *rq)
+{
+ if (!rq->online)
+ rq->online = true;
+}
+
+/*
+ * used to mark begin/end of suspend/resume:
+ */
+static int num_cpus_frozen;
+
+/*
+ * Update cpusets according to cpu_active mask. If cpusets are
+ * disabled, cpuset_update_active_cpus() becomes a simple wrapper
+ * around partition_sched_domains().
+ *
+ * If we come here as part of a suspend/resume, don't touch cpusets because we
+ * want to restore it back to its original state upon resume anyway.
+ */
+static void cpuset_cpu_active(void)
+{
+ if (cpuhp_tasks_frozen) {
+ /*
+ * num_cpus_frozen tracks how many CPUs are involved in suspend
+ * resume sequence. As long as this is not the last online
+ * operation in the resume sequence, just build a single sched
+ * domain, ignoring cpusets.
+ */
+ partition_sched_domains(1, NULL, NULL);
+ if (--num_cpus_frozen)
+ return;
+ /*
+ * This is the last CPU online operation. So fall through and
+ * restore the original sched domains by considering the
+ * cpuset configurations.
+ */
+ cpuset_force_rebuild();
+ }
+
+ cpuset_update_active_cpus();
+}
+
+static int cpuset_cpu_inactive(unsigned int cpu)
+{
+ if (!cpuhp_tasks_frozen) {
+ cpuset_update_active_cpus();
+ } else {
+ num_cpus_frozen++;
+ partition_sched_domains(1, NULL, NULL);
+ }
+ return 0;
+}
+
+int sched_cpu_activate(unsigned int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
+ /*
+ * Clear the balance_push callback and prepare to schedule
+ * regular tasks.
+ */
+ balance_push_set(cpu, false);
+
+#ifdef CONFIG_SCHED_SMT
+ /*
+ * When going up, increment the number of cores with SMT present.
+ */
+ if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+ static_branch_inc_cpuslocked(&sched_smt_present);
+#endif
+ set_cpu_active(cpu, true);
+
+ if (sched_smp_initialized)
+ cpuset_cpu_active();
+
+ /*
+ * Put the rq online, if not already. This happens:
+ *
+ * 1) In the early boot process, because we build the real domains
+ * after all cpus have been brought up.
+ *
+ * 2) At runtime, if cpuset_cpu_active() fails to rebuild the
+ * domains.
+ */
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ set_rq_online(rq);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+ return 0;
+}
+
+int sched_cpu_deactivate(unsigned int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+ int ret;
+
+ set_cpu_active(cpu, false);
+
+ /*
+ * From this point forward, this CPU will refuse to run any task that
+ * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively
+ * push those tasks away until this gets cleared, see
+ * sched_cpu_dying().
+ */
+ balance_push_set(cpu, true);
+
+ /*
+ * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU
+ * users of this state to go away such that all new such users will
+ * observe it.
+ *
+ * Specifically, we rely on ttwu to no longer target this CPU, see
+ * ttwu_queue_cond() and is_cpu_allowed().
+ *
+ * Do the sync before parking the smpboot threads to take care of the RCU boost case.
+ */
+ synchronize_rcu();
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ update_rq_clock(rq);
+ set_rq_offline(rq);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+#ifdef CONFIG_SCHED_SMT
+ /*
+ * When going down, decrement the number of cores with SMT present.
+ */
+ if (cpumask_weight(cpu_smt_mask(cpu)) == 2) {
+ static_branch_dec_cpuslocked(&sched_smt_present);
+ if (!static_branch_likely(&sched_smt_present))
+ cpumask_clear(&sched_sg_idle_mask);
+ }
+#endif
+
+ if (!sched_smp_initialized)
+ return 0;
+
+ ret = cpuset_cpu_inactive(cpu);
+ if (ret) {
+ balance_push_set(cpu, false);
+ set_cpu_active(cpu, true);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void sched_rq_cpu_starting(unsigned int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+
+ rq->calc_load_update = calc_load_update;
+}
+
+int sched_cpu_starting(unsigned int cpu)
+{
+ sched_rq_cpu_starting(cpu);
+ sched_tick_start(cpu);
+ return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/*
+ * Invoked immediately before the stopper thread is invoked to bring the
+ * CPU down completely. At this point all per CPU kthreads except the
+ * hotplug thread (current) and the stopper thread (inactive) have been
+ * either parked or have been unbound from the outgoing CPU. Ensure that
+ * any of those which might be on the way out are gone.
+ *
+ * If after this point a bound task is being woken on this CPU then the
+ * responsible hotplug callback has failed to do its job.
+ * sched_cpu_dying() will catch it with the appropriate fireworks.
+ */
+int sched_cpu_wait_empty(unsigned int cpu)
+{
+ balance_hotplug_wait();
+ return 0;
+}
+
+/*
+ * Since this CPU is going 'away' for a while, fold any nr_active delta we
+ * might have. Called from the CPU stopper task after ensuring that the
+ * stopper is the last running task on the CPU, so nr_active count is
+ * stable. We need to take the teardown thread which is calling this into
+ * account, so we hand in adjust = 1 to the load calculation.
+ *
+ * Also see the comment "Global load-average calculations".
+ */
+static void calc_load_migrate(struct rq *rq)
+{
+ long delta = calc_load_fold_active(rq, 1);
+
+ if (delta)
+ atomic_long_add(delta, &calc_load_tasks);
+}
+
+static void dump_rq_tasks(struct rq *rq, const char *loglvl)
+{
+ struct task_struct *g, *p;
+ int cpu = cpu_of(rq);
+
+ lockdep_assert_held(&rq->lock);
+
+ printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running);
+ for_each_process_thread(g, p) {
+ if (task_cpu(p) != cpu)
+ continue;
+
+ if (!task_on_rq_queued(p))
+ continue;
+
+ printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm);
+ }
+}
+
+int sched_cpu_dying(unsigned int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
+ /* Handle pending wakeups and then migrate everything off */
+ sched_tick_stop(cpu);
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) {
+ WARN(true, "Dying CPU not properly vacated!");
+ dump_rq_tasks(rq, KERN_WARNING);
+ }
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+ calc_load_migrate(rq);
+ hrtick_clear(rq);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_SMP
+static void sched_init_topology_cpumask_early(void)
+{
+ int cpu;
+ cpumask_t *tmp;
+
+ for_each_possible_cpu(cpu) {
+ /* init topo masks */
+ tmp = per_cpu(sched_cpu_topo_masks, cpu);
+
+ cpumask_copy(tmp, cpumask_of(cpu));
+ tmp++;
+ cpumask_copy(tmp, cpu_possible_mask);
+ per_cpu(sched_cpu_llc_mask, cpu) = tmp;
+ per_cpu(sched_cpu_topo_end_mask, cpu) = ++tmp;
+ /*per_cpu(sd_llc_id, cpu) = cpu;*/
+ }
+}
+
+#define TOPOLOGY_CPUMASK(name, mask, last)\
+ if (cpumask_and(topo, topo, mask)) { \
+ cpumask_copy(topo, mask); \
+ printk(KERN_INFO "sched: cpu#%02d topo: 0x%08lx - "#name, \
+ cpu, (topo++)->bits[0]); \
+ } \
+ if (!last) \
+ cpumask_complement(topo, mask)
+
+static void sched_init_topology_cpumask(void)
+{
+ int cpu;
+ cpumask_t *topo;
+
+ for_each_online_cpu(cpu) {
+ /* take chance to reset time slice for idle tasks */
+ cpu_rq(cpu)->idle->time_slice = sched_timeslice_ns;
+
+ topo = per_cpu(sched_cpu_topo_masks, cpu) + 1;
+
+ cpumask_complement(topo, cpumask_of(cpu));
+#ifdef CONFIG_SCHED_SMT
+ TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false);
+#endif
+ per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu));
+ per_cpu(sched_cpu_llc_mask, cpu) = topo;
+ TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false);
+
+ TOPOLOGY_CPUMASK(core, topology_core_cpumask(cpu), false);
+
+ TOPOLOGY_CPUMASK(others, cpu_online_mask, true);
+
+ per_cpu(sched_cpu_topo_end_mask, cpu) = topo;
+ printk(KERN_INFO "sched: cpu#%02d llc_id = %d, llc_mask idx = %d\n",
+ cpu, per_cpu(sd_llc_id, cpu),
+ (int) (per_cpu(sched_cpu_llc_mask, cpu) -
+ per_cpu(sched_cpu_topo_masks, cpu)));
+ }
+}
+#endif
+
+void __init sched_init_smp(void)
+{
+ /* Move init over to a non-isolated CPU */
+ if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0)
+ BUG();
+ current->flags &= ~PF_NO_SETAFFINITY;
+
+ sched_init_topology_cpumask();
+
+ sched_smp_initialized = true;
+}
+#else
+void __init sched_init_smp(void)
+{
+ cpu_rq(0)->idle->time_slice = sched_timeslice_ns;
+}
+#endif /* CONFIG_SMP */
+
+int in_sched_functions(unsigned long addr)
+{
+ return in_lock_functions(addr) ||
+ (addr >= (unsigned long)__sched_text_start
+ && addr < (unsigned long)__sched_text_end);
+}
+
+#ifdef CONFIG_CGROUP_SCHED
+/* task group related information */
+struct task_group {
+ struct cgroup_subsys_state css;
+
+ struct rcu_head rcu;
+ struct list_head list;
+
+ struct task_group *parent;
+ struct list_head siblings;
+ struct list_head children;
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ unsigned long shares;
+#endif
+};
+
+/*
+ * Default task group.
+ * Every task in system belongs to this group at bootup.
+ */
+struct task_group root_task_group;
+LIST_HEAD(task_groups);
+
+/* Cacheline aligned slab cache for task_group */
+static struct kmem_cache *task_group_cache __read_mostly;
+#endif /* CONFIG_CGROUP_SCHED */
+
+void __init sched_init(void)
+{
+ int i;
+ struct rq *rq;
+
+ printk(KERN_INFO ALT_SCHED_VERSION_MSG);
+
+ wait_bit_init();
+
+#ifdef CONFIG_SMP
+ for (i = 0; i < SCHED_QUEUE_BITS; i++)
+ cpumask_copy(sched_rq_watermark + i, cpu_present_mask);
+#endif
+
+#ifdef CONFIG_CGROUP_SCHED
+ task_group_cache = KMEM_CACHE(task_group, 0);
+
+ list_add(&root_task_group.list, &task_groups);
+ INIT_LIST_HEAD(&root_task_group.children);
+ INIT_LIST_HEAD(&root_task_group.siblings);
+#endif /* CONFIG_CGROUP_SCHED */
+ for_each_possible_cpu(i) {
+ rq = cpu_rq(i);
+
+ sched_queue_init(&rq->queue);
+ rq->watermark = IDLE_TASK_SCHED_PRIO;
+ rq->skip = NULL;
+
+ raw_spin_lock_init(&rq->lock);
+ rq->nr_running = rq->nr_uninterruptible = 0;
+ rq->calc_load_active = 0;
+ rq->calc_load_update = jiffies + LOAD_FREQ;
+#ifdef CONFIG_SMP
+ rq->online = false;
+ rq->cpu = i;
+
+#ifdef CONFIG_SCHED_SMT
+ rq->active_balance = 0;
+#endif
+
+#ifdef CONFIG_NO_HZ_COMMON
+ INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq);
+#endif
+ rq->balance_callback = &balance_push_callback;
+#ifdef CONFIG_HOTPLUG_CPU
+ rcuwait_init(&rq->hotplug_wait);
+#endif
+#endif /* CONFIG_SMP */
+ rq->nr_switches = 0;
+
+ hrtick_rq_init(rq);
+ atomic_set(&rq->nr_iowait, 0);
+ }
+#ifdef CONFIG_SMP
+ /* Set rq->online for cpu 0 */
+ cpu_rq(0)->online = true;
+#endif
+ /*
+ * The boot idle thread does lazy MMU switching as well:
+ */
+ mmgrab(&init_mm);
+ enter_lazy_tlb(&init_mm, current);
+
+ /*
+ * The idle task doesn't need the kthread struct to function, but it
+ * is dressed up as a per-CPU kthread and thus needs to play the part
+ * if we want to avoid special-casing it in code that deals with per-CPU
+ * kthreads.
+ */
+ WARN_ON(!set_kthread_struct(current));
+
+ /*
+ * Make us the idle thread. Technically, schedule() should not be
+ * called from this thread, however somewhere below it might be,
+ * but because we are the idle thread, we just pick up running again
+ * when this runqueue becomes "idle".
+ */
+ init_idle(current, smp_processor_id());
+
+ calc_load_update = jiffies + LOAD_FREQ;
+
+#ifdef CONFIG_SMP
+ idle_thread_set_boot_cpu();
+ balance_push_set(smp_processor_id(), false);
+
+ sched_init_topology_cpumask_early();
+#endif /* SMP */
+
+ psi_init();
+
+ preempt_dynamic_init();
+}
+
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+
+void __might_sleep(const char *file, int line)
+{
+ unsigned int state = get_current_state();
+ /*
+ * Blocking primitives will set (and therefore destroy) current->state,
+ * since we will exit with TASK_RUNNING make sure we enter with it,
+ * otherwise we will destroy state.
+ */
+ WARN_ONCE(state != TASK_RUNNING && current->task_state_change,
+ "do not call blocking ops when !TASK_RUNNING; "
+ "state=%x set at [<%p>] %pS\n", state,
+ (void *)current->task_state_change,
+ (void *)current->task_state_change);
+
+ __might_resched(file, line, 0);
+}
+EXPORT_SYMBOL(__might_sleep);
+
+static void print_preempt_disable_ip(int preempt_offset, unsigned long ip)
+{
+ if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT))
+ return;
+
+ if (preempt_count() == preempt_offset)
+ return;
+
+ pr_err("Preemption disabled at:");
+ print_ip_sym(KERN_ERR, ip);
+}
+
+static inline bool resched_offsets_ok(unsigned int offsets)
+{
+ unsigned int nested = preempt_count();
+
+ nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT;
+
+ return nested == offsets;
+}
+
+void __might_resched(const char *file, int line, unsigned int offsets)
+{
+ /* Ratelimiting timestamp: */
+ static unsigned long prev_jiffy;
+
+ unsigned long preempt_disable_ip;
+
+ /* WARN_ON_ONCE() by default, no rate limit required: */
+ rcu_sleep_check();
+
+ if ((resched_offsets_ok(offsets) && !irqs_disabled() &&
+ !is_idle_task(current) && !current->non_block_count) ||
+ system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
+ oops_in_progress)
+ return;
+ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
+ return;
+ prev_jiffy = jiffies;
+
+ /* Save this before calling printk(), since that will clobber it: */
+ preempt_disable_ip = get_preempt_disable_ip(current);
+
+ pr_err("BUG: sleeping function called from invalid context at %s:%d\n",
+ file, line);
+ pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n",
+ in_atomic(), irqs_disabled(), current->non_block_count,
+ current->pid, current->comm);
+ pr_err("preempt_count: %x, expected: %x\n", preempt_count(),
+ offsets & MIGHT_RESCHED_PREEMPT_MASK);
+
+ if (IS_ENABLED(CONFIG_PREEMPT_RCU)) {
+ pr_err("RCU nest depth: %d, expected: %u\n",
+ rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT);
+ }
+
+ if (task_stack_end_corrupted(current))
+ pr_emerg("Thread overran stack, or stack corrupted\n");
+
+ debug_show_held_locks(current);
+ if (irqs_disabled())
+ print_irqtrace_events(current);
+
+ print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
+ preempt_disable_ip);
+
+ dump_stack();
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+}
+EXPORT_SYMBOL(__might_resched);
+
+void __cant_sleep(const char *file, int line, int preempt_offset)
+{
+ static unsigned long prev_jiffy;
+
+ if (irqs_disabled())
+ return;
+
+ if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
+ return;
+
+ if (preempt_count() > preempt_offset)
+ return;
+
+ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
+ return;
+ prev_jiffy = jiffies;
+
+ printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line);
+ printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
+ in_atomic(), irqs_disabled(),
+ current->pid, current->comm);
+
+ debug_show_held_locks(current);
+ dump_stack();
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+}
+EXPORT_SYMBOL_GPL(__cant_sleep);
+
+#ifdef CONFIG_SMP
+void __cant_migrate(const char *file, int line)
+{
+ static unsigned long prev_jiffy;
+
+ if (irqs_disabled())
+ return;
+
+ if (is_migration_disabled(current))
+ return;
+
+ if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
+ return;
+
+ if (preempt_count() > 0)
+ return;
+
+ if (current->migration_flags & MDF_FORCE_ENABLED)
+ return;
+
+ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
+ return;
+ prev_jiffy = jiffies;
+
+ pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
+ pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
+ in_atomic(), irqs_disabled(), is_migration_disabled(current),
+ current->pid, current->comm);
+
+ debug_show_held_locks(current);
+ dump_stack();
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+}
+EXPORT_SYMBOL_GPL(__cant_migrate);
+#endif
+#endif
+
+#ifdef CONFIG_MAGIC_SYSRQ
+void normalize_rt_tasks(void)
+{
+ struct task_struct *g, *p;
+ struct sched_attr attr = {
+ .sched_policy = SCHED_NORMAL,
+ };
+
+ read_lock(&tasklist_lock);
+ for_each_process_thread(g, p) {
+ /*
+ * Only normalize user tasks:
+ */
+ if (p->flags & PF_KTHREAD)
+ continue;
+
+ schedstat_set(p->stats.wait_start, 0);
+ schedstat_set(p->stats.sleep_start, 0);
+ schedstat_set(p->stats.block_start, 0);
+
+ if (!rt_task(p)) {
+ /*
+ * Renice negative nice level userspace
+ * tasks back to 0:
+ */
+ if (task_nice(p) < 0)
+ set_user_nice(p, 0);
+ continue;
+ }
+
+ __sched_setscheduler(p, &attr, false, false);
+ }
+ read_unlock(&tasklist_lock);
+}
+#endif /* CONFIG_MAGIC_SYSRQ */
+
+#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
+/*
+ * These functions are only useful for the IA64 MCA handling, or kdb.
+ *
+ * They can only be called when the whole system has been
+ * stopped - every CPU needs to be quiescent, and no scheduling
+ * activity can take place. Using them for anything else would
+ * be a serious bug, and as a result, they aren't even visible
+ * under any other configuration.
+ */
+
+/**
+ * curr_task - return the current task for a given CPU.
+ * @cpu: the processor in question.
+ *
+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
+ *
+ * Return: The current task for @cpu.
+ */
+struct task_struct *curr_task(int cpu)
+{
+ return cpu_curr(cpu);
+}
+
+#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
+
+#ifdef CONFIG_IA64
+/**
+ * ia64_set_curr_task - set the current task for a given CPU.
+ * @cpu: the processor in question.
+ * @p: the task pointer to set.
+ *
+ * Description: This function must only be used when non-maskable interrupts
+ * are serviced on a separate stack. It allows the architecture to switch the
+ * notion of the current task on a CPU in a non-blocking manner. This function
+ * must be called with all CPUs synchronised and interrupts disabled, and the
+ * caller must save the original value of the current task (see
+ * curr_task() above) and restore that value before reenabling interrupts and
+ * re-starting the system.
+ *
+ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
+ */
+void ia64_set_curr_task(int cpu, struct task_struct *p)
+{
+ cpu_curr(cpu) = p;
+}
+
+#endif
+
+#ifdef CONFIG_CGROUP_SCHED
+static void sched_free_group(struct task_group *tg)
+{
+ kmem_cache_free(task_group_cache, tg);
+}
+
+static void sched_free_group_rcu(struct rcu_head *rhp)
+{
+ sched_free_group(container_of(rhp, struct task_group, rcu));
+}
+
+static void sched_unregister_group(struct task_group *tg)
+{
+ /*
+ * We have to wait for yet another RCU grace period to expire, as
+ * print_cfs_stats() might run concurrently.
+ */
+ call_rcu(&tg->rcu, sched_free_group_rcu);
+}
+
+/* allocate runqueue etc for a new task group */
+struct task_group *sched_create_group(struct task_group *parent)
+{
+ struct task_group *tg;
+
+ tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO);
+ if (!tg)
+ return ERR_PTR(-ENOMEM);
+
+ return tg;
+}
+
+void sched_online_group(struct task_group *tg, struct task_group *parent)
+{
+}
+
+/* rcu callback to free various structures associated with a task group */
+static void sched_unregister_group_rcu(struct rcu_head *rhp)
+{
+ /* Now it should be safe to free those cfs_rqs: */
+ sched_unregister_group(container_of(rhp, struct task_group, rcu));
+}
+
+void sched_destroy_group(struct task_group *tg)
+{
+ /* Wait for possible concurrent references to cfs_rqs complete: */
+ call_rcu(&tg->rcu, sched_unregister_group_rcu);
+}
+
+void sched_release_group(struct task_group *tg)
+{
+}
+
+static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
+{
+ return css ? container_of(css, struct task_group, css) : NULL;
+}
+
+static struct cgroup_subsys_state *
+cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
+{
+ struct task_group *parent = css_tg(parent_css);
+ struct task_group *tg;
+
+ if (!parent) {
+ /* This is early initialization for the top cgroup */
+ return &root_task_group.css;
+ }
+
+ tg = sched_create_group(parent);
+ if (IS_ERR(tg))
+ return ERR_PTR(-ENOMEM);
+ return &tg->css;
+}
+
+/* Expose task group only after completing cgroup initialization */
+static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
+{
+ struct task_group *tg = css_tg(css);
+ struct task_group *parent = css_tg(css->parent);
+
+ if (parent)
+ sched_online_group(tg, parent);
+ return 0;
+}
+
+static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
+{
+ struct task_group *tg = css_tg(css);
+
+ sched_release_group(tg);
+}
+
+static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
+{
+ struct task_group *tg = css_tg(css);
+
+ /*
+ * Relies on the RCU grace period between css_released() and this.
+ */
+ sched_unregister_group(tg);
+}
+
+static void cpu_cgroup_fork(struct task_struct *task)
+{
+}
+
+static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
+{
+ return 0;
+}
+
+static void cpu_cgroup_attach(struct cgroup_taskset *tset)
+{
+}
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static DEFINE_MUTEX(shares_mutex);
+
+int sched_group_set_shares(struct task_group *tg, unsigned long shares)
+{
+ /*
+ * We can't change the weight of the root cgroup.
+ */
+ if (&root_task_group == tg)
+ return -EINVAL;
+
+ shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
+
+ mutex_lock(&shares_mutex);
+ if (tg->shares == shares)
+ goto done;
+
+ tg->shares = shares;
+done:
+ mutex_unlock(&shares_mutex);
+ return 0;
+}
+
+static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
+ struct cftype *cftype, u64 shareval)
+{
+ if (shareval > scale_load_down(ULONG_MAX))
+ shareval = MAX_SHARES;
+ return sched_group_set_shares(css_tg(css), scale_load(shareval));
+}
+
+static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft)
+{
+ struct task_group *tg = css_tg(css);
+
+ return (u64) scale_load_down(tg->shares);
+}
+#endif
+
+static struct cftype cpu_legacy_files[] = {
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ {
+ .name = "shares",
+ .read_u64 = cpu_shares_read_u64,
+ .write_u64 = cpu_shares_write_u64,
+ },
+#endif
+ { } /* Terminate */
+};
+
+
+static struct cftype cpu_files[] = {
+ { } /* terminate */
+};
+
+static int cpu_extra_stat_show(struct seq_file *sf,
+ struct cgroup_subsys_state *css)
+{
+ return 0;
+}
+
+struct cgroup_subsys cpu_cgrp_subsys = {
+ .css_alloc = cpu_cgroup_css_alloc,
+ .css_online = cpu_cgroup_css_online,
+ .css_released = cpu_cgroup_css_released,
+ .css_free = cpu_cgroup_css_free,
+ .css_extra_stat_show = cpu_extra_stat_show,
+ .fork = cpu_cgroup_fork,
+ .can_attach = cpu_cgroup_can_attach,
+ .attach = cpu_cgroup_attach,
+ .legacy_cftypes = cpu_legacy_files,
+ .dfl_cftypes = cpu_files,
+ .early_init = true,
+ .threaded = true,
+};
+#endif /* CONFIG_CGROUP_SCHED */
+
+#undef CREATE_TRACE_POINTS
diff --git a/kernel/sched/alt_debug.c b/kernel/sched/alt_debug.c
new file mode 100644
index 000000000000..1212a031700e
--- /dev/null
+++ b/kernel/sched/alt_debug.c
@@ -0,0 +1,31 @@
+/*
+ * kernel/sched/alt_debug.c
+ *
+ * Print the alt scheduler debugging details
+ *
+ * Author: Alfred Chen
+ * Date : 2020
+ */
+#include "sched.h"
+
+/*
+ * This allows printing both to /proc/sched_debug and
+ * to the console
+ */
+#define SEQ_printf(m, x...) \
+ do { \
+ if (m) \
+ seq_printf(m, x); \
+ else \
+ pr_cont(x); \
+ } while (0)
+
+void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns,
+ struct seq_file *m)
+{
+ SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns),
+ get_nr_threads(p));
+}
+
+void proc_sched_set_task(struct task_struct *p)
+{}
diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h
new file mode 100644
index 000000000000..a181bf9ce57d
--- /dev/null
+++ b/kernel/sched/alt_sched.h
@@ -0,0 +1,645 @@
+#ifndef ALT_SCHED_H
+#define ALT_SCHED_H
+
+#include <linux/psi.h>
+#include <linux/stop_machine.h>
+#include <linux/syscalls.h>
+#include <linux/tick.h>
+
+#include <trace/events/power.h>
+#include <trace/events/sched.h>
+
+#include "../workqueue_internal.h"
+
+#include "cpupri.h"
+
+#ifdef CONFIG_SCHED_BMQ
+/* bits:
+ * RT(0-99), (low prio adj range + nice width + high prio adj range) / 2, cpu idle task */
+#define SCHED_BITS (MAX_RT_PRIO + NICE_WIDTH / 2 + MAX_PRIORITY_ADJ + 1)
+#endif
+
+#ifdef CONFIG_SCHED_PDS
+/* bits: RT(0-99), reserved(100-127), NORMAL_PRIO_NUM, cpu idle task */
+#define SCHED_BITS (MIN_NORMAL_PRIO + NORMAL_PRIO_NUM + 1)
+#endif /* CONFIG_SCHED_PDS */
+
+#define IDLE_TASK_SCHED_PRIO (SCHED_BITS - 1)
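+
+/*
+ * e.g. for PDS, assuming MIN_NORMAL_PRIO == 128 and NORMAL_PRIO_NUM == 64,
+ * SCHED_BITS is 128 + 64 + 1 == 193: indexes 0-99 hold RT tasks, 100-127 are
+ * reserved, 128-191 hold normal tasks, and the last index, 192, is
+ * IDLE_TASK_SCHED_PRIO.
+ */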
+
+#ifdef CONFIG_SCHED_DEBUG
+# define SCHED_WARN_ON(x) WARN_ONCE(x, #x)
+extern void resched_latency_warn(int cpu, u64 latency);
+#else
+# define SCHED_WARN_ON(x) ({ (void)(x), 0; })
+static inline void resched_latency_warn(int cpu, u64 latency) {}
+#endif
+
+/*
+ * Increase resolution of nice-level calculations for 64-bit architectures.
+ * The extra resolution improves shares distribution and load balancing of
+ * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
+ * hierarchies, especially on larger systems. This is not a user-visible change
+ * and does not change the user-interface for setting shares/weights.
+ *
+ * We increase resolution only if we have enough bits to allow this increased
+ * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit
+ * are pretty high and the returns do not justify the increased costs.
+ *
+ * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to
+ * increase coverage and consistency always enable it on 64-bit platforms.
+ */
+#ifdef CONFIG_64BIT
+# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
+# define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT)
+# define scale_load_down(w) \
+({ \
+ unsigned long __w = (w); \
+ if (__w) \
+ __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \
+ __w; \
+})
+#else
+# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT)
+# define scale_load(w) (w)
+# define scale_load_down(w) (w)
+#endif
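+
+/*
+ * e.g. on 64-bit with SCHED_FIXEDPOINT_SHIFT == 10, the nice-0 weight 1024 is
+ * stored as scale_load(1024) == 1024 << 10 == 1048576, and
+ * scale_load_down(1048576) == max(2, 1048576 >> 10) == 1024; the max(2, ...)
+ * keeps a non-zero weight from collapsing to 0 or 1 after scaling down.
+ */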
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+#define ROOT_TASK_GROUP_LOAD NICE_0_LOAD
+
+/*
+ * A weight of 0 or 1 can cause arithmetic problems.
+ * The weight of a cfs_rq is the sum of the weights of the entities
+ * queued on it, so the weight of an entity should not be too large,
+ * and neither should the shares value of a task group.
+ * (The default weight is 1024 - so there's no practical
+ * limitation from this.)
+ */
+#define MIN_SHARES (1UL << 1)
+#define MAX_SHARES (1UL << 18)
+#endif
+
+/* task_struct::on_rq states: */
+#define TASK_ON_RQ_QUEUED 1
+#define TASK_ON_RQ_MIGRATING 2
+
+static inline int task_on_rq_queued(struct task_struct *p)
+{
+ return p->on_rq == TASK_ON_RQ_QUEUED;
+}
+
+static inline int task_on_rq_migrating(struct task_struct *p)
+{
+ return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
+}
+
+/*
+ * wake flags
+ */
+#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
+#define WF_FORK 0x02 /* child wakeup after fork */
+#define WF_MIGRATED 0x04 /* internal use, task got migrated */
+#define WF_ON_CPU 0x08 /* Wakee is on_rq */
+
+#define SCHED_QUEUE_BITS (SCHED_BITS - 1)
+
+struct sched_queue {
+ DECLARE_BITMAP(bitmap, SCHED_QUEUE_BITS);
+ struct list_head heads[SCHED_BITS];
+};
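+
+/*
+ * A minimal sketch of a pick on this structure (the real lookup in alt_core.c
+ * also goes through the sched_idx2prio()/sched_prio2idx() mapping): the lowest
+ * set bit is the highest-priority non-empty level, and the first entry on that
+ * level's list is the next task to run:
+ *
+ *	idx = find_first_bit(q->bitmap, SCHED_QUEUE_BITS);
+ *	p = list_first_entry(&q->heads[idx], struct task_struct, sq_node);
+ *
+ * Note that when every level is empty, find_first_bit() returns
+ * SCHED_QUEUE_BITS, which equals IDLE_TASK_SCHED_PRIO, i.e. the idle task's
+ * queue is picked.
+ */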
+
+/*
+ * This is the main, per-CPU runqueue data structure.
+ * This data should only be modified by the local cpu.
+ */
+struct rq {
+ /* runqueue lock: */
+ raw_spinlock_t lock;
+
+ struct task_struct __rcu *curr;
+ struct task_struct *idle, *stop, *skip;
+ struct mm_struct *prev_mm;
+
+ struct sched_queue queue;
+#ifdef CONFIG_SCHED_PDS
+ u64 time_edge;
+#endif
+ unsigned long watermark;
+
+ /* switch count */
+ u64 nr_switches;
+
+ atomic_t nr_iowait;
+
+#ifdef CONFIG_SCHED_DEBUG
+ u64 last_seen_need_resched_ns;
+ int ticks_without_resched;
+#endif
+
+#ifdef CONFIG_MEMBARRIER
+ int membarrier_state;
+#endif
+
+#ifdef CONFIG_SMP
+ int cpu; /* cpu of this runqueue */
+ bool online;
+
+ unsigned int ttwu_pending;
+ unsigned char nohz_idle_balance;
+ unsigned char idle_balance;
+
+#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
+ struct sched_avg avg_irq;
+#endif
+
+#ifdef CONFIG_SCHED_SMT
+ int active_balance;
+ struct cpu_stop_work active_balance_work;
+#endif
+ struct callback_head *balance_callback;
+#ifdef CONFIG_HOTPLUG_CPU
+ struct rcuwait hotplug_wait;
+#endif
+ unsigned int nr_pinned;
+
+#endif /* CONFIG_SMP */
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ u64 prev_irq_time;
+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+#ifdef CONFIG_PARAVIRT
+ u64 prev_steal_time;
+#endif /* CONFIG_PARAVIRT */
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+ u64 prev_steal_time_rq;
+#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
+
+	/* For general cpu load util */
+ s32 load_history;
+ u64 load_block;
+ u64 load_stamp;
+
+ /* calc_load related fields */
+ unsigned long calc_load_update;
+ long calc_load_active;
+
+ u64 clock, last_tick;
+ u64 last_ts_switch;
+ u64 clock_task;
+
+ unsigned int nr_running;
+ unsigned long nr_uninterruptible;
+
+#ifdef CONFIG_SCHED_HRTICK
+#ifdef CONFIG_SMP
+ call_single_data_t hrtick_csd;
+#endif
+ struct hrtimer hrtick_timer;
+ ktime_t hrtick_time;
+#endif
+
+#ifdef CONFIG_SCHEDSTATS
+
+ /* latency stats */
+ struct sched_info rq_sched_info;
+ unsigned long long rq_cpu_time;
+ /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
+
+ /* sys_sched_yield() stats */
+ unsigned int yld_count;
+
+ /* schedule() stats */
+ unsigned int sched_switch;
+ unsigned int sched_count;
+ unsigned int sched_goidle;
+
+ /* try_to_wake_up() stats */
+ unsigned int ttwu_count;
+ unsigned int ttwu_local;
+#endif /* CONFIG_SCHEDSTATS */
+
+#ifdef CONFIG_CPU_IDLE
+	/* Must be inspected within an RCU lock section */
+ struct cpuidle_state *idle_state;
+#endif
+
+#ifdef CONFIG_NO_HZ_COMMON
+#ifdef CONFIG_SMP
+ call_single_data_t nohz_csd;
+#endif
+ atomic_t nohz_flags;
+#endif /* CONFIG_NO_HZ_COMMON */
+};
+
+extern unsigned long rq_load_util(struct rq *rq, unsigned long max);
+
+extern unsigned long calc_load_update;
+extern atomic_long_t calc_load_tasks;
+
+extern void calc_global_load_tick(struct rq *this_rq);
+extern long calc_load_fold_active(struct rq *this_rq, long adjust);
+
+DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
+#define this_rq() this_cpu_ptr(&runqueues)
+#define task_rq(p) cpu_rq(task_cpu(p))
+#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
+#define raw_rq() raw_cpu_ptr(&runqueues)
+
+#ifdef CONFIG_SMP
+#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
+void register_sched_domain_sysctl(void);
+void unregister_sched_domain_sysctl(void);
+#else
+static inline void register_sched_domain_sysctl(void)
+{
+}
+static inline void unregister_sched_domain_sysctl(void)
+{
+}
+#endif
+
+extern bool sched_smp_initialized;
+
+enum {
+ ITSELF_LEVEL_SPACE_HOLDER,
+#ifdef CONFIG_SCHED_SMT
+ SMT_LEVEL_SPACE_HOLDER,
+#endif
+ COREGROUP_LEVEL_SPACE_HOLDER,
+ CORE_LEVEL_SPACE_HOLDER,
+ OTHER_LEVEL_SPACE_HOLDER,
+ NR_CPU_AFFINITY_LEVELS
+};
+
+DECLARE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
+DECLARE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
+
+static inline int
+__best_mask_cpu(const cpumask_t *cpumask, const cpumask_t *mask)
+{
+ int cpu;
+
+ while ((cpu = cpumask_any_and(cpumask, mask)) >= nr_cpu_ids)
+ mask++;
+
+ return cpu;
+}
+
+static inline int best_mask_cpu(int cpu, const cpumask_t *mask)
+{
+ return __best_mask_cpu(mask, per_cpu(sched_cpu_topo_masks, cpu));
+}
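+
+/*
+ * sched_cpu_topo_masks[cpu] is ordered from the closest affinity level
+ * (the CPU itself, then its SMT siblings, ...) outwards, so best_mask_cpu()
+ * returns an allowed CPU sharing as much topology with @cpu as possible, e.g.:
+ *
+ *	target = best_mask_cpu(task_cpu(p), p->cpus_ptr);
+ */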
+
+extern void flush_smp_call_function_queue(void);
+
+#else /* !CONFIG_SMP */
+static inline void flush_smp_call_function_queue(void) { }
+#endif
+
+#ifndef arch_scale_freq_tick
+static __always_inline
+void arch_scale_freq_tick(void)
+{
+}
+#endif
+
+#ifndef arch_scale_freq_capacity
+static __always_inline
+unsigned long arch_scale_freq_capacity(int cpu)
+{
+ return SCHED_CAPACITY_SCALE;
+}
+#endif
+
+static inline u64 __rq_clock_broken(struct rq *rq)
+{
+ return READ_ONCE(rq->clock);
+}
+
+static inline u64 rq_clock(struct rq *rq)
+{
+ /*
+	 * Relax lockdep_assert_held() checking as in VRQ; calls to
+	 * sched_info_xxxx() may not hold rq->lock:
+ * lockdep_assert_held(&rq->lock);
+ */
+ return rq->clock;
+}
+
+static inline u64 rq_clock_task(struct rq *rq)
+{
+ /*
+	 * Relax lockdep_assert_held() checking as in VRQ; calls to
+	 * sched_info_xxxx() may not hold rq->lock:
+ * lockdep_assert_held(&rq->lock);
+ */
+ return rq->clock_task;
+}
+
+/*
+ * {de,en}queue flags:
+ *
+ * DEQUEUE_SLEEP - task is no longer runnable
+ * ENQUEUE_WAKEUP - task just became runnable
+ *
+ */
+
+#define DEQUEUE_SLEEP 0x01
+
+#define ENQUEUE_WAKEUP 0x01
+
+
+/*
+ * Below are the scheduler APIs used by other kernel code.
+ * They take a dummy rq_flags argument.
+ * TODO: BMQ needs to support these APIs for compatibility with mainline
+ * scheduler code.
+ */
+struct rq_flags {
+ unsigned long flags;
+};
+
+struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
+ __acquires(rq->lock);
+
+struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
+ __acquires(p->pi_lock)
+ __acquires(rq->lock);
+
+static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf)
+ __releases(rq->lock)
+{
+ raw_spin_unlock(&rq->lock);
+}
+
+static inline void
+task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
+ __releases(rq->lock)
+ __releases(p->pi_lock)
+{
+ raw_spin_unlock(&rq->lock);
+ raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags);
+}
+
+static inline void
+rq_lock(struct rq *rq, struct rq_flags *rf)
+ __acquires(rq->lock)
+{
+ raw_spin_lock(&rq->lock);
+}
+
+static inline void
+rq_unlock_irq(struct rq *rq, struct rq_flags *rf)
+ __releases(rq->lock)
+{
+ raw_spin_unlock_irq(&rq->lock);
+}
+
+static inline void
+rq_unlock(struct rq *rq, struct rq_flags *rf)
+ __releases(rq->lock)
+{
+ raw_spin_unlock(&rq->lock);
+}
+
+static inline struct rq *
+this_rq_lock_irq(struct rq_flags *rf)
+ __acquires(rq->lock)
+{
+ struct rq *rq;
+
+ local_irq_disable();
+ rq = this_rq();
+ raw_spin_lock(&rq->lock);
+
+ return rq;
+}
+
+static inline raw_spinlock_t *__rq_lockp(struct rq *rq)
+{
+ return &rq->lock;
+}
+
+static inline raw_spinlock_t *rq_lockp(struct rq *rq)
+{
+ return __rq_lockp(rq);
+}
+
+static inline void lockdep_assert_rq_held(struct rq *rq)
+{
+ lockdep_assert_held(__rq_lockp(rq));
+}
+
+extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass);
+extern void raw_spin_rq_unlock(struct rq *rq);
+
+static inline void raw_spin_rq_lock(struct rq *rq)
+{
+ raw_spin_rq_lock_nested(rq, 0);
+}
+
+static inline void raw_spin_rq_lock_irq(struct rq *rq)
+{
+ local_irq_disable();
+ raw_spin_rq_lock(rq);
+}
+
+static inline void raw_spin_rq_unlock_irq(struct rq *rq)
+{
+ raw_spin_rq_unlock(rq);
+ local_irq_enable();
+}
+
+static inline int task_current(struct rq *rq, struct task_struct *p)
+{
+ return rq->curr == p;
+}
+
+static inline bool task_running(struct task_struct *p)
+{
+ return p->on_cpu;
+}
+
+extern int task_running_nice(struct task_struct *p);
+
+extern struct static_key_false sched_schedstats;
+
+#ifdef CONFIG_CPU_IDLE
+static inline void idle_set_state(struct rq *rq,
+ struct cpuidle_state *idle_state)
+{
+ rq->idle_state = idle_state;
+}
+
+static inline struct cpuidle_state *idle_get_state(struct rq *rq)
+{
+ WARN_ON(!rcu_read_lock_held());
+ return rq->idle_state;
+}
+#else
+static inline void idle_set_state(struct rq *rq,
+ struct cpuidle_state *idle_state)
+{
+}
+
+static inline struct cpuidle_state *idle_get_state(struct rq *rq)
+{
+ return NULL;
+}
+#endif
+
+static inline int cpu_of(const struct rq *rq)
+{
+#ifdef CONFIG_SMP
+ return rq->cpu;
+#else
+ return 0;
+#endif
+}
+
+#include "stats.h"
+
+#ifdef CONFIG_NO_HZ_COMMON
+#define NOHZ_BALANCE_KICK_BIT 0
+#define NOHZ_STATS_KICK_BIT 1
+
+#define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT)
+#define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT)
+
+#define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK)
+
+#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags)
+
+/* TODO: needed?
+extern void nohz_balance_exit_idle(struct rq *rq);
+#else
+static inline void nohz_balance_exit_idle(struct rq *rq) { }
+*/
+#endif
+
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+struct irqtime {
+ u64 total;
+ u64 tick_delta;
+ u64 irq_start_time;
+ struct u64_stats_sync sync;
+};
+
+DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
+
+/*
+ * Returns the irqtime minus the softirq time computed by ksoftirqd.
+ * Otherwise ksoftirqd's sum_exec_runtime would have its own runtime
+ * subtracted and would never move forward.
+ */
+static inline u64 irq_time_read(int cpu)
+{
+ struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
+ unsigned int seq;
+ u64 total;
+
+ do {
+ seq = __u64_stats_fetch_begin(&irqtime->sync);
+ total = irqtime->total;
+ } while (__u64_stats_fetch_retry(&irqtime->sync, seq));
+
+ return total;
+}
+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+
+#ifdef CONFIG_CPU_FREQ
+DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
+#endif /* CONFIG_CPU_FREQ */
+
+#ifdef CONFIG_NO_HZ_FULL
+extern int __init sched_tick_offload_init(void);
+#else
+static inline int sched_tick_offload_init(void) { return 0; }
+#endif
+
+#ifdef arch_scale_freq_capacity
+#ifndef arch_scale_freq_invariant
+#define arch_scale_freq_invariant() (true)
+#endif
+#else /* arch_scale_freq_capacity */
+#define arch_scale_freq_invariant() (false)
+#endif
+
+extern void schedule_idle(void);
+
+#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
+
+/*
+ * !! For sched_setattr_nocheck() (kernel) only !!
+ *
+ * This is actually gross. :(
+ *
+ * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE
+ * tasks, but still be able to sleep. We need this on platforms that cannot
+ * atomically change clock frequency. Remove once fast switching is
+ * available on such platforms.
+ *
+ * SUGOV stands for SchedUtil GOVernor.
+ */
+#define SCHED_FLAG_SUGOV 0x10000000
+
+#ifdef CONFIG_MEMBARRIER
+/*
+ * The scheduler provides memory barriers required by membarrier between:
+ * - prior user-space memory accesses and store to rq->membarrier_state,
+ * - store to rq->membarrier_state and following user-space memory accesses.
+ * In the same way it provides those guarantees around store to rq->curr.
+ */
+static inline void membarrier_switch_mm(struct rq *rq,
+ struct mm_struct *prev_mm,
+ struct mm_struct *next_mm)
+{
+ int membarrier_state;
+
+ if (prev_mm == next_mm)
+ return;
+
+ membarrier_state = atomic_read(&next_mm->membarrier_state);
+ if (READ_ONCE(rq->membarrier_state) == membarrier_state)
+ return;
+
+ WRITE_ONCE(rq->membarrier_state, membarrier_state);
+}
+#else
+static inline void membarrier_switch_mm(struct rq *rq,
+ struct mm_struct *prev_mm,
+ struct mm_struct *next_mm)
+{
+}
+#endif
+
+#ifdef CONFIG_NUMA
+extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu);
+#else
+static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
+{
+ return nr_cpu_ids;
+}
+#endif
+
+extern void swake_up_all_locked(struct swait_queue_head *q);
+extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
+
+#ifdef CONFIG_PREEMPT_DYNAMIC
+extern int preempt_dynamic_mode;
+extern int sched_dynamic_mode(const char *str);
+extern void sched_dynamic_update(int mode);
+#endif
+
+static inline void nohz_run_idle_balance(int cpu) { }
+
+static inline
+unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util,
+ struct task_struct *p)
+{
+ return util;
+}
+
+static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; }
+
+#endif /* ALT_SCHED_H */
diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h
new file mode 100644
index 000000000000..66b77291b9d0
--- /dev/null
+++ b/kernel/sched/bmq.h
@@ -0,0 +1,110 @@
+#define ALT_SCHED_VERSION_MSG "sched/bmq: BMQ CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
+
+/*
+ * BMQ only routines
+ */
+#define rq_switch_time(rq) ((rq)->clock - (rq)->last_ts_switch)
+#define boost_threshold(p) (sched_timeslice_ns >>\
+ (15 - MAX_PRIORITY_ADJ - (p)->boost_prio))
+
+static inline void boost_task(struct task_struct *p)
+{
+ int limit;
+
+ switch (p->policy) {
+ case SCHED_NORMAL:
+ limit = -MAX_PRIORITY_ADJ;
+ break;
+ case SCHED_BATCH:
+ case SCHED_IDLE:
+ limit = 0;
+ break;
+ default:
+ return;
+ }
+
+ if (p->boost_prio > limit)
+ p->boost_prio--;
+}
+
+static inline void deboost_task(struct task_struct *p)
+{
+ if (p->boost_prio < MAX_PRIORITY_ADJ)
+ p->boost_prio++;
+}
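+
+/*
+ * boost_prio shifts a task by a few queues relative to its base priority:
+ * negative values boost it, positive values deboost it.  SCHED_NORMAL tasks
+ * may earn the full -MAX_PRIORITY_ADJ boost, SCHED_BATCH/SCHED_IDLE tasks are
+ * only allowed back up to 0, and deboosting is capped at +MAX_PRIORITY_ADJ
+ * for everyone.
+ */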
+
+/*
+ * Common interfaces
+ */
+static inline void sched_timeslice_imp(const int timeslice_ms) {}
+
+static inline int
+task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
+{
+ return p->prio + p->boost_prio - MAX_RT_PRIO;
+}
+
+static inline int task_sched_prio(const struct task_struct *p)
+{
+ return (p->prio < MAX_RT_PRIO)? p->prio : MAX_RT_PRIO / 2 + (p->prio + p->boost_prio) / 2;
+}
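+
+/*
+ * e.g. assuming MAX_RT_PRIO == 100, a nice-0 SCHED_NORMAL task (prio 120)
+ * with boost_prio 0 lands at 50 + (120 + 0) / 2 == 110, and a boost to
+ * boost_prio -1 moves it to 50 + 119 / 2 == 109, one queue closer to
+ * being picked.
+ */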
+
+static inline int
+task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
+{
+ return task_sched_prio(p);
+}
+
+static inline int sched_prio2idx(int prio, struct rq *rq)
+{
+ return prio;
+}
+
+static inline int sched_idx2prio(int idx, struct rq *rq)
+{
+ return idx;
+}
+
+static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
+{
+ p->time_slice = sched_timeslice_ns;
+
+ if (SCHED_FIFO != p->policy && task_on_rq_queued(p)) {
+ if (SCHED_RR != p->policy)
+ deboost_task(p);
+ requeue_task(p, rq, task_sched_prio_idx(p, rq));
+ }
+}
+
+static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq) {}
+
+inline int task_running_nice(struct task_struct *p)
+{
+ return (p->prio + p->boost_prio > DEFAULT_PRIO + MAX_PRIORITY_ADJ);
+}
+
+static void sched_task_fork(struct task_struct *p, struct rq *rq)
+{
+ p->boost_prio = MAX_PRIORITY_ADJ;
+}
+
+static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
+{
+ p->boost_prio = MAX_PRIORITY_ADJ;
+}
+
+#ifdef CONFIG_SMP
+static inline void sched_task_ttwu(struct task_struct *p)
+{
+	if (this_rq()->clock_task - p->last_ran > sched_timeslice_ns)
+ boost_task(p);
+}
+#endif
+
+static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq)
+{
+ if (rq_switch_time(rq) < boost_threshold(p))
+ boost_task(p);
+}
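+
+/*
+ * Interactivity heuristic: a task that goes back to sleep within
+ * boost_threshold() of the last switch-in gets boosted here, and a task that
+ * wakes up after being off the CPU for more than one timeslice gets boosted
+ * in sched_task_ttwu() above.
+ */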
+
+static inline void update_rq_time_edge(struct rq *rq) {}
diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c
index d9dc9ab3773f..71a25540d65e 100644
--- a/kernel/sched/build_policy.c
+++ b/kernel/sched/build_policy.c
@@ -42,13 +42,19 @@
#include "idle.c"
+#ifndef CONFIG_SCHED_ALT
#include "rt.c"
+#endif
#ifdef CONFIG_SMP
+#ifndef CONFIG_SCHED_ALT
# include "cpudeadline.c"
+#endif
# include "pelt.c"
#endif
#include "cputime.c"
-#include "deadline.c"
+#ifndef CONFIG_SCHED_ALT
+#include "deadline.c"
+#endif
diff --git a/kernel/sched/build_utility.c b/kernel/sched/build_utility.c
index 99bdd96f454f..23f80a86d2d7 100644
--- a/kernel/sched/build_utility.c
+++ b/kernel/sched/build_utility.c
@@ -85,7 +85,9 @@
#ifdef CONFIG_SMP
# include "cpupri.c"
+#ifndef CONFIG_SCHED_ALT
# include "stop_task.c"
+#endif
# include "topology.c"
#endif
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 3dbf351d12d5..b2590f961139 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -160,9 +160,14 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu)
unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu);
sg_cpu->max = max;
+#ifndef CONFIG_SCHED_ALT
sg_cpu->bw_dl = cpu_bw_dl(rq);
sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu), max,
FREQUENCY_UTIL, NULL);
+#else
+ sg_cpu->bw_dl = 0;
+ sg_cpu->util = rq_load_util(rq, max);
+#endif /* CONFIG_SCHED_ALT */
}
/**
@@ -306,8 +311,10 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
*/
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu)
{
+#ifndef CONFIG_SCHED_ALT
if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
sg_cpu->sg_policy->limits_changed = true;
+#endif
}
static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu,
@@ -607,6 +614,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
}
ret = sched_setattr_nocheck(thread, &attr);
+
if (ret) {
kthread_stop(thread);
pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
@@ -839,7 +847,9 @@ cpufreq_governor_init(schedutil_gov);
#ifdef CONFIG_ENERGY_MODEL
static void rebuild_sd_workfn(struct work_struct *work)
{
+#ifndef CONFIG_SCHED_ALT
rebuild_sched_domains_energy();
+#endif /* CONFIG_SCHED_ALT */
}
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 78a233d43757..b3bbc87d4352 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -122,7 +122,7 @@ void account_user_time(struct task_struct *p, u64 cputime)
p->utime += cputime;
account_group_user_time(p, cputime);
- index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
+ index = task_running_nice(p) ? CPUTIME_NICE : CPUTIME_USER;
/* Add user time to cpustat. */
task_group_account_field(p, index, cputime);
@@ -146,7 +146,7 @@ void account_guest_time(struct task_struct *p, u64 cputime)
p->gtime += cputime;
/* Add guest time to cpustat. */
- if (task_nice(p) > 0) {
+ if (task_running_nice(p)) {
task_group_account_field(p, CPUTIME_NICE, cputime);
cpustat[CPUTIME_GUEST_NICE] += cputime;
} else {
@@ -269,7 +269,7 @@ static inline u64 account_other_time(u64 max)
#ifdef CONFIG_64BIT
static inline u64 read_sum_exec_runtime(struct task_struct *t)
{
- return t->se.sum_exec_runtime;
+ return tsk_seruntime(t);
}
#else
static u64 read_sum_exec_runtime(struct task_struct *t)
@@ -279,7 +279,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t)
struct rq *rq;
rq = task_rq_lock(t, &rf);
- ns = t->se.sum_exec_runtime;
+ ns = tsk_seruntime(t);
task_rq_unlock(rq, t, &rf);
return ns;
@@ -611,7 +611,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev,
void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
{
struct task_cputime cputime = {
- .sum_exec_runtime = p->se.sum_exec_runtime,
+ .sum_exec_runtime = tsk_seruntime(p),
};
if (task_cputime(p, &cputime.utime, &cputime.stime))
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index bb3d63bdf4ae..4e1680785704 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -7,6 +7,7 @@
* Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
*/
+#ifndef CONFIG_SCHED_ALT
/*
* This allows printing both to /proc/sched_debug and
* to the console
@@ -215,6 +216,7 @@ static const struct file_operations sched_scaling_fops = {
};
#endif /* SMP */
+#endif /* !CONFIG_SCHED_ALT */
#ifdef CONFIG_PREEMPT_DYNAMIC
@@ -278,6 +280,7 @@ static const struct file_operations sched_dynamic_fops = {
#endif /* CONFIG_PREEMPT_DYNAMIC */
+#ifndef CONFIG_SCHED_ALT
__read_mostly bool sched_debug_verbose;
static const struct seq_operations sched_debug_sops;
@@ -293,6 +296,7 @@ static const struct file_operations sched_debug_fops = {
.llseek = seq_lseek,
.release = seq_release,
};
+#endif /* !CONFIG_SCHED_ALT */
static struct dentry *debugfs_sched;
@@ -302,12 +306,15 @@ static __init int sched_init_debug(void)
debugfs_sched = debugfs_create_dir("sched", NULL);
+#ifndef CONFIG_SCHED_ALT
debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops);
debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose);
+#endif /* !CONFIG_SCHED_ALT */
#ifdef CONFIG_PREEMPT_DYNAMIC
debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops);
#endif
+#ifndef CONFIG_SCHED_ALT
debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency);
debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity);
debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity);
@@ -336,11 +343,13 @@ static __init int sched_init_debug(void)
#endif
debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops);
+#endif /* !CONFIG_SCHED_ALT */
return 0;
}
late_initcall(sched_init_debug);
+#ifndef CONFIG_SCHED_ALT
#ifdef CONFIG_SMP
static cpumask_var_t sd_sysctl_cpus;
@@ -1067,6 +1076,7 @@ void proc_sched_set_task(struct task_struct *p)
memset(&p->stats, 0, sizeof(p->stats));
#endif
}
+#endif /* !CONFIG_SCHED_ALT */
void resched_latency_warn(int cpu, u64 latency)
{
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 328cccbee444..aef991facc79 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -400,6 +400,7 @@ void cpu_startup_entry(enum cpuhp_state state)
do_idle();
}
+#ifndef CONFIG_SCHED_ALT
/*
* idle-task scheduling class.
*/
@@ -521,3 +522,4 @@ DEFINE_SCHED_CLASS(idle) = {
.switched_to = switched_to_idle,
.update_curr = update_curr_idle,
};
+#endif
diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h
new file mode 100644
index 000000000000..56a649d02e49
--- /dev/null
+++ b/kernel/sched/pds.h
@@ -0,0 +1,127 @@
+#define ALT_SCHED_VERSION_MSG "sched/pds: PDS CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n"
+
+static int sched_timeslice_shift = 22;
+
+#define NORMAL_PRIO_MOD(x) ((x) & (NORMAL_PRIO_NUM - 1))
+
+/*
+ * Common interfaces
+ */
+static inline void sched_timeslice_imp(const int timeslice_ms)
+{
+ if (2 == timeslice_ms)
+ sched_timeslice_shift = 21;
+}
+
+static inline int
+task_sched_prio_normal(const struct task_struct *p, const struct rq *rq)
+{
+ s64 delta = p->deadline - rq->time_edge + NORMAL_PRIO_NUM - NICE_WIDTH;
+
+ if (WARN_ONCE(delta > NORMAL_PRIO_NUM - 1,
+ "pds: task_sched_prio_normal() delta %lld\n", delta))
+ return NORMAL_PRIO_NUM - 1;
+
+ return (delta < 0) ? 0 : delta;
+}
+
+static inline int task_sched_prio(const struct task_struct *p)
+{
+ return (p->prio < MAX_RT_PRIO) ? p->prio :
+ MIN_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p));
+}
+
+static inline int
+task_sched_prio_idx(const struct task_struct *p, const struct rq *rq)
+{
+ return (p->prio < MAX_RT_PRIO) ? p->prio : MIN_NORMAL_PRIO +
+ NORMAL_PRIO_MOD(task_sched_prio_normal(p, rq) + rq->time_edge);
+}
+
+static inline int sched_prio2idx(int prio, struct rq *rq)
+{
+ return (IDLE_TASK_SCHED_PRIO == prio || prio < MAX_RT_PRIO) ? prio :
+ MIN_NORMAL_PRIO + NORMAL_PRIO_MOD((prio - MIN_NORMAL_PRIO) +
+ rq->time_edge);
+}
+
+static inline int sched_idx2prio(int idx, struct rq *rq)
+{
+ return (idx < MAX_RT_PRIO) ? idx : MIN_NORMAL_PRIO +
+ NORMAL_PRIO_MOD((idx - MIN_NORMAL_PRIO) + NORMAL_PRIO_NUM -
+ NORMAL_PRIO_MOD(rq->time_edge));
+}
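+
+/*
+ * Worked example, assuming MIN_NORMAL_PRIO == 128 and NORMAL_PRIO_NUM == 64:
+ * with rq->time_edge == 5, a normal task with priority delta 3 (prio 131) is
+ * queued at idx 128 + (3 + 5) % 64 == 136, and sched_idx2prio(136, rq) maps
+ * back to 128 + (8 + 64 - 5) % 64 == 131, so prio and idx stay a consistent
+ * pair while the time edge rotates the queue indexes.
+ */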
+
+static inline void sched_renew_deadline(struct task_struct *p, const struct rq *rq)
+{
+ if (p->prio >= MAX_RT_PRIO)
+ p->deadline = (rq->clock >> sched_timeslice_shift) +
+ p->static_prio - (MAX_PRIO - NICE_WIDTH);
+}
+
+int task_running_nice(struct task_struct *p)
+{
+ return (p->prio > DEFAULT_PRIO);
+}
+
+static inline void update_rq_time_edge(struct rq *rq)
+{
+ struct list_head head;
+ u64 old = rq->time_edge;
+ u64 now = rq->clock >> sched_timeslice_shift;
+ u64 prio, delta;
+
+ if (now == old)
+ return;
+
+ delta = min_t(u64, NORMAL_PRIO_NUM, now - old);
+ INIT_LIST_HEAD(&head);
+
+ for_each_set_bit(prio, &rq->queue.bitmap[2], delta)
+ list_splice_tail_init(rq->queue.heads + MIN_NORMAL_PRIO +
+ NORMAL_PRIO_MOD(prio + old), &head);
+
+ rq->queue.bitmap[2] = (NORMAL_PRIO_NUM == delta) ? 0UL :
+ rq->queue.bitmap[2] >> delta;
+ rq->time_edge = now;
+ if (!list_empty(&head)) {
+ u64 idx = MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(now);
+ struct task_struct *p;
+
+ list_for_each_entry(p, &head, sq_node)
+ p->sq_idx = idx;
+
+ list_splice(&head, rq->queue.heads + idx);
+ rq->queue.bitmap[2] |= 1UL;
+ }
+}
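+
+/*
+ * When the time edge advances, the slots the edge has passed over are spliced
+ * onto the slot that now represents the current edge, and the corresponding
+ * bits in queue.bitmap[2] (the bitmap word that, on 64-bit, covers the
+ * normal-priority levels) are shifted down, so the deadline wheel rotates
+ * without touching any task's deadline.
+ */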
+
+static inline void time_slice_expired(struct task_struct *p, struct rq *rq)
+{
+ p->time_slice = sched_timeslice_ns;
+ sched_renew_deadline(p, rq);
+ if (SCHED_FIFO != p->policy && task_on_rq_queued(p))
+ requeue_task(p, rq, task_sched_prio_idx(p, rq));
+}
+
+static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq)
+{
+ u64 max_dl = rq->time_edge + NICE_WIDTH - 1;
+ if (unlikely(p->deadline > max_dl))
+ p->deadline = max_dl;
+}
+
+static void sched_task_fork(struct task_struct *p, struct rq *rq)
+{
+ sched_renew_deadline(p, rq);
+}
+
+static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq)
+{
+ time_slice_expired(p, rq);
+}
+
+#ifdef CONFIG_SMP
+static inline void sched_task_ttwu(struct task_struct *p) {}
+#endif
+static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq) {}
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 0f310768260c..bd38bf738fe9 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -266,6 +266,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
WRITE_ONCE(sa->util_avg, sa->util_sum / divider);
}
+#ifndef CONFIG_SCHED_ALT
/*
* sched_entity:
*
@@ -383,8 +384,9 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
return 0;
}
+#endif
-#ifdef CONFIG_SCHED_THERMAL_PRESSURE
+#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
/*
* thermal:
*
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index 4ff2ed4f8fa1..226eeed61318 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -1,13 +1,15 @@
#ifdef CONFIG_SMP
#include "sched-pelt.h"
+#ifndef CONFIG_SCHED_ALT
int __update_load_avg_blocked_se(u64 now, struct sched_entity *se);
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se);
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq);
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running);
int update_dl_rq_load_avg(u64 now, struct rq *rq, int running);
+#endif
-#ifdef CONFIG_SCHED_THERMAL_PRESSURE
+#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT)
int update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity);
static inline u64 thermal_load_avg(struct rq *rq)
@@ -44,6 +46,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg)
return PELT_MIN_DIVIDER + avg->period_contrib;
}
+#ifndef CONFIG_SCHED_ALT
static inline void cfs_se_util_change(struct sched_avg *avg)
{
unsigned int enqueued;
@@ -155,9 +158,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq)
return rq_clock_pelt(rq_of(cfs_rq));
}
#endif
+#endif /* CONFIG_SCHED_ALT */
#else
+#ifndef CONFIG_SCHED_ALT
static inline int
update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
{
@@ -175,6 +180,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running)
{
return 0;
}
+#endif
static inline int
update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 47b89a0fc6e5..de2641a32c22 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -5,6 +5,10 @@
#ifndef _KERNEL_SCHED_SCHED_H
#define _KERNEL_SCHED_SCHED_H
+#ifdef CONFIG_SCHED_ALT
+#include "alt_sched.h"
+#else
+
#include <linux/sched/affinity.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/cpufreq.h>
@@ -3116,4 +3120,9 @@ extern int sched_dynamic_mode(const char *str);
extern void sched_dynamic_update(int mode);
#endif
+static inline int task_running_nice(struct task_struct *p)
+{
+ return (task_nice(p) > 0);
+}
+#endif /* !CONFIG_SCHED_ALT */
#endif /* _KERNEL_SCHED_SCHED_H */
diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
index 857f837f52cb..5486c63e4790 100644
--- a/kernel/sched/stats.c
+++ b/kernel/sched/stats.c
@@ -125,8 +125,10 @@ static int show_schedstat(struct seq_file *seq, void *v)
} else {
struct rq *rq;
#ifdef CONFIG_SMP
+#ifndef CONFIG_SCHED_ALT
struct sched_domain *sd;
int dcount = 0;
+#endif
#endif
cpu = (unsigned long)(v - 2);
rq = cpu_rq(cpu);
@@ -143,6 +145,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
seq_printf(seq, "\n");
#ifdef CONFIG_SMP
+#ifndef CONFIG_SCHED_ALT
/* domain-specific stats */
rcu_read_lock();
for_each_domain(cpu, sd) {
@@ -171,6 +174,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
sd->ttwu_move_balance);
}
rcu_read_unlock();
+#endif
#endif
}
return 0;
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index baa839c1ba96..15238be0581b 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -89,6 +89,7 @@ static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delt
#endif /* CONFIG_SCHEDSTATS */
+#ifndef CONFIG_SCHED_ALT
#ifdef CONFIG_FAIR_GROUP_SCHED
struct sched_entity_stats {
struct sched_entity se;
@@ -105,6 +106,7 @@ __schedstats_from_se(struct sched_entity *se)
#endif
return &task_of(se)->stats;
}
+#endif /* CONFIG_SCHED_ALT */
#ifdef CONFIG_PSI
/*
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 05b6c2ad90b9..480ef393b3c9 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -3,6 +3,7 @@
* Scheduler topology setup/handling methods
*/
+#ifndef CONFIG_SCHED_ALT
DEFINE_MUTEX(sched_domains_mutex);
/* Protected by sched_domains_mutex: */
@@ -1413,8 +1414,10 @@ static void asym_cpu_capacity_scan(void)
*/
static int default_relax_domain_level = -1;
+#endif /* CONFIG_SCHED_ALT */
int sched_domain_level_max;
+#ifndef CONFIG_SCHED_ALT
static int __init setup_relax_domain_level(char *str)
{
if (kstrtoint(str, 0, &default_relax_domain_level))
@@ -1647,6 +1650,7 @@ sd_init(struct sched_domain_topology_level *tl,
return sd;
}
+#endif /* CONFIG_SCHED_ALT */
/*
* Topology list, bottom-up.
@@ -1683,6 +1687,7 @@ void set_sched_topology(struct sched_domain_topology_level *tl)
sched_domain_topology_saved = NULL;
}
+#ifndef CONFIG_SCHED_ALT
#ifdef CONFIG_NUMA
static const struct cpumask *sd_numa_mask(int cpu)
@@ -2638,3 +2643,15 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
partition_sched_domains_locked(ndoms_new, doms_new, dattr_new);
mutex_unlock(&sched_domains_mutex);
}
+#else /* CONFIG_SCHED_ALT */
+void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
+ struct sched_domain_attr *dattr_new)
+{}
+
+#ifdef CONFIG_NUMA
+int sched_numa_find_closest(const struct cpumask *cpus, int cpu)
+{
+ return best_mask_cpu(cpu, cpus);
+}
+#endif /* CONFIG_NUMA */
+#endif
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 35d034219513..23719c728677 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -86,6 +86,10 @@
/* Constants used for minimum and maximum */
+#ifdef CONFIG_SCHED_ALT
+extern int sched_yield_type;
+#endif
+
#ifdef CONFIG_PERF_EVENTS
static const int six_hundred_forty_kb = 640 * 1024;
#endif
@@ -1590,6 +1594,7 @@ int proc_do_static_key(struct ctl_table *table, int write,
}
static struct ctl_table kern_table[] = {
+#ifndef CONFIG_SCHED_ALT
#ifdef CONFIG_NUMA_BALANCING
{
.procname = "numa_balancing",
@@ -1601,6 +1606,7 @@ static struct ctl_table kern_table[] = {
.extra2 = SYSCTL_FOUR,
},
#endif /* CONFIG_NUMA_BALANCING */
+#endif /* !CONFIG_SCHED_ALT */
{
.procname = "panic",
.data = &panic_timeout,
@@ -1902,6 +1908,17 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec,
},
#endif
+#ifdef CONFIG_SCHED_ALT
+ {
+ .procname = "yield_type",
+ .data = &sched_yield_type,
+ .maxlen = sizeof (int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_TWO,
+ },
+#endif
#if defined(CONFIG_S390) && defined(CONFIG_SMP)
{
.procname = "spin_retry",
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 0ea8702eb516..a27a0f3a654d 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -2088,8 +2088,10 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
int ret = 0;
u64 slack;
+#ifndef CONFIG_SCHED_ALT
slack = current->timer_slack_ns;
if (dl_task(current) || rt_task(current))
+#endif
slack = 0;
hrtimer_init_sleeper_on_stack(&t, clockid, mode);
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index cb925e8ef9a8..67d823510f5c 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -223,7 +223,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples)
u64 stime, utime;
task_cputime(p, &utime, &stime);
- store_samples(samples, stime, utime, p->se.sum_exec_runtime);
+ store_samples(samples, stime, utime, tsk_seruntime(p));
}
static void proc_sample_cputime_atomic(struct task_cputime_atomic *at,
@@ -866,6 +866,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples,
}
}
+#ifndef CONFIG_SCHED_ALT
static inline void check_dl_overrun(struct task_struct *tsk)
{
if (tsk->dl.dl_overrun) {
@@ -873,6 +874,7 @@ static inline void check_dl_overrun(struct task_struct *tsk)
send_signal_locked(SIGXCPU, SEND_SIG_PRIV, tsk, PIDTYPE_TGID);
}
}
+#endif
static bool check_rlimit(u64 time, u64 limit, int signo, bool rt, bool hard)
{
@@ -900,8 +902,10 @@ static void check_thread_timers(struct task_struct *tsk,
u64 samples[CPUCLOCK_MAX];
unsigned long soft;
+#ifndef CONFIG_SCHED_ALT
if (dl_task(tsk))
check_dl_overrun(tsk);
+#endif
if (expiry_cache_is_inactive(pct))
return;
@@ -915,7 +919,7 @@ static void check_thread_timers(struct task_struct *tsk,
soft = task_rlimit(tsk, RLIMIT_RTTIME);
if (soft != RLIM_INFINITY) {
/* Task RT timeout is accounted in jiffies. RTTIME is usec */
- unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ);
+ unsigned long rttime = tsk_rttimeout(tsk) * (USEC_PER_SEC / HZ);
unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME);
/* At the hard limit, send SIGKILL. No further action. */
@@ -1151,8 +1155,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk)
return true;
}
+#ifndef CONFIG_SCHED_ALT
if (dl_task(tsk) && tsk->dl.dl_overrun)
return true;
+#endif
return false;
}
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index a2d301f58ced..2ccdede8585c 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -1143,10 +1143,15 @@ static int trace_wakeup_test_thread(void *data)
{
/* Make this a -deadline thread */
static const struct sched_attr attr = {
+#ifdef CONFIG_SCHED_ALT
+ /* No deadline on BMQ/PDS, use RR */
+ .sched_policy = SCHED_RR,
+#else
.sched_policy = SCHED_DEADLINE,
.sched_runtime = 100000ULL,
.sched_deadline = 10000000ULL,
.sched_period = 10000000ULL
+#endif
};
struct wakeup_test_data *x = data;