diff --git a/sys-kernel/pinephone-pro-sources/Manifest b/sys-kernel/pinephone-pro-sources/Manifest
new file mode 100644
index 0000000..3a33b67
--- /dev/null
+++ b/sys-kernel/pinephone-pro-sources/Manifest
@@ -0,0 +1 @@
+DIST orange-pi-5.19-20220802-0940.tar.gz 214990340 BLAKE2B 9bbadd06a8d160d716838d709f7ca6adb6143cb2205337940fb2d4607f0b806400cc77fb4abd36856844536b0a4ced92737658fc7af60d10f141a21116d66eed SHA512 04d46f6065a138d3b206937fada3990f823a1937c14812bada6512d04ebf1c7634cdea0a57611066bd2b4951a38c8e354b187bffe2ca738f2fe2a3f50d922dc2
diff --git a/sys-kernel/pinephone-pro-sources/files/0101-arm64-dts-pinephone-drop-modem-power-node.patch b/sys-kernel/pinephone-pro-sources/files/0101-arm64-dts-pinephone-drop-modem-power-node.patch
new file mode 100644
index 0000000..b90eced
--- /dev/null
+++ b/sys-kernel/pinephone-pro-sources/files/0101-arm64-dts-pinephone-drop-modem-power-node.patch
@@ -0,0 +1,175 @@
+From 602d05e416ae0d0fba3022fa2c3d195164b406c6 Mon Sep 17 00:00:00 2001
+From: Clayton Craft
+Date: Wed, 16 Dec 2020 20:16:14 -0800
+Subject: [PATCH] dts: pinephone: drop modem-power node
+
+---
+ .../allwinner/sun50i-a64-pinephone-1.0.dts | 26 +++----------
+ .../allwinner/sun50i-a64-pinephone-1.1.dts | 27 +++----------------
+ .../allwinner/sun50i-a64-pinephone-1.2.dts | 27 +++----------------
+ .../dts/allwinner/sun50i-a64-pinephone.dtsi | 12 +++++++++
+ 4 files changed, 24 insertions(+), 68 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.0.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.0.dts
+index a21c6d78a..7f0cfdafe 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.0.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.0.dts
+@@ -86,28 +86,6 @@ &reg_drivevbus {
+ status = "okay";
+ };
+
+-&uart3 {
+- modem {
+- compatible = "quectel,eg25";
+- char-device-name = "modem-power";
+-
+- power-supply = <&reg_vbat_bb>; /* PL7 */
+-
+- enable-gpios = <&pio 7 8 GPIO_ACTIVE_LOW>; /* PH8 */
+- reset-gpios = <&pio 2 4 GPIO_ACTIVE_HIGH>; /* PC4 */
+- pwrkey-gpios = <&pio 1 3 GPIO_ACTIVE_HIGH>; /* PB3 */
+-
+- sleep-gpios = <&pio 7 7 GPIO_ACTIVE_HIGH>; /* PH7 */
+- wakeup-gpios = <&pio 1 2 GPIO_ACTIVE_HIGH>; /* PB2-RI */
+-
+- cts-gpios = <&pio 3 5 GPIO_ACTIVE_HIGH>; /* PD5-CTS */
+- dtr-gpios = <&r_pio 0 6 GPIO_ACTIVE_HIGH>; /* PL6-DTR */
+- rts-gpios = <&pio 3 4 GPIO_ACTIVE_HIGH>; /* PD4-RTS */
+-
+- quectel,qdai = "1,1,0,1,0,0,1,1";
+- };
+-};
+-
+ &usbphy {
+ usb-role-switch;
+
+@@ -118,6 +96,10 @@ usb0_drd_sw: endpoint {
+ };
+ };
+
++&ring_indicator {
++ gpios = <&pio 1 2 GPIO_ACTIVE_LOW>; /* PB2 */
++};
++
+ &sgm3140 {
+ enable-gpios = <&pio 2 3 GPIO_ACTIVE_HIGH>; /* PC3 */
+ flash-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* PD24 */
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts
+index 61ff56b17..5e85ddc12 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.1.dts
+@@ -109,34 +109,15 @@ &reg_drivevbus {
+ status = "okay";
+ };
+
++&ring_indicator {
++ gpios = <&pio 1 2 GPIO_ACTIVE_LOW>; /* PB2 */
++};
++
+ &sgm3140 {
+ enable-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* PD24 */
+ flash-gpios = <&pio 2 3 GPIO_ACTIVE_HIGH>; /* PC3 */
+ };
+
+-&uart3 {
+- modem {
+- compatible = "quectel,eg25";
+- char-device-name = "modem-power";
+-
+- power-supply = <&reg_vbat_bb>; /* PL7 */
+-
+- enable-gpios = <&pio 7 8 GPIO_ACTIVE_LOW>; /* PH8 */
+- reset-gpios = <&pio 2 4 GPIO_ACTIVE_HIGH>; /* PC4 */
+- pwrkey-gpios = <&pio 1 3 GPIO_ACTIVE_HIGH>; /* PB3 */
+- //status-pwrkey-multiplexed; /* status acts as pwrkey */
+-
+- sleep-gpios = <&pio 7 7 GPIO_ACTIVE_HIGH>; /* PH7 */
+- wakeup-gpios = <&pio 1 2 GPIO_ACTIVE_HIGH>; /* PB2-RI */
+-
+- dtr-gpios = <&r_pio 0 6 GPIO_ACTIVE_HIGH>; /* PL6-DTR */
+- cts-gpios = <&pio 3 5 GPIO_ACTIVE_HIGH>; /* PD5-CTS */
+- rts-gpios = <&pio 3 4 GPIO_ACTIVE_HIGH>; /* PD4-RTS */
+-
+- quectel,qdai = "1,1,0,1,0,0,1,1";
+- };
+-};
+-
+ &usbphy {
+ usb-role-switch;
+
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.2.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.2.dts
+index fe7d567a8..f4b9b0991 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.2.dts
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone-1.2.dts
+@@ -101,34 +101,15 @@ &reg_anx1v0 {
+ enable-active-high;
+ };
+
++&ring_indicator {
++ gpios = <&r_pio 0 6 GPIO_ACTIVE_LOW>; /* PL6 */
++};
++
+ &sgm3140 {
+ enable-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>; /* PD24 */
+ flash-gpios = <&pio 2 3 GPIO_ACTIVE_HIGH>; /* PC3 */
+ };
+
+-&uart3 {
+- modem {
+- compatible = "quectel,eg25";
+- char-device-name = "modem-power";
+-
+- power-supply = <&reg_vbat_bb>; /* PL7 */
+-
+- enable-gpios = <&pio 7 8 GPIO_ACTIVE_LOW>; /* PH8 */
+- reset-gpios = <&pio 2 4 GPIO_ACTIVE_HIGH>; /* PC4 */
+- status-gpios = <&pio 7 9 GPIO_ACTIVE_HIGH>; /* PH9 */
+- pwrkey-gpios = <&pio 1 3 GPIO_ACTIVE_HIGH>; /* PB3 */
+-
+- host-ready-gpios = <&pio 7 7 GPIO_ACTIVE_HIGH>; /* PH7 */
+- wakeup-gpios = <&r_pio 0 6 GPIO_ACTIVE_HIGH>; /* PL6-RI */
+-
+- dtr-gpios = <&pio 1 2 GPIO_ACTIVE_HIGH>; /* PB2-DTR */
+- cts-gpios = <&pio 3 5 GPIO_ACTIVE_HIGH>; /* PD5-CTS */
+- rts-gpios = <&pio 3 4 GPIO_ACTIVE_HIGH>; /* PD4-RTS */
+-
+- quectel,qdai = "1,1,0,1,0,0,1,1";
+- };
+-};
+-
+ &usbphy {
+ usb-role-switch;
+
+diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
+index 346113382..7b48126d1 100644
+--- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
++++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pinephone.dtsi
+@@ -192,6 +192,17 @@ ec25_codec: ec25-codec {
+ sound-name-prefix = "Modem";
+ };
+
++ gpio-keys {
++ compatible = "gpio-keys";
++
++ ring_indicator: ring-indicator {
++ label = "Ring Indicator";
++ linux,can-disable;
++ linux,code = ;
++ wakeup-source;
++ };
++ };
++
+ i2c_csi: i2c-csi {
+ compatible = "i2c-gpio";
+ sda-gpios = <&pio 4 13 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; /* PE13 */
+@@ -264,6 +275,7 @@ reg_usb_5v: usb-5v {
+ reg_vbat_bb: vbat-bb {
+ compatible = "regulator-fixed";
+ regulator-name = "vbat-bb";
++ regulator-always-on;
+ regulator-min-microvolt = <3500000>;
+ regulator-max-microvolt = <3500000>;
+ gpio = <&r_pio 0 7 GPIO_ACTIVE_HIGH>; /* PL7 */
+--
+2.31.1
+
diff --git a/sys-kernel/pinephone-pro-sources/files/0102-arm64-dts-pinephone-pro-remove-modem-node.patch b/sys-kernel/pinephone-pro-sources/files/0102-arm64-dts-pinephone-pro-remove-modem-node.patch
new file mode 100644
index 0000000..24be3b4
--- /dev/null
+++ b/sys-kernel/pinephone-pro-sources/files/0102-arm64-dts-pinephone-pro-remove-modem-node.patch
@@ -0,0 +1,86 @@
+From 60d8aedea7c8c390ee744730ab3e565ea84496fb Mon Sep 17 00:00:00 2001
+From: Danct12
+Date: Fri, 10 Dec 2021 23:01:34 +0700
+Subject: [PATCH] arm64: dts: rk3399-pinephone-pro: Remove modem node
+
+Since we don't use the modem-power driver, this can be removed
+in favor of eg25-manager.
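
For orientation: with the in-kernel modem node gone, the EG25 control GPIOs
are driven entirely from userspace (eg25-manager), which is also why the
vcc_4g regulators in this patch are marked always-on. A rough sketch of what
such a userspace power-key pulse looks like with the libgpiod v1 API follows;
this is not part of the patch set, and the chip name and line offset are
placeholders rather than the real PinePhone Pro mapping:

    /* Illustrative only: pulse the modem PWRKEY line from userspace.
     * "gpiochip0" and line offset 5 are hypothetical placeholders. */
    #include <gpiod.h>
    #include <unistd.h>

    int main(void)
    {
            struct gpiod_chip *chip = gpiod_chip_open_by_name("gpiochip0");
            struct gpiod_line *line;

            if (!chip)
                    return 1;

            line = gpiod_chip_get_line(chip, 5);
            if (!line || gpiod_line_request_output(line, "modem-pwrkey", 0) < 0) {
                    gpiod_chip_close(chip);
                    return 1;
            }

            gpiod_line_set_value(line, 1);  /* assert PWRKEY */
            usleep(600000);                 /* Quectel EG25 expects a >500 ms pulse */
            gpiod_line_set_value(line, 0);

            gpiod_line_release(line);
            gpiod_chip_close(chip);
            return 0;
    }
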
+---
+ .../dts/rockchip/rk3399-pinephone-pro.dts | 40 +------
+ 1 file changed, 2 insertions(+), 38 deletions(-)
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
+index 61c990764..13141c643 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
+@@ -326,6 +326,7 @@ vcc_4g_5v: vcc-4g-5v {
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ vin-supply = <&vcc5v0_sys>;
++ regulator-always-on;
+ };
+
+ vcc_4g: vcc-4g {
+@@ -338,6 +339,7 @@ vcc_4g: vcc-4g {
+ regulator-min-microvolt = <3800000>;
+ regulator-max-microvolt = <3800000>;
+ vin-supply = <&vcc_sysin>;
++ regulator-always-on;
+ };
+
+ vcc1v8_codec: vcc1v8-codec-regulator {
+@@ -1058,31 +1060,6 @@ mipi_in_panel: endpoint {
+
+ &uart3 {
+ status = "okay";
+-
+- modem {
+- compatible = "quectel,eg25";
+- char-device-name = "modem-power";
+-
+- pinctrl-names = "default";
+- pinctrl-0 = <&modem_control_pins>;
+-
+- power-supply = <&vcc_4g>;
+- vbus-supply = <&vcc_4g_5v>;
+-
+- enable-gpios = <&gpio0 RK_PB0 GPIO_ACTIVE_HIGH>; // W_DISABLE#
+- reset-gpios = <&gpio3 RK_PB0 GPIO_ACTIVE_HIGH>;
+- status-gpios = <&gpio3 RK_PA6 GPIO_ACTIVE_HIGH>;
+- pwrkey-gpios = <&gpio0 RK_PB5 GPIO_ACTIVE_HIGH>;
+-
+- host-ready-gpios = <&gpio0 RK_PB4 GPIO_ACTIVE_HIGH>; // apready
+- wakeup-gpios = <&gpio0 RK_PA1 GPIO_ACTIVE_HIGH>; // ri
+-
+- dtr-gpios = <&gpio0 RK_PA3 GPIO_ACTIVE_HIGH>;
+- cts-gpios = <&gpio3 RK_PC0 GPIO_ACTIVE_HIGH>;
+- rts-gpios = <&gpio3 RK_PC1 GPIO_ACTIVE_HIGH>;
+-
+- quectel,qdai = "3,0,0,4,0,0,1,1";
+- };
+ };
+
+ &pmu_io_domains {
+@@ -1153,19 +1130,6 @@ vcc_4g_5v_en: vcc-4g-5v-en-pin {
+ vcc_4g_en: vcc-4g-en-pin {
+ rockchip,pins = <4 RK_PC7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
+-
+- modem_control_pins: modem-control-pins {
+- rockchip,pins =
+- <0 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>,
+- <3 RK_PB0 RK_FUNC_GPIO &pcfg_pull_none>,
+- <3 RK_PA6 RK_FUNC_GPIO &pcfg_pull_none>,
+- <0 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>,
+- <0 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>,
+- <0 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>,
+- <0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>,
+- <3 RK_PC0 RK_FUNC_GPIO &pcfg_pull_none>,
+- <3 RK_PC1 RK_FUNC_GPIO &pcfg_pull_none>;
+- };
+ };
+
+ pmic {
+--
+2.34.1
+
diff --git a/sys-kernel/pinephone-pro-sources/files/0103-arm64-dts-rk3399-pinephone-pro-add-modem-RI-pin.patch b/sys-kernel/pinephone-pro-sources/files/0103-arm64-dts-rk3399-pinephone-pro-add-modem-RI-pin.patch
new file mode 100644
index 0000000..ebb70a7
--- /dev/null
+++ b/sys-kernel/pinephone-pro-sources/files/0103-arm64-dts-rk3399-pinephone-pro-add-modem-RI-pin.patch
@@ -0,0 +1,48 @@
+From: Arnaud Ferraris
+Date: Wed, 8 Dec 2021 23:43:08 +0100
+Subject: arm64: dts: rk3399-pinephone-pro: add modem RI pin
+
+That way the modem can wake the phone on incoming calls/messages.
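
The node added below registers as an ordinary gpio-keys input device, so the
RI wakeup can be sanity-checked from userspace with a trivial event reader.
A minimal sketch (the /dev/input/event0 path is a placeholder; the correct
node has to be looked up via /proc/bus/input/devices):

    /* Illustrative only: watch for ring-indicator key events. */
    #include <fcntl.h>
    #include <linux/input.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            struct input_event ev;
            int fd = open("/dev/input/event0", O_RDONLY); /* placeholder path */

            if (fd < 0)
                    return 1;

            while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
                    if (ev.type == EV_KEY && ev.value == 1)
                            printf("key %u pressed (modem RI)\n", ev.code);
            }

            close(fd);
            return 0;
    }
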
+
+Signed-off-by: Arnaud Ferraris
+---
+ arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts | 19 +++++++++++++++++++
+ 1 file changed, 19 insertions(+)
+
+diff --git a/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts b/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
+index de07518..91687f6 100644
+--- a/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3399-pinephone-pro.dts
+@@ -160,6 +160,21 @@ power {
+ };
+ };
+
++ gpio-key-ri {
++ compatible = "gpio-keys";
++ pinctrl-names = "default";
++ pinctrl-0 = <&ri_pin>;
++
++ ring_indicator: ring-indicator {
++ label = "ring-indicator";
++ linux,can-disable;
++ linux,code = ;
++ gpios = <&gpio0 RK_PA1 GPIO_ACTIVE_LOW>;
++ wakeup-event-action = ;
++ wakeup-source;
++ };
++ };
++
+ // in1 - digital mic daughhterboard
+ // in2 - headset mic
+ // in3 - modem output (muxed with mono)
+@@ -1127,6 +1142,10 @@ flash_pins: flash-pins {
+ };
+
+ modem {
++ ri_pin: ri-pin {
++ rockchip,pins = <0 RK_PA1 RK_FUNC_GPIO &pcfg_pull_up>;
++ };
++
+ vcc_4g_5v_en: vcc-4g-5v-en-pin {
+ rockchip,pins = <1 RK_PC7 RK_FUNC_GPIO &pcfg_pull_none>;
+ };
diff --git a/sys-kernel/pinephone-pro-sources/files/0104-rk818_charger-use-type-battery-again.patch b/sys-kernel/pinephone-pro-sources/files/0104-rk818_charger-use-type-battery-again.patch
new file mode 100644
index 0000000..74ed979
--- /dev/null
+++ b/sys-kernel/pinephone-pro-sources/files/0104-rk818_charger-use-type-battery-again.patch
@@ -0,0 +1,11 @@
+--- a/drivers/power/supply/rk818_charger.c 2022-01-28 17:51:57.000000000 +0100
++++ b/drivers/power/supply/rk818_charger.c 2022-02-02 15:06:51.303222817 +0100
+@@ -522,7 +522,7 @@ static enum power_supply_property rk818_
+ */
+ static const struct power_supply_desc rk818_charger_desc = {
+ .name = "rk818-charger",
+- .type = POWER_SUPPLY_TYPE_MAINS,
++ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = rk818_charger_props,
+ .num_properties = ARRAY_SIZE(rk818_charger_props),
+ .property_is_writeable = rk818_charger_prop_writeable,
diff --git a/sys-kernel/pinephone-pro-sources/files/0201-revert-fbcon-remove-now-unusued-softback_lines-cursor-argument.patch b/sys-kernel/pinephone-pro-sources/files/0201-revert-fbcon-remove-now-unusued-softback_lines-cursor-argument.patch
new file mode 100644
index 0000000..e7d4da5
--- /dev/null
+++ b/sys-kernel/pinephone-pro-sources/files/0201-revert-fbcon-remove-now-unusued-softback_lines-cursor-argument.patch
@@ -0,0 +1,150 @@
+--- b/drivers/video/fbdev/core/bitblit.c
++++ a/drivers/video/fbdev/core/bitblit.c
+@@ -234,7 +234,7 @@
+ }
+
+ static void bit_cursor(struct vc_data *vc, struct fb_info *info, int mode,
++ int softback_lines, int fg, int bg)
+- int fg, int bg)
+ {
+ struct fb_cursor cursor;
+ struct fbcon_ops *ops = info->fbcon_par;
+@@ -247,6 +247,15 @@
+
+ cursor.set = 0;
+
++ if (softback_lines) {
++ if (y + softback_lines >= vc->vc_rows) {
++ mode = CM_ERASE;
++ ops->cursor_flash = 0;
++ return;
++ } else
++ y += softback_lines;
++ }
++
+ c = scr_readw((u16 *) vc->vc_pos);
+ attribute = get_attribute(info, c);
+ src = vc->vc_font.data + ((c & charmask) * (w * vc->vc_font.height));
+--- b/drivers/video/fbdev/core/fbcon.c
++++ a/drivers/video/fbdev/core/fbcon.c
+@@ -394,7 +394,7 @@
+ c = scr_readw((u16 *) vc->vc_pos);
+ mode = (!ops->cursor_flash || ops->cursor_state.enable) ?
+ CM_ERASE : CM_DRAW;
++ ops->cursor(vc, info, mode, 0, get_color(vc, info, c, 1),
+- ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
+ get_color(vc, info, c, 0));
+ console_unlock();
+ }
+@@ -1345,7 +1345,7 @@
+
+ ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
+
++ ops->cursor(vc, info, mode, 0, get_color(vc, info, c, 1),
+- ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
+ get_color(vc, info, c, 0));
+ }
+
+--- b/drivers/video/fbdev/core/fbcon.h
++++ a/drivers/video/fbdev/core/fbcon.h
+@@ -62,7 +62,7 @@
+ void (*clear_margins)(struct vc_data *vc, struct fb_info *info,
+ int color, int bottom_only);
+ void (*cursor)(struct vc_data *vc, struct fb_info *info, int mode,
++ int softback_lines, int fg, int bg);
+- int fg, int bg);
+ int (*update_start)(struct fb_info *info);
+ int (*rotate_font)(struct fb_info *info, struct vc_data *vc);
+ struct fb_var_screeninfo var; /* copy of the current fb_var_screeninfo */
+--- b/drivers/video/fbdev/core/fbcon_ccw.c
++++ a/drivers/video/fbdev/core/fbcon_ccw.c
+@@ -219,7 +219,7 @@
+ }
+
+ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
++ int softback_lines, int fg, int bg)
+- int fg, int bg)
+ {
+ struct fb_cursor cursor;
+ struct fbcon_ops *ops = info->fbcon_par;
+@@ -236,6 +236,15 @@
+
+ cursor.set = 0;
+
++ if (softback_lines) {
++ if (y + softback_lines >= vc->vc_rows) {
++ mode = CM_ERASE;
++ ops->cursor_flash = 0;
++ return;
++ } else
++ y += softback_lines;
++ }
++
+ c = scr_readw((u16 *) vc->vc_pos);
+ attribute = get_attribute(info, c);
+ src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width));
+--- b/drivers/video/fbdev/core/fbcon_cw.c
++++ a/drivers/video/fbdev/core/fbcon_cw.c
+@@ -202,7 +202,7 @@
+ }
+
+ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
++ int softback_lines, int fg, int bg)
+- int fg, int bg)
+ {
+ struct fb_cursor cursor;
+ struct fbcon_ops *ops = info->fbcon_par;
+@@ -219,6 +219,15 @@
+
+ cursor.set = 0;
+
++ if (softback_lines) {
++ if (y + softback_lines >= vc->vc_rows) {
++ mode = CM_ERASE;
++ ops->cursor_flash = 0;
++ return;
++ } else
++ y += softback_lines;
++ }
++
+ c = scr_readw((u16 *) vc->vc_pos);
+ attribute = get_attribute(info, c);
+ src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width));
+--- b/drivers/video/fbdev/core/fbcon_ud.c
++++ a/drivers/video/fbdev/core/fbcon_ud.c
+@@ -249,7 +249,7 @@
+ }
+
+ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode,
++ int softback_lines, int fg, int bg)
+- int fg, int bg)
+ {
+ struct fb_cursor cursor;
+ struct fbcon_ops *ops = info->fbcon_par;
+@@ -267,6 +267,15 @@
+
+ cursor.set = 0;
+
++ if (softback_lines) {
++ if (y + softback_lines >= vc->vc_rows) {
++ mode = CM_ERASE;
++ ops->cursor_flash = 0;
++ return;
++ } else
++ y += softback_lines;
++ }
++
+ c = scr_readw((u16 *) vc->vc_pos);
+ attribute = get_attribute(info, c);
+ src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.height));
+--- b/drivers/video/fbdev/core/tileblit.c
++++ a/drivers/video/fbdev/core/tileblit.c
+@@ -80,7 +80,7 @@
+ }
+
+ static void tile_cursor(struct vc_data *vc, struct fb_info *info, int mode,
++ int softback_lines, int fg, int bg)
+- int fg, int bg)
+ {
+ struct fb_tilecursor cursor;
+ int use_sw = (vc->vc_cursor_type & 0x10);
diff --git a/sys-kernel/pinephone-pro-sources/files/0202-revert-fbcon-remove-no-op-fbcon_set_origin.patch b/sys-kernel/pinephone-pro-sources/files/0202-revert-fbcon-remove-no-op-fbcon_set_origin.patch
new file mode 100644
index 0000000..6491c54
--- /dev/null
+++ b/sys-kernel/pinephone-pro-sources/files/0202-revert-fbcon-remove-no-op-fbcon_set_origin.patch
@@ -0,0 +1,31 @@
+--- b/drivers/video/fbdev/core/fbcon.c
++++ a/drivers/video/fbdev/core/fbcon.c
+@@ -163,6 +163,8 @@
+
+ #define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row)
+
++static int fbcon_set_origin(struct vc_data *);
++
+ static int fbcon_cursor_noblink;
+
+ #define divides(a, b) ((!(a) || (b)%(a)) ? 0 : 1)
+@@ -2633,6 +2635,11 @@
+ }
+ }
+
++static int fbcon_set_origin(struct vc_data *vc)
++{
++ return 0;
++}
++
+ void fbcon_suspended(struct fb_info *info)
+ {
+ struct vc_data *vc = NULL;
+@@ -3103,6 +3110,7 @@
+ .con_font_default = fbcon_set_def_font,
+ .con_font_copy = fbcon_copy_font,
+ .con_set_palette = fbcon_set_palette,
++ .con_set_origin = fbcon_set_origin,
+ .con_invert_region = fbcon_invert_region,
+ .con_screen_pos = fbcon_screen_pos,
+ .con_getxy = fbcon_getxy,
diff --git a/sys-kernel/pinephone-pro-sources/files/0203-revert-fbcon-remove-soft-scrollback-code.patch b/sys-kernel/pinephone-pro-sources/files/0203-revert-fbcon-remove-soft-scrollback-code.patch
new file mode 100644
index 0000000..4f97354
--- /dev/null
+++ b/sys-kernel/pinephone-pro-sources/files/0203-revert-fbcon-remove-soft-scrollback-code.patch
@@ -0,0 +1,500 @@
+--- b/drivers/video/fbdev/core/fbcon.c
++++ a/drivers/video/fbdev/core/fbcon.c
+@@ -122,6 +122,12 @@
+ /* logo_shown is an index to vc_cons when >= 0; otherwise follows FBCON_LOGO
+ enums. */
+ static int logo_shown = FBCON_LOGO_CANSHOW;
++/* Software scrollback */
++static int fbcon_softback_size = 32768;
++static unsigned long softback_buf, softback_curr;
++static unsigned long softback_in;
++static unsigned long softback_top, softback_end;
++static int softback_lines;
+ /* console mappings */
+ static int first_fb_vc;
+ static int last_fb_vc = MAX_NR_CONSOLES - 1;
+@@ -161,6 +167,8 @@
+
+ static const struct consw fb_con;
+
++#define CM_SOFTBACK (8)
++
+ #define advance_row(p, delta) (unsigned short *)((unsigned long)(p) + (delta) * vc->vc_size_row)
+
+ static int fbcon_set_origin(struct vc_data *);
+@@ -365,6 +373,18 @@
+ return color;
+ }
+
++static void fbcon_update_softback(struct vc_data *vc)
++{
++ int l = fbcon_softback_size / vc->vc_size_row;
++
++ if (l > 5)
++ softback_end = softback_buf + l * vc->vc_size_row;
++ else
++ /* Smaller scrollback makes no sense, and 0 would screw
++ the operation totally */
++ softback_top = 0;
++}
++
+ static void fb_flashcursor(struct work_struct *work)
+ {
+ struct fb_info *info = container_of(work, struct fb_info, queue);
+@@ -394,7 +414,7 @@
+ c = scr_readw((u16 *) vc->vc_pos);
+ mode = (!ops->cursor_flash || ops->cursor_state.enable) ?
+ CM_ERASE : CM_DRAW;
++ ops->cursor(vc, info, mode, softback_lines, get_color(vc, info, c, 1),
+- ops->cursor(vc, info, mode, 0, get_color(vc, info, c, 1),
+ get_color(vc, info, c, 0));
+ console_unlock();
+ }
+@@ -451,7 +471,13 @@
+ }
+
+ if (!strncmp(options, "scrollback:", 11)) {
++ options += 11;
++ if (*options) {
++ fbcon_softback_size = simple_strtoul(options, &options, 0);
++ if (*options == 'k' || *options == 'K') {
++ fbcon_softback_size *= 1024;
++ }
++ }
+- pr_warn("Ignoring scrollback size option\n");
+ continue;
+ }
+
+@@ -996,6 +1022,31 @@
+
+ set_blitting_type(vc, info);
+
++ if (info->fix.type != FB_TYPE_TEXT) {
++ if (fbcon_softback_size) {
++ if (!softback_buf) {
++ softback_buf =
++ (unsigned long)
++ kvmalloc(fbcon_softback_size,
++ GFP_KERNEL);
++ if (!softback_buf) {
++ fbcon_softback_size = 0;
++ softback_top = 0;
++ }
++ }
++ } else {
++ if (softback_buf) {
++ kvfree((void *) softback_buf);
++ softback_buf = 0;
++ softback_top = 0;
++ }
++ }
++ if (softback_buf)
++ softback_in = softback_top = softback_curr =
++ softback_buf;
++ softback_lines = 0;
++ }
++
+ /* Setup default font */
+ if (!p->fontdata && !vc->vc_font.data) {
+ if (!fontname[0] || !(font = find_font(fontname)))
+@@ -1169,6 +1220,9 @@
+ if (logo)
+ fbcon_prepare_logo(vc, info, cols, rows, new_cols, new_rows);
+
++ if (vc == svc && softback_buf)
++ fbcon_update_softback(vc);
++
+ if (ops->rotate_font && ops->rotate_font(info, vc)) {
+ ops->rotate = FB_ROTATE_UR;
+ set_blitting_type(vc, info);
+@@ -1331,6 +1385,7 @@
+ {
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
++ int y;
+ int c = scr_readw((u16 *) vc->vc_pos);
+
+ ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
+@@ -1334,11 +1389,19 @@ static void fbcon_cursor(struct vc_data
+ fbcon_add_cursor_timer(info);
+
+ ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
++ if (mode & CM_SOFTBACK) {
++ mode &= ~CM_SOFTBACK;
++ y = softback_lines;
++ } else {
++ if (softback_lines)
++ fbcon_set_origin(vc);
++ y = 0;
++ }
+
+ if (!ops->cursor)
+ return;
+
+- ops->cursor(vc, info, mode, 0, get_color(vc, info, c, 1),
++ ops->cursor(vc, info, mode, y, get_color(vc, info, c, 1),
+ get_color(vc, info, c, 0));
+ }
+
+@@ -1416,6 +1479,8 @@
+
+ if (con_is_visible(vc)) {
+ update_screen(vc);
++ if (softback_buf)
++ fbcon_update_softback(vc);
+ }
+ }
+
+@@ -1553,6 +1618,99 @@
+ scrollback_current = 0;
+ }
+
++static void fbcon_redraw_softback(struct vc_data *vc, struct fbcon_display *p,
++ long delta)
++{
++ int count = vc->vc_rows;
++ unsigned short *d, *s;
++ unsigned long n;
++ int line = 0;
++
++ d = (u16 *) softback_curr;
++ if (d == (u16 *) softback_in)
++ d = (u16 *) vc->vc_origin;
++ n = softback_curr + delta * vc->vc_size_row;
++ softback_lines -= delta;
++ if (delta < 0) {
++ if (softback_curr < softback_top && n < softback_buf) {
++ n += softback_end - softback_buf;
++ if (n < softback_top) {
++ softback_lines -=
++ (softback_top - n) / vc->vc_size_row;
++ n = softback_top;
++ }
++ } else if (softback_curr >= softback_top
++ && n < softback_top) {
++ softback_lines -=
++ (softback_top - n) / vc->vc_size_row;
++ n = softback_top;
++ }
++ } else {
++ if (softback_curr > softback_in && n >= softback_end) {
++ n += softback_buf - softback_end;
++ if (n > softback_in) {
++ n = softback_in;
++ softback_lines = 0;
++ }
++ } else if (softback_curr <= softback_in && n > softback_in) {
++ n = softback_in;
++ softback_lines = 0;
++ }
++ }
++ if (n == softback_curr)
++ return;
++ softback_curr = n;
++ s = (u16 *) softback_curr;
++ if (s == (u16 *) softback_in)
++ s = (u16 *) vc->vc_origin;
++ while (count--) {
++ unsigned short *start;
++ unsigned short *le;
++ unsigned short c;
++ int x = 0;
++ unsigned short attr = 1;
++
++ start = s;
++ le = advance_row(s, 1);
++ do {
++ c = scr_readw(s);
++ if (attr != (c & 0xff00)) {
++ attr = c & 0xff00;
++ if (s > start) {
++ fbcon_putcs(vc, start, s - start,
++ line, x);
++ x += s - start;
++ start = s;
++ }
++ }
++ if (c == scr_readw(d)) {
++ if (s > start) {
++ fbcon_putcs(vc, start, s - start,
++ line, x);
++ x += s - start + 1;
++ start = s + 1;
++ } else {
++ x++;
++ start++;
++ }
++ }
++ s++;
++ d++;
++ } while (s < le);
++ if (s > start)
++ fbcon_putcs(vc, start, s - start, line, x);
++ line++;
++ if (d == (u16 *) softback_end)
++ d = (u16 *) softback_buf;
++ if (d == (u16 *) softback_in)
++ d = (u16 *) vc->vc_origin;
++ if (s == (u16 *) softback_end)
++ s = (u16 *) softback_buf;
++ if (s == (u16 *) softback_in)
++ s = (u16 *) vc->vc_origin;
++ }
++}
++
+ static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p,
+ int line, int count, int dy)
+ {
+@@ -1692,6 +1850,31 @@
+ }
+ }
+
++static inline void fbcon_softback_note(struct vc_data *vc, int t,
++ int count)
++{
++ unsigned short *p;
++
++ if (vc->vc_num != fg_console)
++ return;
++ p = (unsigned short *) (vc->vc_origin + t * vc->vc_size_row);
++
++ while (count) {
++ scr_memcpyw((u16 *) softback_in, p, vc->vc_size_row);
++ count--;
++ p = advance_row(p, 1);
++ softback_in += vc->vc_size_row;
++ if (softback_in == softback_end)
++ softback_in = softback_buf;
++ if (softback_in == softback_top) {
++ softback_top += vc->vc_size_row;
++ if (softback_top == softback_end)
++ softback_top = softback_buf;
++ }
++ }
++ softback_curr = softback_in;
++}
++
+ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b,
+ enum con_scroll dir, unsigned int count)
+ {
+@@ -1714,6 +1897,8 @@
+ case SM_UP:
+ if (count > vc->vc_rows) /* Maximum realistic size */
+ count = vc->vc_rows;
++ if (softback_top)
++ fbcon_softback_note(vc, t, count);
+ if (logo_shown >= 0)
+ goto redraw_up;
+ switch (p->scrollmode) {
+@@ -2084,6 +2269,14 @@
+ info = registered_fb[con2fb_map[vc->vc_num]];
+ ops = info->fbcon_par;
+
++ if (softback_top) {
++ if (softback_lines)
++ fbcon_set_origin(vc);
++ softback_top = softback_curr = softback_in = softback_buf;
++ softback_lines = 0;
++ fbcon_update_softback(vc);
++ }
++
+ if (logo_shown >= 0) {
+ struct vc_data *conp2 = vc_cons[logo_shown].d;
+
+@@ -2407,6 +2600,9 @@
+ int cnt;
+ char *old_data = NULL;
+
++ if (con_is_visible(vc) && softback_lines)
++ fbcon_set_origin(vc);
++
+ resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
+ if (p->userfont)
+ old_data = vc->vc_font.data;
+@@ -2432,6 +2628,8 @@
+ cols /= w;
+ rows /= h;
+ vc_resize(vc, cols, rows);
++ if (con_is_visible(vc) && softback_buf)
++ fbcon_update_softback(vc);
+ } else if (con_is_visible(vc)
+ && vc->vc_mode == KD_TEXT) {
+ fbcon_clear_margins(vc, 0);
+@@ -2590,7 +2788,19 @@
+
+ static u16 *fbcon_screen_pos(struct vc_data *vc, int offset)
+ {
++ unsigned long p;
++ int line;
++
++ if (vc->vc_num != fg_console || !softback_lines)
++ return (u16 *) (vc->vc_origin + offset);
++ line = offset / vc->vc_size_row;
++ if (line >= softback_lines)
++ return (u16 *) (vc->vc_origin + offset -
++ softback_lines * vc->vc_size_row);
++ p = softback_curr + offset;
++ if (p >= softback_end)
++ p += softback_buf - softback_end;
++ return (u16 *) p;
+- return (u16 *) (vc->vc_origin + offset);
+ }
+
+ static unsigned long fbcon_getxy(struct vc_data *vc, unsigned long pos,
+@@ -2604,7 +2814,22 @@
+
+ x = offset % vc->vc_cols;
+ y = offset / vc->vc_cols;
++ if (vc->vc_num == fg_console)
++ y += softback_lines;
+ ret = pos + (vc->vc_cols - x) * 2;
++ } else if (vc->vc_num == fg_console && softback_lines) {
++ unsigned long offset = pos - softback_curr;
++
++ if (pos < softback_curr)
++ offset += softback_end - softback_buf;
++ offset /= 2;
++ x = offset % vc->vc_cols;
++ y = offset / vc->vc_cols;
++ ret = pos + (vc->vc_cols - x) * 2;
++ if (ret == softback_end)
++ ret = softback_buf;
++ if (ret == softback_in)
++ ret = vc->vc_origin;
+ } else {
+ /* Should not happen */
+ x = y = 0;
+@@ -2632,11 +2857,106 @@
+ a = ((a) & 0x88ff) | (((a) & 0x7000) >> 4) |
+ (((a) & 0x0700) << 4);
+ scr_writew(a, p++);
++ if (p == (u16 *) softback_end)
++ p = (u16 *) softback_buf;
++ if (p == (u16 *) softback_in)
++ p = (u16 *) vc->vc_origin;
++ }
++}
++
++static void fbcon_scrolldelta(struct vc_data *vc, int lines)
++{
++ struct fb_info *info = registered_fb[con2fb_map[fg_console]];
++ struct fbcon_ops *ops = info->fbcon_par;
++ struct fbcon_display *disp = &fb_display[fg_console];
++ int offset, limit, scrollback_old;
++
++ if (softback_top) {
++ if (vc->vc_num != fg_console)
++ return;
++ if (vc->vc_mode != KD_TEXT || !lines)
++ return;
++ if (logo_shown >= 0) {
++ struct vc_data *conp2 = vc_cons[logo_shown].d;
++
++ if (conp2->vc_top == logo_lines
++ && conp2->vc_bottom == conp2->vc_rows)
++ conp2->vc_top = 0;
++ if (logo_shown == vc->vc_num) {
++ unsigned long p, q;
++ int i;
++
++ p = softback_in;
++ q = vc->vc_origin +
++ logo_lines * vc->vc_size_row;
++ for (i = 0; i < logo_lines; i++) {
++ if (p == softback_top)
++ break;
++ if (p == softback_buf)
++ p = softback_end;
++ p -= vc->vc_size_row;
++ q -= vc->vc_size_row;
++ scr_memcpyw((u16 *) q, (u16 *) p,
++ vc->vc_size_row);
++ }
++ softback_in = softback_curr = p;
++ update_region(vc, vc->vc_origin,
++ logo_lines * vc->vc_cols);
++ }
++ logo_shown = FBCON_LOGO_CANSHOW;
++ }
++ fbcon_cursor(vc, CM_ERASE | CM_SOFTBACK);
++ fbcon_redraw_softback(vc, disp, lines);
++ fbcon_cursor(vc, CM_DRAW | CM_SOFTBACK);
++ return;
+ }
++
++ if (!scrollback_phys_max)
++ return;
++
++ scrollback_old = scrollback_current;
++ scrollback_current -= lines;
++ if (scrollback_current < 0)
++ scrollback_current = 0;
++ else if (scrollback_current > scrollback_max)
++ scrollback_current = scrollback_max;
++ if (scrollback_current == scrollback_old)
++ return;
++
++ if (fbcon_is_inactive(vc, info))
++ return;
++
++ fbcon_cursor(vc, CM_ERASE);
++
++ offset = disp->yscroll - scrollback_current;
++ limit = disp->vrows;
++ switch (disp->scrollmode) {
++ case SCROLL_WRAP_MOVE:
++ info->var.vmode |= FB_VMODE_YWRAP;
++ break;
++ case SCROLL_PAN_MOVE:
++ case SCROLL_PAN_REDRAW:
++ limit -= vc->vc_rows;
++ info->var.vmode &= ~FB_VMODE_YWRAP;
++ break;
++ }
++ if (offset < 0)
++ offset += limit;
++ else if (offset >= limit)
++ offset -= limit;
++
++ ops->var.xoffset = 0;
++ ops->var.yoffset = offset * vc->vc_font.height;
++ ops->update_start(info);
++
++ if (!scrollback_current)
++ fbcon_cursor(vc, CM_DRAW);
+ }
+
+ static int fbcon_set_origin(struct vc_data *vc)
+ {
++ if (softback_lines)
++ fbcon_scrolldelta(vc, softback_lines);
+ return 0;
+ }
+
+@@ -2700,6 +3020,8 @@
+
+ fbcon_set_palette(vc, color_table);
+ update_screen(vc);
++ if (softback_buf)
++ fbcon_update_softback(vc);
+ }
+ }
+
+@@ -3110,6 +3432,7 @@
+ .con_font_default = fbcon_set_def_font,
+ .con_font_copy = fbcon_copy_font,
+ .con_set_palette = fbcon_set_palette,
++ .con_scrolldelta = fbcon_scrolldelta,
+ .con_set_origin = fbcon_set_origin,
+ .con_invert_region = fbcon_invert_region,
+ .con_screen_pos = fbcon_screen_pos,
+@@ -3344,6 +3667,9 @@
+ }
+ #endif
+
++ kvfree((void *)softback_buf);
++ softback_buf = 0UL;
++
+ for_each_registered_fb(i) {
+ int pending = 0;
+
diff --git a/sys-kernel/pinephone-pro-sources/files/0301-bootsplash.patch b/sys-kernel/pinephone-pro-sources/files/0301-bootsplash.patch
new file mode 100644
index 0000000..d5835e5
--- /dev/null
+++ b/sys-kernel/pinephone-pro-sources/files/0301-bootsplash.patch
@@ -0,0 +1,746 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index a74227ad082e..b5633b56391e 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -2705,6 +2705,14 @@ S: Supported
+ F: drivers/net/bonding/
+ F: include/uapi/linux/if_bonding.h
+
++BOOTSPLASH
++M: Max Staudt
++L: linux-fbdev@vger.kernel.org
++S: Maintained
++F: drivers/video/fbdev/core/bootsplash*.*
++F: drivers/video/fbdev/core/dummycon.c
++F: include/linux/bootsplash.h
++
+ BPF (Safe dynamic programs and tools)
+ M: Alexei Starovoitov
+ M: Daniel Borkmann
+diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
+index 7f1f1fbcef9e..f3ff976266fe 100644
+--- a/drivers/video/console/Kconfig
++++ b/drivers/video/console/Kconfig
+@@ -151,6 +151,30 @@ config FRAMEBUFFER_CONSOLE_ROTATION
+ such that other users of the framebuffer will remain normally
+ oriented.
+
++config BOOTSPLASH
++ bool "Bootup splash screen"
++ depends on FRAMEBUFFER_CONSOLE
++ help
++ This option enables the Linux bootsplash screen.
++
++ The bootsplash is a full-screen logo or animation indicating a
++ booting system. It replaces the classic scrolling text with a
++ graphical alternative, similar to other systems.
++
++ Since this is technically implemented as a hook on top of fbcon,
++ it can only work if the FRAMEBUFFER_CONSOLE is enabled and a
++ framebuffer driver is active. Thus, to get a text-free boot,
++ the system needs to boot with vesafb, efifb, or similar.
++
++ Once built into the kernel, the bootsplash needs to be enabled
++ with bootsplash.enabled=1 and a splash file needs to be supplied.
++
++ Further documentation can be found in:
++ Documentation/fb/bootsplash.txt
++
++ If unsure, say N.
++ This is typically used by distributors and system integrators.
++
+ config STI_CONSOLE
+ bool "STI text console"
+ depends on PARISC
+diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile
+index 73493bbd7a15..66895321928e 100644
+--- a/drivers/video/fbdev/core/Makefile
++++ b/drivers/video/fbdev/core/Makefile
+@@ -29,3 +29,6 @@ obj-$(CONFIG_FB_SYS_IMAGEBLIT) += sysimgblt.o
+ obj-$(CONFIG_FB_SYS_FOPS) += fb_sys_fops.o
+ obj-$(CONFIG_FB_SVGALIB) += svgalib.o
+ obj-$(CONFIG_FB_DDC) += fb_ddc.o
++
++obj-$(CONFIG_BOOTSPLASH) += bootsplash.o bootsplash_render.o \
++ dummyblit.o
+diff --git a/drivers/video/fbdev/core/bootsplash.c b/drivers/video/fbdev/core/bootsplash.c
+new file mode 100644
+index 000000000000..e449755af268
+--- /dev/null
++++ b/drivers/video/fbdev/core/bootsplash.c
+@@ -0,0 +1,294 @@
++/*
++ * Kernel based bootsplash.
++ *
++ * (Main file: Glue code, workers, timer, PM, kernel and userland API)
++ *
++ * Authors:
++ * Max Staudt
++ *
++ * SPDX-License-Identifier: GPL-2.0
++ */
++
++#define pr_fmt(fmt) "bootsplash: " fmt
++
++
++#include
++#include
++#include
++#include /* dev_warn() */
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include /* console_blanked */
++#include
++#include
++#include
++#include
++#include
++
++#include "bootsplash_internal.h"
++
++
++/*
++ * We only have one splash screen, so let's keep a single
++ * instance of the internal state.
++ */
++static struct splash_priv splash_state;
++
++
++static void splash_callback_redraw_vc(struct work_struct *ignored)
++{
++ if (console_blanked)
++ return;
++
++ console_lock();
++ if (vc_cons[fg_console].d)
++ update_screen(vc_cons[fg_console].d);
++ console_unlock();
++}
++
++
++static bool is_fb_compatible(const struct fb_info *info)
++{
++ if (!(info->flags & FBINFO_BE_MATH)
++ != !fb_be_math((struct fb_info *)info)) {
++ dev_warn(info->device,
++ "Can't draw on foreign endianness framebuffer.\n");
++
++ return false;
++ }
++
++ if (info->flags & FBINFO_MISC_TILEBLITTING) {
++ dev_warn(info->device,
++ "Can't draw splash on tiling framebuffer.\n");
++
++ return false;
++ }
++
++ if (info->fix.type != FB_TYPE_PACKED_PIXELS
++ || (info->fix.visual != FB_VISUAL_TRUECOLOR
++ && info->fix.visual != FB_VISUAL_DIRECTCOLOR)) {
++ dev_warn(info->device,
++ "Can't draw splash on non-packed or non-truecolor framebuffer.\n");
++
++ dev_warn(info->device,
++ " type: %u visual: %u\n",
++ info->fix.type, info->fix.visual);
++
++ return false;
++ }
++
++ if (info->var.bits_per_pixel != 16
++ && info->var.bits_per_pixel != 24
++ && info->var.bits_per_pixel != 32) {
++ dev_warn(info->device,
++ "We only support drawing on framebuffers with 16, 24, or 32 bpp, not %d.\n",
++ info->var.bits_per_pixel);
++
++ return false;
++ }
++
++ return true;
++}
++
++
++/*
++ * Called by fbcon_switch() when an instance is activated or refreshed.
++ */
++void bootsplash_render_full(struct fb_info *info)
++{
++ if (!is_fb_compatible(info))
++ return;
++
++ bootsplash_do_render_background(info);
++}
++
++
++/*
++ * External status enquiry and on/off switch
++ */
++bool bootsplash_would_render_now(void)
++{
++ return !oops_in_progress
++ && !console_blanked
++ && bootsplash_is_enabled();
++}
++
++bool bootsplash_is_enabled(void)
++{
++ bool was_enabled;
++
++ /* Make sure we have the newest state */
++ smp_rmb();
++
++ was_enabled = test_bit(0, &splash_state.enabled);
++
++ return was_enabled;
++}
++
++void bootsplash_disable(void)
++{
++ int was_enabled;
++
++ was_enabled = test_and_clear_bit(0, &splash_state.enabled);
++
++ if (was_enabled) {
++ if (oops_in_progress) {
++ /* Redraw screen now so we can see a panic */
++ if (vc_cons[fg_console].d)
++ update_screen(vc_cons[fg_console].d);
++ } else {
++ /* No urgency, redraw at next opportunity */
++ schedule_work(&splash_state.work_redraw_vc);
++ }
++ }
++}
++
++void bootsplash_enable(void)
++{
++ bool was_enabled;
++
++ if (oops_in_progress)
++ return;
++
++ was_enabled = test_and_set_bit(0, &splash_state.enabled);
++
++ if (!was_enabled)
++ schedule_work(&splash_state.work_redraw_vc);
++}
++
++
++/*
++ * Userland API via platform device in sysfs
++ */
++static ssize_t splash_show_enabled(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ return sprintf(buf, "%d\n", bootsplash_is_enabled());
++}
++
++static ssize_t splash_store_enabled(struct device *device,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ bool enable;
++ int err;
++
++ if (!buf || !count)
++ return -EFAULT;
++
++ err = kstrtobool(buf, &enable);
++ if (err)
++ return err;
++
++ if (enable)
++ bootsplash_enable();
++ else
++ bootsplash_disable();
++
++ return count;
++}
++
++static DEVICE_ATTR(enabled, 0644, splash_show_enabled, splash_store_enabled);
++
++
++static struct attribute *splash_dev_attrs[] = {
++ &dev_attr_enabled.attr,
++ NULL
++};
++
++ATTRIBUTE_GROUPS(splash_dev);
++
++
++
++
++/*
++ * Power management fixup via platform device
++ *
++ * When the system is woken from sleep or restored after hibernating, we
++ * cannot expect the screen contents to still be present in video RAM.
++ * Thus, we have to redraw the splash if we're currently active.
++ */
++static int splash_resume(struct device *device)
++{
++ if (bootsplash_would_render_now())
++ schedule_work(&splash_state.work_redraw_vc);
++
++ return 0;
++}
++
++static int splash_suspend(struct device *device)
++{
++ cancel_work_sync(&splash_state.work_redraw_vc);
++
++ return 0;
++}
++
++
++static const struct dev_pm_ops splash_pm_ops = {
++ .thaw = splash_resume,
++ .restore = splash_resume,
++ .resume = splash_resume,
++ .suspend = splash_suspend,
++ .freeze = splash_suspend,
++};
++
++static struct platform_driver splash_driver = {
++ .driver = {
++ .name = "bootsplash",
++ .pm = &splash_pm_ops,
++ },
++};
++
++
++/*
++ * Main init
++ */
++void bootsplash_init(void)
++{
++ int ret;
++
++ /* Initialized already? */
++ if (splash_state.splash_device)
++ return;
++
++
++ /* Register platform device to export user API */
++ ret = platform_driver_register(&splash_driver);
++ if (ret) {
++ pr_err("platform_driver_register() failed: %d\n", ret);
++ goto err;
++ }
++
++ splash_state.splash_device
++ = platform_device_alloc("bootsplash", 0);
++
++ if (!splash_state.splash_device)
++ goto err_driver;
++
++ splash_state.splash_device->dev.groups = splash_dev_groups;
++
++ ret = platform_device_add(splash_state.splash_device);
++ if (ret) {
++ pr_err("platform_device_add() failed: %d\n", ret);
++ goto err_device;
++ }
++
++
++ INIT_WORK(&splash_state.work_redraw_vc, splash_callback_redraw_vc);
++
++ return;
++
++err_device:
++ platform_device_put(splash_state.splash_device);
++ splash_state.splash_device = NULL;
++err_driver:
++ platform_driver_unregister(&splash_driver);
++err:
++ pr_err("Failed to initialize.\n");
++}
+diff --git a/drivers/video/fbdev/core/bootsplash_internal.h b/drivers/video/fbdev/core/bootsplash_internal.h
+new file mode 100644
+index 000000000000..b11da5cb90bf
+--- /dev/null
++++ b/drivers/video/fbdev/core/bootsplash_internal.h
+@@ -0,0 +1,55 @@
++/*
++ * Kernel based bootsplash.
++ *
++ * (Internal data structures used at runtime)
++ *
++ * Authors:
++ * Max Staudt
++ *
++ * SPDX-License-Identifier: GPL-2.0
++ */
++
++#ifndef __BOOTSPLASH_INTERNAL_H
++#define __BOOTSPLASH_INTERNAL_H
++
++
++#include
++#include
++#include
++#include
++#include
++
++
++/*
++ * Runtime types
++ */
++struct splash_priv {
++ /*
++ * Enabled/disabled state, to be used with atomic bit operations.
++ * Bit 0: 0 = Splash hidden
++ * 1 = Splash shown
++ *
++ * Note: fbcon.c uses this twice, by calling
++ * bootsplash_would_render_now() in set_blitting_type() and
++ * in fbcon_switch().
++ * This is racy, but eventually consistent: Turning the
++ * splash on/off will cause a redraw, which calls
++ * fbcon_switch(), which calls set_blitting_type().
++ * So the last on/off toggle will make things consistent.
++ */
++ unsigned long enabled;
++
++ /* Our gateway to userland via sysfs */
++ struct platform_device *splash_device;
++
++ struct work_struct work_redraw_vc;
++};
++
++
++
++/*
++ * Rendering functions
++ */
++void bootsplash_do_render_background(struct fb_info *info);
++
++#endif
+diff --git a/drivers/video/fbdev/core/bootsplash_render.c b/drivers/video/fbdev/core/bootsplash_render.c
+new file mode 100644
+index 000000000000..4d7e0117f653
+--- /dev/null
++++ b/drivers/video/fbdev/core/bootsplash_render.c
+@@ -0,0 +1,93 @@
++/*
++ * Kernel based bootsplash.
++ *
++ * (Rendering functions)
++ *
++ * Authors:
++ * Max Staudt
++ *
++ * SPDX-License-Identifier: GPL-2.0
++ */
++
++#define pr_fmt(fmt) "bootsplash: " fmt
++
++
++#include
++#include
++#include
++#include
++#include
++
++#include "bootsplash_internal.h"
++
++
++
++
++/*
++ * Rendering: Internal drawing routines
++ */
++
++
++/*
++ * Pack pixel into target format and do Big/Little Endian handling.
++ * This would be a good place to handle endianness conversion if necessary.
++ */
++static inline u32 pack_pixel(const struct fb_var_screeninfo *dst_var,
++ u8 red, u8 green, u8 blue)
++{
++ u32 dstpix;
++
++ /* Quantize pixel */
++ red = red >> (8 - dst_var->red.length);
++ green = green >> (8 - dst_var->green.length);
++ blue = blue >> (8 - dst_var->blue.length);
++
++ /* Pack pixel */
++ dstpix = red << (dst_var->red.offset)
++ | green << (dst_var->green.offset)
++ | blue << (dst_var->blue.offset);
++
++ /*
++ * Move packed pixel to the beginning of the memory cell,
++ * so we can memcpy() it out easily
++ */
++#ifdef __BIG_ENDIAN
++ switch (dst_var->bits_per_pixel) {
++ case 16:
++ dstpix <<= 16;
++ break;
++ case 24:
++ dstpix <<= 8;
++ break;
++ case 32:
++ break;
++ }
++#else
++ /* This is intrinsically unnecessary on Little Endian */
++#endif
++
++ return dstpix;
++}
++
++
++void bootsplash_do_render_background(struct fb_info *info)
++{
++ unsigned int x, y;
++ u32 dstpix;
++ u32 dst_octpp = info->var.bits_per_pixel / 8;
++
++ dstpix = pack_pixel(&info->var,
++ 0,
++ 0,
++ 0);
++
++ for (y = 0; y < info->var.yres_virtual; y++) {
++ u8 *dstline = info->screen_buffer + (y * info->fix.line_length);
++
++ for (x = 0; x < info->var.xres_virtual; x++) {
++ memcpy(dstline, &dstpix, dst_octpp);
++
++ dstline += dst_octpp;
++ }
++ }
++}
+diff --git a/drivers/video/fbdev/core/dummyblit.c b/drivers/video/fbdev/core/dummyblit.c
+new file mode 100644
+index 000000000000..8c22ff92ce24
+--- /dev/null
++++ b/drivers/video/fbdev/core/dummyblit.c
+@@ -0,0 +1,89 @@
++/*
++ * linux/drivers/video/fbdev/core/dummyblit.c -- Dummy Blitting Operation
++ *
++ * Authors:
++ * Max Staudt
++ *
++ * These functions are used in place of blitblit/tileblit to suppress
++ * fbcon's text output while a splash is shown.
++ *
++ * Only suppressing actual rendering keeps the text buffer in the VC layer
++ * intact and makes it easy to switch back from the bootsplash to a full
++ * text console with a simple redraw (with the original functions in place).
++ *
++ * Based on linux/drivers/video/fbdev/core/bitblit.c
++ * and linux/drivers/video/fbdev/core/tileblit.c
++ *
++ * SPDX-License-Identifier: GPL-2.0
++ */
++
++#include
++#include
++#include
++#include
++#include
++#include "fbcon.h"
++
++static void dummy_bmove(struct vc_data *vc, struct fb_info *info, int sy,
++ int sx, int dy, int dx, int height, int width)
++{
++ ;
++}
++
++static void dummy_clear(struct vc_data *vc, struct fb_info *info, int sy,
++ int sx, int height, int width)
++{
++ ;
++}
++
++static void dummy_putcs(struct vc_data *vc, struct fb_info *info,
++ const unsigned short *s, int count, int yy, int xx,
++ int fg, int bg)
++{
++ ;
++}
++
++static void dummy_clear_margins(struct vc_data *vc, struct fb_info *info,
++ int color, int bottom_only)
++{
++ ;
++}
++
++static void dummy_cursor(struct vc_data *vc, struct fb_info *info, int mode,
++ int softback_lines, int fg, int bg)
++{
++ ;
++}
++
++static int dummy_update_start(struct fb_info *info)
++{
++ /*
++ * Copied from bitblit.c and tileblit.c
++ *
++ * As of Linux 4.12, nobody seems to care about our return value.
++ */
++ struct fbcon_ops *ops = info->fbcon_par;
++ int err;
++
++ err = fb_pan_display(info, &ops->var);
++ ops->var.xoffset = info->var.xoffset;
++ ops->var.yoffset = info->var.yoffset;
++ ops->var.vmode = info->var.vmode;
++ return err;
++}
++
++void fbcon_set_dummyops(struct fbcon_ops *ops)
++{
++ ops->bmove = dummy_bmove;
++ ops->clear = dummy_clear;
++ ops->putcs = dummy_putcs;
++ ops->clear_margins = dummy_clear_margins;
++ ops->cursor = dummy_cursor;
++ ops->update_start = dummy_update_start;
++ ops->rotate_font = NULL;
++}
++EXPORT_SYMBOL_GPL(fbcon_set_dummyops);
++
++MODULE_AUTHOR("Max Staudt ");
++MODULE_DESCRIPTION("Dummy Blitting Operation");
++MODULE_LICENSE("GPL");
+diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
+index 04612f938bab..9a39a6fcfe98 100644
+--- a/drivers/video/fbdev/core/fbcon.c
++++ b/drivers/video/fbdev/core/fbcon.c
+@@ -80,6 +80,7 @@
+ #include
+
+ #include "fbcon.h"
++#include
+
+ #ifdef FBCONDEBUG
+ # define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
+@@ -542,6 +543,8 @@ static int do_fbcon_takeover(int show_logo)
+ for (i = first_fb_vc; i <= last_fb_vc; i++)
+ con2fb_map[i] = info_idx;
+
++ bootsplash_init();
++
+ err = do_take_over_console(&fb_con, first_fb_vc, last_fb_vc,
+ fbcon_is_default);
+
+@@ -661,6 +664,9 @@ static void set_blitting_type(struct vc_data *vc, struct fb_info *info)
+ else {
+ fbcon_set_rotation(info);
+ fbcon_set_bitops(ops);
++
++ if (bootsplash_would_render_now())
++ fbcon_set_dummyops(ops);
+ }
+ }
+
+@@ -683,6 +689,19 @@ static void set_blitting_type(struct vc_data *vc, struct fb_info *info)
+ ops->p = &fb_display[vc->vc_num];
+ fbcon_set_rotation(info);
+ fbcon_set_bitops(ops);
++
++ /*
++ * Note:
++ * This is *eventually correct*.
++ * Setting the fbcon operations and drawing the splash happen at
++ * different points in time. If the splash is enabled/disabled
++ * in between, then bootsplash_{en,dis}able will schedule a
++ * redraw, which will again render the splash (or not) and set
++ * the correct fbcon ops.
++ * The last run will then be the right one.
++ */
++ if (bootsplash_would_render_now())
++ fbcon_set_dummyops(ops);
+ }
+
+ static int fbcon_invalid_charcount(struct fb_info *info, unsigned charcount)
+@@ -2184,6 +2203,9 @@ static int fbcon_switch(struct vc_data *vc)
+ info = registered_fb[con2fb_map[vc->vc_num]];
+ ops = info->fbcon_par;
+
++ if (bootsplash_would_render_now())
++ bootsplash_render_full(info);
++
+ if (softback_top) {
+ if (softback_lines)
+ fbcon_set_origin(vc);
+diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h
+index 18f3ac144237..45f94347fe5e 100644
+--- a/drivers/video/fbdev/core/fbcon.h
++++ b/drivers/video/fbdev/core/fbcon.h
+@@ -214,6 +214,11 @@ static inline int attr_col_ec(int shift, struct vc_data *vc,
+ #define SCROLL_REDRAW 0x004
+ #define SCROLL_PAN_REDRAW 0x005
+
++#ifdef CONFIG_BOOTSPLASH
++extern void fbcon_set_dummyops(struct fbcon_ops *ops);
++#else /* CONFIG_BOOTSPLASH */
++#define fbcon_set_dummyops(x)
++#endif /* CONFIG_BOOTSPLASH */
+ #ifdef CONFIG_FB_TILEBLITTING
+ extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info);
+ #endif
+diff --git a/include/linux/bootsplash.h b/include/linux/bootsplash.h
+new file mode 100644
+index 000000000000..c6dd0b43180d
+--- /dev/null
++++ b/include/linux/bootsplash.h
+@@ -0,0 +1,43 @@
++/*
++ * Kernel based bootsplash.
++ *
++ * Authors:
++ * Max Staudt
++ *
++ * SPDX-License-Identifier: GPL-2.0
++ */
++
++#ifndef __LINUX_BOOTSPLASH_H
++#define __LINUX_BOOTSPLASH_H
++
++#include
++
++
++#ifdef CONFIG_BOOTSPLASH
++
++extern void bootsplash_render_full(struct fb_info *info);
++
++extern bool bootsplash_would_render_now(void);
++
++extern bool bootsplash_is_enabled(void);
++extern void bootsplash_disable(void);
++extern void bootsplash_enable(void);
++
++extern void bootsplash_init(void);
++
++#else /* CONFIG_BOOTSPLASH */
++
++#define bootsplash_render_full(x)
++
++#define bootsplash_would_render_now() (false)
++
++#define bootsplash_is_enabled() (false)
++#define bootsplash_disable()
++#define bootsplash_enable()
++
++#define bootsplash_init()
++
++#endif /* CONFIG_BOOTSPLASH */
++
++
++#endif
diff --git a/sys-kernel/pinephone-pro-sources/files/0302-bootsplash.patch b/sys-kernel/pinephone-pro-sources/files/0302-bootsplash.patch
new file mode 100644
index 0000000..92d62ca
--- /dev/null
+++ b/sys-kernel/pinephone-pro-sources/files/0302-bootsplash.patch
@@ -0,0 +1,669 @@
+diff --git a/MAINTAINERS b/MAINTAINERS
+index b5633b56391e..5c237445761e 100644
+--- a/MAINTAINERS
++++ b/MAINTAINERS
+@@ -2712,6 +2712,7 @@ S: Maintained
+ F: drivers/video/fbdev/core/bootsplash*.*
+ F: drivers/video/fbdev/core/dummycon.c
+ F: include/linux/bootsplash.h
++F: include/uapi/linux/bootsplash_file.h
+
+ BPF (Safe dynamic programs and tools)
+ M: Alexei Starovoitov
+diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile
+index 66895321928e..6a8d1bab8a01 100644
+--- a/drivers/video/fbdev/core/Makefile
++++ b/drivers/video/fbdev/core/Makefile
+@@ -31,4 +31,4 @@ obj-$(CONFIG_FB_SVGALIB) += svgalib.o
+ obj-$(CONFIG_FB_DDC) += fb_ddc.o
+
+ obj-$(CONFIG_BOOTSPLASH) += bootsplash.o bootsplash_render.o \
+- dummyblit.o
++ bootsplash_load.o dummyblit.o
+diff --git a/drivers/video/fbdev/core/bootsplash.c b/drivers/video/fbdev/core/bootsplash.c
+index e449755af268..843c5400fefc 100644
+--- a/drivers/video/fbdev/core/bootsplash.c
++++ b/drivers/video/fbdev/core/bootsplash.c
+@@ -32,6 +32,7 @@
+ #include
+
+ #include "bootsplash_internal.h"
++#include "uapi/linux/bootsplash_file.h"
+
+
+ /*
+@@ -102,10 +103,17 @@ static bool is_fb_compatible(const struct fb_info *info)
+ */
+ void bootsplash_render_full(struct fb_info *info)
+ {
++ mutex_lock(&splash_state.data_lock);
++
+ if (!is_fb_compatible(info))
+- return;
++ goto out;
++
++ bootsplash_do_render_background(info, splash_state.file);
++
++ bootsplash_do_render_pictures(info, splash_state.file);
+
+- bootsplash_do_render_background(info);
++out:
++ mutex_unlock(&splash_state.data_lock);
+ }
+
+
+@@ -116,6 +124,7 @@ bool bootsplash_would_render_now(void)
+ {
+ return !oops_in_progress
+ && !console_blanked
++ && splash_state.file
+ && bootsplash_is_enabled();
+ }
+
+@@ -252,6 +261,7 @@ static struct platform_driver splash_driver = {
+ void bootsplash_init(void)
+ {
+ int ret;
++ struct splash_file_priv *fp;
+
+ /* Initialized already? */
+ if (splash_state.splash_device)
+@@ -280,8 +290,26 @@ void bootsplash_init(void)
+ }
+
+
++ mutex_init(&splash_state.data_lock);
++ set_bit(0, &splash_state.enabled);
++
+ INIT_WORK(&splash_state.work_redraw_vc, splash_callback_redraw_vc);
+
++
++ if (!splash_state.bootfile || !strlen(splash_state.bootfile))
++ return;
++
++ fp = bootsplash_load_firmware(&splash_state.splash_device->dev,
++ splash_state.bootfile);
++
++ if (!fp)
++ goto err;
++
++ mutex_lock(&splash_state.data_lock);
++ splash_state.splash_fb = NULL;
++ splash_state.file = fp;
++ mutex_unlock(&splash_state.data_lock);
++
+ return;
+
+ err_device:
+@@ -292,3 +320,7 @@ void bootsplash_init(void)
+ err:
+ pr_err("Failed to initialize.\n");
+ }
++
++
++module_param_named(bootfile, splash_state.bootfile, charp, 0444);
++MODULE_PARM_DESC(bootfile, "Bootsplash file to load on boot");
+diff --git a/drivers/video/fbdev/core/bootsplash_internal.h b/drivers/video/fbdev/core/bootsplash_internal.h
+index b11da5cb90bf..71e2a27ac0b8 100644
+--- a/drivers/video/fbdev/core/bootsplash_internal.h
++++ b/drivers/video/fbdev/core/bootsplash_internal.h
+@@ -15,15 +15,43 @@
+
+ #include
+ #include
++#include
+ #include
+ #include
+ #include
+
++#include "uapi/linux/bootsplash_file.h"
++
+
+ /*
+ * Runtime types
+ */
++struct splash_blob_priv {
++ struct splash_blob_header *blob_header;
++ const void *data;
++};
++
++
++struct splash_pic_priv {
++ const struct splash_pic_header *pic_header;
++
++ struct splash_blob_priv *blobs;
++ u16 blobs_loaded;
++};
++
++
++struct splash_file_priv {
++ const struct firmware *fw;
++ const struct splash_file_header *header;
++
++ struct splash_pic_priv *pics;
++};
++
++
+ struct splash_priv {
++ /* Bootup and runtime state */
++ char *bootfile;
++
+ /*
+ * Enabled/disabled state, to be used with atomic bit operations.
+ * Bit 0: 0 = Splash hidden
+@@ -43,6 +71,13 @@ struct splash_priv {
+ struct platform_device *splash_device;
+
+ struct work_struct work_redraw_vc;
++
++ /* Splash data structures including lock for everything below */
++ struct mutex data_lock;
++
++ struct fb_info *splash_fb;
++
++ struct splash_file_priv *file;
+ };
+
+
+@@ -50,6 +85,14 @@ struct splash_priv {
+ /*
+ * Rendering functions
+ */
+-void bootsplash_do_render_background(struct fb_info *info);
++void bootsplash_do_render_background(struct fb_info *info,
++ const struct splash_file_priv *fp);
++void bootsplash_do_render_pictures(struct fb_info *info,
++ const struct splash_file_priv *fp);
++
++
++void bootsplash_free_file(struct splash_file_priv *fp);
++struct splash_file_priv *bootsplash_load_firmware(struct device *device,
++ const char *path);
+
+ #endif
+diff --git a/drivers/video/fbdev/core/bootsplash_load.c b/drivers/video/fbdev/core/bootsplash_load.c
+new file mode 100644
+index 000000000000..fd807571ab7d
+--- /dev/null
++++ b/drivers/video/fbdev/core/bootsplash_load.c
+@@ -0,0 +1,225 @@
++/*
++ * Kernel based bootsplash.
++ *
++ * (Loading and freeing functions)
++ *
++ * Authors:
++ * Max Staudt
++ *
++ * SPDX-License-Identifier: GPL-2.0
++ */
++
++#define pr_fmt(fmt) "bootsplash: " fmt
++
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++#include "bootsplash_internal.h"
++#include "uapi/linux/bootsplash_file.h"
++
++
++
++
++/*
++ * Free all vmalloc()'d resources describing a splash file.
++ */
++void bootsplash_free_file(struct splash_file_priv *fp)
++{
++ if (!fp)
++ return;
++
++ if (fp->pics) {
++ unsigned int i;
++
++ for (i = 0; i < fp->header->num_pics; i++) {
++ struct splash_pic_priv *pp = &fp->pics[i];
++
++ if (pp->blobs)
++ vfree(pp->blobs);
++ }
++
++ vfree(fp->pics);
++ }
++
++ release_firmware(fp->fw);
++ vfree(fp);
++}
++
++
++
++
++/*
++ * Load a splash screen from a "firmware" file.
++ *
++ * Parsing, and sanity checks.
++ */
++#ifdef __BIG_ENDIAN
++ #define BOOTSPLASH_MAGIC BOOTSPLASH_MAGIC_BE
++#else
++ #define BOOTSPLASH_MAGIC BOOTSPLASH_MAGIC_LE
++#endif
++
++struct splash_file_priv *bootsplash_load_firmware(struct device *device,
++ const char *path)
++{
++ const struct firmware *fw;
++ struct splash_file_priv *fp;
++ unsigned int i;
++ const u8 *walker;
++
++ if (request_firmware(&fw, path, device))
++ return NULL;
++
++ if (fw->size < sizeof(struct splash_file_header)
++ || memcmp(fw->data, BOOTSPLASH_MAGIC, sizeof(fp->header->id))) {
++ pr_err("Not a bootsplash file.\n");
++
++ release_firmware(fw);
++ return NULL;
++ }
++
++ fp = vzalloc(sizeof(struct splash_file_priv));
++ if (!fp) {
++ release_firmware(fw);
++ return NULL;
++ }
++
++ pr_info("Loading splash file (%li bytes)\n", fw->size);
++
++ fp->fw = fw;
++ fp->header = (struct splash_file_header *)fw->data;
++
++ /* Sanity checks */
++ if (fp->header->version != BOOTSPLASH_VERSION) {
++ pr_err("Loaded v%d file, but we only support version %d\n",
++ fp->header->version,
++ BOOTSPLASH_VERSION);
++
++ goto err;
++ }
++
++ if (fw->size < sizeof(struct splash_file_header)
++ + fp->header->num_pics
++ * sizeof(struct splash_pic_header)
++ + fp->header->num_blobs
++ * sizeof(struct splash_blob_header)) {
++ pr_err("File incomplete.\n");
++
++ goto err;
++ }
++
++ /* Read picture headers */
++ if (fp->header->num_pics) {
++ fp->pics = vzalloc(fp->header->num_pics
++ * sizeof(struct splash_pic_priv));
++ if (!fp->pics)
++ goto err;
++ }
++
++ walker = fw->data + sizeof(struct splash_file_header);
++ for (i = 0; i < fp->header->num_pics; i++) {
++ struct splash_pic_priv *pp = &fp->pics[i];
++ struct splash_pic_header *ph = (void *)walker;
++
++ pr_debug("Picture %u: Size %ux%u\n", i, ph->width, ph->height);
++
++ if (ph->num_blobs < 1) {
++ pr_err("Picture %u: Zero blobs? Aborting load.\n", i);
++ goto err;
++ }
++
++ pp->pic_header = ph;
++ pp->blobs = vzalloc(ph->num_blobs
++ * sizeof(struct splash_blob_priv));
++ if (!pp->blobs)
++ goto err;
++
++ walker += sizeof(struct splash_pic_header);
++ }
++
++ /* Read blob headers */
++ for (i = 0; i < fp->header->num_blobs; i++) {
++ struct splash_blob_header *bh = (void *)walker;
++ struct splash_pic_priv *pp;
++
++ if (walker + sizeof(struct splash_blob_header)
++ > fw->data + fw->size)
++ goto err;
++
++ walker += sizeof(struct splash_blob_header);
++
++ if (walker + bh->length > fw->data + fw->size)
++ goto err;
++
++ if (bh->picture_id >= fp->header->num_pics)
++ goto nextblob;
++
++ pp = &fp->pics[bh->picture_id];
++
++ pr_debug("Blob %u, pic %u, blobs_loaded %u, num_blobs %u.\n",
++ i, bh->picture_id,
++ pp->blobs_loaded, pp->pic_header->num_blobs);
++
++ if (pp->blobs_loaded >= pp->pic_header->num_blobs)
++ goto nextblob;
++
++ switch (bh->type) {
++ case 0:
++ /* Raw 24-bit packed pixels */
++ if (bh->length != pp->pic_header->width
++ * pp->pic_header->height * 3) {
++ pr_err("Blob %u, type 1: Length doesn't match picture.\n",
++ i);
++
++ goto err;
++ }
++ break;
++ default:
++ pr_warn("Blob %u, unknown type %u.\n", i, bh->type);
++ goto nextblob;
++ }
++
++ pp->blobs[pp->blobs_loaded].blob_header = bh;
++ pp->blobs[pp->blobs_loaded].data = walker;
++ pp->blobs_loaded++;
++
++nextblob:
++ walker += bh->length;
++ if (bh->length % 16)
++ walker += 16 - (bh->length % 16);
++ }
++
++ if (walker != fw->data + fw->size)
++ pr_warn("Trailing data in splash file.\n");
++
++ /* Walk over pictures and ensure all blob slots are filled */
++ for (i = 0; i < fp->header->num_pics; i++) {
++ struct splash_pic_priv *pp = &fp->pics[i];
++
++ if (pp->blobs_loaded != pp->pic_header->num_blobs) {
++ pr_err("Picture %u doesn't have all blob slots filled.\n",
++ i);
++
++ goto err;
++ }
++ }
++
++ pr_info("Loaded (%ld bytes, %u pics, %u blobs).\n",
++ fw->size,
++ fp->header->num_pics,
++ fp->header->num_blobs);
++
++ return fp;
++
++
++err:
++ bootsplash_free_file(fp);
++ return NULL;
++}
+diff --git a/drivers/video/fbdev/core/bootsplash_render.c b/drivers/video/fbdev/core/bootsplash_render.c
+index 4d7e0117f653..2ae36949d0e3 100644
+--- a/drivers/video/fbdev/core/bootsplash_render.c
++++ b/drivers/video/fbdev/core/bootsplash_render.c
+@@ -19,6 +19,7 @@
+ #include
+
+ #include "bootsplash_internal.h"
++#include "uapi/linux/bootsplash_file.h"
+
+
+
+@@ -70,16 +71,69 @@ static inline u32 pack_pixel(const struct fb_var_screeninfo *dst_var,
+ }
+
+
+-void bootsplash_do_render_background(struct fb_info *info)
++/*
++ * Copy from source and blend into the destination picture.
++ * Currently assumes that the source picture is 24bpp.
++ * Currently assumes that the destination is <= 32bpp.
++ */ ++static int splash_convert_to_fb(u8 *dst, ++ const struct fb_var_screeninfo *dst_var, ++ unsigned int dst_stride, ++ unsigned int dst_xoff, ++ unsigned int dst_yoff, ++ const u8 *src, ++ unsigned int src_width, ++ unsigned int src_height) ++{ ++ unsigned int x, y; ++ unsigned int src_stride = 3 * src_width; /* Assume 24bpp packed */ ++ u32 dst_octpp = dst_var->bits_per_pixel / 8; ++ ++ dst_xoff += dst_var->xoffset; ++ dst_yoff += dst_var->yoffset; ++ ++ /* Copy with stride and pixel size adjustment */ ++ for (y = 0; ++ y < src_height && y + dst_yoff < dst_var->yres_virtual; ++ y++) { ++ const u8 *srcline = src + (y * src_stride); ++ u8 *dstline = dst + ((y + dst_yoff) * dst_stride) ++ + (dst_xoff * dst_octpp); ++ ++ for (x = 0; ++ x < src_width && x + dst_xoff < dst_var->xres_virtual; ++ x++) { ++ u8 red, green, blue; ++ u32 dstpix; ++ ++ /* Read pixel */ ++ red = *srcline++; ++ green = *srcline++; ++ blue = *srcline++; ++ ++ /* Write pixel */ ++ dstpix = pack_pixel(dst_var, red, green, blue); ++ memcpy(dstline, &dstpix, dst_octpp); ++ ++ dstline += dst_octpp; ++ } ++ } ++ ++ return 0; ++} ++ ++ ++void bootsplash_do_render_background(struct fb_info *info, ++ const struct splash_file_priv *fp) + { + unsigned int x, y; + u32 dstpix; + u32 dst_octpp = info->var.bits_per_pixel / 8; + + dstpix = pack_pixel(&info->var, +- 0, +- 0, +- 0); ++ fp->header->bg_red, ++ fp->header->bg_green, ++ fp->header->bg_blue); + + for (y = 0; y < info->var.yres_virtual; y++) { + u8 *dstline = info->screen_buffer + (y * info->fix.line_length); +@@ -91,3 +145,44 @@ void bootsplash_do_render_background(struct fb_info *info) + } + } + } ++ ++ ++void bootsplash_do_render_pictures(struct fb_info *info, ++ const struct splash_file_priv *fp) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < fp->header->num_pics; i++) { ++ struct splash_blob_priv *bp; ++ struct splash_pic_priv *pp = &fp->pics[i]; ++ long dst_xoff, dst_yoff; ++ ++ if (pp->blobs_loaded < 1) ++ continue; ++ ++ bp = &pp->blobs[0]; ++ ++ if (!bp || bp->blob_header->type != 0) ++ continue; ++ ++ dst_xoff = (info->var.xres - pp->pic_header->width) / 2; ++ dst_yoff = (info->var.yres - pp->pic_header->height) / 2; ++ ++ if (dst_xoff < 0 ++ || dst_yoff < 0 ++ || dst_xoff + pp->pic_header->width > info->var.xres ++ || dst_yoff + pp->pic_header->height > info->var.yres) { ++ pr_info_once("Picture %u is out of bounds at current resolution: %dx%d\n" ++ "(this will only be printed once every reboot)\n", ++ i, info->var.xres, info->var.yres); ++ ++ continue; ++ } ++ ++ /* Draw next splash frame */ ++ splash_convert_to_fb(info->screen_buffer, &info->var, ++ info->fix.line_length, dst_xoff, dst_yoff, ++ bp->data, ++ pp->pic_header->width, pp->pic_header->height); ++ } ++} +diff --git a/include/uapi/linux/bootsplash_file.h b/include/uapi/linux/bootsplash_file.h +new file mode 100644 +index 000000000000..89dc9cca8f0c +--- /dev/null ++++ b/include/uapi/linux/bootsplash_file.h +@@ -0,0 +1,118 @@ ++/* ++ * Kernel based bootsplash. 
++ * ++ * (File format) ++ * ++ * Authors: ++ * Max Staudt ++ * ++ * SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note ++ */ ++ ++#ifndef __BOOTSPLASH_FILE_H ++#define __BOOTSPLASH_FILE_H ++ ++ ++#define BOOTSPLASH_VERSION 55561 ++ ++ ++#include ++#include ++ ++ ++/* ++ * On-disk types ++ * ++ * A splash file consists of: ++ * - One single 'struct splash_file_header' ++ * - An array of 'struct splash_pic_header' ++ * - An array of raw data blocks, each padded to 16 bytes and ++ * preceded by a 'struct splash_blob_header' ++ * ++ * A single-frame splash may look like this: ++ * ++ * +--------------------+ ++ * | | ++ * | splash_file_header | ++ * | -> num_blobs = 1 | ++ * | -> num_pics = 1 | ++ * | | ++ * +--------------------+ ++ * | | ++ * | splash_pic_header | ++ * | | ++ * +--------------------+ ++ * | | ++ * | splash_blob_header | ++ * | -> type = 0 | ++ * | -> picture_id = 0 | ++ * | | ++ * | (raw RGB data) | ++ * | (pad to 16 bytes) | ++ * | | ++ * +--------------------+ ++ * ++ * All multi-byte values are stored on disk in the native format ++ * expected by the system the file will be used on. ++ */ ++#define BOOTSPLASH_MAGIC_BE "Linux bootsplash" ++#define BOOTSPLASH_MAGIC_LE "hsalpstoob xuniL" ++ ++struct splash_file_header { ++ uint8_t id[16]; /* "Linux bootsplash" (no trailing NUL) */ ++ ++ /* Splash file format version to avoid clashes */ ++ uint16_t version; ++ ++ /* The background color */ ++ uint8_t bg_red; ++ uint8_t bg_green; ++ uint8_t bg_blue; ++ uint8_t bg_reserved; ++ ++ /* ++ * Number of pic/blobs so we can allocate memory for internal ++ * structures ahead of time when reading the file ++ */ ++ uint16_t num_blobs; ++ uint8_t num_pics; ++ ++ uint8_t padding[103]; ++} __attribute__((__packed__)); ++ ++ ++struct splash_pic_header { ++ uint16_t width; ++ uint16_t height; ++ ++ /* ++ * Number of data packages associated with this picture. ++ * Currently, the only use for more than 1 is for animations. ++ */ ++ uint8_t num_blobs; ++ ++ uint8_t padding[27]; ++} __attribute__((__packed__)); ++ ++ ++struct splash_blob_header { ++ /* Length of the data block in bytes. */ ++ uint32_t length; ++ ++ /* ++ * Type of the contents. ++ * 0 - Raw RGB data. ++ */ ++ uint16_t type; ++ ++ /* ++ * Picture this blob is associated with. ++ * Blobs will be added to a picture in the order they are ++ * found in the file. 
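++ *
++ * Example (editorial, not in the original header): with num_pics = 2,
++ * three blobs carrying picture_id 0, 0, 1 become frames 0 and 1 of
++ * picture 0 and frame 0 of picture 1, in that order.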
++ */ ++ uint8_t picture_id; ++ ++ uint8_t padding[9]; ++} __attribute__((__packed__)); ++ ++#endif diff --git a/sys-kernel/pinephone-pro-sources/files/0303-bootsplash.patch b/sys-kernel/pinephone-pro-sources/files/0303-bootsplash.patch new file mode 100644 index 0000000..2169537 --- /dev/null +++ b/sys-kernel/pinephone-pro-sources/files/0303-bootsplash.patch @@ -0,0 +1,66 @@ +diff --git a/drivers/video/fbdev/core/bootsplash.c b/drivers/video/fbdev/core/bootsplash.c +index 843c5400fefc..815b007f81ca 100644 +--- a/drivers/video/fbdev/core/bootsplash.c ++++ b/drivers/video/fbdev/core/bootsplash.c +@@ -112,6 +112,8 @@ void bootsplash_render_full(struct fb_info *info) + + bootsplash_do_render_pictures(info, splash_state.file); + ++ bootsplash_do_render_flush(info); ++ + out: + mutex_unlock(&splash_state.data_lock); + } +diff --git a/drivers/video/fbdev/core/bootsplash_internal.h b/drivers/video/fbdev/core/bootsplash_internal.h +index 71e2a27ac0b8..0acb383aa4e3 100644 +--- a/drivers/video/fbdev/core/bootsplash_internal.h ++++ b/drivers/video/fbdev/core/bootsplash_internal.h +@@ -89,6 +89,7 @@ void bootsplash_do_render_background(struct fb_info *info, + const struct splash_file_priv *fp); + void bootsplash_do_render_pictures(struct fb_info *info, + const struct splash_file_priv *fp); ++void bootsplash_do_render_flush(struct fb_info *info); + + + void bootsplash_free_file(struct splash_file_priv *fp); +diff --git a/drivers/video/fbdev/core/bootsplash_render.c b/drivers/video/fbdev/core/bootsplash_render.c +index 2ae36949d0e3..8c09c306ff67 100644 +--- a/drivers/video/fbdev/core/bootsplash_render.c ++++ b/drivers/video/fbdev/core/bootsplash_render.c +@@ -186,3 +186,36 @@ void bootsplash_do_render_pictures(struct fb_info *info, + pp->pic_header->width, pp->pic_header->height); + } + } ++ ++ ++void bootsplash_do_render_flush(struct fb_info *info) ++{ ++ /* ++ * FB drivers using deferred_io (such as Xen) need to sync the ++ * screen after modifying its contents. When the FB is mmap()ed ++ * from userspace, this happens via a dirty pages callback, but ++ * when modifying the FB from the kernel, there is no such thing. ++ * ++ * So let's issue a fake fb_copyarea (copying the FB onto itself) ++ * to trick the FB driver into syncing the screen. ++ * ++ * A few DRM drivers' FB implementations are broken by not using ++ * deferred_io when they really should - we match on the known ++ * bad ones manually for now. 
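++ *
++ * Editorial sketch of the trick: the full-screen self-copy below,
++ *
++ *   area.dx = area.dy = area.sx = area.sy = 0;
++ *   area.width  = info->var.xres;
++ *   area.height = info->var.yres;
++ *   info->fbops->fb_copyarea(info, &area);
++ *
++ * leaves every pixel unchanged but passes through the driver's blit
++ * path, which marks the screen dirty and schedules the real flush.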
++ */ ++ if (info->fbdefio ++ || !strcmp(info->fix.id, "astdrmfb") ++ || !strcmp(info->fix.id, "cirrusdrmfb") ++ || !strcmp(info->fix.id, "mgadrmfb")) { ++ struct fb_copyarea area; ++ ++ area.dx = 0; ++ area.dy = 0; ++ area.width = info->var.xres; ++ area.height = info->var.yres; ++ area.sx = 0; ++ area.sy = 0; ++ ++ info->fbops->fb_copyarea(info, &area); ++ } ++} diff --git a/sys-kernel/pinephone-pro-sources/files/0304-bootsplash.patch b/sys-kernel/pinephone-pro-sources/files/0304-bootsplash.patch new file mode 100644 index 0000000..7eb54af --- /dev/null +++ b/sys-kernel/pinephone-pro-sources/files/0304-bootsplash.patch @@ -0,0 +1,215 @@ +diff --git a/drivers/video/fbdev/core/bootsplash_render.c b/drivers/video/fbdev/core/bootsplash_render.c +index 8c09c306ff67..07e3a4eab811 100644 +--- a/drivers/video/fbdev/core/bootsplash_render.c ++++ b/drivers/video/fbdev/core/bootsplash_render.c +@@ -155,6 +155,7 @@ void bootsplash_do_render_pictures(struct fb_info *info, + for (i = 0; i < fp->header->num_pics; i++) { + struct splash_blob_priv *bp; + struct splash_pic_priv *pp = &fp->pics[i]; ++ const struct splash_pic_header *ph = pp->pic_header; + long dst_xoff, dst_yoff; + + if (pp->blobs_loaded < 1) +@@ -165,8 +166,139 @@ void bootsplash_do_render_pictures(struct fb_info *info, + if (!bp || bp->blob_header->type != 0) + continue; + +- dst_xoff = (info->var.xres - pp->pic_header->width) / 2; +- dst_yoff = (info->var.yres - pp->pic_header->height) / 2; ++ switch (ph->position) { ++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_TOP_LEFT: ++ dst_xoff = 0; ++ dst_yoff = 0; ++ ++ dst_xoff += ph->position_offset; ++ dst_yoff += ph->position_offset; ++ break; ++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_TOP: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_xoff /= 2; ++ dst_yoff = 0; ++ ++ dst_yoff += ph->position_offset; ++ break; ++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_TOP_RIGHT: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_yoff = 0; ++ ++ dst_xoff -= ph->position_offset; ++ dst_yoff += ph->position_offset; ++ break; ++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_RIGHT: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ dst_yoff /= 2; ++ ++ dst_xoff -= ph->position_offset; ++ break; ++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_BOTTOM_RIGHT: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ ++ dst_xoff -= ph->position_offset; ++ dst_yoff -= ph->position_offset; ++ break; ++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_BOTTOM: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_xoff /= 2; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ ++ dst_yoff -= ph->position_offset; ++ break; ++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_BOTTOM_LEFT: ++ dst_xoff = 0 + ph->position_offset; ++ dst_yoff = info->var.yres - pp->pic_header->height ++ - ph->position_offset; ++ break; ++ case SPLASH_POS_FLAG_CORNER | SPLASH_CORNER_LEFT: ++ dst_xoff = 0; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ dst_yoff /= 2; ++ ++ dst_xoff += ph->position_offset; ++ break; ++ ++ case SPLASH_CORNER_TOP_LEFT: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_xoff /= 2; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ dst_yoff /= 2; ++ ++ dst_xoff -= ph->position_offset; ++ dst_yoff -= ph->position_offset; ++ break; ++ case SPLASH_CORNER_TOP: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_xoff /= 2; ++ dst_yoff = 
info->var.yres - pp->pic_header->height; ++ dst_yoff /= 2; ++ ++ dst_yoff -= ph->position_offset; ++ break; ++ case SPLASH_CORNER_TOP_RIGHT: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_xoff /= 2; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ dst_yoff /= 2; ++ ++ dst_xoff += ph->position_offset; ++ dst_yoff -= ph->position_offset; ++ break; ++ case SPLASH_CORNER_RIGHT: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_xoff /= 2; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ dst_yoff /= 2; ++ ++ dst_xoff += ph->position_offset; ++ break; ++ case SPLASH_CORNER_BOTTOM_RIGHT: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_xoff /= 2; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ dst_yoff /= 2; ++ ++ dst_xoff += ph->position_offset; ++ dst_yoff += ph->position_offset; ++ break; ++ case SPLASH_CORNER_BOTTOM: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_xoff /= 2; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ dst_yoff /= 2; ++ ++ dst_yoff += ph->position_offset; ++ break; ++ case SPLASH_CORNER_BOTTOM_LEFT: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_xoff /= 2; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ dst_yoff /= 2; ++ ++ dst_xoff -= ph->position_offset; ++ dst_yoff += ph->position_offset; ++ break; ++ case SPLASH_CORNER_LEFT: ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_xoff /= 2; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ dst_yoff /= 2; ++ ++ dst_xoff -= ph->position_offset; ++ break; ++ ++ default: ++ /* As a fallback, center the picture. */ ++ dst_xoff = info->var.xres - pp->pic_header->width; ++ dst_xoff /= 2; ++ dst_yoff = info->var.yres - pp->pic_header->height; ++ dst_yoff /= 2; ++ break; ++ } + + if (dst_xoff < 0 + || dst_yoff < 0 +diff --git a/include/uapi/linux/bootsplash_file.h b/include/uapi/linux/bootsplash_file.h +index 89dc9cca8f0c..71cedcc68933 100644 +--- a/include/uapi/linux/bootsplash_file.h ++++ b/include/uapi/linux/bootsplash_file.h +@@ -91,7 +91,32 @@ struct splash_pic_header { + */ + uint8_t num_blobs; + +- uint8_t padding[27]; ++ /* ++ * Corner to move the picture to / from. ++ * 0x00 - Top left ++ * 0x01 - Top ++ * 0x02 - Top right ++ * 0x03 - Right ++ * 0x04 - Bottom right ++ * 0x05 - Bottom ++ * 0x06 - Bottom left ++ * 0x07 - Left ++ * ++ * Flags: ++ * 0x10 - Calculate offset from the corner towards the center, ++ * rather than from the center towards the corner ++ */ ++ uint8_t position; ++ ++ /* ++ * Pixel offset from the selected position. ++ * Example: If the picture is in the top right corner, it will ++ * be placed position_offset pixels from the top and ++ * position_offset pixels from the right margin. 
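++ *
++ * Worked example (editorial, assuming a 1920x1080 mode): a 200x100
++ * picture with position = 0x10 | 0x02 (corner flag, top right) and
++ * position_offset = 10 is rendered at
++ *
++ *   dst_xoff = (1920 - 200) - 10 = 1710   (10 px from the right edge)
++ *   dst_yoff = 0 + 10 = 10                (10 px from the top edge)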
++ */ ++ uint16_t position_offset; ++ ++ uint8_t padding[24]; + } __attribute__((__packed__)); + + +@@ -115,4 +140,22 @@ struct splash_blob_header { + uint8_t padding[9]; + } __attribute__((__packed__)); + ++ ++ ++ ++/* ++ * Enums for on-disk types ++ */ ++enum splash_position { ++ SPLASH_CORNER_TOP_LEFT = 0, ++ SPLASH_CORNER_TOP = 1, ++ SPLASH_CORNER_TOP_RIGHT = 2, ++ SPLASH_CORNER_RIGHT = 3, ++ SPLASH_CORNER_BOTTOM_RIGHT = 4, ++ SPLASH_CORNER_BOTTOM = 5, ++ SPLASH_CORNER_BOTTOM_LEFT = 6, ++ SPLASH_CORNER_LEFT = 7, ++ SPLASH_POS_FLAG_CORNER = 0x10, ++}; ++ + #endif diff --git a/sys-kernel/pinephone-pro-sources/files/0305-bootsplash.patch b/sys-kernel/pinephone-pro-sources/files/0305-bootsplash.patch new file mode 100644 index 0000000..2785c5e --- /dev/null +++ b/sys-kernel/pinephone-pro-sources/files/0305-bootsplash.patch @@ -0,0 +1,327 @@ +diff --git a/drivers/video/fbdev/core/bootsplash.c b/drivers/video/fbdev/core/bootsplash.c +index 815b007f81ca..c8642142cfea 100644 +--- a/drivers/video/fbdev/core/bootsplash.c ++++ b/drivers/video/fbdev/core/bootsplash.c +@@ -53,6 +53,14 @@ static void splash_callback_redraw_vc(struct work_struct *ignored) + console_unlock(); + } + ++static void splash_callback_animation(struct work_struct *ignored) ++{ ++ if (bootsplash_would_render_now()) { ++ /* This will also re-schedule this delayed worker */ ++ splash_callback_redraw_vc(ignored); ++ } ++} ++ + + static bool is_fb_compatible(const struct fb_info *info) + { +@@ -103,17 +111,44 @@ static bool is_fb_compatible(const struct fb_info *info) + */ + void bootsplash_render_full(struct fb_info *info) + { ++ bool is_update = false; ++ + mutex_lock(&splash_state.data_lock); + +- if (!is_fb_compatible(info)) +- goto out; ++ /* ++ * If we've painted on this FB recently, we don't have to do ++ * the sanity checks and background drawing again. ++ */ ++ if (splash_state.splash_fb == info) ++ is_update = true; ++ ++ ++ if (!is_update) { ++ /* Check whether we actually support this FB. 
*/ ++ splash_state.splash_fb = NULL; ++ ++ if (!is_fb_compatible(info)) ++ goto out; ++ ++ /* Draw the background only once */ ++ bootsplash_do_render_background(info, splash_state.file); + +- bootsplash_do_render_background(info, splash_state.file); ++ /* Mark this FB as last seen */ ++ splash_state.splash_fb = info; ++ } + +- bootsplash_do_render_pictures(info, splash_state.file); ++ bootsplash_do_render_pictures(info, splash_state.file, is_update); + + bootsplash_do_render_flush(info); + ++ bootsplash_do_step_animations(splash_state.file); ++ ++ /* Schedule update for animated splash screens */ ++ if (splash_state.file->frame_ms > 0) ++ schedule_delayed_work(&splash_state.dwork_animation, ++ msecs_to_jiffies( ++ splash_state.file->frame_ms)); ++ + out: + mutex_unlock(&splash_state.data_lock); + } +@@ -169,8 +204,14 @@ void bootsplash_enable(void) + + was_enabled = test_and_set_bit(0, &splash_state.enabled); + +- if (!was_enabled) ++ if (!was_enabled) { ++ /* Force a full redraw when the splash is re-activated */ ++ mutex_lock(&splash_state.data_lock); ++ splash_state.splash_fb = NULL; ++ mutex_unlock(&splash_state.data_lock); ++ + schedule_work(&splash_state.work_redraw_vc); ++ } + } + + +@@ -227,6 +268,14 @@ ATTRIBUTE_GROUPS(splash_dev); + */ + static int splash_resume(struct device *device) + { ++ /* ++ * Force full redraw on resume since we've probably lost the ++ * framebuffer's contents meanwhile ++ */ ++ mutex_lock(&splash_state.data_lock); ++ splash_state.splash_fb = NULL; ++ mutex_unlock(&splash_state.data_lock); ++ + if (bootsplash_would_render_now()) + schedule_work(&splash_state.work_redraw_vc); + +@@ -235,6 +284,7 @@ static int splash_resume(struct device *device) + + static int splash_suspend(struct device *device) + { ++ cancel_delayed_work_sync(&splash_state.dwork_animation); + cancel_work_sync(&splash_state.work_redraw_vc); + + return 0; +@@ -296,6 +346,8 @@ void bootsplash_init(void) + set_bit(0, &splash_state.enabled); + + INIT_WORK(&splash_state.work_redraw_vc, splash_callback_redraw_vc); ++ INIT_DELAYED_WORK(&splash_state.dwork_animation, ++ splash_callback_animation); + + + if (!splash_state.bootfile || !strlen(splash_state.bootfile)) +diff --git a/drivers/video/fbdev/core/bootsplash_internal.h b/drivers/video/fbdev/core/bootsplash_internal.h +index 0acb383aa4e3..b3a74835d90f 100644 +--- a/drivers/video/fbdev/core/bootsplash_internal.h ++++ b/drivers/video/fbdev/core/bootsplash_internal.h +@@ -37,6 +37,8 @@ struct splash_pic_priv { + + struct splash_blob_priv *blobs; + u16 blobs_loaded; ++ ++ u16 anim_nextframe; + }; + + +@@ -45,6 +47,12 @@ struct splash_file_priv { + const struct splash_file_header *header; + + struct splash_pic_priv *pics; ++ ++ /* ++ * A local copy of the frame delay in the header. ++ * We modify it to keep the code simple. 
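++ *
++ * (Editorial note: "modify" means the loader clamps this copy,
++ * roughly
++ *
++ *   fp->frame_ms = have_anim ? max((u16)20, fp->header->frame_ms) : 0;
++ *
++ * so the render path can simply test frame_ms > 0 rather than
++ * re-deriving the animation state.)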
++ */ ++ u16 frame_ms; + }; + + +@@ -71,6 +79,7 @@ struct splash_priv { + struct platform_device *splash_device; + + struct work_struct work_redraw_vc; ++ struct delayed_work dwork_animation; + + /* Splash data structures including lock for everything below */ + struct mutex data_lock; +@@ -88,8 +97,10 @@ struct splash_priv { + void bootsplash_do_render_background(struct fb_info *info, + const struct splash_file_priv *fp); + void bootsplash_do_render_pictures(struct fb_info *info, +- const struct splash_file_priv *fp); ++ const struct splash_file_priv *fp, ++ bool is_update); + void bootsplash_do_render_flush(struct fb_info *info); ++void bootsplash_do_step_animations(struct splash_file_priv *fp); + + + void bootsplash_free_file(struct splash_file_priv *fp); +diff --git a/drivers/video/fbdev/core/bootsplash_load.c b/drivers/video/fbdev/core/bootsplash_load.c +index fd807571ab7d..1f661b2d4cc9 100644 +--- a/drivers/video/fbdev/core/bootsplash_load.c ++++ b/drivers/video/fbdev/core/bootsplash_load.c +@@ -71,6 +71,7 @@ struct splash_file_priv *bootsplash_load_firmware(struct device *device, + { + const struct firmware *fw; + struct splash_file_priv *fp; ++ bool have_anim = false; + unsigned int i; + const u8 *walker; + +@@ -135,6 +136,13 @@ struct splash_file_priv *bootsplash_load_firmware(struct device *device, + goto err; + } + ++ if (ph->anim_type > SPLASH_ANIM_LOOP_FORWARD) { ++ pr_warn("Picture %u: Unsupported animation type %u.\n", ++ i, ph->anim_type); ++ ++ ph->anim_type = SPLASH_ANIM_NONE; ++ } ++ + pp->pic_header = ph; + pp->blobs = vzalloc(ph->num_blobs + * sizeof(struct splash_blob_priv)); +@@ -202,6 +210,7 @@ struct splash_file_priv *bootsplash_load_firmware(struct device *device, + /* Walk over pictures and ensure all blob slots are filled */ + for (i = 0; i < fp->header->num_pics; i++) { + struct splash_pic_priv *pp = &fp->pics[i]; ++ const struct splash_pic_header *ph = pp->pic_header; + + if (pp->blobs_loaded != pp->pic_header->num_blobs) { + pr_err("Picture %u doesn't have all blob slots filled.\n", +@@ -209,8 +218,20 @@ struct splash_file_priv *bootsplash_load_firmware(struct device *device, + + goto err; + } ++ ++ if (ph->anim_type ++ && ph->num_blobs > 1 ++ && ph->anim_loop < pp->blobs_loaded) ++ have_anim = true; + } + ++ if (!have_anim) ++ /* Disable animation timer if there is nothing to animate */ ++ fp->frame_ms = 0; ++ else ++ /* Enforce minimum delay between frames */ ++ fp->frame_ms = max((u16)20, fp->header->frame_ms); ++ + pr_info("Loaded (%ld bytes, %u pics, %u blobs).\n", + fw->size, + fp->header->num_pics, +diff --git a/drivers/video/fbdev/core/bootsplash_render.c b/drivers/video/fbdev/core/bootsplash_render.c +index 07e3a4eab811..76033606ca8a 100644 +--- a/drivers/video/fbdev/core/bootsplash_render.c ++++ b/drivers/video/fbdev/core/bootsplash_render.c +@@ -148,7 +148,8 @@ void bootsplash_do_render_background(struct fb_info *info, + + + void bootsplash_do_render_pictures(struct fb_info *info, +- const struct splash_file_priv *fp) ++ const struct splash_file_priv *fp, ++ bool is_update) + { + unsigned int i; + +@@ -161,7 +162,11 @@ void bootsplash_do_render_pictures(struct fb_info *info, + if (pp->blobs_loaded < 1) + continue; + +- bp = &pp->blobs[0]; ++ /* Skip static pictures when refreshing animations */ ++ if (ph->anim_type == SPLASH_ANIM_NONE && is_update) ++ continue; ++ ++ bp = &pp->blobs[pp->anim_nextframe]; + + if (!bp || bp->blob_header->type != 0) + continue; +@@ -351,3 +356,24 @@ void bootsplash_do_render_flush(struct fb_info *info) + 
info->fbops->fb_copyarea(info, &area); + } + } ++ ++ ++void bootsplash_do_step_animations(struct splash_file_priv *fp) ++{ ++ unsigned int i; ++ ++ /* Step every animation once */ ++ for (i = 0; i < fp->header->num_pics; i++) { ++ struct splash_pic_priv *pp = &fp->pics[i]; ++ ++ if (pp->blobs_loaded < 2 ++ || pp->pic_header->anim_loop > pp->blobs_loaded) ++ continue; ++ ++ if (pp->pic_header->anim_type == SPLASH_ANIM_LOOP_FORWARD) { ++ pp->anim_nextframe++; ++ if (pp->anim_nextframe >= pp->pic_header->num_blobs) ++ pp->anim_nextframe = pp->pic_header->anim_loop; ++ } ++ } ++} +diff --git a/include/uapi/linux/bootsplash_file.h b/include/uapi/linux/bootsplash_file.h +index 71cedcc68933..b3af0a3c6487 100644 +--- a/include/uapi/linux/bootsplash_file.h ++++ b/include/uapi/linux/bootsplash_file.h +@@ -77,7 +77,17 @@ struct splash_file_header { + uint16_t num_blobs; + uint8_t num_pics; + +- uint8_t padding[103]; ++ uint8_t unused_1; ++ ++ /* ++ * Milliseconds to wait before painting the next frame in ++ * an animation. ++ * This is actually a minimum, as the system is allowed to ++ * stall for longer between frames. ++ */ ++ uint16_t frame_ms; ++ ++ uint8_t padding[100]; + } __attribute__((__packed__)); + + +@@ -116,7 +126,23 @@ struct splash_pic_header { + */ + uint16_t position_offset; + +- uint8_t padding[24]; ++ /* ++ * Animation type. ++ * 0 - off ++ * 1 - forward loop ++ */ ++ uint8_t anim_type; ++ ++ /* ++ * Animation loop point. ++ * Actual meaning depends on animation type: ++ * Type 0 - Unused ++ * 1 - Frame at which to restart the forward loop ++ * (allowing for "intro" frames) ++ */ ++ uint8_t anim_loop; ++ ++ uint8_t padding[22]; + } __attribute__((__packed__)); + + +@@ -158,4 +184,9 @@ enum splash_position { + SPLASH_POS_FLAG_CORNER = 0x10, + }; + ++enum splash_anim_type { ++ SPLASH_ANIM_NONE = 0, ++ SPLASH_ANIM_LOOP_FORWARD = 1, ++}; ++ + #endif diff --git a/sys-kernel/pinephone-pro-sources/files/0306-bootsplash.patch b/sys-kernel/pinephone-pro-sources/files/0306-bootsplash.patch new file mode 100644 index 0000000..d6c6db6 --- /dev/null +++ b/sys-kernel/pinephone-pro-sources/files/0306-bootsplash.patch @@ -0,0 +1,82 @@ +diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c +index 2ebaba16f785..416735ab6dc1 100644 +--- a/drivers/tty/vt/vt.c ++++ b/drivers/tty/vt/vt.c +@@ -105,6 +105,7 @@ + #include + #include + #include ++#include + + #define MAX_NR_CON_DRIVER 16 + +@@ -4235,6 +4236,7 @@ void do_unblank_screen(int leaving_gfx) + } + + console_blanked = 0; ++ bootsplash_mark_dirty(); + if (vc->vc_sw->con_blank(vc, 0, leaving_gfx)) + /* Low-level driver cannot restore -> do it ourselves */ + update_screen(vc); +diff --git a/drivers/video/fbdev/core/bootsplash.c b/drivers/video/fbdev/core/bootsplash.c +index c8642142cfea..13fcaabbc2ca 100644 +--- a/drivers/video/fbdev/core/bootsplash.c ++++ b/drivers/video/fbdev/core/bootsplash.c +@@ -165,6 +165,13 @@ bool bootsplash_would_render_now(void) + && bootsplash_is_enabled(); + } + ++void bootsplash_mark_dirty(void) ++{ ++ mutex_lock(&splash_state.data_lock); ++ splash_state.splash_fb = NULL; ++ mutex_unlock(&splash_state.data_lock); ++} ++ + bool bootsplash_is_enabled(void) + { + bool was_enabled; +@@ -206,9 +213,7 @@ void bootsplash_enable(void) + + if (!was_enabled) { + /* Force a full redraw when the splash is re-activated */ +- mutex_lock(&splash_state.data_lock); +- splash_state.splash_fb = NULL; +- mutex_unlock(&splash_state.data_lock); ++ bootsplash_mark_dirty(); + + schedule_work(&splash_state.work_redraw_vc); + } +@@ -272,9 +277,7 
@@ static int splash_resume(struct device *device) + * Force full redraw on resume since we've probably lost the + * framebuffer's contents meanwhile + */ +- mutex_lock(&splash_state.data_lock); +- splash_state.splash_fb = NULL; +- mutex_unlock(&splash_state.data_lock); ++ bootsplash_mark_dirty(); + + if (bootsplash_would_render_now()) + schedule_work(&splash_state.work_redraw_vc); +diff --git a/include/linux/bootsplash.h b/include/linux/bootsplash.h +index c6dd0b43180d..4075098aaadd 100644 +--- a/include/linux/bootsplash.h ++++ b/include/linux/bootsplash.h +@@ -19,6 +19,8 @@ extern void bootsplash_render_full(struct fb_info *info); + + extern bool bootsplash_would_render_now(void); + ++extern void bootsplash_mark_dirty(void); ++ + extern bool bootsplash_is_enabled(void); + extern void bootsplash_disable(void); + extern void bootsplash_enable(void); +@@ -31,6 +33,8 @@ extern void bootsplash_init(void); + + #define bootsplash_would_render_now() (false) + ++#define bootsplash_mark_dirty() ++ + #define bootsplash_is_enabled() (false) + #define bootsplash_disable() + #define bootsplash_enable() diff --git a/sys-kernel/pinephone-pro-sources/files/0307-bootsplash.patch b/sys-kernel/pinephone-pro-sources/files/0307-bootsplash.patch new file mode 100644 index 0000000..3f82eb0 --- /dev/null +++ b/sys-kernel/pinephone-pro-sources/files/0307-bootsplash.patch @@ -0,0 +1,42 @@ +diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c +index f4166263bb3a..a248429194bb 100644 +--- a/drivers/tty/vt/keyboard.c ++++ b/drivers/tty/vt/keyboard.c +@@ -49,6 +49,8 @@ + + #include + ++#include ++ + /* + * Exported functions/variables + */ +@@ -1413,6 +1415,28 @@ static void kbd_keycode(unsigned int key + } + #endif + ++ /* Trap keys when bootsplash is shown */ ++ if (bootsplash_would_render_now()) { ++ /* Deactivate bootsplash on ESC or Alt+Fxx VT switch */ ++ if (keycode >= KEY_F1 && keycode <= KEY_F12) { ++ bootsplash_disable(); ++ ++ /* ++ * No return here since we want to actually ++ * perform the VT switch. ++ */ ++ } else { ++ if (keycode == KEY_ESC) ++ bootsplash_disable(); ++ ++ /* ++ * Just drop any other keys. ++ * Their effect would be hidden by the splash. 
++ */ ++ return; ++ } ++ } ++ + if (kbd->kbdmode == VC_MEDIUMRAW) { + /* + * This is extended medium raw mode, with keys above 127 diff --git a/sys-kernel/pinephone-pro-sources/files/0308-bootsplash.patch b/sys-kernel/pinephone-pro-sources/files/0308-bootsplash.patch new file mode 100644 index 0000000..8a3b715 --- /dev/null +++ b/sys-kernel/pinephone-pro-sources/files/0308-bootsplash.patch @@ -0,0 +1,21 @@ +diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c +index 3ffc1ce29023..bc6a24c9dfa8 100644 +--- a/drivers/tty/sysrq.c ++++ b/drivers/tty/sysrq.c +@@ -49,6 +49,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -104,6 +105,8 @@ static void sysrq_handle_SAK(int key) + { + struct work_struct *SAK_work = &vc_cons[fg_console].SAK_work; + schedule_work(SAK_work); ++ ++ bootsplash_disable(); + } + static struct sysrq_key_op sysrq_SAK_op = { + .handler = sysrq_handle_SAK, diff --git a/sys-kernel/pinephone-pro-sources/files/0309-bootsplash.patch b/sys-kernel/pinephone-pro-sources/files/0309-bootsplash.patch new file mode 100644 index 0000000..add68e7 --- /dev/null +++ b/sys-kernel/pinephone-pro-sources/files/0309-bootsplash.patch @@ -0,0 +1,21 @@ +diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c +index 9a39a6fcfe98..8a9c67e1c5d8 100644 +--- a/drivers/video/fbdev/core/fbcon.c ++++ b/drivers/video/fbdev/core/fbcon.c +@@ -1343,6 +1343,16 @@ static void fbcon_cursor(struct vc_data *vc, int mode) + int y; + int c = scr_readw((u16 *) vc->vc_pos); + ++ /* ++ * Disable the splash here so we don't have to hook into ++ * vt_console_print() in drivers/tty/vt/vt.c ++ * ++ * We'd disable the splash just before the call to ++ * hide_cursor() anyway, so this spot is just fine. ++ */ ++ if (oops_in_progress) ++ bootsplash_disable(); ++ + ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms); + + if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1) diff --git a/sys-kernel/pinephone-pro-sources/files/0310-bootsplash.patch b/sys-kernel/pinephone-pro-sources/files/0310-bootsplash.patch new file mode 100644 index 0000000..e5c1fd0 --- /dev/null +++ b/sys-kernel/pinephone-pro-sources/files/0310-bootsplash.patch @@ -0,0 +1,321 @@ +diff --git a/Documentation/ABI/testing/sysfs-platform-bootsplash b/Documentation/ABI/testing/sysfs-platform-bootsplash +new file mode 100644 +index 000000000000..742c7b035ded +--- /dev/null ++++ b/Documentation/ABI/testing/sysfs-platform-bootsplash +@@ -0,0 +1,11 @@ ++What: /sys/devices/platform/bootsplash.0/enabled ++Date: Oct 2017 ++KernelVersion: 4.14 ++Contact: Max Staudt ++Description: ++ Can be set and read. ++ ++ 0: Splash is disabled. ++ 1: Splash is shown whenever fbcon would show a text console ++ (i.e. no graphical application is running), and a splash ++ file is loaded. +diff --git a/Documentation/bootsplash.rst b/Documentation/bootsplash.rst +new file mode 100644 +index 000000000000..611f0c558925 +--- /dev/null ++++ b/Documentation/bootsplash.rst +@@ -0,0 +1,285 @@ ++==================== ++The Linux bootsplash ++==================== ++ ++:Date: November, 2017 ++:Author: Max Staudt ++ ++ ++The Linux bootsplash is a graphical replacement for the '``quiet``' boot ++option, typically showing a logo and a spinner animation as the system starts. ++ ++Currently, it is a part of the Framebuffer Console support, and can be found ++as ``CONFIG_BOOTSPLASH`` in the kernel configuration. This means that as long ++as it is enabled, it hijacks fbcon's output and draws a splash screen instead. 
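++
++A minimal configuration sketch (editorial example, using the options
++documented below)::
++
++    CONFIG_FRAMEBUFFER_CONSOLE=y
++    CONFIG_BOOTSPLASH=y
++    CONFIG_BOOTSPLASH_FILE="bootsplash/mytheme"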
++ ++Purely compiling in the bootsplash will not render it functional - to actually ++render a splash, you will also need a splash theme file. See the example ++utility and script in ``tools/bootsplash`` for a live demo. ++ ++ ++ ++Motivation ++========== ++ ++- The '``quiet``' boot option only suppresses most messages during boot, but ++ errors are still shown. ++ ++- A user space implementation can only show a logo once user space has been ++ initialized far enough to allow this. A kernel splash can display a splash ++ immediately as soon as fbcon can be displayed. ++ ++- Implementing a splash screen in user space (e.g. Plymouth) is problematic ++ due to resource conflicts. ++ ++ For example, if Plymouth is keeping ``/dev/fb0`` (provided via vesafb/efifb) ++ open, then most DRM drivers can't replace it because the address space is ++ still busy - thus leading to a VRAM reservation error. ++ ++ See: https://bugzilla.opensuse.org/show_bug.cgi?id=980750 ++ ++ ++ ++Command line arguments ++====================== ++ ++``bootsplash.bootfile`` ++ Which file in the initramfs to load. ++ ++ The splash theme is loaded via request_firmware(), thus to load ++ ``/lib/firmware/bootsplash/mytheme`` pass the command line: ++ ++ ``bootsplash.bootfile=bootsplash/mytheme`` ++ ++ Note: The splash file *has to be* in the initramfs, as it needs to be ++ available when the splash is initialized early on. ++ ++ Default: none, i.e. a non-functional splash, falling back to showing text. ++ ++ ++ ++sysfs run-time configuration ++============================ ++ ++``/sys/devices/platform/bootsplash.0/enabled`` ++ Enable/disable the bootsplash. ++ The system boots with this set to 1, but will not show a splash unless ++ a splash theme file is also loaded. ++ ++ ++ ++Kconfig ++======= ++ ++``BOOTSPLASH`` ++ Whether to compile in bootsplash support ++ (depends on fbcon compiled in, i.e. ``FRAMEBUFFER_CONSOLE=y``) ++ ++ ++ ++Bootsplash file format ++====================== ++ ++A file specified in the kernel configuration as ``CONFIG_BOOTSPLASH_FILE`` ++or specified on the command line as ``bootsplash.bootfile`` will be loaded ++and displayed as soon as fbcon is initialized. ++ ++ ++Main blocks ++----------- ++ ++There are 3 main blocks in each file: ++ ++ - one File header ++ - n Picture headers ++ - m (Blob header + payload) blocks ++ ++ ++Structures ++---------- ++ ++The on-disk structures are defined in ++``drivers/video/fbdev/core/bootsplash_file.h`` and represent these blocks: ++ ++ - ``struct splash_file_header`` ++ ++ Represents the file header, with splash-wide information including: ++ ++ - The magic string "``Linux bootsplash``" on big-endian platforms ++ (the reverse on little endian) ++ - The file format version (for incompatible updates, hopefully never) ++ - The background color ++ - Number of picture and blob blocks ++ - Animation speed (we only allow one delay for all animations) ++ ++ The file header is followed by the first picture header. ++ ++ ++ - ``struct splash_picture_header`` ++ ++ Represents an object (picture) drawn on screen, including its immutable ++ properties: ++ - Width, height ++ - Positioning relative to screen corners or in the center ++ - Animation, if any ++ - Animation type ++ - Number of blobs ++ ++ The picture header is followed by another picture header, up until n ++ picture headers (as defined in the file header) have been read. Then, ++ the (blob header, payload) pairs follow. 
++ ++ ++ - ``struct splash_blob_header`` ++ (followed by payload) ++ ++ Represents one raw data stream. So far, only picture data is defined. ++ ++ The blob header is followed by a payload, then padding to n*16 bytes, ++ then (if further blobs are defined in the file header) a further blob ++ header. ++ ++ ++Alignment ++--------- ++ ++The bootsplash file is designed to be loaded into memory as-is. ++ ++All structures are a multiple of 16 bytes long, all elements therein are ++aligned to multiples of their length, and the payloads are always padded ++up to multiples of 16 bytes. This is to allow aligned accesses in all ++cases while still simply mapping the structures over an in-memory copy of ++the bootsplash file. ++ ++ ++Further information ++------------------- ++ ++Please see ``drivers/video/fbdev/core/bootsplash_file.h`` for further ++details and possible values in the file. ++ ++ ++ ++Hooks - how the bootsplash is integrated ++======================================== ++ ++``drivers/video/fbdev/core/fbcon.c`` ++ ``fbcon_init()`` calls ``bootsplash_init()``, which loads the default ++ bootsplash file or the one specified on the kernel command line. ++ ++ ``fbcon_switch()`` draws the bootsplash when it's active, and is also ++ one of the callers of ``set_blitting_type()``. ++ ++ ``set_blitting_type()`` calls ``fbcon_set_dummyops()`` when the ++ bootsplash is active, overriding the text rendering functions. ++ ++ ``fbcon_cursor()`` will call ``bootsplash_disable()`` when an oops is ++ being printed in order to make a kernel panic visible. ++ ++``drivers/video/fbdev/core/dummyblit.c`` ++ This contains the dummy text rendering functions used to suppress text ++ output while the bootsplash is shown. ++ ++``drivers/tty/vt/keyboard.c`` ++ ``kbd_keycode()`` can call ``bootsplash_disable()`` when the user ++ presses ESC or F1-F12 (changing VT). This is to provide a built-in way ++ of disabling the splash manually at any time. ++ ++ ++ ++FAQ: Frequently Asked Questions ++=============================== ++ ++I want to see the log! How do I show the log? ++--------------------------------------------- ++ ++Press ESC while the splash is shown, or remove the ``bootsplash.bootfile`` ++parameter from the kernel cmdline. Without that parameter, the bootsplash ++will boot disabled. ++ ++ ++Why use FB instead of modern DRM/KMS? ++------------------------------------- ++ ++This is a semantic problem: ++ - What memory to draw the splash to? ++ - And what mode will the screen be set to? ++ ++Using the fbdev emulation solves these issues. ++ ++Let's start from a bare KMS system, without fbcon, and without fbdev ++emulation. In this case, as long as userspace doesn't open the KMS ++device, the state of the screen is undefined. No framebuffer is ++allocated in video RAM, and no particular mode is set. ++ ++In this case, we'd have to allocate a framebuffer to show the splash, ++and set our mode ourselves. This either wastes a screenful of video RAM ++if the splash is to co-exist with the userspace program's own allocated ++framebuffer, or there is a flicker as we deactivate and delete the ++bootsplash's framebuffer and hand control over to userspace. Since we ++may set a different mode than userspace, we'd also have flicker due ++to mode switching. ++ ++This logic is already contained in every KMS driver that performs fbdev ++emulation. So we might as well use that. And the correct API to do so is ++fbdev. Plus, we get compatibility with old, pure fbdev drivers for free. 
++With the fbdev emulation, there is *always* a well-defined framebuffer ++to draw on. And the selection of mode has already been done by the ++graphics driver, so we don't need to reinvent that wheel, either. ++Finally, if userspace decides to use /dev/fbX, we don't have to worry ++about wasting video RAM, either. ++ ++ ++Why is the bootsplash integrated in fbcon? ++------------------------------------------ ++ ++Right now, the bootsplash is drawn from within fbcon, as this allows us ++to easily know *when* to draw - i.e. when we're safe from fbcon and ++userspace drawing all over our beautiful splash logo. ++ ++Separating them is not easy - see the to-do list below. ++ ++ ++ ++TO DO list for future development ++================================= ++ ++Second enable/disable switch for the system ++------------------------------------------- ++ ++It may be helpful to differentiate between the system and the user ++switching off the bootsplash. Thus, the system may make it disappear and ++reappear e.g. for a password prompt, yet once the user has pressed ESC, ++it could stay gone. ++ ++ ++Fix buggy DRM/KMS drivers ++------------------------- ++ ++Currently, the splash code manually checks for fbdev emulation provided by ++the ast, cirrus, and mgag200 DRM/KMS drivers. ++These drivers use a manual mechanism similar to deferred I/O for their FB ++emulation, and thus need to be manually flushed onto the screen in the same ++way. ++ ++This may be improved upon in several ways: ++ ++1. Changing these drivers to expose the fbdev BO's memory directly, like ++ bochsdrmfb does. ++2. Creating a new fb_ops->fb_flush() API to allow the kernel to flush the ++ framebuffer once the bootsplash has been drawn into it. ++ ++ ++Separating from fbcon ++--------------------- ++ ++Separating these two components would yield independence from fbcon being ++compiled into the kernel, and thus lowering code size in embedded ++applications. ++ ++To do this cleanly will involve a clean separation of users of an FB device ++within the kernel, i.e. fbcon, bootsplash, and userspace. Right now, the ++legacy fbcon code and VT code co-operate to switch between fbcon and ++userspace (by setting the VT into KD_GRAPHICS mode). Installing a muxer ++between these components ensues refactoring of old code and checking for ++correct locking. +diff --git a/MAINTAINERS b/MAINTAINERS +index 5c237445761e..7ffac272434e 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -2709,6 +2709,8 @@ BOOTSPLASH + M: Max Staudt + L: linux-fbdev@vger.kernel.org + S: Maintained ++F: Documentation/ABI/testing/sysfs-platform-bootsplash ++F: Documentation/bootsplash.rst + F: drivers/video/fbdev/core/bootsplash*.* + F: drivers/video/fbdev/core/dummycon.c + F: include/linux/bootsplash.h diff --git a/sys-kernel/pinephone-pro-sources/files/0311-bootsplash.patch b/sys-kernel/pinephone-pro-sources/files/0311-bootsplash.patch new file mode 100644 index 0000000..8e87eb4 --- /dev/null +++ b/sys-kernel/pinephone-pro-sources/files/0311-bootsplash.patch @@ -0,0 +1,129 @@ +diff --git a/Documentation/ABI/testing/sysfs-platform-bootsplash b/Documentation/ABI/testing/sysfs-platform-bootsplash +index 742c7b035ded..f8f4b259220e 100644 +--- a/Documentation/ABI/testing/sysfs-platform-bootsplash ++++ b/Documentation/ABI/testing/sysfs-platform-bootsplash +@@ -9,3 +9,35 @@ Description: + 1: Splash is shown whenever fbcon would show a text console + (i.e. no graphical application is running), and a splash + file is loaded. 
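++
++ Editorial usage example (not part of the original ABI text);
++ toggling the splash at run time from a shell:
++
++ echo 0 > /sys/devices/platform/bootsplash.0/enabled
++ echo 1 > /sys/devices/platform/bootsplash.0/enabled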
++ ++What: /sys/devices/platform/bootsplash.0/drop_splash ++Date: Oct 2017 ++KernelVersion: 4.14 ++Contact: Max Staudt ++Description: ++ Can only be set. ++ ++ Any value written will cause the current splash theme file ++ to be unloaded and the text console to be redrawn. ++ ++What: /sys/devices/platform/bootsplash.0/load_file ++Date: Oct 2017 ++KernelVersion: 4.14 ++Contact: Max Staudt ++Description: ++ Can only be set. ++ ++ Any value written will cause the splash to be disabled and ++ internal memory structures to be freed. ++ ++ A firmware path written will cause a new theme file to be ++ loaded and the current bootsplash to be replaced. ++ The current enabled/disabled status is not touched. ++ If the splash is already active, it will be redrawn. ++ ++ The path has to be a path in /lib/firmware since ++ request_firmware() is used to fetch the data. ++ ++ When setting the splash from the shell, echo -n has to be ++ used as any trailing '\n' newline will be interpreted as ++ part of the path. +diff --git a/Documentation/bootsplash.rst b/Documentation/bootsplash.rst +index 611f0c558925..b35aba5093e8 100644 +--- a/Documentation/bootsplash.rst ++++ b/Documentation/bootsplash.rst +@@ -67,6 +67,14 @@ sysfs run-time configuration + a splash theme file is also loaded. + + ++``/sys/devices/platform/bootsplash.0/drop_splash`` ++ Unload splash data and free memory. ++ ++``/sys/devices/platform/bootsplash.0/load_file`` ++ Load a splash file from ``/lib/firmware/``. ++ Note that trailing newlines will be interpreted as part of the file name. ++ ++ + + Kconfig + ======= +diff --git a/drivers/video/fbdev/core/bootsplash.c b/drivers/video/fbdev/core/bootsplash.c +index 13fcaabbc2ca..16cb0493629d 100644 +--- a/drivers/video/fbdev/core/bootsplash.c ++++ b/drivers/video/fbdev/core/bootsplash.c +@@ -251,11 +251,65 @@ static ssize_t splash_store_enabled(struct device *device, + return count; + } + ++static ssize_t splash_store_drop_splash(struct device *device, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct splash_file_priv *fp; ++ ++ if (!buf || !count || !splash_state.file) ++ return count; ++ ++ mutex_lock(&splash_state.data_lock); ++ fp = splash_state.file; ++ splash_state.file = NULL; ++ mutex_unlock(&splash_state.data_lock); ++ ++ /* Redraw the text console */ ++ schedule_work(&splash_state.work_redraw_vc); ++ ++ bootsplash_free_file(fp); ++ ++ return count; ++} ++ ++static ssize_t splash_store_load_file(struct device *device, ++ struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct splash_file_priv *fp, *fp_old; ++ ++ if (!count) ++ return 0; ++ ++ fp = bootsplash_load_firmware(&splash_state.splash_device->dev, ++ buf); ++ ++ if (!fp) ++ return -ENXIO; ++ ++ mutex_lock(&splash_state.data_lock); ++ fp_old = splash_state.file; ++ splash_state.splash_fb = NULL; ++ splash_state.file = fp; ++ mutex_unlock(&splash_state.data_lock); ++ ++ /* Update the splash or text console */ ++ schedule_work(&splash_state.work_redraw_vc); ++ ++ bootsplash_free_file(fp_old); ++ return count; ++} ++ + static DEVICE_ATTR(enabled, 0644, splash_show_enabled, splash_store_enabled); ++static DEVICE_ATTR(drop_splash, 0200, NULL, splash_store_drop_splash); ++static DEVICE_ATTR(load_file, 0200, NULL, splash_store_load_file); + + + static struct attribute *splash_dev_attrs[] = { + &dev_attr_enabled.attr, ++ &dev_attr_drop_splash.attr, ++ &dev_attr_load_file.attr, + NULL + }; + diff --git a/sys-kernel/pinephone-pro-sources/files/0312-bootsplash.patch 
b/sys-kernel/pinephone-pro-sources/files/0312-bootsplash.patch new file mode 100644 index 0000000..5d8ea1f --- /dev/null +++ b/sys-kernel/pinephone-pro-sources/files/0312-bootsplash.patch @@ -0,0 +1,511 @@ +diff --git a/MAINTAINERS b/MAINTAINERS +index 7ffac272434e..ddff07cd794c 100644 +--- a/MAINTAINERS ++++ b/MAINTAINERS +@@ -2715,6 +2715,7 @@ F: drivers/video/fbdev/core/bootsplash*.* + F: drivers/video/fbdev/core/dummycon.c + F: include/linux/bootsplash.h + F: include/uapi/linux/bootsplash_file.h ++F: tools/bootsplash/* + + BPF (Safe dynamic programs and tools) + M: Alexei Starovoitov +diff --git a/tools/bootsplash/.gitignore b/tools/bootsplash/.gitignore +new file mode 100644 +index 000000000000..091b99a17567 +--- /dev/null ++++ b/tools/bootsplash/.gitignore +@@ -0,0 +1 @@ ++bootsplash-packer +diff --git a/tools/bootsplash/Makefile b/tools/bootsplash/Makefile +new file mode 100644 +index 000000000000..0ad8e8a84942 +--- /dev/null ++++ b/tools/bootsplash/Makefile +@@ -0,0 +1,9 @@ ++CC := $(CROSS_COMPILE)gcc ++CFLAGS := -I../../usr/include ++ ++PROGS := bootsplash-packer ++ ++all: $(PROGS) ++ ++clean: ++ rm -fr $(PROGS) +diff --git a/tools/bootsplash/bootsplash-packer.c b/tools/bootsplash/bootsplash-packer.c +new file mode 100644 +index 000000000000..ffb6a8b69885 +--- /dev/null ++++ b/tools/bootsplash/bootsplash-packer.c +@@ -0,0 +1,471 @@ ++/* ++ * Kernel based bootsplash. ++ * ++ * (Splash file packer tool) ++ * ++ * Authors: ++ * Max Staudt ++ * ++ * SPDX-License-Identifier: GPL-2.0 ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++ ++static void print_help(char *progname) ++{ ++ printf("Usage: %s [OPTIONS] outfile\n", progname); ++ printf("\n" ++ "Options, executed in order given:\n" ++ " -h, --help Print this help message\n" ++ "\n" ++ " --bg_red Background color (red part)\n" ++ " --bg_green Background color (green part)\n" ++ " --bg_blue Background color (blue part)\n" ++ " --bg_reserved (do not use)\n" ++ " --frame_ms Minimum milliseconds between animation steps\n" ++ "\n" ++ " --picture Start describing the next picture\n" ++ " --pic_width Picture width in pixels\n" ++ " --pic_height Picture height in pixels\n" ++ " --pic_position Coarse picture placement:\n" ++ " 0x00 - Top left\n" ++ " 0x01 - Top\n" ++ " 0x02 - Top right\n" ++ " 0x03 - Right\n" ++ " 0x04 - Bottom right\n" ++ " 0x05 - Bottom\n" ++ " 0x06 - Bottom left\n" ++ " 0x07 - Left\n" ++ "\n" ++ " Flags:\n" ++ " 0x10 - Calculate offset from corner towards center,\n" ++ " rather than from center towards corner\n" ++ " --pic_position_offset Distance from base position in pixels\n" ++ " --pic_anim_type Animation type:\n" ++ " 0 - None\n" ++ " 1 - Forward loop\n" ++ " --pic_anim_loop Loop point for animation\n" ++ "\n" ++ " --blob Include next data stream\n" ++ " --blob_type Type of data\n" ++ " --blob_picture_id Picture to associate this blob with, starting at 0\n" ++ " (default: number of last --picture)\n" ++ "\n"); ++ printf("This tool will write %s files.\n\n", ++#if __BYTE_ORDER == __BIG_ENDIAN ++ "Big Endian (BE)"); ++#elif __BYTE_ORDER == __LITTLE_ENDIAN ++ "Little Endian (LE)"); ++#else ++#error ++#endif ++} ++ ++ ++struct blob_entry { ++ struct blob_entry *next; ++ ++ char *fn; ++ ++ struct splash_blob_header header; ++}; ++ ++ ++static void dump_file_header(struct splash_file_header *h) ++{ ++ printf(" --- File header ---\n"); ++ printf("\n"); ++ printf(" version: %5u\n", h->version); ++ printf("\n"); ++ printf(" bg_red: %5u\n", h->bg_red); ++ printf(" bg_green: %5u\n", 
h->bg_green); ++ printf(" bg_blue: %5u\n", h->bg_blue); ++ printf(" bg_reserved: %5u\n", h->bg_reserved); ++ printf("\n"); ++ printf(" num_blobs: %5u\n", h->num_blobs); ++ printf(" num_pics: %5u\n", h->num_pics); ++ printf("\n"); ++ printf(" frame_ms: %5u\n", h->frame_ms); ++ printf("\n"); ++} ++ ++static void dump_pic_header(struct splash_pic_header *ph) ++{ ++ printf(" --- Picture header ---\n"); ++ printf("\n"); ++ printf(" width: %5u\n", ph->width); ++ printf(" height: %5u\n", ph->height); ++ printf("\n"); ++ printf(" num_blobs: %5u\n", ph->num_blobs); ++ printf("\n"); ++ printf(" position: %0x3x\n", ph->position); ++ printf(" position_offset: %5u\n", ph->position_offset); ++ printf("\n"); ++ printf(" anim_type: %5u\n", ph->anim_type); ++ printf(" anim_loop: %5u\n", ph->anim_loop); ++ printf("\n"); ++} ++ ++static void dump_blob(struct blob_entry *b) ++{ ++ printf(" --- Blob header ---\n"); ++ printf("\n"); ++ printf(" length: %7u\n", b->header.length); ++ printf(" type: %7u\n", b->header.type); ++ printf("\n"); ++ printf(" picture_id: %7u\n", b->header.picture_id); ++ printf("\n"); ++} ++ ++ ++#define OPT_MAX(var, max) \ ++ do { \ ++ if ((var) > max) { \ ++ fprintf(stderr, "--%s: Invalid value\n", \ ++ long_options[option_index].name); \ ++ break; \ ++ } \ ++ } while (0) ++ ++static struct option long_options[] = { ++ {"help", 0, 0, 'h'}, ++ {"bg_red", 1, 0, 10001}, ++ {"bg_green", 1, 0, 10002}, ++ {"bg_blue", 1, 0, 10003}, ++ {"bg_reserved", 1, 0, 10004}, ++ {"frame_ms", 1, 0, 10005}, ++ {"picture", 0, 0, 20000}, ++ {"pic_width", 1, 0, 20001}, ++ {"pic_height", 1, 0, 20002}, ++ {"pic_position", 1, 0, 20003}, ++ {"pic_position_offset", 1, 0, 20004}, ++ {"pic_anim_type", 1, 0, 20005}, ++ {"pic_anim_loop", 1, 0, 20006}, ++ {"blob", 1, 0, 30000}, ++ {"blob_type", 1, 0, 30001}, ++ {"blob_picture_id", 1, 0, 30002}, ++ {NULL, 0, NULL, 0} ++}; ++ ++ ++int main(int argc, char **argv) ++{ ++ FILE *of; ++ char *ofn; ++ int c; ++ int option_index = 0; ++ ++ unsigned long ul; ++ struct splash_file_header fh = {}; ++ struct splash_pic_header ph[255]; ++ struct blob_entry *blob_first = NULL; ++ struct blob_entry *blob_last = NULL; ++ struct blob_entry *blob_cur = NULL; ++ ++ if (argc < 2) { ++ print_help(argv[0]); ++ return EXIT_FAILURE; ++ } ++ ++ ++ /* Parse and and execute user commands */ ++ while ((c = getopt_long(argc, argv, "h", ++ long_options, &option_index)) != -1) { ++ switch (c) { ++ case 10001: /* bg_red */ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 255); ++ fh.bg_red = ul; ++ break; ++ case 10002: /* bg_green */ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 255); ++ fh.bg_green = ul; ++ break; ++ case 10003: /* bg_blue */ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 255); ++ fh.bg_blue = ul; ++ break; ++ case 10004: /* bg_reserved */ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 255); ++ fh.bg_reserved = ul; ++ break; ++ case 10005: /* frame_ms */ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 65535); ++ fh.frame_ms = ul; ++ break; ++ ++ ++ case 20000: /* picture */ ++ if (fh.num_pics >= 255) { ++ fprintf(stderr, "--%s: Picture array full\n", ++ long_options[option_index].name); ++ break; ++ } ++ ++ fh.num_pics++; ++ break; ++ ++ case 20001: /* pic_width */ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 65535); ++ ph[fh.num_pics - 1].width = ul; ++ break; ++ ++ case 20002: /* pic_height */ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 65535); ++ ph[fh.num_pics - 1].height = ul; ++ break; ++ ++ case 20003: /* pic_position */ ++ ul = strtoul(optarg, NULL, 0); ++ 
OPT_MAX(ul, 255); ++ ph[fh.num_pics - 1].position = ul; ++ break; ++ ++ case 20004: /* pic_position_offset */ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 255); ++ ph[fh.num_pics - 1].position_offset = ul; ++ break; ++ ++ case 20005: /* pic_anim_type */ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 255); ++ ph[fh.num_pics - 1].anim_type = ul; ++ break; ++ ++ case 20006: /* pic_anim_loop */ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 255); ++ ph[fh.num_pics - 1].anim_loop = ul; ++ break; ++ ++ ++ case 30000: /* blob */ ++ if (fh.num_blobs >= 65535) { ++ fprintf(stderr, "--%s: Blob array full\n", ++ long_options[option_index].name); ++ break; ++ } ++ ++ blob_cur = calloc(1, sizeof(struct blob_entry)); ++ if (!blob_cur) { ++ fprintf(stderr, "--%s: Out of memory\n", ++ long_options[option_index].name); ++ break; ++ } ++ ++ blob_cur->fn = optarg; ++ if (fh.num_pics) ++ blob_cur->header.picture_id = fh.num_pics - 1; ++ ++ if (!blob_first) ++ blob_first = blob_cur; ++ if (blob_last) ++ blob_last->next = blob_cur; ++ blob_last = blob_cur; ++ fh.num_blobs++; ++ break; ++ ++ case 30001: /* blob_type */ ++ if (!blob_cur) { ++ fprintf(stderr, "--%s: No blob selected\n", ++ long_options[option_index].name); ++ break; ++ } ++ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 255); ++ blob_cur->header.type = ul; ++ break; ++ ++ case 30002: /* blob_picture_id */ ++ if (!blob_cur) { ++ fprintf(stderr, "--%s: No blob selected\n", ++ long_options[option_index].name); ++ break; ++ } ++ ++ ul = strtoul(optarg, NULL, 0); ++ OPT_MAX(ul, 255); ++ blob_cur->header.picture_id = ul; ++ break; ++ ++ ++ ++ case 'h': ++ case '?': ++ default: ++ print_help(argv[0]); ++ goto EXIT; ++ } /* switch (c) */ ++ } /* while ((c = getopt_long(...)) != -1) */ ++ ++ /* Consume and drop lone arguments */ ++ while (optind < argc) { ++ ofn = argv[optind]; ++ optind++; ++ } ++ ++ ++ /* Read file lengths */ ++ for (blob_cur = blob_first; blob_cur; blob_cur = blob_cur->next) { ++ FILE *f; ++ long pos; ++ int i; ++ ++ if (!blob_cur->fn) ++ continue; ++ ++ f = fopen(blob_cur->fn, "rb"); ++ if (!f) ++ goto ERR_FILE_LEN; ++ ++ if (fseek(f, 0, SEEK_END)) ++ goto ERR_FILE_LEN; ++ ++ pos = ftell(f); ++ if (pos < 0 || pos > (1 << 30)) ++ goto ERR_FILE_LEN; ++ ++ blob_cur->header.length = pos; ++ ++ fclose(f); ++ continue; ++ ++ERR_FILE_LEN: ++ fprintf(stderr, "Error getting file length (or too long): %s\n", ++ blob_cur->fn); ++ if (f) ++ fclose(f); ++ continue; ++ } ++ ++ ++ /* Set magic headers */ ++#if __BYTE_ORDER == __BIG_ENDIAN ++ memcpy(&fh.id[0], BOOTSPLASH_MAGIC_BE, 16); ++#elif __BYTE_ORDER == __LITTLE_ENDIAN ++ memcpy(&fh.id[0], BOOTSPLASH_MAGIC_LE, 16); ++#else ++#error ++#endif ++ fh.version = BOOTSPLASH_VERSION; ++ ++ /* Set blob counts */ ++ for (blob_cur = blob_first; blob_cur; blob_cur = blob_cur->next) { ++ if (blob_cur->header.picture_id < fh.num_pics) ++ ph[blob_cur->header.picture_id].num_blobs++; ++ } ++ ++ ++ /* Dump structs */ ++ dump_file_header(&fh); ++ ++ for (ul = 0; ul < fh.num_pics; ul++) ++ dump_pic_header(&ph[ul]); ++ ++ for (blob_cur = blob_first; blob_cur; blob_cur = blob_cur->next) ++ dump_blob(blob_cur); ++ ++ ++ /* Write to file */ ++ printf("Writing splash to file: %s\n", ofn); ++ of = fopen(ofn, "wb"); ++ if (!of) ++ goto ERR_WRITING; ++ ++ if (fwrite(&fh, sizeof(struct splash_file_header), 1, of) != 1) ++ goto ERR_WRITING; ++ ++ for (ul = 0; ul < fh.num_pics; ul++) { ++ if (fwrite(&ph[ul], sizeof(struct splash_pic_header), 1, of) ++ != 1) ++ goto ERR_WRITING; ++ } ++ ++ blob_cur = blob_first; ++ 
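++ /*
++  * (Editorial aside, hypothetical file names) Per the option table in
++  * print_help() above, packing a single static 300x100 logo would look
++  * like:
++  *
++  *   ./bootsplash-packer --bg_red 0x28 --bg_green 0x28 --bg_blue 0x28 \
++  *       --picture --pic_width 300 --pic_height 100 \
++  *       --blob logo.rgb --blob_type 0 \
++  *       mytheme
++  *
++  * where logo.rgb holds 300 * 100 * 3 bytes of raw 24bpp pixel data,
++  * matching the kernel's type-0 length check.
++  */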
while (blob_cur) { ++ struct blob_entry *blob_old = blob_cur; ++ FILE *f; ++ char *buf[256]; ++ uint32_t left; ++ ++ if (fwrite(&blob_cur->header, ++ sizeof(struct splash_blob_header), 1, of) != 1) ++ goto ERR_WRITING; ++ ++ if (!blob_cur->header.length || !blob_cur->fn) ++ continue; ++ ++ f = fopen(blob_cur->fn, "rb"); ++ if (!f) ++ goto ERR_FILE_COPY; ++ ++ left = blob_cur->header.length; ++ while (left >= sizeof(buf)) { ++ if (fread(buf, sizeof(buf), 1, f) != 1) ++ goto ERR_FILE_COPY; ++ if (fwrite(buf, sizeof(buf), 1, of) != 1) ++ goto ERR_FILE_COPY; ++ left -= sizeof(buf); ++ } ++ if (left) { ++ if (fread(buf, left, 1, f) != 1) ++ goto ERR_FILE_COPY; ++ if (fwrite(buf, left, 1, of) != 1) ++ goto ERR_FILE_COPY; ++ } ++ ++ /* Pad data stream to 16 bytes */ ++ if (left % 16) { ++ if (fwrite("\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0", ++ 16 - (left % 16), 1, of) != 1) ++ goto ERR_FILE_COPY; ++ } ++ ++ fclose(f); ++ blob_cur = blob_cur->next; ++ free(blob_old); ++ continue; ++ ++ERR_FILE_COPY: ++ if (f) ++ fclose(f); ++ goto ERR_WRITING; ++ } ++ ++ fclose(of); ++ ++EXIT: ++ return EXIT_SUCCESS; ++ ++ ++ERR_WRITING: ++ fprintf(stderr, "Error writing splash.\n"); ++ fprintf(stderr, "The output file is probably corrupt.\n"); ++ if (of) ++ fclose(of); ++ ++ while (blob_cur) { ++ struct blob_entry *blob_old = blob_cur; ++ ++ blob_cur = blob_cur->next; ++ free(blob_old); ++ } ++ ++ return EXIT_FAILURE; ++} diff --git a/sys-kernel/pinephone-pro-sources/files/1500_XATTR_USER_PREFIX.patch b/sys-kernel/pinephone-pro-sources/files/1500_XATTR_USER_PREFIX.patch new file mode 100644 index 0000000..245dcc2 --- /dev/null +++ b/sys-kernel/pinephone-pro-sources/files/1500_XATTR_USER_PREFIX.patch @@ -0,0 +1,67 @@ +From: Anthony G. Basile + +This patch adds support for a restricted user-controlled namespace on +tmpfs filesystem used to house PaX flags. The namespace must be of the +form user.pax.* and its value cannot exceed a size of 8 bytes. + +This is needed even on all Gentoo systems so that XATTR_PAX flags +are preserved for users who might build packages using portage on +a tmpfs system with a non-hardened kernel and then switch to a +hardened kernel with XATTR_PAX enabled. + +The namespace is added to any user with Extended Attribute support +enabled for tmpfs. Users who do not enable xattrs will not have +the XATTR_PAX flags preserved. + +diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h +index 1590c49..5eab462 100644 +--- a/include/uapi/linux/xattr.h ++++ b/include/uapi/linux/xattr.h +@@ -73,5 +73,9 @@ + #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default" + #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT + ++/* User namespace */ ++#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax." 
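++/* (Editorial note) XATTR_USER_PREFIX is "user.", so XATTR_NAME_PAX_FLAGS
++ * below expands to the full attribute name "user.pax.flags". */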
++#define XATTR_PAX_FLAGS_SUFFIX "flags" ++#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX + + #endif /* _UAPI_LINUX_XATTR_H */ +--- a/mm/shmem.c 2020-05-04 15:30:27.042035334 -0400 ++++ b/mm/shmem.c 2020-05-04 15:34:57.013881725 -0400 +@@ -3238,6 +3238,14 @@ static int shmem_xattr_handler_set(const + struct shmem_inode_info *info = SHMEM_I(inode); + + name = xattr_full_name(handler, name); ++ ++ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) { ++ if (strcmp(name, XATTR_NAME_PAX_FLAGS)) ++ return -EOPNOTSUPP; ++ if (size > 8) ++ return -EINVAL; ++ } ++ + return simple_xattr_set(&info->xattrs, name, value, size, flags, NULL); + } + +@@ -3253,6 +3261,12 @@ static const struct xattr_handler shmem_ + .set = shmem_xattr_handler_set, + }; + ++static const struct xattr_handler shmem_user_xattr_handler = { ++ .prefix = XATTR_USER_PREFIX, ++ .get = shmem_xattr_handler_get, ++ .set = shmem_xattr_handler_set, ++}; ++ + static const struct xattr_handler *shmem_xattr_handlers[] = { + #ifdef CONFIG_TMPFS_POSIX_ACL + &posix_acl_access_xattr_handler, +@@ -3260,6 +3274,7 @@ static const struct xattr_handler *shmem + #endif + &shmem_security_xattr_handler, + &shmem_trusted_xattr_handler, ++ &shmem_user_xattr_handler, + NULL + }; + diff --git a/sys-kernel/pinephone-pro-sources/files/1510_fs-enable-link-security-restrictions-by-default.patch b/sys-kernel/pinephone-pro-sources/files/1510_fs-enable-link-security-restrictions-by-default.patch new file mode 100644 index 0000000..e8c3015 --- /dev/null +++ b/sys-kernel/pinephone-pro-sources/files/1510_fs-enable-link-security-restrictions-by-default.patch @@ -0,0 +1,17 @@ +--- a/fs/namei.c 2022-01-23 13:02:27.876558299 -0500 ++++ b/fs/namei.c 2022-03-06 12:47:39.375719693 -0500 +@@ -1020,10 +1020,10 @@ static inline void put_link(struct namei + path_put(&last->link); + } + +-static int sysctl_protected_symlinks __read_mostly; +-static int sysctl_protected_hardlinks __read_mostly; +-static int sysctl_protected_fifos __read_mostly; +-static int sysctl_protected_regular __read_mostly; ++static int sysctl_protected_symlinks __read_mostly = 1; ++static int sysctl_protected_hardlinks __read_mostly = 1; ++int sysctl_protected_fifos __read_mostly = 1; ++int sysctl_protected_regular __read_mostly = 1; + + #ifdef CONFIG_SYSCTL + static struct ctl_table namei_sysctls[] = { diff --git a/sys-kernel/pinephone-pro-sources/files/1700_sparc-address-warray-bound-warnings.patch b/sys-kernel/pinephone-pro-sources/files/1700_sparc-address-warray-bound-warnings.patch new file mode 100644 index 0000000..f939355 --- /dev/null +++ b/sys-kernel/pinephone-pro-sources/files/1700_sparc-address-warray-bound-warnings.patch @@ -0,0 +1,17 @@ +--- a/arch/sparc/mm/init_64.c 2022-05-24 16:48:40.749677491 -0400 ++++ b/arch/sparc/mm/init_64.c 2022-05-24 16:55:15.511356945 -0400 +@@ -3052,11 +3052,11 @@ static inline resource_size_t compute_ke + static void __init kernel_lds_init(void) + { + code_resource.start = compute_kern_paddr(_text); +- code_resource.end = compute_kern_paddr(_etext - 1); ++ code_resource.end = compute_kern_paddr(_etext) - 1; + data_resource.start = compute_kern_paddr(_etext); +- data_resource.end = compute_kern_paddr(_edata - 1); ++ data_resource.end = compute_kern_paddr(_edata) - 1; + bss_resource.start = compute_kern_paddr(__bss_start); +- bss_resource.end = compute_kern_paddr(_end - 1); ++ bss_resource.end = compute_kern_paddr(_end) - 1; + } + + static int __init report_memory(void) diff --git 
a/sys-kernel/pinephone-pro-sources/files/2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch b/sys-kernel/pinephone-pro-sources/files/2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch new file mode 100644 index 0000000..394ad48 --- /dev/null +++ b/sys-kernel/pinephone-pro-sources/files/2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch @@ -0,0 +1,37 @@ +The encryption is only mandatory to be enforced when both sides are using +Secure Simple Pairing and this means the key size check makes only sense +in that case. + +On legacy Bluetooth 2.0 and earlier devices like mice the encryption was +optional and thus causing an issue if the key size check is not bound to +using Secure Simple Pairing. + +Fixes: d5bb334a8e17 ("Bluetooth: Align minimum encryption key size for LE and BR/EDR connections") +Signed-off-by: Marcel Holtmann +Cc: stable@vger.kernel.org +--- + net/bluetooth/hci_conn.c | 9 +++++++-- + 1 file changed, 7 insertions(+), 2 deletions(-) + +diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c +index 3cf0764d5793..7516cdde3373 100644 +--- a/net/bluetooth/hci_conn.c ++++ b/net/bluetooth/hci_conn.c +@@ -1272,8 +1272,13 @@ int hci_conn_check_link_mode(struct hci_conn *conn) + return 0; + } + +- if (hci_conn_ssp_enabled(conn) && +- !test_bit(HCI_CONN_ENCRYPT, &conn->flags)) ++ /* If Secure Simple Pairing is not enabled, then legacy connection ++ * setup is used and no encryption or key sizes can be enforced. ++ */ ++ if (!hci_conn_ssp_enabled(conn)) ++ return 1; ++ ++ if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags)) + return 0; + + /* The minimum encryption key size needs to be enforced by the +-- +2.20.1 diff --git a/sys-kernel/pinephone-pro-sources/files/2900_tmp513-Fix-build-issue-by-selecting-CONFIG_REG.patch b/sys-kernel/pinephone-pro-sources/files/2900_tmp513-Fix-build-issue-by-selecting-CONFIG_REG.patch new file mode 100644 index 0000000..4335685 --- /dev/null +++ b/sys-kernel/pinephone-pro-sources/files/2900_tmp513-Fix-build-issue-by-selecting-CONFIG_REG.patch @@ -0,0 +1,30 @@ +From dc328d75a6f37f4ff11a81ae16b1ec88c3197640 Mon Sep 17 00:00:00 2001 +From: Mike Pagano +Date: Mon, 23 Mar 2020 08:20:06 -0400 +Subject: [PATCH 1/1] This driver requires REGMAP_I2C to build. Select it by + default in Kconfig. Reported at gentoo bugzilla: + https://bugs.gentoo.org/710790 +Cc: mpagano@gentoo.org + +Reported-by: Phil Stracchino + +Signed-off-by: Mike Pagano +--- + drivers/hwmon/Kconfig | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig +index 47ac20aee06f..530b4f29ba85 100644 +--- a/drivers/hwmon/Kconfig ++++ b/drivers/hwmon/Kconfig +@@ -1769,6 +1769,7 @@ config SENSORS_TMP421 + config SENSORS_TMP513 + tristate "Texas Instruments TMP513 and compatibles" + depends on I2C ++ select REGMAP_I2C + help + If you say yes here you get support for Texas Instruments TMP512, + and TMP513 temperature and power supply sensor chips. 
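Why a missing select REGMAP_I2C shows up as a build failure rather than a runtime error: the driver calls into the regmap-i2c helpers, and those symbols only exist when that option is built. The sketch below is a minimal, hypothetical hwmon-style probe, not the in-tree tmp513 code (the demo_* names are invented); the devm_regmap_init_i2c() call is the dependency the Kconfig hunk above satisfies:

  /* Illustrative only -- not the in-tree tmp513 driver. */
  #include <linux/i2c.h>
  #include <linux/module.h>
  #include <linux/regmap.h>

  static const struct regmap_config demo_regmap_config = {
          .reg_bits = 8,   /* 8-bit register addresses, common on sensors */
          .val_bits = 16,  /* 16-bit register values */
  };

  static int demo_i2c_probe(struct i2c_client *client)
  {
          struct regmap *regmap;

          /* This helper only links when CONFIG_REGMAP_I2C is enabled,
           * which is exactly what `select REGMAP_I2C` guarantees. */
          regmap = devm_regmap_init_i2c(client, &demo_regmap_config);
          if (IS_ERR(regmap))
                  return PTR_ERR(regmap);

          dev_info(&client->dev, "probed with regmap-i2c backend\n");
          return 0;
  }

  static const struct i2c_device_id demo_id[] = {
          { "demo-sensor", 0 },
          { }
  };
  MODULE_DEVICE_TABLE(i2c, demo_id);

  static struct i2c_driver demo_driver = {
          .driver   = { .name = "demo-sensor" },
          .probe_new = demo_i2c_probe,
          .id_table  = demo_id,
  };
  module_i2c_driver(demo_driver);
  MODULE_LICENSE("GPL");

Using `select` rather than `depends on` here is deliberate: the driver is useless without the regmap backend, so the option is pulled in automatically instead of being silently hidden in menuconfig.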
+-- +2.24.1 + diff --git a/sys-kernel/pinephone-pro-sources/files/2920_sign-file-patch-for-libressl.patch b/sys-kernel/pinephone-pro-sources/files/2920_sign-file-patch-for-libressl.patch new file mode 100644 index 0000000..e6ec017 --- /dev/null +++ b/sys-kernel/pinephone-pro-sources/files/2920_sign-file-patch-for-libressl.patch @@ -0,0 +1,16 @@ +--- a/scripts/sign-file.c 2020-05-20 18:47:21.282820662 -0400 ++++ b/scripts/sign-file.c 2020-05-20 18:48:37.991081899 -0400 +@@ -41,9 +41,10 @@ + * signing with anything other than SHA1 - so we're stuck with that if such is + * the case. + */ +-#if defined(LIBRESSL_VERSION_NUMBER) || \ +- OPENSSL_VERSION_NUMBER < 0x10000000L || \ +- defined(OPENSSL_NO_CMS) ++#if defined(OPENSSL_NO_CMS) || \ ++ ( defined(LIBRESSL_VERSION_NUMBER) \ ++ && (LIBRESSL_VERSION_NUMBER < 0x3010000fL) ) || \ ++ OPENSSL_VERSION_NUMBER < 0x10000000L + #define USE_PKCS7 + #endif + #ifndef USE_PKCS7 diff --git a/sys-kernel/pinephone-pro-sources/files/3000_Support-printing-firmware-info.patch b/sys-kernel/pinephone-pro-sources/files/3000_Support-printing-firmware-info.patch new file mode 100644 index 0000000..a630cfb --- /dev/null +++ b/sys-kernel/pinephone-pro-sources/files/3000_Support-printing-firmware-info.patch @@ -0,0 +1,14 @@ +--- a/drivers/base/firmware_loader/main.c 2021-08-24 15:42:07.025482085 -0400 ++++ b/drivers/base/firmware_loader/main.c 2021-08-24 15:44:40.782975313 -0400 +@@ -809,6 +809,11 @@ _request_firmware(const struct firmware + + ret = _request_firmware_prepare(&fw, name, device, buf, size, + offset, opt_flags); ++ ++#ifdef CONFIG_GENTOO_PRINT_FIRMWARE_INFO ++ printk(KERN_NOTICE "Loading firmware: %s\n", name); ++#endif ++ + if (ret <= 0) /* error or already assigned */ + goto out; + diff --git a/sys-kernel/pinephone-pro-sources/files/4567_distro-Gentoo-Kconfig.patch b/sys-kernel/pinephone-pro-sources/files/4567_distro-Gentoo-Kconfig.patch new file mode 100644 index 0000000..0a38098 --- /dev/null +++ b/sys-kernel/pinephone-pro-sources/files/4567_distro-Gentoo-Kconfig.patch @@ -0,0 +1,341 @@ +--- a/Kconfig 2022-05-11 13:20:07.110347567 -0400 ++++ b/Kconfig 2022-05-11 13:21:12.127174393 -0400 +@@ -30,3 +30,5 @@ source "lib/Kconfig" + source "lib/Kconfig.debug" + + source "Documentation/Kconfig" ++ ++source "distro/Kconfig" +--- /dev/null 2022-05-10 13:47:17.750578524 -0400 ++++ b/distro/Kconfig 2022-05-11 13:21:20.540529032 -0400 +@@ -0,0 +1,290 @@ ++menu "Gentoo Linux" ++ ++config GENTOO_LINUX ++ bool "Gentoo Linux support" ++ ++ default y ++ ++ select CPU_FREQ_DEFAULT_GOV_SCHEDUTIL ++ ++ help ++ In order to boot Gentoo Linux a minimal set of config settings needs to ++ be enabled in the kernel; to avoid the users from having to enable them ++ manually as part of a Gentoo Linux installation or a new clean config, ++ we enable these config settings by default for convenience. ++ ++ See the settings that become available for more details and fine-tuning. ++ ++config GENTOO_LINUX_UDEV ++ bool "Linux dynamic and persistent device naming (userspace devfs) support" ++ ++ depends on GENTOO_LINUX ++ default y if GENTOO_LINUX ++ ++ select DEVTMPFS ++ select TMPFS ++ select UNIX ++ ++ select MMU ++ select SHMEM ++ ++ help ++ In order to boot Gentoo Linux a minimal set of config settings needs to ++ be enabled in the kernel; to avoid the users from having to enable them ++ manually as part of a Gentoo Linux installation or a new clean config, ++ we enable these config settings by default for convenience. 
++ ++ Currently this only selects TMPFS, DEVTMPFS and their dependencies. ++ TMPFS is enabled to maintain a tmpfs file system at /dev/shm, /run and ++ /sys/fs/cgroup; DEVTMPFS to maintain a devtmpfs file system at /dev. ++ ++ Some of these are critical files that need to be available early in the ++ boot process; if not available, it causes sysfs and udev to malfunction. ++ ++ To ensure Gentoo Linux boots, it is best to leave this setting enabled; ++ if you run a custom setup, you could consider whether to disable this. ++ ++config GENTOO_LINUX_PORTAGE ++ bool "Select options required by Portage features" ++ ++ depends on GENTOO_LINUX ++ default y if GENTOO_LINUX ++ ++ select CGROUPS ++ select NAMESPACES ++ select IPC_NS ++ select NET_NS ++ select PID_NS ++ select SYSVIPC ++ select USER_NS ++ select UTS_NS ++ ++ help ++ This enables options required by various Portage FEATURES. ++ Currently this selects: ++ ++ CGROUPS (required for FEATURES=cgroup) ++ IPC_NS (required for FEATURES=ipc-sandbox) ++ NET_NS (required for FEATURES=network-sandbox) ++ PID_NS (required for FEATURES=pid-sandbox) ++ SYSVIPC (required by IPC_NS) ++ ++ ++ It is highly recommended that you leave this enabled as these FEATURES ++ are, or will soon be, enabled by default. ++ ++menu "Support for init systems, system and service managers" ++ visible if GENTOO_LINUX ++ ++config GENTOO_LINUX_INIT_SCRIPT ++ bool "OpenRC, runit and other script based systems and managers" ++ ++ default y if GENTOO_LINUX ++ ++ depends on GENTOO_LINUX ++ ++ select BINFMT_SCRIPT ++ select CGROUPS ++ select EPOLL ++ select FILE_LOCKING ++ select INOTIFY_USER ++ select SIGNALFD ++ select TIMERFD ++ ++ help ++ The init system is the first thing that loads after the kernel booted. ++ ++ These config settings allow you to select which init systems to support; ++ instead of having to select all the individual settings all over the ++ place, these settings allows you to select all the settings at once. ++ ++ This particular setting enables all the known requirements for OpenRC, ++ runit and similar script based systems and managers. ++ ++ If you are unsure about this, it is best to leave this setting enabled. ++ ++config GENTOO_LINUX_INIT_SYSTEMD ++ bool "systemd" ++ ++ default n ++ ++ depends on GENTOO_LINUX && GENTOO_LINUX_UDEV ++ ++ select AUTOFS_FS ++ select BLK_DEV_BSG ++ select BPF_SYSCALL ++ select CGROUP_BPF ++ select CGROUPS ++ select CRYPTO_HMAC ++ select CRYPTO_SHA256 ++ select CRYPTO_USER_API_HASH ++ select DEVPTS_MULTIPLE_INSTANCES ++ select DMIID if X86_32 || X86_64 || X86 ++ select EPOLL ++ select FANOTIFY ++ select FHANDLE ++ select FILE_LOCKING ++ select INOTIFY_USER ++ select IPV6 ++ select KCMP ++ select NET ++ select NET_NS ++ select PROC_FS ++ select SECCOMP if HAVE_ARCH_SECCOMP ++ select SECCOMP_FILTER if HAVE_ARCH_SECCOMP_FILTER ++ select SIGNALFD ++ select SYSFS ++ select TIMERFD ++ select TMPFS_POSIX_ACL ++ select TMPFS_XATTR ++ ++ select ANON_INODES ++ select BLOCK ++ select EVENTFD ++ select FSNOTIFY ++ select INET ++ select NLATTR ++ ++ help ++ The init system is the first thing that loads after the kernel booted. ++ ++ These config settings allow you to select which init systems to support; ++ instead of having to select all the individual settings all over the ++ place, these settings allows you to select all the settings at once. ++ ++ This particular setting enables all the known requirements for systemd; ++ it also enables suggested optional settings, as the package suggests to. 
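To see why options such as EPOLL, TIMERFD and SIGNALFD are hard requirements rather than suggestions, note that an init system's event loop is built directly on those system calls; on a kernel compiled without them the calls fail with ENOSYS at runtime. A minimal userspace sketch (illustrative only, not systemd code) exercising two of the selected interfaces:

  /* Probe two of the kernel interfaces selected above: TIMERFD and EPOLL. */
  #include <stdio.h>
  #include <stdint.h>
  #include <stdlib.h>
  #include <unistd.h>
  #include <sys/epoll.h>
  #include <sys/timerfd.h>

  int main(void)
  {
          struct itimerspec its = { .it_value = { .tv_sec = 1 } };
          struct epoll_event ev = { .events = EPOLLIN };
          uint64_t expirations;
          int tfd, epfd;

          /* Fails with ENOSYS on a kernel built without CONFIG_TIMERFD. */
          tfd = timerfd_create(CLOCK_MONOTONIC, 0);
          if (tfd < 0) { perror("timerfd_create"); return EXIT_FAILURE; }
          if (timerfd_settime(tfd, 0, &its, NULL) < 0) {
                  perror("timerfd_settime");
                  return EXIT_FAILURE;
          }

          /* EPOLL is likewise its own Kconfig option. */
          epfd = epoll_create1(0);
          if (epfd < 0) { perror("epoll_create1"); return EXIT_FAILURE; }

          ev.data.fd = tfd;
          if (epoll_ctl(epfd, EPOLL_CTL_ADD, tfd, &ev) < 0) {
                  perror("epoll_ctl");
                  return EXIT_FAILURE;
          }

          /* Wait for the one-second timer to expire, then read the count. */
          if (epoll_wait(epfd, &ev, 1, -1) == 1 &&
              read(tfd, &expirations, sizeof(expirations)) == sizeof(expirations))
                  printf("timer fired %llu time(s)\n",
                         (unsigned long long)expirations);

          close(tfd);
          close(epfd);
          return 0;
  }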
++ ++endmenu ++ ++menuconfig GENTOO_KERNEL_SELF_PROTECTION ++ bool "Kernel Self Protection Project" ++ depends on GENTOO_LINUX ++ help ++ Recommended Kernel settings based on the suggestions from the Kernel Self Protection Project ++ See: https://kernsec.org/wiki/index.php/Kernel_Self_Protection_Project/Recommended_Settings ++ Note, there may be additional settings for which the CONFIG_ setting is invisible in menuconfig due ++ to unmet dependencies. Search for GENTOO_KERNEL_SELF_PROTECTION_COMMON and search for ++ GENTOO_KERNEL_SELF_PROTECTION_{X86_64, ARM64, X86_32, ARM} for dependency information on your ++ specific architecture. ++ Note 2: Please see the URL above for numeric settings, e.g. CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 ++ for X86_64 ++ ++if GENTOO_KERNEL_SELF_PROTECTION ++config GENTOO_KERNEL_SELF_PROTECTION_COMMON ++ bool "Enable Kernel Self Protection Project Recommendations" ++ ++ depends on GENTOO_LINUX && !ACPI_CUSTOM_METHOD && !COMPAT_BRK && !PROC_KCORE && !COMPAT_VDSO && !KEXEC && !HIBERNATION && !LEGACY_PTYS && !X86_X32 && !MODIFY_LDT_SYSCALL && GCC_PLUGINS && !IOMMU_DEFAULT_DMA_LAZY && !IOMMU_DEFAULT_PASSTHROUGH && IOMMU_DEFAULT_DMA_STRICT ++ ++ select BUG ++ select STRICT_KERNEL_RWX ++ select DEBUG_WX ++ select STACKPROTECTOR ++ select STACKPROTECTOR_STRONG ++ select STRICT_DEVMEM if DEVMEM=y ++ select IO_STRICT_DEVMEM if DEVMEM=y ++ select SYN_COOKIES ++ select DEBUG_CREDENTIALS ++ select DEBUG_NOTIFIERS ++ select DEBUG_LIST ++ select DEBUG_SG ++ select HARDENED_USERCOPY if HAVE_HARDENED_USERCOPY_ALLOCATOR=y ++ select KFENCE if HAVE_ARCH_KFENCE && (!SLAB || SLUB) ++ select RANDOMIZE_KSTACK_OFFSET_DEFAULT if HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET && (INIT_STACK_NONE || !CC_IS_CLANG || CLANG_VERSION>=140000) ++ select SCHED_CORE if SCHED_SMT ++ select BUG_ON_DATA_CORRUPTION ++ select SCHED_STACK_END_CHECK ++ select SECCOMP if HAVE_ARCH_SECCOMP ++ select SECCOMP_FILTER if HAVE_ARCH_SECCOMP_FILTER ++ select SECURITY_YAMA ++ select SLAB_FREELIST_RANDOM ++ select SLAB_FREELIST_HARDENED ++ select SHUFFLE_PAGE_ALLOCATOR ++ select SLUB_DEBUG ++ select PAGE_POISONING ++ select PAGE_POISONING_NO_SANITY ++ select PAGE_POISONING_ZERO ++ select INIT_ON_ALLOC_DEFAULT_ON ++ select INIT_ON_FREE_DEFAULT_ON ++ select REFCOUNT_FULL ++ select FORTIFY_SOURCE ++ select SECURITY_DMESG_RESTRICT ++ select PANIC_ON_OOPS ++ select GCC_PLUGIN_LATENT_ENTROPY ++ select GCC_PLUGIN_STRUCTLEAK ++ select GCC_PLUGIN_STRUCTLEAK_BYREF_ALL ++ select GCC_PLUGIN_RANDSTRUCT ++ select GCC_PLUGIN_RANDSTRUCT_PERFORMANCE ++ select ZERO_CALL_USED_REGS if CC_HAS_ZERO_CALL_USED_REGS ++ ++ help ++ Search for GENTOO_KERNEL_SELF_PROTECTION_{X86_64, ARM64, X86_32, ARM} for dependency ++ information on your specific architecture. Note 2: Please see the URL above for ++ numeric settings, e.g. 
CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 for X86_64
++
++config GENTOO_KERNEL_SELF_PROTECTION_X86_64
++ bool "X86_64 KSPP Settings" if GENTOO_KERNEL_SELF_PROTECTION_COMMON
++
++ depends on !X86_MSR && X86_64 && GENTOO_KERNEL_SELF_PROTECTION
++ default n
++
++ select RANDOMIZE_BASE
++ select RANDOMIZE_MEMORY
++ select RELOCATABLE
++ select LEGACY_VSYSCALL_NONE
++ select PAGE_TABLE_ISOLATION
++ select GCC_PLUGIN_STACKLEAK
++ select VMAP_STACK
++
++
++config GENTOO_KERNEL_SELF_PROTECTION_ARM64
++ bool "ARM64 KSPP Settings"
++
++ depends on ARM64
++ default n
++
++ select RANDOMIZE_BASE
++ select RELOCATABLE
++ select ARM64_SW_TTBR0_PAN
++ select UNMAP_KERNEL_AT_EL0
++ select GCC_PLUGIN_STACKLEAK
++ select VMAP_STACK
++
++config GENTOO_KERNEL_SELF_PROTECTION_X86_32
++ bool "X86_32 KSPP Settings"
++
++ depends on !X86_MSR && !MODIFY_LDT_SYSCALL && !M486 && X86_32
++ default n
++
++ select HIGHMEM64G
++ select X86_PAE
++ select RANDOMIZE_BASE
++ select RELOCATABLE
++ select PAGE_TABLE_ISOLATION
++
++config GENTOO_KERNEL_SELF_PROTECTION_ARM
++ bool "ARM KSPP Settings"
++
++ depends on !OABI_COMPAT && ARM
++ default n
++
++ select VMSPLIT_3G
++ select STRICT_MEMORY_RWX
++ select CPU_SW_DOMAIN_PAN
++
++endif
++
++config GENTOO_PRINT_FIRMWARE_INFO
++ bool "Print firmware information that the kernel attempts to load"
++
++ depends on GENTOO_LINUX
++ default y
++
++ help
++ Enable this option to print information about firmware that the kernel
++ is attempting to load. This information is accessible via the
++ dmesg command-line utility.
++
++ See the settings that become available for more details and fine-tuning.
++
++endmenu
+diff --git a/security/selinux/Kconfig b/security/selinux/Kconfig
+index 9e921fc72..f29bc13fa 100644
+--- a/security/selinux/Kconfig
++++ b/security/selinux/Kconfig
+@@ -26,6 +26,7 @@ config SECURITY_SELINUX_BOOTPARAM
+ config SECURITY_SELINUX_DISABLE
+ bool "NSA SELinux runtime disable"
+ depends on SECURITY_SELINUX
++ depends on !GENTOO_KERNEL_SELF_PROTECTION
+ select SECURITY_WRITABLE_HOOKS
+ default n
+ help
+--
+2.31.1
+
+From bd3ff0b16792c18c0614c2b95e148943209f460a Mon Sep 17 00:00:00 2001
+From: Georgy Yakovlev
+Date: Tue, 8 Jun 2021 13:59:57 -0700
+Subject: [PATCH 2/2] set DEFAULT_MMAP_MIN_ADDR by default
+
+---
+ mm/Kconfig | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 24c045b24..e13fc740c 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -321,6 +321,8 @@ config KSM
+ config DEFAULT_MMAP_MIN_ADDR
+ int "Low address space to protect from user allocation"
+ depends on MMU
++ default 65536 if ( X86_64 || X86_32 || PPC64 || IA64 ) && GENTOO_KERNEL_SELF_PROTECTION
++ default 32768 if ( ARM64 || ARM ) && GENTOO_KERNEL_SELF_PROTECTION
+ default 4096
+ help
+ This is the portion of low virtual memory which should be protected
+--
+2.31.1
diff --git a/sys-kernel/pinephone-pro-sources/files/5010_enable-cpu-optimizations-universal.patch b/sys-kernel/pinephone-pro-sources/files/5010_enable-cpu-optimizations-universal.patch
new file mode 100644
index 0000000..b9c03cb
--- /dev/null
+++ b/sys-kernel/pinephone-pro-sources/files/5010_enable-cpu-optimizations-universal.patch
@@ -0,0 +1,675 @@
+From b5892719c43f739343c628e3d357471a3bdaa368 Mon Sep 17 00:00:00 2001
+From: graysky
+Date: Tue, 15 Mar 2022 05:58:43 -0400
+Subject: [PATCH] more uarches for kernel 5.17+
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+FEATURES
+This patch adds additional CPU options to the Linux kernel accessible
under: + Processor type and features ---> + Processor family ---> + +With the release of gcc 11.1 and clang 12.0, several generic 64-bit levels are +offered which are good for supported Intel or AMD CPUs: +• x86-64-v2 +• x86-64-v3 +• x86-64-v4 + +Users of glibc 2.33 and above can see which level is supported by current +hardware by running: + /lib/ld-linux-x86-64.so.2 --help | grep supported + +Alternatively, compare the flags from /proc/cpuinfo to this list.[1] + +CPU-specific microarchitectures include: +• AMD Improved K8-family +• AMD K10-family +• AMD Family 10h (Barcelona) +• AMD Family 14h (Bobcat) +• AMD Family 16h (Jaguar) +• AMD Family 15h (Bulldozer) +• AMD Family 15h (Piledriver) +• AMD Family 15h (Steamroller) +• AMD Family 15h (Excavator) +• AMD Family 17h (Zen) +• AMD Family 17h (Zen 2) +• AMD Family 19h (Zen 3)† +• Intel Silvermont low-power processors +• Intel Goldmont low-power processors (Apollo Lake and Denverton) +• Intel Goldmont Plus low-power processors (Gemini Lake) +• Intel 1st Gen Core i3/i5/i7 (Nehalem) +• Intel 1.5 Gen Core i3/i5/i7 (Westmere) +• Intel 2nd Gen Core i3/i5/i7 (Sandybridge) +• Intel 3rd Gen Core i3/i5/i7 (Ivybridge) +• Intel 4th Gen Core i3/i5/i7 (Haswell) +• Intel 5th Gen Core i3/i5/i7 (Broadwell) +• Intel 6th Gen Core i3/i5/i7 (Skylake) +• Intel 6th Gen Core i7/i9 (Skylake X) +• Intel 8th Gen Core i3/i5/i7 (Cannon Lake) +• Intel 10th Gen Core i7/i9 (Ice Lake) +• Intel Xeon (Cascade Lake) +• Intel Xeon (Cooper Lake)* +• Intel 3rd Gen 10nm++ i3/i5/i7/i9-family (Tiger Lake)* +• Intel 3rd Gen 10nm++ Xeon (Sapphire Rapids)‡ +• Intel 11th Gen i3/i5/i7/i9-family (Rocket Lake)‡ +• Intel 12th Gen i3/i5/i7/i9-family (Alder Lake)‡ + +Notes: If not otherwise noted, gcc >=9.1 is required for support. + *Requires gcc >=10.1 or clang >=10.0 + †Required gcc >=10.3 or clang >=12.0 + ‡Required gcc >=11.1 or clang >=12.0 + +It also offers to compile passing the 'native' option which, "selects the CPU +to generate code for at compilation time by determining the processor type of +the compiling machine. Using -march=native enables all instruction subsets +supported by the local machine and will produce code optimized for the local +machine under the constraints of the selected instruction set."[2] + +Users of Intel CPUs should select the 'Intel-Native' option and users of AMD +CPUs should select the 'AMD-Native' option. + +MINOR NOTES RELATING TO INTEL ATOM PROCESSORS +This patch also changes -march=atom to -march=bonnell in accordance with the +gcc v4.9 changes. Upstream is using the deprecated -match=atom flags when I +believe it should use the newer -march=bonnell flag for atom processors.[3] + +It is not recommended to compile on Atom-CPUs with the 'native' option.[4] The +recommendation is to use the 'atom' option instead. + +BENEFITS +Small but real speed increases are measurable using a make endpoint comparing +a generic kernel to one built with one of the respective microarchs. + +See the following experimental evidence supporting this statement: +https://github.com/graysky2/kernel_gcc_patch + +REQUIREMENTS +linux version 5.17+ +gcc version >=9.0 or clang version >=9.0 + +ACKNOWLEDGMENTS +This patch builds on the seminal work by Jeroen.[5] + +REFERENCES +1. https://gitlab.com/x86-psABIs/x86-64-ABI/-/commit/77566eb03bc6a326811cb7e9 +2. https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html#index-x86-Options +3. https://bugzilla.kernel.org/show_bug.cgi?id=77461 +4. https://github.com/graysky2/kernel_gcc_patch/issues/15 +5. 
http://www.linuxforge.net/docs/linux/linux-gcc.php + +Signed-off-by: graysky +--- + arch/x86/Kconfig.cpu | 332 ++++++++++++++++++++++++++++++-- + arch/x86/Makefile | 40 +++- + arch/x86/include/asm/vermagic.h | 66 +++++++ + 3 files changed, 424 insertions(+), 14 deletions(-) + +diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu +index 542377cd419d..22b919cdb6d1 100644 +--- a/arch/x86/Kconfig.cpu ++++ b/arch/x86/Kconfig.cpu +@@ -157,7 +157,7 @@ config MPENTIUM4 + + + config MK6 +- bool "K6/K6-II/K6-III" ++ bool "AMD K6/K6-II/K6-III" + depends on X86_32 + help + Select this for an AMD K6-family processor. Enables use of +@@ -165,7 +165,7 @@ config MK6 + flags to GCC. + + config MK7 +- bool "Athlon/Duron/K7" ++ bool "AMD Athlon/Duron/K7" + depends on X86_32 + help + Select this for an AMD Athlon K7-family processor. Enables use of +@@ -173,12 +173,98 @@ config MK7 + flags to GCC. + + config MK8 +- bool "Opteron/Athlon64/Hammer/K8" ++ bool "AMD Opteron/Athlon64/Hammer/K8" + help + Select this for an AMD Opteron or Athlon64 Hammer-family processor. + Enables use of some extended instructions, and passes appropriate + optimization flags to GCC. + ++config MK8SSE3 ++ bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3" ++ help ++ Select this for improved AMD Opteron or Athlon64 Hammer-family processors. ++ Enables use of some extended instructions, and passes appropriate ++ optimization flags to GCC. ++ ++config MK10 ++ bool "AMD 61xx/7x50/PhenomX3/X4/II/K10" ++ help ++ Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50, ++ Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor. ++ Enables use of some extended instructions, and passes appropriate ++ optimization flags to GCC. ++ ++config MBARCELONA ++ bool "AMD Barcelona" ++ help ++ Select this for AMD Family 10h Barcelona processors. ++ ++ Enables -march=barcelona ++ ++config MBOBCAT ++ bool "AMD Bobcat" ++ help ++ Select this for AMD Family 14h Bobcat processors. ++ ++ Enables -march=btver1 ++ ++config MJAGUAR ++ bool "AMD Jaguar" ++ help ++ Select this for AMD Family 16h Jaguar processors. ++ ++ Enables -march=btver2 ++ ++config MBULLDOZER ++ bool "AMD Bulldozer" ++ help ++ Select this for AMD Family 15h Bulldozer processors. ++ ++ Enables -march=bdver1 ++ ++config MPILEDRIVER ++ bool "AMD Piledriver" ++ help ++ Select this for AMD Family 15h Piledriver processors. ++ ++ Enables -march=bdver2 ++ ++config MSTEAMROLLER ++ bool "AMD Steamroller" ++ help ++ Select this for AMD Family 15h Steamroller processors. ++ ++ Enables -march=bdver3 ++ ++config MEXCAVATOR ++ bool "AMD Excavator" ++ help ++ Select this for AMD Family 15h Excavator processors. ++ ++ Enables -march=bdver4 ++ ++config MZEN ++ bool "AMD Zen" ++ help ++ Select this for AMD Family 17h Zen processors. ++ ++ Enables -march=znver1 ++ ++config MZEN2 ++ bool "AMD Zen 2" ++ help ++ Select this for AMD Family 17h Zen 2 processors. ++ ++ Enables -march=znver2 ++ ++config MZEN3 ++ bool "AMD Zen 3" ++ depends on (CC_IS_GCC && GCC_VERSION >= 100300) || (CC_IS_CLANG && CLANG_VERSION >= 120000) ++ help ++ Select this for AMD Family 19h Zen 3 processors. ++ ++ Enables -march=znver3 ++ + config MCRUSOE + bool "Crusoe" + depends on X86_32 +@@ -270,7 +356,7 @@ config MPSC + in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one. + + config MCORE2 +- bool "Core 2/newer Xeon" ++ bool "Intel Core 2" + help + + Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and +@@ -278,6 +364,8 @@ config MCORE2 + family in /proc/cpuinfo. 
Newer ones have 6 and older ones 15 + (not a typo) + ++ Enables -march=core2 ++ + config MATOM + bool "Intel Atom" + help +@@ -287,6 +375,182 @@ config MATOM + accordingly optimized code. Use a recent GCC with specific Atom + support in order to fully benefit from selecting this option. + ++config MNEHALEM ++ bool "Intel Nehalem" ++ select X86_P6_NOP ++ help ++ ++ Select this for 1st Gen Core processors in the Nehalem family. ++ ++ Enables -march=nehalem ++ ++config MWESTMERE ++ bool "Intel Westmere" ++ select X86_P6_NOP ++ help ++ ++ Select this for the Intel Westmere formerly Nehalem-C family. ++ ++ Enables -march=westmere ++ ++config MSILVERMONT ++ bool "Intel Silvermont" ++ select X86_P6_NOP ++ help ++ ++ Select this for the Intel Silvermont platform. ++ ++ Enables -march=silvermont ++ ++config MGOLDMONT ++ bool "Intel Goldmont" ++ select X86_P6_NOP ++ help ++ ++ Select this for the Intel Goldmont platform including Apollo Lake and Denverton. ++ ++ Enables -march=goldmont ++ ++config MGOLDMONTPLUS ++ bool "Intel Goldmont Plus" ++ select X86_P6_NOP ++ help ++ ++ Select this for the Intel Goldmont Plus platform including Gemini Lake. ++ ++ Enables -march=goldmont-plus ++ ++config MSANDYBRIDGE ++ bool "Intel Sandy Bridge" ++ select X86_P6_NOP ++ help ++ ++ Select this for 2nd Gen Core processors in the Sandy Bridge family. ++ ++ Enables -march=sandybridge ++ ++config MIVYBRIDGE ++ bool "Intel Ivy Bridge" ++ select X86_P6_NOP ++ help ++ ++ Select this for 3rd Gen Core processors in the Ivy Bridge family. ++ ++ Enables -march=ivybridge ++ ++config MHASWELL ++ bool "Intel Haswell" ++ select X86_P6_NOP ++ help ++ ++ Select this for 4th Gen Core processors in the Haswell family. ++ ++ Enables -march=haswell ++ ++config MBROADWELL ++ bool "Intel Broadwell" ++ select X86_P6_NOP ++ help ++ ++ Select this for 5th Gen Core processors in the Broadwell family. ++ ++ Enables -march=broadwell ++ ++config MSKYLAKE ++ bool "Intel Skylake" ++ select X86_P6_NOP ++ help ++ ++ Select this for 6th Gen Core processors in the Skylake family. ++ ++ Enables -march=skylake ++ ++config MSKYLAKEX ++ bool "Intel Skylake X" ++ select X86_P6_NOP ++ help ++ ++ Select this for 6th Gen Core processors in the Skylake X family. ++ ++ Enables -march=skylake-avx512 ++ ++config MCANNONLAKE ++ bool "Intel Cannon Lake" ++ select X86_P6_NOP ++ help ++ ++ Select this for 8th Gen Core processors ++ ++ Enables -march=cannonlake ++ ++config MICELAKE ++ bool "Intel Ice Lake" ++ select X86_P6_NOP ++ help ++ ++ Select this for 10th Gen Core processors in the Ice Lake family. ++ ++ Enables -march=icelake-client ++ ++config MCASCADELAKE ++ bool "Intel Cascade Lake" ++ select X86_P6_NOP ++ help ++ ++ Select this for Xeon processors in the Cascade Lake family. ++ ++ Enables -march=cascadelake ++ ++config MCOOPERLAKE ++ bool "Intel Cooper Lake" ++ depends on (CC_IS_GCC && GCC_VERSION > 100100) || (CC_IS_CLANG && CLANG_VERSION >= 100000) ++ select X86_P6_NOP ++ help ++ ++ Select this for Xeon processors in the Cooper Lake family. ++ ++ Enables -march=cooperlake ++ ++config MTIGERLAKE ++ bool "Intel Tiger Lake" ++ depends on (CC_IS_GCC && GCC_VERSION > 100100) || (CC_IS_CLANG && CLANG_VERSION >= 100000) ++ select X86_P6_NOP ++ help ++ ++ Select this for third-generation 10 nm process processors in the Tiger Lake family. 
++ ++ Enables -march=tigerlake ++ ++config MSAPPHIRERAPIDS ++ bool "Intel Sapphire Rapids" ++ depends on (CC_IS_GCC && GCC_VERSION > 110000) || (CC_IS_CLANG && CLANG_VERSION >= 120000) ++ select X86_P6_NOP ++ help ++ ++ Select this for third-generation 10 nm process processors in the Sapphire Rapids family. ++ ++ Enables -march=sapphirerapids ++ ++config MROCKETLAKE ++ bool "Intel Rocket Lake" ++ depends on (CC_IS_GCC && GCC_VERSION > 110000) || (CC_IS_CLANG && CLANG_VERSION >= 120000) ++ select X86_P6_NOP ++ help ++ ++ Select this for eleventh-generation processors in the Rocket Lake family. ++ ++ Enables -march=rocketlake ++ ++config MALDERLAKE ++ bool "Intel Alder Lake" ++ depends on (CC_IS_GCC && GCC_VERSION > 110000) || (CC_IS_CLANG && CLANG_VERSION >= 120000) ++ select X86_P6_NOP ++ help ++ ++ Select this for twelfth-generation processors in the Alder Lake family. ++ ++ Enables -march=alderlake ++ + config GENERIC_CPU + bool "Generic-x86-64" + depends on X86_64 +@@ -294,6 +558,50 @@ config GENERIC_CPU + Generic x86-64 CPU. + Run equally well on all x86-64 CPUs. + ++config GENERIC_CPU2 ++ bool "Generic-x86-64-v2" ++ depends on (CC_IS_GCC && GCC_VERSION > 110000) || (CC_IS_CLANG && CLANG_VERSION >= 120000) ++ depends on X86_64 ++ help ++ Generic x86-64 CPU. ++ Run equally well on all x86-64 CPUs with min support of x86-64-v2. ++ ++config GENERIC_CPU3 ++ bool "Generic-x86-64-v3" ++ depends on (CC_IS_GCC && GCC_VERSION > 110000) || (CC_IS_CLANG && CLANG_VERSION >= 120000) ++ depends on X86_64 ++ help ++ Generic x86-64-v3 CPU with v3 instructions. ++ Run equally well on all x86-64 CPUs with min support of x86-64-v3. ++ ++config GENERIC_CPU4 ++ bool "Generic-x86-64-v4" ++ depends on (CC_IS_GCC && GCC_VERSION > 110000) || (CC_IS_CLANG && CLANG_VERSION >= 120000) ++ depends on X86_64 ++ help ++ Generic x86-64 CPU with v4 instructions. ++ Run equally well on all x86-64 CPUs with min support of x86-64-v4. ++ ++config MNATIVE_INTEL ++ bool "Intel-Native optimizations autodetected by the compiler" ++ help ++ ++ Clang 3.8, GCC 4.2 and above support -march=native, which automatically detects ++ the optimum settings to use based on your processor. Do NOT use this ++ for AMD CPUs. Intel Only! ++ ++ Enables -march=native ++ ++config MNATIVE_AMD ++ bool "AMD-Native optimizations autodetected by the compiler" ++ help ++ ++ Clang 3.8, GCC 4.2 and above support -march=native, which automatically detects ++ the optimum settings to use based on your processor. Do NOT use this ++ for Intel CPUs. AMD Only! 
++ ++ Enables -march=native ++ + endchoice + + config X86_GENERIC +@@ -318,7 +626,7 @@ config X86_INTERNODE_CACHE_SHIFT + config X86_L1_CACHE_SHIFT + int + default "7" if MPENTIUM4 || MPSC +- default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU ++ default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD || X86_GENERIC || GENERIC_CPU || GENERIC_CPU2 || GENERIC_CPU3 || GENERIC_CPU4 + default "4" if MELAN || M486SX || M486 || MGEODEGX1 + default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX + +@@ -336,11 +644,11 @@ config X86_ALIGNMENT_16 + + config X86_INTEL_USERCOPY + def_bool y +- depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 ++ depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL + + config X86_USE_PPRO_CHECKSUM + def_bool y +- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM ++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD + + # + # P6_NOPs are a relatively minor optimization that require a family >= +@@ -356,26 +664,26 @@ config X86_USE_PPRO_CHECKSUM + config X86_P6_NOP + def_bool y + depends on X86_64 +- depends on (MCORE2 || MPENTIUM4 || MPSC) ++ depends on (MCORE2 || MPENTIUM4 || MPSC || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL) + + config X86_TSC + def_bool y +- depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64 ++ depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || 
MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD) || X86_64 + + config X86_CMPXCHG64 + def_bool y +- depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 ++ depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD + + # this should be set for all -march=.. options where the compiler + # generates cmov. + config X86_CMOV + def_bool y +- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) ++ depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD) + + config X86_MINIMUM_CPU_FAMILY + int + default "64" if X86_64 +- default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8) ++ default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MJAGUAR || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MZEN2 || MZEN3 || MNEHALEM || MWESTMERE || MSILVERMONT || MGOLDMONT || MGOLDMONTPLUS || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MSKYLAKEX || MCANNONLAKE || MICELAKE || MCASCADELAKE || MCOOPERLAKE || MTIGERLAKE || MSAPPHIRERAPIDS || MROCKETLAKE || MALDERLAKE || MNATIVE_INTEL || MNATIVE_AMD) + default "5" if X86_32 && X86_CMPXCHG64 + default "4" + +diff --git a/arch/x86/Makefile b/arch/x86/Makefile +index e84cdd409b64..7d3bbf060079 100644 +--- a/arch/x86/Makefile ++++ b/arch/x86/Makefile +@@ -131,8 +131,44 @@ else + # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu) + cflags-$(CONFIG_MK8) 
+= -march=k8 + cflags-$(CONFIG_MPSC) += -march=nocona +- cflags-$(CONFIG_MCORE2) += -march=core2 +- cflags-$(CONFIG_MATOM) += -march=atom ++ cflags-$(CONFIG_MK8SSE3) += -march=k8-sse3 ++ cflags-$(CONFIG_MK10) += -march=amdfam10 ++ cflags-$(CONFIG_MBARCELONA) += -march=barcelona ++ cflags-$(CONFIG_MBOBCAT) += -march=btver1 ++ cflags-$(CONFIG_MJAGUAR) += -march=btver2 ++ cflags-$(CONFIG_MBULLDOZER) += -march=bdver1 ++ cflags-$(CONFIG_MPILEDRIVER) += -march=bdver2 -mno-tbm ++ cflags-$(CONFIG_MSTEAMROLLER) += -march=bdver3 -mno-tbm ++ cflags-$(CONFIG_MEXCAVATOR) += -march=bdver4 -mno-tbm ++ cflags-$(CONFIG_MZEN) += -march=znver1 ++ cflags-$(CONFIG_MZEN2) += -march=znver2 ++ cflags-$(CONFIG_MZEN3) += -march=znver3 ++ cflags-$(CONFIG_MNATIVE_INTEL) += -march=native ++ cflags-$(CONFIG_MNATIVE_AMD) += -march=native ++ cflags-$(CONFIG_MATOM) += -march=bonnell ++ cflags-$(CONFIG_MCORE2) += -march=core2 ++ cflags-$(CONFIG_MNEHALEM) += -march=nehalem ++ cflags-$(CONFIG_MWESTMERE) += -march=westmere ++ cflags-$(CONFIG_MSILVERMONT) += -march=silvermont ++ cflags-$(CONFIG_MGOLDMONT) += -march=goldmont ++ cflags-$(CONFIG_MGOLDMONTPLUS) += -march=goldmont-plus ++ cflags-$(CONFIG_MSANDYBRIDGE) += -march=sandybridge ++ cflags-$(CONFIG_MIVYBRIDGE) += -march=ivybridge ++ cflags-$(CONFIG_MHASWELL) += -march=haswell ++ cflags-$(CONFIG_MBROADWELL) += -march=broadwell ++ cflags-$(CONFIG_MSKYLAKE) += -march=skylake ++ cflags-$(CONFIG_MSKYLAKEX) += -march=skylake-avx512 ++ cflags-$(CONFIG_MCANNONLAKE) += -march=cannonlake ++ cflags-$(CONFIG_MICELAKE) += -march=icelake-client ++ cflags-$(CONFIG_MCASCADELAKE) += -march=cascadelake ++ cflags-$(CONFIG_MCOOPERLAKE) += -march=cooperlake ++ cflags-$(CONFIG_MTIGERLAKE) += -march=tigerlake ++ cflags-$(CONFIG_MSAPPHIRERAPIDS) += -march=sapphirerapids ++ cflags-$(CONFIG_MROCKETLAKE) += -march=rocketlake ++ cflags-$(CONFIG_MALDERLAKE) += -march=alderlake ++ cflags-$(CONFIG_GENERIC_CPU2) += -march=x86-64-v2 ++ cflags-$(CONFIG_GENERIC_CPU3) += -march=x86-64-v3 ++ cflags-$(CONFIG_GENERIC_CPU4) += -march=x86-64-v4 + cflags-$(CONFIG_GENERIC_CPU) += -mtune=generic + KBUILD_CFLAGS += $(cflags-y) + +diff --git a/arch/x86/include/asm/vermagic.h b/arch/x86/include/asm/vermagic.h +index 75884d2cdec3..4e6a08d4c7e5 100644 +--- a/arch/x86/include/asm/vermagic.h ++++ b/arch/x86/include/asm/vermagic.h +@@ -17,6 +17,48 @@ + #define MODULE_PROC_FAMILY "586MMX " + #elif defined CONFIG_MCORE2 + #define MODULE_PROC_FAMILY "CORE2 " ++#elif defined CONFIG_MNATIVE_INTEL ++#define MODULE_PROC_FAMILY "NATIVE_INTEL " ++#elif defined CONFIG_MNATIVE_AMD ++#define MODULE_PROC_FAMILY "NATIVE_AMD " ++#elif defined CONFIG_MNEHALEM ++#define MODULE_PROC_FAMILY "NEHALEM " ++#elif defined CONFIG_MWESTMERE ++#define MODULE_PROC_FAMILY "WESTMERE " ++#elif defined CONFIG_MSILVERMONT ++#define MODULE_PROC_FAMILY "SILVERMONT " ++#elif defined CONFIG_MGOLDMONT ++#define MODULE_PROC_FAMILY "GOLDMONT " ++#elif defined CONFIG_MGOLDMONTPLUS ++#define MODULE_PROC_FAMILY "GOLDMONTPLUS " ++#elif defined CONFIG_MSANDYBRIDGE ++#define MODULE_PROC_FAMILY "SANDYBRIDGE " ++#elif defined CONFIG_MIVYBRIDGE ++#define MODULE_PROC_FAMILY "IVYBRIDGE " ++#elif defined CONFIG_MHASWELL ++#define MODULE_PROC_FAMILY "HASWELL " ++#elif defined CONFIG_MBROADWELL ++#define MODULE_PROC_FAMILY "BROADWELL " ++#elif defined CONFIG_MSKYLAKE ++#define MODULE_PROC_FAMILY "SKYLAKE " ++#elif defined CONFIG_MSKYLAKEX ++#define MODULE_PROC_FAMILY "SKYLAKEX " ++#elif defined CONFIG_MCANNONLAKE ++#define MODULE_PROC_FAMILY "CANNONLAKE " ++#elif defined 
CONFIG_MICELAKE
++#define MODULE_PROC_FAMILY "ICELAKE "
++#elif defined CONFIG_MCASCADELAKE
++#define MODULE_PROC_FAMILY "CASCADELAKE "
++#elif defined CONFIG_MCOOPERLAKE
++#define MODULE_PROC_FAMILY "COOPERLAKE "
++#elif defined CONFIG_MTIGERLAKE
++#define MODULE_PROC_FAMILY "TIGERLAKE "
++#elif defined CONFIG_MSAPPHIRERAPIDS
++#define MODULE_PROC_FAMILY "SAPPHIRERAPIDS "
++#elif defined CONFIG_MROCKETLAKE
++#define MODULE_PROC_FAMILY "ROCKETLAKE "
++#elif defined CONFIG_MALDERLAKE
++#define MODULE_PROC_FAMILY "ALDERLAKE "
+ #elif defined CONFIG_MATOM
+ #define MODULE_PROC_FAMILY "ATOM "
+ #elif defined CONFIG_M686
+@@ -35,6 +77,30 @@
+ #define MODULE_PROC_FAMILY "K7 "
+ #elif defined CONFIG_MK8
+ #define MODULE_PROC_FAMILY "K8 "
++#elif defined CONFIG_MK8SSE3
++#define MODULE_PROC_FAMILY "K8SSE3 "
++#elif defined CONFIG_MK10
++#define MODULE_PROC_FAMILY "K10 "
++#elif defined CONFIG_MBARCELONA
++#define MODULE_PROC_FAMILY "BARCELONA "
++#elif defined CONFIG_MBOBCAT
++#define MODULE_PROC_FAMILY "BOBCAT "
++#elif defined CONFIG_MBULLDOZER
++#define MODULE_PROC_FAMILY "BULLDOZER "
++#elif defined CONFIG_MPILEDRIVER
++#define MODULE_PROC_FAMILY "PILEDRIVER "
++#elif defined CONFIG_MSTEAMROLLER
++#define MODULE_PROC_FAMILY "STEAMROLLER "
++#elif defined CONFIG_MJAGUAR
++#define MODULE_PROC_FAMILY "JAGUAR "
++#elif defined CONFIG_MEXCAVATOR
++#define MODULE_PROC_FAMILY "EXCAVATOR "
++#elif defined CONFIG_MZEN
++#define MODULE_PROC_FAMILY "ZEN "
++#elif defined CONFIG_MZEN2
++#define MODULE_PROC_FAMILY "ZEN2 "
++#elif defined CONFIG_MZEN3
++#define MODULE_PROC_FAMILY "ZEN3 "
+ #elif defined CONFIG_MELAN
+ #define MODULE_PROC_FAMILY "ELAN "
+ #elif defined CONFIG_MCRUSOE
+--
+2.35.1
+
diff --git a/sys-kernel/pinephone-pro-sources/files/5020_BMQ-and-PDS-io-scheduler-v5.19-r0.patch b/sys-kernel/pinephone-pro-sources/files/5020_BMQ-and-PDS-io-scheduler-v5.19-r0.patch
new file mode 100644
index 0000000..610cfe8
--- /dev/null
+++ b/sys-kernel/pinephone-pro-sources/files/5020_BMQ-and-PDS-io-scheduler-v5.19-r0.patch
@@ -0,0 +1,9956 @@
+diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
+index cc3ea8febc62..ab4c5a35b999 100644
+--- a/Documentation/admin-guide/kernel-parameters.txt
++++ b/Documentation/admin-guide/kernel-parameters.txt
+@@ -5299,6 +5299,12 @@
+ sa1100ir [NET]
+ See drivers/net/irda/sa1100_ir.c.
+
++ sched_timeslice=
++ [KNL] Time slice in ms for Project C BMQ/PDS scheduler.
++ Format: integer 2, 4
++ Default: 4
++ See Documentation/scheduler/sched-BMQ.txt
++
+ sched_verbose [KNL] Enables verbose scheduler debug messages.
+
+ schedstats= [KNL,X86] Enable or disable scheduled statistics.
+diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst
+index ddccd1077462..e24781970a3d 100644
+--- a/Documentation/admin-guide/sysctl/kernel.rst
++++ b/Documentation/admin-guide/sysctl/kernel.rst
+@@ -1524,3 +1524,13 @@ is 10 seconds.
+
+ The softlockup threshold is (``2 * watchdog_thresh``). Setting this
+ tunable to zero will disable lockup detection altogether.
++
++yield_type:
++===========
++
++BMQ/PDS CPU scheduler only. This determines what type of yield calls
++to sched_yield will perform.
++
++ 0 - No yield.
++ 1 - Deboost and requeue task. (default)
++ 2 - Set run queue skip task.
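Both knobs documented above are tunable on a kernel carrying this patch: the time slice is chosen at boot with sched_timeslice=2 (or 4) on the kernel command line, while the yield behaviour can be flipped at runtime through /proc. A minimal sketch, assuming a Project C (SCHED_ALT) kernel where /proc/sys/kernel/yield_type exists, and root privileges for the write:

  /* Read and set the BMQ/PDS yield_type sysctl described above.
   * Only meaningful on a kernel built with this patch. */
  #include <stdio.h>

  #define YIELD_TYPE_PATH "/proc/sys/kernel/yield_type"

  int main(void)
  {
          FILE *f = fopen(YIELD_TYPE_PATH, "r");
          int current;

          if (!f) {
                  fprintf(stderr, "cannot open %s -- not a BMQ/PDS kernel?\n",
                          YIELD_TYPE_PATH);
                  return 1;
          }
          if (fscanf(f, "%d", &current) != 1) {
                  fclose(f);
                  fprintf(stderr, "cannot parse %s\n", YIELD_TYPE_PATH);
                  return 1;
          }
          fclose(f);
          printf("yield_type is %d (0=no yield, 1=deboost+requeue, 2=skip)\n",
                 current);

          /* Switch to the documented default, 1 (deboost and requeue). */
          f = fopen(YIELD_TYPE_PATH, "w");
          if (!f || fprintf(f, "1\n") < 0) {
                  fprintf(stderr, "cannot write %s (need root)\n",
                          YIELD_TYPE_PATH);
                  return 1;
          }
          fclose(f);
          return 0;
  }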
+diff --git a/Documentation/scheduler/sched-BMQ.txt b/Documentation/scheduler/sched-BMQ.txt
+new file mode 100644
+index 000000000000..05c84eec0f31
+--- /dev/null
++++ b/Documentation/scheduler/sched-BMQ.txt
+@@ -0,0 +1,110 @@
++ BitMap queue CPU Scheduler
++ --------------------------
++
++CONTENT
++========
++
++ Background
++ Design
++ Overview
++ Task policy
++ Priority management
++ BitMap Queue
++ CPU Assignment and Migration
++
++
++Background
++==========
++
++BitMap Queue CPU scheduler, referred to as BMQ from here on, is an evolution
++of the earlier Priority and Deadline based Skiplist multiple queue scheduler
++(PDS), and is inspired by the Zircon scheduler. Its goal is to keep the
++scheduler code simple while staying efficient and scalable for interactive
++tasks such as desktop use, movie playback and gaming.
++
++Design
++======
++
++Overview
++--------
++
++BMQ uses a per-CPU run queue design: each (logical) CPU has its own run
++queue and is responsible for scheduling the tasks put into that run queue.
++
++The run queue is a set of priority queues. Note that, as data structures,
++these queues are FIFO queues for non-rt tasks and a priority queue for rt
++tasks. BMQ is optimized for non-rt tasks, since most applications are
++non-rt tasks. Whether a queue is FIFO or priority, each queue holds an
++ordered list of runnable tasks awaiting execution, and the data structures
++are the same. When it is time for a new task to run, the scheduler simply
++looks for the lowest numbered queue that contains a task, and runs the
++first task from the head of that queue. The per-CPU idle task is also in
++the run queue, so the scheduler can always find a task to run from its run
++queue.
++
++Each task is assigned the same timeslice (default 4ms) when it is picked to
++start running. A task is reinserted at the end of the appropriate priority
++queue when it uses its whole timeslice. When the scheduler selects a new task
++from the priority queue it sets the CPU's preemption timer for the remainder of
++the previous timeslice. When that timer fires the scheduler will stop execution
++on that task, select another task and start over again.
++
++If a task blocks waiting for a shared resource then it's taken out of its
++priority queue and is placed in a wait queue for the shared resource. When it
++is unblocked it will be reinserted in the appropriate priority queue of an
++eligible CPU.
++
++Task policy
++-----------
++
++BMQ supports DEADLINE, FIFO, RR, NORMAL, BATCH and IDLE task policy like the
++mainline CFS scheduler. But BMQ is heavily optimized for non-rt tasks, that
++is, NORMAL/BATCH/IDLE policy tasks. Below is the implementation detail of
++each policy.
++
++DEADLINE
++ It is squashed as priority 0 FIFO task.
++
++FIFO/RR
++ All RT tasks share one single priority queue in the BMQ run queue design.
++The complexity of the insert operation is O(n). BMQ is not designed for
++systems that run mostly rt policy tasks.
++
++NORMAL/BATCH/IDLE
++ BATCH and IDLE tasks are treated as the same policy. They compete for CPU
++with NORMAL policy tasks, but they just don't boost. To control the priority of
++NORMAL/BATCH/IDLE tasks, simply use nice level.
++
++ISO
++ ISO policy is not supported in BMQ. Please use nice level -20 NORMAL policy
++task instead.
++
++Priority management
++-------------------
++
++RT tasks have priority from 0-99. For non-rt tasks, there are three different
++factors used to determine the effective priority of a task. The effective
++priority being what is used to determine which queue it will be in.
++
++The first factor is simply the task's static priority, which is assigned from
++the task's nice level: within [-20, 19] from userland's point of view and
++[0, 39] internally.
++
++The second factor is the priority boost. This is a value bounded between
++[-MAX_PRIORITY_ADJ, MAX_PRIORITY_ADJ] used to offset the base priority; it is
++modified by the following cases:
++
++*When a thread has used up its entire timeslice, it is always deboosted by
++increasing its boost value by one.
++*When a thread gives up CPU control (voluntarily or not) to reschedule, and
++its switch-in time (time since it was last switched in and run) is below the
++threshold based on its priority boost, it is boosted by decreasing its boost
++value by one, capped at 0 (it won't go negative).
++
++The intent in this system is to ensure that interactive threads are serviced
++quickly. These are usually the threads that interact directly with the user
++and cause user-perceivable latency. These threads usually do little work and
++spend most of their time blocked awaiting another user event. So they get the
++priority boost from unblocking while background threads that do most of the
++processing receive the priority penalty for using their entire timeslice.
+diff --git a/fs/proc/base.c b/fs/proc/base.c
+index 8dfa36a99c74..46397c606e01 100644
+--- a/fs/proc/base.c
++++ b/fs/proc/base.c
+@@ -479,7 +479,7 @@ static int proc_pid_schedstat(struct seq_file *m, struct pid_namespace *ns,
+ seq_puts(m, "0 0 0\n");
+ else
+ seq_printf(m, "%llu %llu %lu\n",
+- (unsigned long long)task->se.sum_exec_runtime,
++ (unsigned long long)tsk_seruntime(task),
+ (unsigned long long)task->sched_info.run_delay,
+ task->sched_info.pcount);
+
+diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
+index 8874f681b056..59eb72bf7d5f 100644
+--- a/include/asm-generic/resource.h
++++ b/include/asm-generic/resource.h
+@@ -23,7 +23,7 @@
+ [RLIMIT_LOCKS] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_SIGPENDING] = { 0, 0 }, \
+ [RLIMIT_MSGQUEUE] = { MQ_BYTES_MAX, MQ_BYTES_MAX }, \
+- [RLIMIT_NICE] = { 0, 0 }, \
++ [RLIMIT_NICE] = { 30, 30 }, \
+ [RLIMIT_RTPRIO] = { 0, 0 }, \
+ [RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ }
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index c46f3a63b758..7c65e6317d97 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -751,8 +751,14 @@ struct task_struct {
+ unsigned int ptrace;
+
+ #ifdef CONFIG_SMP
+- int on_cpu;
+ struct __call_single_node wake_entry;
++#endif
++#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_ALT)
++ int on_cpu;
++#endif
++
++#ifdef CONFIG_SMP
++#ifndef CONFIG_SCHED_ALT
+ unsigned int wakee_flips;
+ unsigned long wakee_flip_decay_ts;
+ struct task_struct *last_wakee;
+@@ -766,6 +772,7 @@ struct task_struct {
+ */
+ int recent_used_cpu;
+ int wake_cpu;
++#endif /* !CONFIG_SCHED_ALT */
+ #endif
+ int on_rq;
+
+@@ -774,6 +781,20 @@ struct task_struct {
+ int normal_prio;
+ unsigned int rt_priority;
+
++#ifdef CONFIG_SCHED_ALT
++ u64 last_ran;
++ s64 time_slice;
++ int sq_idx;
++ struct list_head sq_node;
++#ifdef CONFIG_SCHED_BMQ
++ int boost_prio;
++#endif /* CONFIG_SCHED_BMQ */
++#ifdef CONFIG_SCHED_PDS
++ u64 deadline;
++#endif /* CONFIG_SCHED_PDS */
++ /* sched_clock time spent running */
++ u64 sched_time;
++#else /* !CONFIG_SCHED_ALT */
+ struct sched_entity se;
+ struct sched_rt_entity rt;
+ struct sched_dl_entity dl;
+@@ -784,6 +805,7 @@
+ unsigned long core_cookie;
+ unsigned int core_occupation;
+ #endif
++#endif /* !CONFIG_SCHED_ALT */
+
+ #ifdef CONFIG_CGROUP_SCHED
+ struct task_group *sched_task_group;
+@@ -1517,6 +1539,15 @@ struct task_struct {
+ */
+ };
+
++#ifdef CONFIG_SCHED_ALT
++#define tsk_seruntime(t) ((t)->sched_time)
++/* replace the uncertain rt_timeout with 0UL */
++#define tsk_rttimeout(t) (0UL)
++#else /* CFS */
++#define tsk_seruntime(t) ((t)->se.sum_exec_runtime)
++#define tsk_rttimeout(t) ((t)->rt.timeout)
++#endif /* !CONFIG_SCHED_ALT */
++
+ static inline struct pid *task_pid(struct task_struct *task)
+ {
+ return task->thread_pid;
+diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
+index 7c83d4d5a971..fa30f98cb2be 100644
+--- a/include/linux/sched/deadline.h
++++ b/include/linux/sched/deadline.h
+@@ -1,5 +1,24 @@
+ /* SPDX-License-Identifier: GPL-2.0 */
+
++#ifdef CONFIG_SCHED_ALT
++
++static inline int dl_task(struct task_struct *p)
++{
++ return 0;
++}
++
++#ifdef CONFIG_SCHED_BMQ
++#define __tsk_deadline(p) (0UL)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
++#define __tsk_deadline(p) ((((u64) ((p)->prio))<<56) | (p)->deadline)
++#endif
++
++#else
++
++#define __tsk_deadline(p) ((p)->dl.deadline)
++
+ /*
+ * SCHED_DEADLINE tasks has negative priorities, reflecting
+ * the fact that any of them has higher prio than RT and
+@@ -21,6 +40,7 @@ static inline int dl_task(struct task_struct *p)
+ {
+ return dl_prio(p->prio);
+ }
++#endif /* CONFIG_SCHED_ALT */
+
+ static inline bool dl_time_before(u64 a, u64 b)
+ {
+diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h
+index ab83d85e1183..6af9ae681116 100644
+--- a/include/linux/sched/prio.h
++++ b/include/linux/sched/prio.h
+@@ -18,6 +18,32 @@
+ #define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
+ #define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
+
++#ifdef CONFIG_SCHED_ALT
++
++/* Undefine MAX_PRIO and DEFAULT_PRIO */
++#undef MAX_PRIO
++#undef DEFAULT_PRIO
++
++/* +/- priority levels from the base priority */
++#ifdef CONFIG_SCHED_BMQ
++#define MAX_PRIORITY_ADJ (7)
++
++#define MIN_NORMAL_PRIO (MAX_RT_PRIO)
++#define MAX_PRIO (MIN_NORMAL_PRIO + NICE_WIDTH)
++#define DEFAULT_PRIO (MIN_NORMAL_PRIO + NICE_WIDTH / 2)
++#endif
++
++#ifdef CONFIG_SCHED_PDS
++#define MAX_PRIORITY_ADJ (0)
++
++#define MIN_NORMAL_PRIO (128)
++#define NORMAL_PRIO_NUM (64)
++#define MAX_PRIO (MIN_NORMAL_PRIO + NORMAL_PRIO_NUM)
++#define DEFAULT_PRIO (MAX_PRIO - NICE_WIDTH / 2)
++#endif
++
++#endif /* CONFIG_SCHED_ALT */
++
+ /*
+ * Convert user-nice values [ -20 ... 0 ...
19 ]
+ * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
+diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
+index e5af028c08b4..0a7565d0d3cf 100644
+--- a/include/linux/sched/rt.h
++++ b/include/linux/sched/rt.h
+@@ -24,8 +24,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
+
+ if (policy == SCHED_FIFO || policy == SCHED_RR)
+ return true;
++#ifndef CONFIG_SCHED_ALT
+ if (policy == SCHED_DEADLINE)
+ return true;
++#endif
+ return false;
+ }
+
+diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
+index 56cffe42abbc..e020fc572b22 100644
+--- a/include/linux/sched/topology.h
++++ b/include/linux/sched/topology.h
+@@ -233,7 +233,8 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
+
+ #endif /* !CONFIG_SMP */
+
+-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
++#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) && \
++ !defined(CONFIG_SCHED_ALT)
+ extern void rebuild_sched_domains_energy(void);
+ #else
+ static inline void rebuild_sched_domains_energy(void)
+diff --git a/init/Kconfig b/init/Kconfig
+index c7900e8975f1..d2b593e3807d 100644
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -812,6 +812,7 @@ menu "Scheduler features"
+ config UCLAMP_TASK
+ bool "Enable utilization clamping for RT/FAIR tasks"
+ depends on CPU_FREQ_GOV_SCHEDUTIL
++ depends on !SCHED_ALT
+ help
+ This feature enables the scheduler to track the clamped utilization
+ of each CPU based on RUNNABLE tasks scheduled on that CPU.
+@@ -858,6 +859,35 @@ config UCLAMP_BUCKETS_COUNT
+
+ If in doubt, use the default value.
+
++menuconfig SCHED_ALT
++ bool "Alternative CPU Schedulers"
++ default y
++ help
++ This feature enables the alternative CPU schedulers.
++
++if SCHED_ALT
++
++choice
++ prompt "Alternative CPU Scheduler"
++ default SCHED_BMQ
++
++config SCHED_BMQ
++ bool "BMQ CPU scheduler"
++ help
++ The BitMap Queue CPU scheduler for excellent interactivity and
++ responsiveness on the desktop and solid scalability on normal
++ hardware and commodity servers.
++
++config SCHED_PDS
++ bool "PDS CPU scheduler"
++ help
++ The Priority and Deadline based Skip list multiple queue CPU
++ Scheduler.
++
++endchoice
++
++endif
++
+ endmenu
+
+ #
+@@ -911,6 +941,7 @@ config NUMA_BALANCING
+ depends on ARCH_SUPPORTS_NUMA_BALANCING
+ depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY
+ depends on SMP && NUMA && MIGRATION && !PREEMPT_RT
++ depends on !SCHED_ALT
+ help
+ This option adds support for automatic NUMA aware memory/task placement.
+ The mechanism is quite primitive and is based on migrating memory when
+@@ -1003,6 +1034,7 @@ config FAIR_GROUP_SCHED
+ depends on CGROUP_SCHED
+ default CGROUP_SCHED
+
++if !SCHED_ALT
+ config CFS_BANDWIDTH
+ bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED"
+ depends on FAIR_GROUP_SCHED
+@@ -1025,6 +1057,7 @@ config RT_GROUP_SCHED
+ realtime bandwidth for them.
+ See Documentation/scheduler/sched-rt-group.rst for more information.
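The prio.h hunk above rearranges the priority space differently for each scheduler flavour. Mirroring the macro arithmetic in a standalone program makes the resulting layouts easy to sanity-check; MAX_RT_PRIO (100) and NICE_WIDTH (40) are the mainline values the hunk builds on, and everything else follows from the patch itself:

  /* Userspace mirror of the prio.h hunk above, for checking the
   * BMQ/PDS priority layouts. MAX_RT_PRIO and NICE_WIDTH are the
   * mainline values this patch builds on. */
  #include <stdio.h>

  #define MAX_RT_PRIO 100
  #define NICE_WIDTH  40

  int main(void)
  {
          /* BMQ layout: normal priorities sit right after the RT range. */
          int bmq_min_normal = MAX_RT_PRIO;                     /* 100 */
          int bmq_max_prio   = bmq_min_normal + NICE_WIDTH;     /* 140 */
          int bmq_default    = bmq_min_normal + NICE_WIDTH / 2; /* 120 */

          /* PDS layout: a fixed 64-level normal band starting at 128. */
          int pds_min_normal = 128;
          int pds_max_prio   = pds_min_normal + 64;             /* 192 */
          int pds_default    = pds_max_prio - NICE_WIDTH / 2;   /* 172 */

          printf("BMQ: normal prio [%d, %d), default %d, boost range +/-7\n",
                 bmq_min_normal, bmq_max_prio, bmq_default);
          printf("PDS: normal prio [%d, %d), default %d\n",
                 pds_min_normal, pds_max_prio, pds_default);
          return 0;
  }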
+ ++endif #!SCHED_ALT + endif #CGROUP_SCHED + + config UCLAMP_TASK_GROUP +@@ -1268,6 +1301,7 @@ config CHECKPOINT_RESTORE + + config SCHED_AUTOGROUP + bool "Automatic process group scheduling" ++ depends on !SCHED_ALT + select CGROUPS + select CGROUP_SCHED + select FAIR_GROUP_SCHED +diff --git a/init/init_task.c b/init/init_task.c +index 73cc8f03511a..2d0bad762895 100644 +--- a/init/init_task.c ++++ b/init/init_task.c +@@ -75,9 +75,15 @@ struct task_struct init_task + .stack = init_stack, + .usage = REFCOUNT_INIT(2), + .flags = PF_KTHREAD, ++#ifdef CONFIG_SCHED_ALT ++ .prio = DEFAULT_PRIO + MAX_PRIORITY_ADJ, ++ .static_prio = DEFAULT_PRIO, ++ .normal_prio = DEFAULT_PRIO + MAX_PRIORITY_ADJ, ++#else + .prio = MAX_PRIO - 20, + .static_prio = MAX_PRIO - 20, + .normal_prio = MAX_PRIO - 20, ++#endif + .policy = SCHED_NORMAL, + .cpus_ptr = &init_task.cpus_mask, + .user_cpus_ptr = NULL, +@@ -88,6 +94,17 @@ struct task_struct init_task + .restart_block = { + .fn = do_no_restart_syscall, + }, ++#ifdef CONFIG_SCHED_ALT ++ .sq_node = LIST_HEAD_INIT(init_task.sq_node), ++#ifdef CONFIG_SCHED_BMQ ++ .boost_prio = 0, ++ .sq_idx = 15, ++#endif ++#ifdef CONFIG_SCHED_PDS ++ .deadline = 0, ++#endif ++ .time_slice = HZ, ++#else + .se = { + .group_node = LIST_HEAD_INIT(init_task.se.group_node), + }, +@@ -95,6 +112,7 @@ struct task_struct init_task + .run_list = LIST_HEAD_INIT(init_task.rt.run_list), + .time_slice = RR_TIMESLICE, + }, ++#endif + .tasks = LIST_HEAD_INIT(init_task.tasks), + #ifdef CONFIG_SMP + .pushable_tasks = PLIST_NODE_INIT(init_task.pushable_tasks, MAX_PRIO), +diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt +index c2f1fd95a821..41654679b1b2 100644 +--- a/kernel/Kconfig.preempt ++++ b/kernel/Kconfig.preempt +@@ -117,7 +117,7 @@ config PREEMPT_DYNAMIC + + config SCHED_CORE + bool "Core Scheduling for SMT" +- depends on SCHED_SMT ++ depends on SCHED_SMT && !SCHED_ALT + help + This option permits Core Scheduling, a means of coordinated task + selection across SMT siblings. When enabled -- see +diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c +index 71a418858a5e..7e3016873db1 100644 +--- a/kernel/cgroup/cpuset.c ++++ b/kernel/cgroup/cpuset.c +@@ -704,7 +704,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial) + return ret; + } + +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_ALT) + /* + * Helper routine for generate_sched_domains(). + * Do cpusets a, b have overlapping effective cpus_allowed masks? 
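
For concreteness, the init_task priorities installed in the init_task.c hunk above follow from the prio.h hunk earlier; a quick user-space check of the arithmetic (a sketch assuming the mainline values MAX_RT_PRIO == 100 and NICE_WIDTH == 40):

/* Recompute the BMQ/PDS priority constants used by init_task above. */
#include <stdio.h>

#define MAX_RT_PRIO	100
#define NICE_WIDTH	40

int main(void)
{
	int bmq_default = MAX_RT_PRIO + NICE_WIDTH / 2;	/* DEFAULT_PRIO = 120 */
	int bmq_init    = bmq_default + 7;		/* + MAX_PRIORITY_ADJ = 127 */
	int pds_max     = 128 + 64;			/* MIN_NORMAL_PRIO + NORMAL_PRIO_NUM = 192 */
	int pds_default = pds_max - NICE_WIDTH / 2;	/* DEFAULT_PRIO = 172, MAX_PRIORITY_ADJ = 0 */

	printf("BMQ init prio: %d, PDS init prio: %d\n", bmq_init, pds_default);
	return 0;
}
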
+@@ -1100,7 +1100,7 @@ static void rebuild_sched_domains_locked(void) + /* Have scheduler rebuild the domains */ + partition_and_rebuild_sched_domains(ndoms, doms, attr); + } +-#else /* !CONFIG_SMP */ ++#else /* !CONFIG_SMP || CONFIG_SCHED_ALT */ + static void rebuild_sched_domains_locked(void) + { + } +diff --git a/kernel/delayacct.c b/kernel/delayacct.c +index 164ed9ef77a3..c974a84b056f 100644 +--- a/kernel/delayacct.c ++++ b/kernel/delayacct.c +@@ -150,7 +150,7 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) + */ + t1 = tsk->sched_info.pcount; + t2 = tsk->sched_info.run_delay; +- t3 = tsk->se.sum_exec_runtime; ++ t3 = tsk_seruntime(tsk); + + d->cpu_count += t1; + +diff --git a/kernel/exit.c b/kernel/exit.c +index 64c938ce36fe..a353f7ef5392 100644 +--- a/kernel/exit.c ++++ b/kernel/exit.c +@@ -124,7 +124,7 @@ static void __exit_signal(struct task_struct *tsk) + sig->curr_target = next_thread(tsk); + } + +- add_device_randomness((const void*) &tsk->se.sum_exec_runtime, ++ add_device_randomness((const void*) &tsk_seruntime(tsk), + sizeof(unsigned long long)); + + /* +@@ -145,7 +145,7 @@ static void __exit_signal(struct task_struct *tsk) + sig->inblock += task_io_get_inblock(tsk); + sig->oublock += task_io_get_oublock(tsk); + task_io_accounting_add(&sig->ioac, &tsk->ioac); +- sig->sum_sched_runtime += tsk->se.sum_exec_runtime; ++ sig->sum_sched_runtime += tsk_seruntime(tsk); + sig->nr_threads--; + __unhash_process(tsk, group_dead); + write_sequnlock(&sig->stats_lock); +diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c +index 7779ee8abc2a..5b9893cdfb1b 100644 +--- a/kernel/locking/rtmutex.c ++++ b/kernel/locking/rtmutex.c +@@ -300,21 +300,25 @@ static __always_inline void + waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task) + { + waiter->prio = __waiter_prio(task); +- waiter->deadline = task->dl.deadline; ++ waiter->deadline = __tsk_deadline(task); + } + + /* + * Only use with rt_mutex_waiter_{less,equal}() + */ + #define task_to_waiter(p) \ +- &(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline } ++ &(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = __tsk_deadline(p) } + + static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left, + struct rt_mutex_waiter *right) + { ++#ifdef CONFIG_SCHED_PDS ++ return (left->deadline < right->deadline); ++#else + if (left->prio < right->prio) + return 1; + ++#ifndef CONFIG_SCHED_BMQ + /* + * If both waiters have dl_prio(), we check the deadlines of the + * associated tasks. +@@ -323,16 +327,22 @@ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left, + */ + if (dl_prio(left->prio)) + return dl_time_before(left->deadline, right->deadline); ++#endif + + return 0; ++#endif + } + + static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left, + struct rt_mutex_waiter *right) + { ++#ifdef CONFIG_SCHED_PDS ++ return (left->deadline == right->deadline); ++#else + if (left->prio != right->prio) + return 0; + ++#ifndef CONFIG_SCHED_BMQ + /* + * If both waiters have dl_prio(), we check the deadlines of the + * associated tasks. 
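
The PDS branch of the rt_mutex comparisons below orders waiters with a single u64 compare because __tsk_deadline() (from the sched/deadline.h hunk earlier) packs the task's priority into the top byte above the deadline. A user-space sketch of that packing (the explicit masking of the low 56 bits is an illustrative assumption; the real macro simply ORs the raw deadline in):

#include <stdio.h>
#include <stdint.h>

/* Mirror of the PDS __tsk_deadline() packing: prio in bits 63..56. */
static uint64_t pack(uint8_t prio, uint64_t deadline)
{
	return ((uint64_t)prio << 56) | (deadline & ((1ULL << 56) - 1));
}

int main(void)
{
	uint64_t a = pack(120, 5000);	/* worse prio, earlier deadline */
	uint64_t b = pack(110, 9000);	/* better prio, later deadline */

	/* Lower packed value runs first: prio dominates, deadline breaks ties. */
	printf("%s wins\n", b < a ? "b (prio 110)" : "a (prio 120)");
	return 0;
}
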
+@@ -341,8 +351,10 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left, + */ + if (dl_prio(left->prio)) + return left->deadline == right->deadline; ++#endif + + return 1; ++#endif + } + + static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter, +diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile +index 976092b7bd45..31d587c16ec1 100644 +--- a/kernel/sched/Makefile ++++ b/kernel/sched/Makefile +@@ -28,7 +28,12 @@ endif + # These compilation units have roughly the same size and complexity - so their + # build parallelizes well and finishes roughly at once: + # ++ifdef CONFIG_SCHED_ALT ++obj-y += alt_core.o ++obj-$(CONFIG_SCHED_DEBUG) += alt_debug.o ++else + obj-y += core.o + obj-y += fair.o ++endif + obj-y += build_policy.o + obj-y += build_utility.o +diff --git a/kernel/sched/alt_core.c b/kernel/sched/alt_core.c +new file mode 100644 +index 000000000000..d0ab41c4d9ad +--- /dev/null ++++ b/kernel/sched/alt_core.c +@@ -0,0 +1,7807 @@ ++/* ++ * kernel/sched/alt_core.c ++ * ++ * Core alternative kernel scheduler code and related syscalls ++ * ++ * Copyright (C) 1991-2002 Linus Torvalds ++ * ++ * 2009-08-13 Brainfuck deadline scheduling policy by Con Kolivas deletes ++ * a whole lot of those previous things. ++ * 2017-09-06 Priority and Deadline based Skip list multiple queue kernel ++ * scheduler by Alfred Chen. ++ * 2019-02-20 BMQ(BitMap Queue) kernel scheduler by Alfred Chen. ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include ++ ++#define CREATE_TRACE_POINTS ++#include ++#undef CREATE_TRACE_POINTS ++ ++#include "sched.h" ++ ++#include "pelt.h" ++ ++#include "../../fs/io-wq.h" ++#include "../smpboot.h" ++ ++/* ++ * Export tracepoints that act as a bare tracehook (ie: have no trace event ++ * associated with them) to allow external modules to probe them. ++ */ ++EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_irq_tp); ++ ++#ifdef CONFIG_SCHED_DEBUG ++#define sched_feat(x) (1) ++/* ++ * Print a warning if need_resched is set for the given duration (if ++ * LATENCY_WARN is enabled). ++ * ++ * If sysctl_resched_latency_warn_once is set, only one warning will be shown ++ * per boot. 
++ */
++__read_mostly int sysctl_resched_latency_warn_ms = 100;
++__read_mostly int sysctl_resched_latency_warn_once = 1;
++#else
++#define sched_feat(x)	(0)
++#endif /* CONFIG_SCHED_DEBUG */
++
++#define ALT_SCHED_VERSION "v5.19-r0"
++
++/* rt_prio(prio) defined in include/linux/sched/rt.h */
++#define rt_task(p)		rt_prio((p)->prio)
++#define rt_policy(policy)	((policy) == SCHED_FIFO || (policy) == SCHED_RR)
++#define task_has_rt_policy(p)	(rt_policy((p)->policy))
++
++#define STOP_PRIO		(MAX_RT_PRIO - 1)
++
++/* Default time slice is 4 ms, settable to 2 ms via kernel parameter "sched_timeslice" (any other value falls back to 4) */
++u64 sched_timeslice_ns __read_mostly = (4 << 20);
++
++static inline void requeue_task(struct task_struct *p, struct rq *rq, int idx);
++
++#ifdef CONFIG_SCHED_BMQ
++#include "bmq.h"
++#endif
++#ifdef CONFIG_SCHED_PDS
++#include "pds.h"
++#endif
++
++static int __init sched_timeslice(char *str)
++{
++	int timeslice_ms;
++
++	get_option(&str, &timeslice_ms);
++	if (2 != timeslice_ms)
++		timeslice_ms = 4;
++	sched_timeslice_ns = timeslice_ms << 20;
++	sched_timeslice_imp(timeslice_ms);
++
++	return 0;
++}
++early_param("sched_timeslice", sched_timeslice);
++
++/* Reschedule if less than this much time (in ns, roughly 100 μs) is left */
++#define RESCHED_NS		(100 << 10)
++
++/**
++ * sched_yield_type - Choose what sort of yield sched_yield will perform.
++ * 0: No yield.
++ * 1: Deboost and requeue task. (default)
++ * 2: Set rq skip task.
++ */
++int sched_yield_type __read_mostly = 1;
++
++#ifdef CONFIG_SMP
++static cpumask_t sched_rq_pending_mask ____cacheline_aligned_in_smp;
++
++DEFINE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++DEFINE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
++DEFINE_PER_CPU(cpumask_t *, sched_cpu_topo_end_mask);
++
++#ifdef CONFIG_SCHED_SMT
++DEFINE_STATIC_KEY_FALSE(sched_smt_present);
++EXPORT_SYMBOL_GPL(sched_smt_present);
++#endif
++
++/*
++ * Keep a unique ID per domain (we use the first CPU's number in the cpumask of
++ * the domain), this allows us to quickly tell if two cpus are in the same cache
++ * domain, see cpus_share_cache().
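
A side note on the `<< 20` used by sched_timeslice() above: it converts milliseconds to nanoseconds only approximately, since 1 << 20 is 1048576 rather than 1000000, so each slice runs about 4.9% longer than its nominal value. A quick user-space check:

#include <stdio.h>

int main(void)
{
	/* "sched_timeslice=2" and the default of 4, converted as ms << 20 */
	printf("2 ms slice: %llu ns (exact: 2000000)\n", 2ULL << 20);
	printf("4 ms slice: %llu ns (exact: 4000000)\n", 4ULL << 20);
	return 0;
}
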
++ */
++DEFINE_PER_CPU(int, sd_llc_id);
++#endif /* CONFIG_SMP */
++
++static DEFINE_MUTEX(sched_hotcpu_mutex);
++
++DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++
++#ifndef prepare_arch_switch
++# define prepare_arch_switch(next)	do { } while (0)
++#endif
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch()	do { } while (0)
++#endif
++
++#ifdef CONFIG_SCHED_SMT
++static cpumask_t sched_sg_idle_mask ____cacheline_aligned_in_smp;
++#endif
++static cpumask_t sched_rq_watermark[SCHED_QUEUE_BITS] ____cacheline_aligned_in_smp;
++
++/* sched_queue related functions */
++static inline void sched_queue_init(struct sched_queue *q)
++{
++	int i;
++
++	bitmap_zero(q->bitmap, SCHED_QUEUE_BITS);
++	for (i = 0; i < SCHED_BITS; i++)
++		INIT_LIST_HEAD(&q->heads[i]);
++}
++
++/*
++ * Initialize the idle task and put it into the queue structure of rq.
++ * IMPORTANT: may be called multiple times for a single cpu.
++ */
++static inline void sched_queue_init_idle(struct sched_queue *q,
++					 struct task_struct *idle)
++{
++	idle->sq_idx = IDLE_TASK_SCHED_PRIO;
++	INIT_LIST_HEAD(&q->heads[idle->sq_idx]);
++	list_add(&idle->sq_node, &q->heads[idle->sq_idx]);
++}
++
++/* watermark related functions */
++static inline void update_sched_rq_watermark(struct rq *rq)
++{
++	unsigned long watermark = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
++	unsigned long last_wm = rq->watermark;
++	unsigned long i;
++	int cpu;
++
++	if (watermark == last_wm)
++		return;
++
++	rq->watermark = watermark;
++	cpu = cpu_of(rq);
++	if (watermark < last_wm) {
++		for (i = last_wm; i > watermark; i--)
++			cpumask_clear_cpu(cpu, sched_rq_watermark + SCHED_QUEUE_BITS - i);
++#ifdef CONFIG_SCHED_SMT
++		if (static_branch_likely(&sched_smt_present) &&
++		    IDLE_TASK_SCHED_PRIO == last_wm)
++			cpumask_andnot(&sched_sg_idle_mask,
++				       &sched_sg_idle_mask, cpu_smt_mask(cpu));
++#endif
++		return;
++	}
++	/* last_wm < watermark */
++	for (i = watermark; i > last_wm; i--)
++		cpumask_set_cpu(cpu, sched_rq_watermark + SCHED_QUEUE_BITS - i);
++#ifdef CONFIG_SCHED_SMT
++	if (static_branch_likely(&sched_smt_present) &&
++	    IDLE_TASK_SCHED_PRIO == watermark) {
++		cpumask_t tmp;
++
++		cpumask_and(&tmp, cpu_smt_mask(cpu), sched_rq_watermark);
++		if (cpumask_equal(&tmp, cpu_smt_mask(cpu)))
++			cpumask_or(&sched_sg_idle_mask,
++				   &sched_sg_idle_mask, cpu_smt_mask(cpu));
++	}
++#endif
++}
++
++/*
++ * This routine assumes that the idle task is always in the queue.
++ */
++static inline struct task_struct *sched_rq_first_task(struct rq *rq)
++{
++	unsigned long idx = find_first_bit(rq->queue.bitmap, SCHED_QUEUE_BITS);
++	const struct list_head *head = &rq->queue.heads[sched_prio2idx(idx, rq)];
++
++	return list_first_entry(head, struct task_struct, sq_node);
++}
++
++static inline struct task_struct *
++sched_rq_next_task(struct task_struct *p, struct rq *rq)
++{
++	unsigned long idx = p->sq_idx;
++	struct list_head *head = &rq->queue.heads[idx];
++
++	if (list_is_last(&p->sq_node, head)) {
++		idx = find_next_bit(rq->queue.bitmap, SCHED_QUEUE_BITS,
++				    sched_idx2prio(idx, rq) + 1);
++		head = &rq->queue.heads[sched_prio2idx(idx, rq)];
++
++		return list_first_entry(head, struct task_struct, sq_node);
++	}
++
++	return list_next_entry(p, sq_node);
++}
++
++static inline struct task_struct *rq_runnable_task(struct rq *rq)
++{
++	struct task_struct *next = sched_rq_first_task(rq);
++
++	if (unlikely(next == rq->skip))
++		next = sched_rq_next_task(next, rq);
++
++	return next;
++}
++
++/*
++ * Serialization rules:
++ *
++ * Lock order:
++ *
++ *   p->pi_lock
++ *     
rq->lock ++ * hrtimer_cpu_base->lock (hrtimer_start() for bandwidth controls) ++ * ++ * rq1->lock ++ * rq2->lock where: rq1 < rq2 ++ * ++ * Regular state: ++ * ++ * Normal scheduling state is serialized by rq->lock. __schedule() takes the ++ * local CPU's rq->lock, it optionally removes the task from the runqueue and ++ * always looks at the local rq data structures to find the most eligible task ++ * to run next. ++ * ++ * Task enqueue is also under rq->lock, possibly taken from another CPU. ++ * Wakeups from another LLC domain might use an IPI to transfer the enqueue to ++ * the local CPU to avoid bouncing the runqueue state around [ see ++ * ttwu_queue_wakelist() ] ++ * ++ * Task wakeup, specifically wakeups that involve migration, are horribly ++ * complicated to avoid having to take two rq->locks. ++ * ++ * Special state: ++ * ++ * System-calls and anything external will use task_rq_lock() which acquires ++ * both p->pi_lock and rq->lock. As a consequence the state they change is ++ * stable while holding either lock: ++ * ++ * - sched_setaffinity()/ ++ * set_cpus_allowed_ptr(): p->cpus_ptr, p->nr_cpus_allowed ++ * - set_user_nice(): p->se.load, p->*prio ++ * - __sched_setscheduler(): p->sched_class, p->policy, p->*prio, ++ * p->se.load, p->rt_priority, ++ * p->dl.dl_{runtime, deadline, period, flags, bw, density} ++ * - sched_setnuma(): p->numa_preferred_nid ++ * - sched_move_task()/ ++ * cpu_cgroup_fork(): p->sched_task_group ++ * - uclamp_update_active() p->uclamp* ++ * ++ * p->state <- TASK_*: ++ * ++ * is changed locklessly using set_current_state(), __set_current_state() or ++ * set_special_state(), see their respective comments, or by ++ * try_to_wake_up(). This latter uses p->pi_lock to serialize against ++ * concurrent self. ++ * ++ * p->on_rq <- { 0, 1 = TASK_ON_RQ_QUEUED, 2 = TASK_ON_RQ_MIGRATING }: ++ * ++ * is set by activate_task() and cleared by deactivate_task(), under ++ * rq->lock. Non-zero indicates the task is runnable, the special ++ * ON_RQ_MIGRATING state is used for migration without holding both ++ * rq->locks. It indicates task_cpu() is not stable, see task_rq_lock(). ++ * ++ * p->on_cpu <- { 0, 1 }: ++ * ++ * is set by prepare_task() and cleared by finish_task() such that it will be ++ * set before p is scheduled-in and cleared after p is scheduled-out, both ++ * under rq->lock. Non-zero indicates the task is running on its CPU. ++ * ++ * [ The astute reader will observe that it is possible for two tasks on one ++ * CPU to have ->on_cpu = 1 at the same time. ] ++ * ++ * task_cpu(p): is changed by set_task_cpu(), the rules are: ++ * ++ * - Don't call set_task_cpu() on a blocked task: ++ * ++ * We don't care what CPU we're not running on, this simplifies hotplug, ++ * the CPU assignment of blocked tasks isn't required to be valid. ++ * ++ * - for try_to_wake_up(), called under p->pi_lock: ++ * ++ * This allows try_to_wake_up() to only take one rq->lock, see its comment. 
++ * ++ * - for migration called under rq->lock: ++ * [ see task_on_rq_migrating() in task_rq_lock() ] ++ * ++ * o move_queued_task() ++ * o detach_task() ++ * ++ * - for migration called under double_rq_lock(): ++ * ++ * o __migrate_swap_task() ++ * o push_rt_task() / pull_rt_task() ++ * o push_dl_task() / pull_dl_task() ++ * o dl_task_offline_migration() ++ * ++ */ ++ ++/* ++ * Context: p->pi_lock ++ */ ++static inline struct rq ++*__task_access_lock(struct task_struct *p, raw_spinlock_t **plock) ++{ ++ struct rq *rq; ++ for (;;) { ++ rq = task_rq(p); ++ if (p->on_cpu || task_on_rq_queued(p)) { ++ raw_spin_lock(&rq->lock); ++ if (likely((p->on_cpu || task_on_rq_queued(p)) ++ && rq == task_rq(p))) { ++ *plock = &rq->lock; ++ return rq; ++ } ++ raw_spin_unlock(&rq->lock); ++ } else if (task_on_rq_migrating(p)) { ++ do { ++ cpu_relax(); ++ } while (unlikely(task_on_rq_migrating(p))); ++ } else { ++ *plock = NULL; ++ return rq; ++ } ++ } ++} ++ ++static inline void ++__task_access_unlock(struct task_struct *p, raw_spinlock_t *lock) ++{ ++ if (NULL != lock) ++ raw_spin_unlock(lock); ++} ++ ++static inline struct rq ++*task_access_lock_irqsave(struct task_struct *p, raw_spinlock_t **plock, ++ unsigned long *flags) ++{ ++ struct rq *rq; ++ for (;;) { ++ rq = task_rq(p); ++ if (p->on_cpu || task_on_rq_queued(p)) { ++ raw_spin_lock_irqsave(&rq->lock, *flags); ++ if (likely((p->on_cpu || task_on_rq_queued(p)) ++ && rq == task_rq(p))) { ++ *plock = &rq->lock; ++ return rq; ++ } ++ raw_spin_unlock_irqrestore(&rq->lock, *flags); ++ } else if (task_on_rq_migrating(p)) { ++ do { ++ cpu_relax(); ++ } while (unlikely(task_on_rq_migrating(p))); ++ } else { ++ raw_spin_lock_irqsave(&p->pi_lock, *flags); ++ if (likely(!p->on_cpu && !p->on_rq && ++ rq == task_rq(p))) { ++ *plock = &p->pi_lock; ++ return rq; ++ } ++ raw_spin_unlock_irqrestore(&p->pi_lock, *flags); ++ } ++ } ++} ++ ++static inline void ++task_access_unlock_irqrestore(struct task_struct *p, raw_spinlock_t *lock, ++ unsigned long *flags) ++{ ++ raw_spin_unlock_irqrestore(lock, *flags); ++} ++ ++/* ++ * __task_rq_lock - lock the rq @p resides on. ++ */ ++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) ++ __acquires(rq->lock) ++{ ++ struct rq *rq; ++ ++ lockdep_assert_held(&p->pi_lock); ++ ++ for (;;) { ++ rq = task_rq(p); ++ raw_spin_lock(&rq->lock); ++ if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) ++ return rq; ++ raw_spin_unlock(&rq->lock); ++ ++ while (unlikely(task_on_rq_migrating(p))) ++ cpu_relax(); ++ } ++} ++ ++/* ++ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on. ++ */ ++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) ++ __acquires(p->pi_lock) ++ __acquires(rq->lock) ++{ ++ struct rq *rq; ++ ++ for (;;) { ++ raw_spin_lock_irqsave(&p->pi_lock, rf->flags); ++ rq = task_rq(p); ++ raw_spin_lock(&rq->lock); ++ /* ++ * move_queued_task() task_rq_lock() ++ * ++ * ACQUIRE (rq->lock) ++ * [S] ->on_rq = MIGRATING [L] rq = task_rq() ++ * WMB (__set_task_cpu()) ACQUIRE (rq->lock); ++ * [S] ->cpu = new_cpu [L] task_rq() ++ * [L] ->on_rq ++ * RELEASE (rq->lock) ++ * ++ * If we observe the old CPU in task_rq_lock(), the acquire of ++ * the old rq->lock will fully serialize against the stores. ++ * ++ * If we observe the new CPU in task_rq_lock(), the address ++ * dependency headed by '[L] rq = task_rq()' and the acquire ++ * will pair with the WMB to ensure we then also see migrating. 
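
The lock-validate-retry idiom that __task_access_lock() and task_rq_lock() implement above generalizes to any object whose owning lock can change while it is being acquired: take the lock the object currently points at, then re-check the pointer under that lock. A user-space sketch of the skeleton (hypothetical types; GCC/Clang atomic builtins):

#include <pthread.h>

struct bucket { pthread_mutex_t lock; };
struct obj { struct bucket *home; };

/* Lock the bucket that currently owns @o, retrying if it moves. */
static struct bucket *lock_home(struct obj *o)
{
	for (;;) {
		struct bucket *b = __atomic_load_n(&o->home, __ATOMIC_ACQUIRE);

		pthread_mutex_lock(&b->lock);
		if (b == __atomic_load_n(&o->home, __ATOMIC_RELAXED))
			return b;	/* pointer still valid under the lock */
		pthread_mutex_unlock(&b->lock);	/* object moved: retry */
	}
}

The kernel versions layer the on_cpu/on_rq checks and the TASK_ON_RQ_MIGRATING spin on top of this basic loop.
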
++ */ ++ if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) { ++ return rq; ++ } ++ raw_spin_unlock(&rq->lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); ++ ++ while (unlikely(task_on_rq_migrating(p))) ++ cpu_relax(); ++ } ++} ++ ++static inline void ++rq_lock_irqsave(struct rq *rq, struct rq_flags *rf) ++ __acquires(rq->lock) ++{ ++ raw_spin_lock_irqsave(&rq->lock, rf->flags); ++} ++ ++static inline void ++rq_unlock_irqrestore(struct rq *rq, struct rq_flags *rf) ++ __releases(rq->lock) ++{ ++ raw_spin_unlock_irqrestore(&rq->lock, rf->flags); ++} ++ ++void raw_spin_rq_lock_nested(struct rq *rq, int subclass) ++{ ++ raw_spinlock_t *lock; ++ ++ /* Matches synchronize_rcu() in __sched_core_enable() */ ++ preempt_disable(); ++ ++ for (;;) { ++ lock = __rq_lockp(rq); ++ raw_spin_lock_nested(lock, subclass); ++ if (likely(lock == __rq_lockp(rq))) { ++ /* preempt_count *MUST* be > 1 */ ++ preempt_enable_no_resched(); ++ return; ++ } ++ raw_spin_unlock(lock); ++ } ++} ++ ++void raw_spin_rq_unlock(struct rq *rq) ++{ ++ raw_spin_unlock(rq_lockp(rq)); ++} ++ ++/* ++ * RQ-clock updating methods: ++ */ ++ ++static void update_rq_clock_task(struct rq *rq, s64 delta) ++{ ++/* ++ * In theory, the compile should just see 0 here, and optimize out the call ++ * to sched_rt_avg_update. But I don't trust it... ++ */ ++ s64 __maybe_unused steal = 0, irq_delta = 0; ++ ++#ifdef CONFIG_IRQ_TIME_ACCOUNTING ++ irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; ++ ++ /* ++ * Since irq_time is only updated on {soft,}irq_exit, we might run into ++ * this case when a previous update_rq_clock() happened inside a ++ * {soft,}irq region. ++ * ++ * When this happens, we stop ->clock_task and only update the ++ * prev_irq_time stamp to account for the part that fit, so that a next ++ * update will consume the rest. This ensures ->clock_task is ++ * monotonic. ++ * ++ * It does however cause some slight miss-attribution of {soft,}irq ++ * time, a more accurate solution would be to update the irq_time using ++ * the current rq->clock timestamp, except that would require using ++ * atomic ops. 
++ */
++	if (irq_delta > delta)
++		irq_delta = delta;
++
++	rq->prev_irq_time += irq_delta;
++	delta -= irq_delta;
++#endif
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	if (static_key_false((&paravirt_steal_rq_enabled))) {
++		steal = paravirt_steal_clock(cpu_of(rq));
++		steal -= rq->prev_steal_time_rq;
++
++		if (unlikely(steal > delta))
++			steal = delta;
++
++		rq->prev_steal_time_rq += steal;
++		delta -= steal;
++	}
++#endif
++
++	rq->clock_task += delta;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++	if ((irq_delta + steal))
++		update_irq_load_avg(rq, irq_delta + steal);
++#endif
++}
++
++static inline void update_rq_clock(struct rq *rq)
++{
++	s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
++
++	if (unlikely(delta <= 0))
++		return;
++	rq->clock += delta;
++	update_rq_time_edge(rq);
++	update_rq_clock_task(rq, delta);
++}
++
++/*
++ * RQ Load update routine
++ */
++#define RQ_LOAD_HISTORY_BITS	(sizeof(s32) * 8ULL)
++#define RQ_UTIL_SHIFT		(8)
++#define RQ_LOAD_HISTORY_TO_UTIL(l)	(((l) >> (RQ_LOAD_HISTORY_BITS - 1 - RQ_UTIL_SHIFT)) & 0xff)
++
++#define LOAD_BLOCK(t)		((t) >> 17)
++#define LOAD_HALF_BLOCK(t)	((t) >> 16)
++#define BLOCK_MASK(t)		((t) & ((0x01 << 18) - 1))
++#define LOAD_BLOCK_BIT(b)	(1UL << (RQ_LOAD_HISTORY_BITS - 1 - (b)))
++#define CURRENT_LOAD_BIT	LOAD_BLOCK_BIT(0)
++
++static inline void rq_load_update(struct rq *rq)
++{
++	u64 time = rq->clock;
++	u64 delta = min(LOAD_BLOCK(time) - LOAD_BLOCK(rq->load_stamp),
++			RQ_LOAD_HISTORY_BITS - 1);
++	u64 prev = !!(rq->load_history & CURRENT_LOAD_BIT);
++	u64 curr = !!rq->nr_running;
++
++	if (delta) {
++		rq->load_history = rq->load_history >> delta;
++
++		if (delta < RQ_UTIL_SHIFT) {
++			rq->load_block += (~BLOCK_MASK(rq->load_stamp)) * prev;
++			if (!!LOAD_HALF_BLOCK(rq->load_block) ^ curr)
++				rq->load_history ^= LOAD_BLOCK_BIT(delta);
++		}
++
++		rq->load_block = BLOCK_MASK(time) * prev;
++	} else {
++		rq->load_block += (time - rq->load_stamp) * prev;
++	}
++	if (prev ^ curr)
++		rq->load_history ^= CURRENT_LOAD_BIT;
++	rq->load_stamp = time;
++}
++
++unsigned long rq_load_util(struct rq *rq, unsigned long max)
++{
++	return RQ_LOAD_HISTORY_TO_UTIL(rq->load_history) * (max >> RQ_UTIL_SHIFT);
++}
++
++#ifdef CONFIG_SMP
++unsigned long sched_cpu_util(int cpu, unsigned long max)
++{
++	return rq_load_util(cpu_rq(cpu), max);
++}
++#endif /* CONFIG_SMP */
++
++#ifdef CONFIG_CPU_FREQ
++/**
++ * cpufreq_update_util - Take a note about CPU utilization changes.
++ * @rq: Runqueue to carry out the update for.
++ * @flags: Update reason flags.
++ *
++ * This function is called by the scheduler on the CPU whose utilization is
++ * being updated.
++ *
++ * It can only be called from RCU-sched read-side critical sections.
++ *
++ * The way cpufreq is currently arranged requires it to evaluate the CPU
++ * performance state (frequency/voltage) on a regular basis to prevent it from
++ * being stuck in a completely inadequate performance level for too long.
++ * That is not guaranteed to happen if the updates are only triggered from CFS
++ * and DL, though, because they may not be coming in if only RT tasks are
++ * active all the time (or there are RT tasks only).
++ *
++ * As a workaround for that issue, this function is called periodically by the
++ * RT sched class to trigger extra cpufreq updates to prevent it from stalling,
++ * but that really is a band-aid. Going forward it should be replaced with
++ * solutions targeted more specifically at RT tasks.
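
The rq load bookkeeping above maintains a 32-bit history, one bit per 2^17 ns (~131 us) block with the most recent block at the top, and rq_load_util() scales an 8-bit sample taken just below the top bit. A user-space sketch of the decode step (a max of 1024 is assumed here, in the spirit of SCHED_CAPACITY_SCALE):

#include <stdio.h>
#include <stdint.h>

#define RQ_LOAD_HISTORY_BITS	32
#define RQ_UTIL_SHIFT		8
/* Same expression as RQ_LOAD_HISTORY_TO_UTIL() above: bits 30..23. */
#define HIST_TO_UTIL(l)	(((l) >> (RQ_LOAD_HISTORY_BITS - 1 - RQ_UTIL_SHIFT)) & 0xff)

int main(void)
{
	uint32_t busy = 0xffffffffu;	/* ran in every recent block */
	uint32_t idle = 0x00ffffffu;	/* busy earlier, idle for the latest blocks */
	unsigned long max = 1024;

	printf("busy util: %lu/%lu\n", HIST_TO_UTIL(busy) * (max >> RQ_UTIL_SHIFT), max);	/* 1020 */
	printf("idle util: %lu/%lu\n", HIST_TO_UTIL(idle) * (max >> RQ_UTIL_SHIFT), max);	/* 4 */
	return 0;
}
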
++ */ ++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) ++{ ++ struct update_util_data *data; ++ ++#ifdef CONFIG_SMP ++ rq_load_update(rq); ++#endif ++ data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data, ++ cpu_of(rq))); ++ if (data) ++ data->func(data, rq_clock(rq), flags); ++} ++#else ++static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) ++{ ++#ifdef CONFIG_SMP ++ rq_load_update(rq); ++#endif ++} ++#endif /* CONFIG_CPU_FREQ */ ++ ++#ifdef CONFIG_NO_HZ_FULL ++/* ++ * Tick may be needed by tasks in the runqueue depending on their policy and ++ * requirements. If tick is needed, lets send the target an IPI to kick it out ++ * of nohz mode if necessary. ++ */ ++static inline void sched_update_tick_dependency(struct rq *rq) ++{ ++ int cpu = cpu_of(rq); ++ ++ if (!tick_nohz_full_cpu(cpu)) ++ return; ++ ++ if (rq->nr_running < 2) ++ tick_nohz_dep_clear_cpu(cpu, TICK_DEP_BIT_SCHED); ++ else ++ tick_nohz_dep_set_cpu(cpu, TICK_DEP_BIT_SCHED); ++} ++#else /* !CONFIG_NO_HZ_FULL */ ++static inline void sched_update_tick_dependency(struct rq *rq) { } ++#endif ++ ++bool sched_task_on_rq(struct task_struct *p) ++{ ++ return task_on_rq_queued(p); ++} ++ ++unsigned long get_wchan(struct task_struct *p) ++{ ++ unsigned long ip = 0; ++ unsigned int state; ++ ++ if (!p || p == current) ++ return 0; ++ ++ /* Only get wchan if task is blocked and we can keep it that way. */ ++ raw_spin_lock_irq(&p->pi_lock); ++ state = READ_ONCE(p->__state); ++ smp_rmb(); /* see try_to_wake_up() */ ++ if (state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq) ++ ip = __get_wchan(p); ++ raw_spin_unlock_irq(&p->pi_lock); ++ ++ return ip; ++} ++ ++/* ++ * Add/Remove/Requeue task to/from the runqueue routines ++ * Context: rq->lock ++ */ ++#define __SCHED_DEQUEUE_TASK(p, rq, flags) \ ++ psi_dequeue(p, flags & DEQUEUE_SLEEP); \ ++ sched_info_dequeue(rq, p); \ ++ \ ++ list_del(&p->sq_node); \ ++ if (list_empty(&rq->queue.heads[p->sq_idx])) \ ++ clear_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap); ++ ++#define __SCHED_ENQUEUE_TASK(p, rq, flags) \ ++ sched_info_enqueue(rq, p); \ ++ psi_enqueue(p, flags); \ ++ \ ++ p->sq_idx = task_sched_prio_idx(p, rq); \ ++ list_add_tail(&p->sq_node, &rq->queue.heads[p->sq_idx]); \ ++ set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap); ++ ++static inline void dequeue_task(struct task_struct *p, struct rq *rq, int flags) ++{ ++ lockdep_assert_held(&rq->lock); ++ ++ /*printk(KERN_INFO "sched: dequeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/ ++ WARN_ONCE(task_rq(p) != rq, "sched: dequeue task reside on cpu%d from cpu%d\n", ++ task_cpu(p), cpu_of(rq)); ++ ++ __SCHED_DEQUEUE_TASK(p, rq, flags); ++ --rq->nr_running; ++#ifdef CONFIG_SMP ++ if (1 == rq->nr_running) ++ cpumask_clear_cpu(cpu_of(rq), &sched_rq_pending_mask); ++#endif ++ ++ sched_update_tick_dependency(rq); ++} ++ ++static inline void enqueue_task(struct task_struct *p, struct rq *rq, int flags) ++{ ++ lockdep_assert_held(&rq->lock); ++ ++ /*printk(KERN_INFO "sched: enqueue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/ ++ WARN_ONCE(task_rq(p) != rq, "sched: enqueue task reside on cpu%d to cpu%d\n", ++ task_cpu(p), cpu_of(rq)); ++ ++ __SCHED_ENQUEUE_TASK(p, rq, flags); ++ update_sched_rq_watermark(rq); ++ ++rq->nr_running; ++#ifdef CONFIG_SMP ++ if (2 == rq->nr_running) ++ cpumask_set_cpu(cpu_of(rq), &sched_rq_pending_mask); ++#endif ++ ++ sched_update_tick_dependency(rq); ++} ++ ++static inline void requeue_task(struct task_struct *p, struct rq *rq, int 
idx) ++{ ++ lockdep_assert_held(&rq->lock); ++ /*printk(KERN_INFO "sched: requeue(%d) %px %016llx\n", cpu_of(rq), p, p->priodl);*/ ++ WARN_ONCE(task_rq(p) != rq, "sched: cpu[%d] requeue task reside on cpu%d\n", ++ cpu_of(rq), task_cpu(p)); ++ ++ list_del(&p->sq_node); ++ list_add_tail(&p->sq_node, &rq->queue.heads[idx]); ++ if (idx != p->sq_idx) { ++ if (list_empty(&rq->queue.heads[p->sq_idx])) ++ clear_bit(sched_idx2prio(p->sq_idx, rq), ++ rq->queue.bitmap); ++ p->sq_idx = idx; ++ set_bit(sched_idx2prio(p->sq_idx, rq), rq->queue.bitmap); ++ update_sched_rq_watermark(rq); ++ } ++} ++ ++/* ++ * cmpxchg based fetch_or, macro so it works for different integer types ++ */ ++#define fetch_or(ptr, mask) \ ++ ({ \ ++ typeof(ptr) _ptr = (ptr); \ ++ typeof(mask) _mask = (mask); \ ++ typeof(*_ptr) _old, _val = *_ptr; \ ++ \ ++ for (;;) { \ ++ _old = cmpxchg(_ptr, _val, _val | _mask); \ ++ if (_old == _val) \ ++ break; \ ++ _val = _old; \ ++ } \ ++ _old; \ ++}) ++ ++#if defined(CONFIG_SMP) && defined(TIF_POLLING_NRFLAG) ++/* ++ * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG, ++ * this avoids any races wrt polling state changes and thereby avoids ++ * spurious IPIs. ++ */ ++static bool set_nr_and_not_polling(struct task_struct *p) ++{ ++ struct thread_info *ti = task_thread_info(p); ++ return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG); ++} ++ ++/* ++ * Atomically set TIF_NEED_RESCHED if TIF_POLLING_NRFLAG is set. ++ * ++ * If this returns true, then the idle task promises to call ++ * sched_ttwu_pending() and reschedule soon. ++ */ ++static bool set_nr_if_polling(struct task_struct *p) ++{ ++ struct thread_info *ti = task_thread_info(p); ++ typeof(ti->flags) old, val = READ_ONCE(ti->flags); ++ ++ for (;;) { ++ if (!(val & _TIF_POLLING_NRFLAG)) ++ return false; ++ if (val & _TIF_NEED_RESCHED) ++ return true; ++ old = cmpxchg(&ti->flags, val, val | _TIF_NEED_RESCHED); ++ if (old == val) ++ break; ++ val = old; ++ } ++ return true; ++} ++ ++#else ++static bool set_nr_and_not_polling(struct task_struct *p) ++{ ++ set_tsk_need_resched(p); ++ return true; ++} ++ ++#ifdef CONFIG_SMP ++static bool set_nr_if_polling(struct task_struct *p) ++{ ++ return false; ++} ++#endif ++#endif ++ ++static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task) ++{ ++ struct wake_q_node *node = &task->wake_q; ++ ++ /* ++ * Atomically grab the task, if ->wake_q is !nil already it means ++ * it's already queued (either by us or someone else) and will get the ++ * wakeup due to that. ++ * ++ * In order to ensure that a pending wakeup will observe our pending ++ * state, even in the failed case, an explicit smp_mb() must be used. ++ */ ++ smp_mb__before_atomic(); ++ if (unlikely(cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))) ++ return false; ++ ++ /* ++ * The head is context local, there can be no concurrency. ++ */ ++ *head->lastp = node; ++ head->lastp = &node->next; ++ return true; ++} ++ ++/** ++ * wake_q_add() - queue a wakeup for 'later' waking. ++ * @head: the wake_q_head to add @task to ++ * @task: the task to queue for 'later' wakeup ++ * ++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the ++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come ++ * instantly. ++ * ++ * This function must be used as-if it were wake_up_process(); IOW the task ++ * must be ready to be woken at this location. 
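
The fetch_or() macro above open-codes an atomic OR as a cmpxchg loop; with C11 atomics the same read-modify-write is a single call. User-space sketch:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	atomic_uint flags = 0x4;
	/* atomic_fetch_or() returns the prior value, like fetch_or() above. */
	unsigned old = atomic_fetch_or(&flags, 0x1);

	printf("old %#x new %#x\n", old, (unsigned)flags);	/* old 0x4 new 0x5 */
	return 0;
}
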
++ */ ++void wake_q_add(struct wake_q_head *head, struct task_struct *task) ++{ ++ if (__wake_q_add(head, task)) ++ get_task_struct(task); ++} ++ ++/** ++ * wake_q_add_safe() - safely queue a wakeup for 'later' waking. ++ * @head: the wake_q_head to add @task to ++ * @task: the task to queue for 'later' wakeup ++ * ++ * Queue a task for later wakeup, most likely by the wake_up_q() call in the ++ * same context, _HOWEVER_ this is not guaranteed, the wakeup can come ++ * instantly. ++ * ++ * This function must be used as-if it were wake_up_process(); IOW the task ++ * must be ready to be woken at this location. ++ * ++ * This function is essentially a task-safe equivalent to wake_q_add(). Callers ++ * that already hold reference to @task can call the 'safe' version and trust ++ * wake_q to do the right thing depending whether or not the @task is already ++ * queued for wakeup. ++ */ ++void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task) ++{ ++ if (!__wake_q_add(head, task)) ++ put_task_struct(task); ++} ++ ++void wake_up_q(struct wake_q_head *head) ++{ ++ struct wake_q_node *node = head->first; ++ ++ while (node != WAKE_Q_TAIL) { ++ struct task_struct *task; ++ ++ task = container_of(node, struct task_struct, wake_q); ++ /* task can safely be re-inserted now: */ ++ node = node->next; ++ task->wake_q.next = NULL; ++ ++ /* ++ * wake_up_process() executes a full barrier, which pairs with ++ * the queueing in wake_q_add() so as not to miss wakeups. ++ */ ++ wake_up_process(task); ++ put_task_struct(task); ++ } ++} ++ ++/* ++ * resched_curr - mark rq's current task 'to be rescheduled now'. ++ * ++ * On UP this means the setting of the need_resched flag, on SMP it ++ * might also involve a cross-CPU call to trigger the scheduler on ++ * the target CPU. ++ */ ++void resched_curr(struct rq *rq) ++{ ++ struct task_struct *curr = rq->curr; ++ int cpu; ++ ++ lockdep_assert_held(&rq->lock); ++ ++ if (test_tsk_need_resched(curr)) ++ return; ++ ++ cpu = cpu_of(rq); ++ if (cpu == smp_processor_id()) { ++ set_tsk_need_resched(curr); ++ set_preempt_need_resched(); ++ return; ++ } ++ ++ if (set_nr_and_not_polling(curr)) ++ smp_send_reschedule(cpu); ++ else ++ trace_sched_wake_idle_without_ipi(cpu); ++} ++ ++void resched_cpu(int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ if (cpu_online(cpu) || cpu == smp_processor_id()) ++ resched_curr(cpu_rq(cpu)); ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++} ++ ++#ifdef CONFIG_SMP ++#ifdef CONFIG_NO_HZ_COMMON ++void nohz_balance_enter_idle(int cpu) {} ++ ++void select_nohz_load_balancer(int stop_tick) {} ++ ++void set_cpu_sd_state_idle(void) {} ++ ++/* ++ * In the semi idle case, use the nearest busy CPU for migrating timers ++ * from an idle CPU. This is good for power-savings. ++ * ++ * We don't do similar optimization for completely idle system, as ++ * selecting an idle CPU will add more delays to the timers than intended ++ * (as that CPU's timer base may not be uptodate wrt jiffies etc). 
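
For context, the caller-side shape of the wake_q API defined above is: collect wakeups under a hot lock, then issue them after dropping it. A kernel-context sketch (not standalone; `obj`, `struct waiter`, and pick_next_waiter() are hypothetical):

	struct waiter { struct task_struct *task; } *w;	/* hypothetical */
	DEFINE_WAKE_Q(wake_q);

	spin_lock(&obj->lock);
	while ((w = pick_next_waiter(obj)))	/* hypothetical helper */
		wake_q_add(&wake_q, w->task);	/* grabs a task reference */
	spin_unlock(&obj->lock);

	wake_up_q(&wake_q);	/* wakeups (and reference drops) happen without obj->lock */
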
++ */ ++int get_nohz_timer_target(void) ++{ ++ int i, cpu = smp_processor_id(), default_cpu = -1; ++ struct cpumask *mask; ++ const struct cpumask *hk_mask; ++ ++ if (housekeeping_cpu(cpu, HK_TYPE_TIMER)) { ++ if (!idle_cpu(cpu)) ++ return cpu; ++ default_cpu = cpu; ++ } ++ ++ hk_mask = housekeeping_cpumask(HK_TYPE_TIMER); ++ ++ for (mask = per_cpu(sched_cpu_topo_masks, cpu) + 1; ++ mask < per_cpu(sched_cpu_topo_end_mask, cpu); mask++) ++ for_each_cpu_and(i, mask, hk_mask) ++ if (!idle_cpu(i)) ++ return i; ++ ++ if (default_cpu == -1) ++ default_cpu = housekeeping_any_cpu(HK_TYPE_TIMER); ++ cpu = default_cpu; ++ ++ return cpu; ++} ++ ++/* ++ * When add_timer_on() enqueues a timer into the timer wheel of an ++ * idle CPU then this timer might expire before the next timer event ++ * which is scheduled to wake up that CPU. In case of a completely ++ * idle system the next event might even be infinite time into the ++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and ++ * leaves the inner idle loop so the newly added timer is taken into ++ * account when the CPU goes back to idle and evaluates the timer ++ * wheel for the next timer event. ++ */ ++static inline void wake_up_idle_cpu(int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ ++ if (cpu == smp_processor_id()) ++ return; ++ ++ if (set_nr_and_not_polling(rq->idle)) ++ smp_send_reschedule(cpu); ++ else ++ trace_sched_wake_idle_without_ipi(cpu); ++} ++ ++static inline bool wake_up_full_nohz_cpu(int cpu) ++{ ++ /* ++ * We just need the target to call irq_exit() and re-evaluate ++ * the next tick. The nohz full kick at least implies that. ++ * If needed we can still optimize that later with an ++ * empty IRQ. ++ */ ++ if (cpu_is_offline(cpu)) ++ return true; /* Don't try to wake offline CPUs. */ ++ if (tick_nohz_full_cpu(cpu)) { ++ if (cpu != smp_processor_id() || ++ tick_nohz_tick_stopped()) ++ tick_nohz_full_kick_cpu(cpu); ++ return true; ++ } ++ ++ return false; ++} ++ ++void wake_up_nohz_cpu(int cpu) ++{ ++ if (!wake_up_full_nohz_cpu(cpu)) ++ wake_up_idle_cpu(cpu); ++} ++ ++static void nohz_csd_func(void *info) ++{ ++ struct rq *rq = info; ++ int cpu = cpu_of(rq); ++ unsigned int flags; ++ ++ /* ++ * Release the rq::nohz_csd. ++ */ ++ flags = atomic_fetch_andnot(NOHZ_KICK_MASK, nohz_flags(cpu)); ++ WARN_ON(!(flags & NOHZ_KICK_MASK)); ++ ++ rq->idle_balance = idle_cpu(cpu); ++ if (rq->idle_balance && !need_resched()) { ++ rq->nohz_idle_balance = flags; ++ raise_softirq_irqoff(SCHED_SOFTIRQ); ++ } ++} ++ ++#endif /* CONFIG_NO_HZ_COMMON */ ++#endif /* CONFIG_SMP */ ++ ++static inline void check_preempt_curr(struct rq *rq) ++{ ++ if (sched_rq_first_task(rq) != rq->curr) ++ resched_curr(rq); ++} ++ ++#ifdef CONFIG_SCHED_HRTICK ++/* ++ * Use HR-timers to deliver accurate preemption points. ++ */ ++ ++static void hrtick_clear(struct rq *rq) ++{ ++ if (hrtimer_active(&rq->hrtick_timer)) ++ hrtimer_cancel(&rq->hrtick_timer); ++} ++ ++/* ++ * High-resolution timer tick. ++ * Runs from hardirq context with interrupts disabled. 
++ */ ++static enum hrtimer_restart hrtick(struct hrtimer *timer) ++{ ++ struct rq *rq = container_of(timer, struct rq, hrtick_timer); ++ ++ WARN_ON_ONCE(cpu_of(rq) != smp_processor_id()); ++ ++ raw_spin_lock(&rq->lock); ++ resched_curr(rq); ++ raw_spin_unlock(&rq->lock); ++ ++ return HRTIMER_NORESTART; ++} ++ ++/* ++ * Use hrtick when: ++ * - enabled by features ++ * - hrtimer is actually high res ++ */ ++static inline int hrtick_enabled(struct rq *rq) ++{ ++ /** ++ * Alt schedule FW doesn't support sched_feat yet ++ if (!sched_feat(HRTICK)) ++ return 0; ++ */ ++ if (!cpu_active(cpu_of(rq))) ++ return 0; ++ return hrtimer_is_hres_active(&rq->hrtick_timer); ++} ++ ++#ifdef CONFIG_SMP ++ ++static void __hrtick_restart(struct rq *rq) ++{ ++ struct hrtimer *timer = &rq->hrtick_timer; ++ ktime_t time = rq->hrtick_time; ++ ++ hrtimer_start(timer, time, HRTIMER_MODE_ABS_PINNED_HARD); ++} ++ ++/* ++ * called from hardirq (IPI) context ++ */ ++static void __hrtick_start(void *arg) ++{ ++ struct rq *rq = arg; ++ ++ raw_spin_lock(&rq->lock); ++ __hrtick_restart(rq); ++ raw_spin_unlock(&rq->lock); ++} ++ ++/* ++ * Called to set the hrtick timer state. ++ * ++ * called with rq->lock held and irqs disabled ++ */ ++void hrtick_start(struct rq *rq, u64 delay) ++{ ++ struct hrtimer *timer = &rq->hrtick_timer; ++ s64 delta; ++ ++ /* ++ * Don't schedule slices shorter than 10000ns, that just ++ * doesn't make sense and can cause timer DoS. ++ */ ++ delta = max_t(s64, delay, 10000LL); ++ ++ rq->hrtick_time = ktime_add_ns(timer->base->get_time(), delta); ++ ++ if (rq == this_rq()) ++ __hrtick_restart(rq); ++ else ++ smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd); ++} ++ ++#else ++/* ++ * Called to set the hrtick timer state. ++ * ++ * called with rq->lock held and irqs disabled ++ */ ++void hrtick_start(struct rq *rq, u64 delay) ++{ ++ /* ++ * Don't schedule slices shorter than 10000ns, that just ++ * doesn't make sense. Rely on vruntime for fairness. ++ */ ++ delay = max_t(u64, delay, 10000LL); ++ hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), ++ HRTIMER_MODE_REL_PINNED_HARD); ++} ++#endif /* CONFIG_SMP */ ++ ++static void hrtick_rq_init(struct rq *rq) ++{ ++#ifdef CONFIG_SMP ++ INIT_CSD(&rq->hrtick_csd, __hrtick_start, rq); ++#endif ++ ++ hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); ++ rq->hrtick_timer.function = hrtick; ++} ++#else /* CONFIG_SCHED_HRTICK */ ++static inline int hrtick_enabled(struct rq *rq) ++{ ++ return 0; ++} ++ ++static inline void hrtick_clear(struct rq *rq) ++{ ++} ++ ++static inline void hrtick_rq_init(struct rq *rq) ++{ ++} ++#endif /* CONFIG_SCHED_HRTICK */ ++ ++static inline int __normal_prio(int policy, int rt_prio, int static_prio) ++{ ++ return rt_policy(policy) ? (MAX_RT_PRIO - 1 - rt_prio) : ++ static_prio + MAX_PRIORITY_ADJ; ++} ++ ++/* ++ * Calculate the expected normal priority: i.e. priority ++ * without taking RT-inheritance into account. Might be ++ * boosted by interactivity modifiers. Changes upon fork, ++ * setprio syscalls, and whenever the interactivity ++ * estimator recalculates. ++ */ ++static inline int normal_prio(struct task_struct *p) ++{ ++ return __normal_prio(p->policy, p->rt_priority, p->static_prio); ++} ++ ++/* ++ * Calculate the current priority, i.e. the priority ++ * taken into account by the scheduler. This value might ++ * be boosted by RT tasks as it will be RT if the task got ++ * RT-boosted. If not then it returns p->normal_prio. 
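
Plugging numbers into __normal_prio() above makes the two halves of the priority space concrete; a user-space check (BMQ constants assumed: MAX_RT_PRIO == 100, MAX_PRIORITY_ADJ == 7):

#include <stdio.h>

#define MAX_RT_PRIO		100
#define MAX_PRIORITY_ADJ	7

/* Same mapping as __normal_prio() above, with the constants inlined. */
static int normal_prio(int rt_policy, int rt_prio, int static_prio)
{
	return rt_policy ? (MAX_RT_PRIO - 1 - rt_prio)
			 : static_prio + MAX_PRIORITY_ADJ;
}

int main(void)
{
	printf("SCHED_FIFO rt_priority 50 -> %d\n", normal_prio(1, 50, 0));	/* 49 */
	printf("nice 0 (static prio 120)  -> %d\n", normal_prio(0, 0, 120));	/* 127 */
	return 0;
}
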
++ */ ++static int effective_prio(struct task_struct *p) ++{ ++ p->normal_prio = normal_prio(p); ++ /* ++ * If we are RT tasks or we were boosted to RT priority, ++ * keep the priority unchanged. Otherwise, update priority ++ * to the normal priority: ++ */ ++ if (!rt_prio(p->prio)) ++ return p->normal_prio; ++ return p->prio; ++} ++ ++/* ++ * activate_task - move a task to the runqueue. ++ * ++ * Context: rq->lock ++ */ ++static void activate_task(struct task_struct *p, struct rq *rq) ++{ ++ enqueue_task(p, rq, ENQUEUE_WAKEUP); ++ p->on_rq = TASK_ON_RQ_QUEUED; ++ ++ /* ++ * If in_iowait is set, the code below may not trigger any cpufreq ++ * utilization updates, so do it here explicitly with the IOWAIT flag ++ * passed. ++ */ ++ cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT * p->in_iowait); ++} ++ ++/* ++ * deactivate_task - remove a task from the runqueue. ++ * ++ * Context: rq->lock ++ */ ++static inline void deactivate_task(struct task_struct *p, struct rq *rq) ++{ ++ dequeue_task(p, rq, DEQUEUE_SLEEP); ++ p->on_rq = 0; ++ cpufreq_update_util(rq, 0); ++} ++ ++static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu) ++{ ++#ifdef CONFIG_SMP ++ /* ++ * After ->cpu is set up to a new value, task_access_lock(p, ...) can be ++ * successfully executed on another CPU. We must ensure that updates of ++ * per-task data have been completed by this moment. ++ */ ++ smp_wmb(); ++ ++ WRITE_ONCE(task_thread_info(p)->cpu, cpu); ++#endif ++} ++ ++static inline bool is_migration_disabled(struct task_struct *p) ++{ ++#ifdef CONFIG_SMP ++ return p->migration_disabled; ++#else ++ return false; ++#endif ++} ++ ++#define SCA_CHECK 0x01 ++#define SCA_USER 0x08 ++ ++#ifdef CONFIG_SMP ++ ++void set_task_cpu(struct task_struct *p, unsigned int new_cpu) ++{ ++#ifdef CONFIG_SCHED_DEBUG ++ unsigned int state = READ_ONCE(p->__state); ++ ++ /* ++ * We should never call set_task_cpu() on a blocked task, ++ * ttwu() will sort out the placement. ++ */ ++ WARN_ON_ONCE(state != TASK_RUNNING && state != TASK_WAKING && !p->on_rq); ++ ++#ifdef CONFIG_LOCKDEP ++ /* ++ * The caller should hold either p->pi_lock or rq->lock, when changing ++ * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks. ++ * ++ * sched_move_task() holds both and thus holding either pins the cgroup, ++ * see task_group(). ++ */ ++ WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || ++ lockdep_is_held(&task_rq(p)->lock))); ++#endif ++ /* ++ * Clearly, migrating tasks to offline CPUs is a fairly daft thing. ++ */ ++ WARN_ON_ONCE(!cpu_online(new_cpu)); ++ ++ WARN_ON_ONCE(is_migration_disabled(p)); ++#endif ++ if (task_cpu(p) == new_cpu) ++ return; ++ trace_sched_migrate_task(p, new_cpu); ++ rseq_migrate(p); ++ perf_event_task_migrate(p); ++ ++ __set_task_cpu(p, new_cpu); ++} ++ ++#define MDF_FORCE_ENABLED 0x80 ++ ++static void ++__do_set_cpus_ptr(struct task_struct *p, const struct cpumask *new_mask) ++{ ++ /* ++ * This here violates the locking rules for affinity, since we're only ++ * supposed to change these variables while holding both rq->lock and ++ * p->pi_lock. ++ * ++ * HOWEVER, it magically works, because ttwu() is the only code that ++ * accesses these variables under p->pi_lock and only does so after ++ * smp_cond_load_acquire(&p->on_cpu, !VAL), and we're in __schedule() ++ * before finish_task(). ++ * ++ * XXX do further audits, this smells like something putrid. 
++ */ ++ SCHED_WARN_ON(!p->on_cpu); ++ p->cpus_ptr = new_mask; ++} ++ ++void migrate_disable(void) ++{ ++ struct task_struct *p = current; ++ int cpu; ++ ++ if (p->migration_disabled) { ++ p->migration_disabled++; ++ return; ++ } ++ ++ preempt_disable(); ++ cpu = smp_processor_id(); ++ if (cpumask_test_cpu(cpu, &p->cpus_mask)) { ++ cpu_rq(cpu)->nr_pinned++; ++ p->migration_disabled = 1; ++ p->migration_flags &= ~MDF_FORCE_ENABLED; ++ ++ /* ++ * Violates locking rules! see comment in __do_set_cpus_ptr(). ++ */ ++ if (p->cpus_ptr == &p->cpus_mask) ++ __do_set_cpus_ptr(p, cpumask_of(cpu)); ++ } ++ preempt_enable(); ++} ++EXPORT_SYMBOL_GPL(migrate_disable); ++ ++void migrate_enable(void) ++{ ++ struct task_struct *p = current; ++ ++ if (0 == p->migration_disabled) ++ return; ++ ++ if (p->migration_disabled > 1) { ++ p->migration_disabled--; ++ return; ++ } ++ ++ if (WARN_ON_ONCE(!p->migration_disabled)) ++ return; ++ ++ /* ++ * Ensure stop_task runs either before or after this, and that ++ * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule(). ++ */ ++ preempt_disable(); ++ /* ++ * Assumption: current should be running on allowed cpu ++ */ ++ WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &p->cpus_mask)); ++ if (p->cpus_ptr != &p->cpus_mask) ++ __do_set_cpus_ptr(p, &p->cpus_mask); ++ /* ++ * Mustn't clear migration_disabled() until cpus_ptr points back at the ++ * regular cpus_mask, otherwise things that race (eg. ++ * select_fallback_rq) get confused. ++ */ ++ barrier(); ++ p->migration_disabled = 0; ++ this_rq()->nr_pinned--; ++ preempt_enable(); ++} ++EXPORT_SYMBOL_GPL(migrate_enable); ++ ++static inline bool rq_has_pinned_tasks(struct rq *rq) ++{ ++ return rq->nr_pinned; ++} ++ ++/* ++ * Per-CPU kthreads are allowed to run on !active && online CPUs, see ++ * __set_cpus_allowed_ptr() and select_fallback_rq(). ++ */ ++static inline bool is_cpu_allowed(struct task_struct *p, int cpu) ++{ ++ /* When not in the task's cpumask, no point in looking further. */ ++ if (!cpumask_test_cpu(cpu, p->cpus_ptr)) ++ return false; ++ ++ /* migrate_disabled() must be allowed to finish. */ ++ if (is_migration_disabled(p)) ++ return cpu_online(cpu); ++ ++ /* Non kernel threads are not allowed during either online or offline. */ ++ if (!(p->flags & PF_KTHREAD)) ++ return cpu_active(cpu) && task_cpu_possible(cpu, p); ++ ++ /* KTHREAD_IS_PER_CPU is always allowed. */ ++ if (kthread_is_per_cpu(p)) ++ return cpu_online(cpu); ++ ++ /* Regular kernel threads don't get to stay during offline. */ ++ if (cpu_dying(cpu)) ++ return false; ++ ++ /* But are allowed during online. */ ++ return cpu_online(cpu); ++} ++ ++/* ++ * This is how migration works: ++ * ++ * 1) we invoke migration_cpu_stop() on the target CPU using ++ * stop_one_cpu(). ++ * 2) stopper starts to run (implicitly forcing the migrated thread ++ * off the CPU) ++ * 3) it checks whether the migrated task is still in the wrong runqueue. ++ * 4) if it's in the wrong runqueue then the migration thread removes ++ * it and puts it into the right queue. ++ * 5) stopper completes and stop_one_cpu() returns and the migration ++ * is done. ++ */ ++ ++/* ++ * move_queued_task - move a queued task to new rq. ++ * ++ * Returns (locked) new rq. Old rq's lock is released. 
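
Caller-side, the migrate_disable()/migrate_enable() pair defined above brackets code that must stay on one CPU while remaining preemptible. A kernel-context sketch (the per-CPU variable and helper are hypothetical):

	migrate_disable();
	/* This task may be preempted here, but cannot change CPU. */
	this_cpu_write(example_stat, example_stat_update());	/* hypothetical */
	migrate_enable();
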
++ */ ++static struct rq *move_queued_task(struct rq *rq, struct task_struct *p, int ++ new_cpu) ++{ ++ lockdep_assert_held(&rq->lock); ++ ++ WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING); ++ dequeue_task(p, rq, 0); ++ update_sched_rq_watermark(rq); ++ set_task_cpu(p, new_cpu); ++ raw_spin_unlock(&rq->lock); ++ ++ rq = cpu_rq(new_cpu); ++ ++ raw_spin_lock(&rq->lock); ++ BUG_ON(task_cpu(p) != new_cpu); ++ sched_task_sanity_check(p, rq); ++ enqueue_task(p, rq, 0); ++ p->on_rq = TASK_ON_RQ_QUEUED; ++ check_preempt_curr(rq); ++ ++ return rq; ++} ++ ++struct migration_arg { ++ struct task_struct *task; ++ int dest_cpu; ++}; ++ ++/* ++ * Move (not current) task off this CPU, onto the destination CPU. We're doing ++ * this because either it can't run here any more (set_cpus_allowed() ++ * away from this CPU, or CPU going down), or because we're ++ * attempting to rebalance this task on exec (sched_exec). ++ * ++ * So we race with normal scheduler movements, but that's OK, as long ++ * as the task is no longer on this CPU. ++ */ ++static struct rq *__migrate_task(struct rq *rq, struct task_struct *p, int ++ dest_cpu) ++{ ++ /* Affinity changed (again). */ ++ if (!is_cpu_allowed(p, dest_cpu)) ++ return rq; ++ ++ update_rq_clock(rq); ++ return move_queued_task(rq, p, dest_cpu); ++} ++ ++/* ++ * migration_cpu_stop - this will be executed by a highprio stopper thread ++ * and performs thread migration by bumping thread off CPU then ++ * 'pushing' onto another runqueue. ++ */ ++static int migration_cpu_stop(void *data) ++{ ++ struct migration_arg *arg = data; ++ struct task_struct *p = arg->task; ++ struct rq *rq = this_rq(); ++ unsigned long flags; ++ ++ /* ++ * The original target CPU might have gone down and we might ++ * be on another CPU but it doesn't matter. ++ */ ++ local_irq_save(flags); ++ /* ++ * We need to explicitly wake pending tasks before running ++ * __migrate_task() such that we will not miss enforcing cpus_ptr ++ * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. ++ */ ++ flush_smp_call_function_queue(); ++ ++ raw_spin_lock(&p->pi_lock); ++ raw_spin_lock(&rq->lock); ++ /* ++ * If task_rq(p) != rq, it cannot be migrated here, because we're ++ * holding rq->lock, if p->on_rq == 0 it cannot get enqueued because ++ * we're holding p->pi_lock. 
++ */ ++ if (task_rq(p) == rq && task_on_rq_queued(p)) ++ rq = __migrate_task(rq, p, arg->dest_cpu); ++ raw_spin_unlock(&rq->lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ ++ return 0; ++} ++ ++static inline void ++set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask) ++{ ++ cpumask_copy(&p->cpus_mask, new_mask); ++ p->nr_cpus_allowed = cpumask_weight(new_mask); ++} ++ ++static void ++__do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) ++{ ++ lockdep_assert_held(&p->pi_lock); ++ set_cpus_allowed_common(p, new_mask); ++} ++ ++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) ++{ ++ __do_set_cpus_allowed(p, new_mask); ++} ++ ++int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, ++ int node) ++{ ++ if (!src->user_cpus_ptr) ++ return 0; ++ ++ dst->user_cpus_ptr = kmalloc_node(cpumask_size(), GFP_KERNEL, node); ++ if (!dst->user_cpus_ptr) ++ return -ENOMEM; ++ ++ cpumask_copy(dst->user_cpus_ptr, src->user_cpus_ptr); ++ return 0; ++} ++ ++static inline struct cpumask *clear_user_cpus_ptr(struct task_struct *p) ++{ ++ struct cpumask *user_mask = NULL; ++ ++ swap(p->user_cpus_ptr, user_mask); ++ ++ return user_mask; ++} ++ ++void release_user_cpus_ptr(struct task_struct *p) ++{ ++ kfree(clear_user_cpus_ptr(p)); ++} ++ ++#endif ++ ++/** ++ * task_curr - is this task currently executing on a CPU? ++ * @p: the task in question. ++ * ++ * Return: 1 if the task is currently executing. 0 otherwise. ++ */ ++inline int task_curr(const struct task_struct *p) ++{ ++ return cpu_curr(task_cpu(p)) == p; ++} ++ ++#ifdef CONFIG_SMP ++/* ++ * wait_task_inactive - wait for a thread to unschedule. ++ * ++ * If @match_state is nonzero, it's the @p->state value just checked and ++ * not expected to change. If it changes, i.e. @p might have woken up, ++ * then return zero. When we succeed in waiting for @p to be off its CPU, ++ * we return a positive number (its total switch count). If a second call ++ * a short while later returns the same number, the caller can be sure that ++ * @p has remained unscheduled the whole time. ++ * ++ * The caller must ensure that the task *will* unschedule sometime soon, ++ * else this function might spin for a *long* time. This function can't ++ * be called with interrupts off, or it may introduce deadlock with ++ * smp_call_function() if an IPI is sent by the same process we are ++ * waiting to become inactive. ++ */ ++unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state) ++{ ++ unsigned long flags; ++ bool running, on_rq; ++ unsigned long ncsw; ++ struct rq *rq; ++ raw_spinlock_t *lock; ++ ++ for (;;) { ++ rq = task_rq(p); ++ ++ /* ++ * If the task is actively running on another CPU ++ * still, just relax and busy-wait without holding ++ * any locks. ++ * ++ * NOTE! Since we don't hold any locks, it's not ++ * even sure that "rq" stays as the right runqueue! ++ * But we don't care, since this will return false ++ * if the runqueue has changed and p is actually now ++ * running somewhere else! ++ */ ++ while (task_running(p) && p == rq->curr) { ++ if (match_state && unlikely(READ_ONCE(p->__state) != match_state)) ++ return 0; ++ cpu_relax(); ++ } ++ ++ /* ++ * Ok, time to look more closely! We need the rq ++ * lock now, to be *sure*. If we're wrong, we'll ++ * just go back and repeat. 
++ */ ++ task_access_lock_irqsave(p, &lock, &flags); ++ trace_sched_wait_task(p); ++ running = task_running(p); ++ on_rq = p->on_rq; ++ ncsw = 0; ++ if (!match_state || READ_ONCE(p->__state) == match_state) ++ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ ++ task_access_unlock_irqrestore(p, lock, &flags); ++ ++ /* ++ * If it changed from the expected state, bail out now. ++ */ ++ if (unlikely(!ncsw)) ++ break; ++ ++ /* ++ * Was it really running after all now that we ++ * checked with the proper locks actually held? ++ * ++ * Oops. Go back and try again.. ++ */ ++ if (unlikely(running)) { ++ cpu_relax(); ++ continue; ++ } ++ ++ /* ++ * It's not enough that it's not actively running, ++ * it must be off the runqueue _entirely_, and not ++ * preempted! ++ * ++ * So if it was still runnable (but just not actively ++ * running right now), it's preempted, and we should ++ * yield - it could be a while. ++ */ ++ if (unlikely(on_rq)) { ++ ktime_t to = NSEC_PER_SEC / HZ; ++ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ schedule_hrtimeout(&to, HRTIMER_MODE_REL_HARD); ++ continue; ++ } ++ ++ /* ++ * Ahh, all good. It wasn't running, and it wasn't ++ * runnable, which means that it will never become ++ * running in the future either. We're all done! ++ */ ++ break; ++ } ++ ++ return ncsw; ++} ++ ++/*** ++ * kick_process - kick a running thread to enter/exit the kernel ++ * @p: the to-be-kicked thread ++ * ++ * Cause a process which is running on another CPU to enter ++ * kernel-mode, without any delay. (to get signals handled.) ++ * ++ * NOTE: this function doesn't have to take the runqueue lock, ++ * because all it wants to ensure is that the remote task enters ++ * the kernel. If the IPI races and the task has been migrated ++ * to another CPU then no harm is done and the purpose has been ++ * achieved as well. ++ */ ++void kick_process(struct task_struct *p) ++{ ++ int cpu; ++ ++ preempt_disable(); ++ cpu = task_cpu(p); ++ if ((cpu != smp_processor_id()) && task_curr(p)) ++ smp_send_reschedule(cpu); ++ preempt_enable(); ++} ++EXPORT_SYMBOL_GPL(kick_process); ++ ++/* ++ * ->cpus_ptr is protected by both rq->lock and p->pi_lock ++ * ++ * A few notes on cpu_active vs cpu_online: ++ * ++ * - cpu_active must be a subset of cpu_online ++ * ++ * - on CPU-up we allow per-CPU kthreads on the online && !active CPU, ++ * see __set_cpus_allowed_ptr(). At this point the newly online ++ * CPU isn't yet part of the sched domains, and balancing will not ++ * see it. ++ * ++ * - on cpu-down we clear cpu_active() to mask the sched domains and ++ * avoid the load balancer to place new tasks on the to be removed ++ * CPU. Existing tasks will remain running there and will be taken ++ * off. ++ * ++ * This means that fallback selection must not select !active CPUs. ++ * And can assume that any active CPU must be online. Conversely ++ * select_task_rq() below may allow selection of !active CPUs in order ++ * to satisfy the above rules. ++ */ ++static int select_fallback_rq(int cpu, struct task_struct *p) ++{ ++ int nid = cpu_to_node(cpu); ++ const struct cpumask *nodemask = NULL; ++ enum { cpuset, possible, fail } state = cpuset; ++ int dest_cpu; ++ ++ /* ++ * If the node that the CPU is on has been offlined, cpu_to_node() ++ * will return -1. There is no CPU on the node, and we should ++ * select the CPU on the other node. ++ */ ++ if (nid != -1) { ++ nodemask = cpumask_of_node(nid); ++ ++ /* Look for allowed, online CPU in same node. 
*/ ++ for_each_cpu(dest_cpu, nodemask) { ++ if (is_cpu_allowed(p, dest_cpu)) ++ return dest_cpu; ++ } ++ } ++ ++ for (;;) { ++ /* Any allowed, online CPU? */ ++ for_each_cpu(dest_cpu, p->cpus_ptr) { ++ if (!is_cpu_allowed(p, dest_cpu)) ++ continue; ++ goto out; ++ } ++ ++ /* No more Mr. Nice Guy. */ ++ switch (state) { ++ case cpuset: ++ if (cpuset_cpus_allowed_fallback(p)) { ++ state = possible; ++ break; ++ } ++ fallthrough; ++ case possible: ++ /* ++ * XXX When called from select_task_rq() we only ++ * hold p->pi_lock and again violate locking order. ++ * ++ * More yuck to audit. ++ */ ++ do_set_cpus_allowed(p, task_cpu_possible_mask(p)); ++ state = fail; ++ break; ++ ++ case fail: ++ BUG(); ++ break; ++ } ++ } ++ ++out: ++ if (state != cpuset) { ++ /* ++ * Don't tell them about moving exiting tasks or ++ * kernel threads (both mm NULL), since they never ++ * leave kernel. ++ */ ++ if (p->mm && printk_ratelimit()) { ++ printk_deferred("process %d (%s) no longer affine to cpu%d\n", ++ task_pid_nr(p), p->comm, cpu); ++ } ++ } ++ ++ return dest_cpu; ++} ++ ++static inline int select_task_rq(struct task_struct *p) ++{ ++ cpumask_t chk_mask, tmp; ++ ++ if (unlikely(!cpumask_and(&chk_mask, p->cpus_ptr, cpu_active_mask))) ++ return select_fallback_rq(task_cpu(p), p); ++ ++ if ( ++#ifdef CONFIG_SCHED_SMT ++ cpumask_and(&tmp, &chk_mask, &sched_sg_idle_mask) || ++#endif ++ cpumask_and(&tmp, &chk_mask, sched_rq_watermark) || ++ cpumask_and(&tmp, &chk_mask, ++ sched_rq_watermark + SCHED_QUEUE_BITS - 1 - task_sched_prio(p))) ++ return best_mask_cpu(task_cpu(p), &tmp); ++ ++ return best_mask_cpu(task_cpu(p), &chk_mask); ++} ++ ++void sched_set_stop_task(int cpu, struct task_struct *stop) ++{ ++ static struct lock_class_key stop_pi_lock; ++ struct sched_param stop_param = { .sched_priority = STOP_PRIO }; ++ struct sched_param start_param = { .sched_priority = 0 }; ++ struct task_struct *old_stop = cpu_rq(cpu)->stop; ++ ++ if (stop) { ++ /* ++ * Make it appear like a SCHED_FIFO task, its something ++ * userspace knows about and won't get confused about. ++ * ++ * Also, it will make PI more or less work without too ++ * much confusion -- but then, stop work should not ++ * rely on PI working anyway. ++ */ ++ sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param); ++ ++ /* ++ * The PI code calls rt_mutex_setprio() with ->pi_lock held to ++ * adjust the effective priority of a task. As a result, ++ * rt_mutex_setprio() can trigger (RT) balancing operations, ++ * which can then trigger wakeups of the stop thread to push ++ * around the current task. ++ * ++ * The stop task itself will never be part of the PI-chain, it ++ * never blocks, therefore that ->pi_lock recursion is safe. ++ * Tell lockdep about this by placing the stop->pi_lock in its ++ * own class. ++ */ ++ lockdep_set_class(&stop->pi_lock, &stop_pi_lock); ++ } ++ ++ cpu_rq(cpu)->stop = stop; ++ ++ if (old_stop) { ++ /* ++ * Reset it back to a normal scheduling policy so that ++ * it can die in pieces. ++ */ ++ sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param); ++ } ++} ++ ++static int affine_move_task(struct rq *rq, struct task_struct *p, int dest_cpu, ++ raw_spinlock_t *lock, unsigned long irq_flags) ++{ ++ /* Can the task run on the task's current CPU? 
If so, we're done */ ++ if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) { ++ if (p->migration_disabled) { ++ if (likely(p->cpus_ptr != &p->cpus_mask)) ++ __do_set_cpus_ptr(p, &p->cpus_mask); ++ p->migration_disabled = 0; ++ p->migration_flags |= MDF_FORCE_ENABLED; ++ /* When p is migrate_disabled, rq->lock should be held */ ++ rq->nr_pinned--; ++ } ++ ++ if (task_running(p) || READ_ONCE(p->__state) == TASK_WAKING) { ++ struct migration_arg arg = { p, dest_cpu }; ++ ++ /* Need help from migration thread: drop lock and wait. */ ++ __task_access_unlock(p, lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags); ++ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); ++ return 0; ++ } ++ if (task_on_rq_queued(p)) { ++ /* ++ * OK, since we're going to drop the lock immediately ++ * afterwards anyway. ++ */ ++ update_rq_clock(rq); ++ rq = move_queued_task(rq, p, dest_cpu); ++ lock = &rq->lock; ++ } ++ } ++ __task_access_unlock(p, lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags); ++ return 0; ++} ++ ++static int __set_cpus_allowed_ptr_locked(struct task_struct *p, ++ const struct cpumask *new_mask, ++ u32 flags, ++ struct rq *rq, ++ raw_spinlock_t *lock, ++ unsigned long irq_flags) ++{ ++ const struct cpumask *cpu_allowed_mask = task_cpu_possible_mask(p); ++ const struct cpumask *cpu_valid_mask = cpu_active_mask; ++ bool kthread = p->flags & PF_KTHREAD; ++ struct cpumask *user_mask = NULL; ++ int dest_cpu; ++ int ret = 0; ++ ++ if (kthread || is_migration_disabled(p)) { ++ /* ++ * Kernel threads are allowed on online && !active CPUs, ++ * however, during cpu-hot-unplug, even these might get pushed ++ * away if not KTHREAD_IS_PER_CPU. ++ * ++ * Specifically, migration_disabled() tasks must not fail the ++ * cpumask_any_and_distribute() pick below, esp. so on ++ * SCA_MIGRATE_ENABLE, otherwise we'll not call ++ * set_cpus_allowed_common() and actually reset p->cpus_ptr. ++ */ ++ cpu_valid_mask = cpu_online_mask; ++ } ++ ++ if (!kthread && !cpumask_subset(new_mask, cpu_allowed_mask)) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ /* ++ * Must re-check here, to close a race against __kthread_bind(), ++ * sched_setaffinity() is not guaranteed to observe the flag. ++ */ ++ if ((flags & SCA_CHECK) && (p->flags & PF_NO_SETAFFINITY)) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ if (cpumask_equal(&p->cpus_mask, new_mask)) ++ goto out; ++ ++ dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); ++ if (dest_cpu >= nr_cpu_ids) { ++ ret = -EINVAL; ++ goto out; ++ } ++ ++ __do_set_cpus_allowed(p, new_mask); ++ ++ if (flags & SCA_USER) ++ user_mask = clear_user_cpus_ptr(p); ++ ++ ret = affine_move_task(rq, p, dest_cpu, lock, irq_flags); ++ ++ kfree(user_mask); ++ ++ return ret; ++ ++out: ++ __task_access_unlock(p, lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags); ++ ++ return ret; ++} ++ ++/* ++ * Change a given task's CPU affinity. Migrate the thread to a ++ * proper CPU and schedule it away if the CPU it's executing on ++ * is removed from the allowed bitmask. ++ * ++ * NOTE: the caller must have a valid reference to the task, the ++ * task must not exit() & deallocate itself prematurely. The ++ * call is not atomic; no spinlocks may be held. 
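++ *
++ * For example, a hypothetical caller pinning a helper kthread to CPU 2
++ * (sketch only, error handling elided):
++ *
++ *	if (set_cpus_allowed_ptr(worker, cpumask_of(2)))
++ *		pr_warn("failed to pin worker\n");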
++ */ ++static int __set_cpus_allowed_ptr(struct task_struct *p, ++ const struct cpumask *new_mask, u32 flags) ++{ ++ unsigned long irq_flags; ++ struct rq *rq; ++ raw_spinlock_t *lock; ++ ++ raw_spin_lock_irqsave(&p->pi_lock, irq_flags); ++ rq = __task_access_lock(p, &lock); ++ ++ return __set_cpus_allowed_ptr_locked(p, new_mask, flags, rq, lock, irq_flags); ++} ++ ++int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask) ++{ ++ return __set_cpus_allowed_ptr(p, new_mask, 0); ++} ++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr); ++ ++/* ++ * Change a given task's CPU affinity to the intersection of its current ++ * affinity mask and @subset_mask, writing the resulting mask to @new_mask ++ * and pointing @p->user_cpus_ptr to a copy of the old mask. ++ * If the resulting mask is empty, leave the affinity unchanged and return ++ * -EINVAL. ++ */ ++static int restrict_cpus_allowed_ptr(struct task_struct *p, ++ struct cpumask *new_mask, ++ const struct cpumask *subset_mask) ++{ ++ struct cpumask *user_mask = NULL; ++ unsigned long irq_flags; ++ raw_spinlock_t *lock; ++ struct rq *rq; ++ int err; ++ ++ if (!p->user_cpus_ptr) { ++ user_mask = kmalloc(cpumask_size(), GFP_KERNEL); ++ if (!user_mask) ++ return -ENOMEM; ++ } ++ ++ raw_spin_lock_irqsave(&p->pi_lock, irq_flags); ++ rq = __task_access_lock(p, &lock); ++ ++ if (!cpumask_and(new_mask, &p->cpus_mask, subset_mask)) { ++ err = -EINVAL; ++ goto err_unlock; ++ } ++ ++ /* ++ * We're about to butcher the task affinity, so keep track of what ++ * the user asked for in case we're able to restore it later on. ++ */ ++ if (user_mask) { ++ cpumask_copy(user_mask, p->cpus_ptr); ++ p->user_cpus_ptr = user_mask; ++ } ++ ++ /*return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, &rf);*/ ++ return __set_cpus_allowed_ptr_locked(p, new_mask, 0, rq, lock, irq_flags); ++ ++err_unlock: ++ __task_access_unlock(p, lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, irq_flags); ++ kfree(user_mask); ++ return err; ++} ++ ++/* ++ * Restrict the CPU affinity of task @p so that it is a subset of ++ * task_cpu_possible_mask() and point @p->user_cpu_ptr to a copy of the ++ * old affinity mask. If the resulting mask is empty, we warn and walk ++ * up the cpuset hierarchy until we find a suitable mask. ++ */ ++void force_compatible_cpus_allowed_ptr(struct task_struct *p) ++{ ++ cpumask_var_t new_mask; ++ const struct cpumask *override_mask = task_cpu_possible_mask(p); ++ ++ alloc_cpumask_var(&new_mask, GFP_KERNEL); ++ ++ /* ++ * __migrate_task() can fail silently in the face of concurrent ++ * offlining of the chosen destination CPU, so take the hotplug ++ * lock to ensure that the migration succeeds. ++ */ ++ cpus_read_lock(); ++ if (!cpumask_available(new_mask)) ++ goto out_set_mask; ++ ++ if (!restrict_cpus_allowed_ptr(p, new_mask, override_mask)) ++ goto out_free_mask; ++ ++ /* ++ * We failed to find a valid subset of the affinity mask for the ++ * task, so override it based on its cpuset hierarchy. 
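++	 *
++	 * (For illustration: the canonical user of this pair is an
++	 * asymmetric arm64 system, where execve() of a 32-bit binary does
++	 *
++	 *	force_compatible_cpus_allowed_ptr(p);
++	 *	...
++	 *	relax_compatible_cpus_allowed_ptr(p);
++	 *
++	 * around the window in which only a subset of CPUs can run @p.
++	 * The exact call sites are outside this patch.)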
++ */ ++ cpuset_cpus_allowed(p, new_mask); ++ override_mask = new_mask; ++ ++out_set_mask: ++ if (printk_ratelimit()) { ++ printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n", ++ task_pid_nr(p), p->comm, ++ cpumask_pr_args(override_mask)); ++ } ++ ++ WARN_ON(set_cpus_allowed_ptr(p, override_mask)); ++out_free_mask: ++ cpus_read_unlock(); ++ free_cpumask_var(new_mask); ++} ++ ++static int ++__sched_setaffinity(struct task_struct *p, const struct cpumask *mask); ++ ++/* ++ * Restore the affinity of a task @p which was previously restricted by a ++ * call to force_compatible_cpus_allowed_ptr(). This will clear (and free) ++ * @p->user_cpus_ptr. ++ * ++ * It is the caller's responsibility to serialise this with any calls to ++ * force_compatible_cpus_allowed_ptr(@p). ++ */ ++void relax_compatible_cpus_allowed_ptr(struct task_struct *p) ++{ ++ struct cpumask *user_mask = p->user_cpus_ptr; ++ unsigned long flags; ++ ++ /* ++ * Try to restore the old affinity mask. If this fails, then ++ * we free the mask explicitly to avoid it being inherited across ++ * a subsequent fork(). ++ */ ++ if (!user_mask || !__sched_setaffinity(p, user_mask)) ++ return; ++ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ user_mask = clear_user_cpus_ptr(p); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ ++ kfree(user_mask); ++} ++ ++#else /* CONFIG_SMP */ ++ ++static inline int select_task_rq(struct task_struct *p) ++{ ++ return 0; ++} ++ ++static inline int ++__set_cpus_allowed_ptr(struct task_struct *p, ++ const struct cpumask *new_mask, u32 flags) ++{ ++ return set_cpus_allowed_ptr(p, new_mask); ++} ++ ++static inline bool rq_has_pinned_tasks(struct rq *rq) ++{ ++ return false; ++} ++ ++#endif /* !CONFIG_SMP */ ++ ++static void ++ttwu_stat(struct task_struct *p, int cpu, int wake_flags) ++{ ++ struct rq *rq; ++ ++ if (!schedstat_enabled()) ++ return; ++ ++ rq = this_rq(); ++ ++#ifdef CONFIG_SMP ++ if (cpu == rq->cpu) { ++ __schedstat_inc(rq->ttwu_local); ++ __schedstat_inc(p->stats.nr_wakeups_local); ++ } else { ++ /** Alt schedule FW ToDo: ++ * How to do ttwu_wake_remote ++ */ ++ } ++#endif /* CONFIG_SMP */ ++ ++ __schedstat_inc(rq->ttwu_count); ++ __schedstat_inc(p->stats.nr_wakeups); ++} ++ ++/* ++ * Mark the task runnable and perform wakeup-preemption. ++ */ ++static inline void ++ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags) ++{ ++ check_preempt_curr(rq); ++ WRITE_ONCE(p->__state, TASK_RUNNING); ++ trace_sched_wakeup(p); ++} ++ ++static inline void ++ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags) ++{ ++ if (p->sched_contributes_to_load) ++ rq->nr_uninterruptible--; ++ ++ if ( ++#ifdef CONFIG_SMP ++ !(wake_flags & WF_MIGRATED) && ++#endif ++ p->in_iowait) { ++ delayacct_blkio_end(p); ++ atomic_dec(&task_rq(p)->nr_iowait); ++ } ++ ++ activate_task(p, rq); ++ ttwu_do_wakeup(rq, p, 0); ++} ++ ++/* ++ * Consider @p being inside a wait loop: ++ * ++ * for (;;) { ++ * set_current_state(TASK_UNINTERRUPTIBLE); ++ * ++ * if (CONDITION) ++ * break; ++ * ++ * schedule(); ++ * } ++ * __set_current_state(TASK_RUNNING); ++ * ++ * between set_current_state() and schedule(). In this case @p is still ++ * runnable, so all that needs doing is change p->state back to TASK_RUNNING in ++ * an atomic manner. ++ * ++ * By taking task_rq(p)->lock we serialize against schedule(), if @p->on_rq ++ * then schedule() must still happen and p->state can be changed to ++ * TASK_RUNNING. 
Otherwise we lost the race, schedule() has happened, and we ++ * need to do a full wakeup with enqueue. ++ * ++ * Returns: %true when the wakeup is done, ++ * %false otherwise. ++ */ ++static int ttwu_runnable(struct task_struct *p, int wake_flags) ++{ ++ struct rq *rq; ++ raw_spinlock_t *lock; ++ int ret = 0; ++ ++ rq = __task_access_lock(p, &lock); ++ if (task_on_rq_queued(p)) { ++ /* check_preempt_curr() may use rq clock */ ++ update_rq_clock(rq); ++ ttwu_do_wakeup(rq, p, wake_flags); ++ ret = 1; ++ } ++ __task_access_unlock(p, lock); ++ ++ return ret; ++} ++ ++#ifdef CONFIG_SMP ++void sched_ttwu_pending(void *arg) ++{ ++ struct llist_node *llist = arg; ++ struct rq *rq = this_rq(); ++ struct task_struct *p, *t; ++ struct rq_flags rf; ++ ++ if (!llist) ++ return; ++ ++ /* ++ * rq::ttwu_pending racy indication of out-standing wakeups. ++ * Races such that false-negatives are possible, since they ++ * are shorter lived that false-positives would be. ++ */ ++ WRITE_ONCE(rq->ttwu_pending, 0); ++ ++ rq_lock_irqsave(rq, &rf); ++ update_rq_clock(rq); ++ ++ llist_for_each_entry_safe(p, t, llist, wake_entry.llist) { ++ if (WARN_ON_ONCE(p->on_cpu)) ++ smp_cond_load_acquire(&p->on_cpu, !VAL); ++ ++ if (WARN_ON_ONCE(task_cpu(p) != cpu_of(rq))) ++ set_task_cpu(p, cpu_of(rq)); ++ ++ ttwu_do_activate(rq, p, p->sched_remote_wakeup ? WF_MIGRATED : 0); ++ } ++ ++ rq_unlock_irqrestore(rq, &rf); ++} ++ ++void send_call_function_single_ipi(int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ ++ if (!set_nr_if_polling(rq->idle)) ++ arch_send_call_function_single_ipi(cpu); ++ else ++ trace_sched_wake_idle_without_ipi(cpu); ++} ++ ++/* ++ * Queue a task on the target CPUs wake_list and wake the CPU via IPI if ++ * necessary. The wakee CPU on receipt of the IPI will queue the task ++ * via sched_ttwu_wakeup() for activation so the wakee incurs the cost ++ * of the wakeup instead of the waker. ++ */ ++static void __ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ ++ p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED); ++ ++ WRITE_ONCE(rq->ttwu_pending, 1); ++ __smp_call_single_queue(cpu, &p->wake_entry.llist); ++} ++ ++static inline bool ttwu_queue_cond(int cpu, int wake_flags) ++{ ++ /* ++ * Do not complicate things with the async wake_list while the CPU is ++ * in hotplug state. ++ */ ++ if (!cpu_active(cpu)) ++ return false; ++ ++ /* ++ * If the CPU does not share cache, then queue the task on the ++ * remote rqs wakelist to avoid accessing remote data. ++ */ ++ if (!cpus_share_cache(smp_processor_id(), cpu)) ++ return true; ++ ++ /* ++ * If the task is descheduling and the only running task on the ++ * CPU then use the wakelist to offload the task activation to ++ * the soon-to-be-idle CPU as the current CPU is likely busy. ++ * nr_running is checked to avoid unnecessary task stacking. 
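++	 *
++	 * Taken together, the checks in this helper amount to the
++	 * following condition (informal restatement of the code below):
++	 *
++	 *	cpu_active(cpu) &&
++	 *	(!cpus_share_cache(smp_processor_id(), cpu) ||
++	 *	 ((wake_flags & WF_ON_CPU) && cpu_rq(cpu)->nr_running <= 1))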
++ */ ++ if ((wake_flags & WF_ON_CPU) && cpu_rq(cpu)->nr_running <= 1) ++ return true; ++ ++ return false; ++} ++ ++static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) ++{ ++ if (__is_defined(ALT_SCHED_TTWU_QUEUE) && ttwu_queue_cond(cpu, wake_flags)) { ++ if (WARN_ON_ONCE(cpu == smp_processor_id())) ++ return false; ++ ++ sched_clock_cpu(cpu); /* Sync clocks across CPUs */ ++ __ttwu_queue_wakelist(p, cpu, wake_flags); ++ return true; ++ } ++ ++ return false; ++} ++ ++void wake_up_if_idle(int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ ++ rcu_read_lock(); ++ ++ if (!is_idle_task(rcu_dereference(rq->curr))) ++ goto out; ++ ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ if (is_idle_task(rq->curr)) ++ resched_curr(rq); ++ /* Else CPU is not idle, do nothing here */ ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++ ++out: ++ rcu_read_unlock(); ++} ++ ++bool cpus_share_cache(int this_cpu, int that_cpu) ++{ ++ if (this_cpu == that_cpu) ++ return true; ++ ++ return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); ++} ++#else /* !CONFIG_SMP */ ++ ++static inline bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags) ++{ ++ return false; ++} ++ ++#endif /* CONFIG_SMP */ ++ ++static inline void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ ++ if (ttwu_queue_wakelist(p, cpu, wake_flags)) ++ return; ++ ++ raw_spin_lock(&rq->lock); ++ update_rq_clock(rq); ++ ttwu_do_activate(rq, p, wake_flags); ++ raw_spin_unlock(&rq->lock); ++} ++ ++/* ++ * Invoked from try_to_wake_up() to check whether the task can be woken up. ++ * ++ * The caller holds p::pi_lock if p != current or has preemption ++ * disabled when p == current. ++ * ++ * The rules of PREEMPT_RT saved_state: ++ * ++ * The related locking code always holds p::pi_lock when updating ++ * p::saved_state, which means the code is fully serialized in both cases. ++ * ++ * The lock wait and lock wakeups happen via TASK_RTLOCK_WAIT. No other ++ * bits set. This allows to distinguish all wakeup scenarios. ++ */ ++static __always_inline ++bool ttwu_state_match(struct task_struct *p, unsigned int state, int *success) ++{ ++ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT)) { ++ WARN_ON_ONCE((state & TASK_RTLOCK_WAIT) && ++ state != TASK_RTLOCK_WAIT); ++ } ++ ++ if (READ_ONCE(p->__state) & state) { ++ *success = 1; ++ return true; ++ } ++ ++#ifdef CONFIG_PREEMPT_RT ++ /* ++ * Saved state preserves the task state across blocking on ++ * an RT lock. If the state matches, set p::saved_state to ++ * TASK_RUNNING, but do not wake the task because it waits ++ * for a lock wakeup. Also indicate success because from ++ * the regular waker's point of view this has succeeded. ++ * ++ * After acquiring the lock the task will restore p::__state ++ * from p::saved_state which ensures that the regular ++ * wakeup is not lost. The restore will also set ++ * p::saved_state to TASK_RUNNING so any further tests will ++ * not result in false positives vs. @success ++ */ ++ if (p->saved_state & state) { ++ p->saved_state = TASK_RUNNING; ++ *success = 1; ++ } ++#endif ++ return false; ++} ++ ++/* ++ * Notes on Program-Order guarantees on SMP systems. ++ * ++ * MIGRATION ++ * ++ * The basic program-order guarantee on SMP systems is that when a task [t] ++ * migrates, all its activity on its old CPU [c0] happens-before any subsequent ++ * execution on its new CPU [c1]. 
++ * ++ * For migration (of runnable tasks) this is provided by the following means: ++ * ++ * A) UNLOCK of the rq(c0)->lock scheduling out task t ++ * B) migration for t is required to synchronize *both* rq(c0)->lock and ++ * rq(c1)->lock (if not at the same time, then in that order). ++ * C) LOCK of the rq(c1)->lock scheduling in task ++ * ++ * Transitivity guarantees that B happens after A and C after B. ++ * Note: we only require RCpc transitivity. ++ * Note: the CPU doing B need not be c0 or c1 ++ * ++ * Example: ++ * ++ * CPU0 CPU1 CPU2 ++ * ++ * LOCK rq(0)->lock ++ * sched-out X ++ * sched-in Y ++ * UNLOCK rq(0)->lock ++ * ++ * LOCK rq(0)->lock // orders against CPU0 ++ * dequeue X ++ * UNLOCK rq(0)->lock ++ * ++ * LOCK rq(1)->lock ++ * enqueue X ++ * UNLOCK rq(1)->lock ++ * ++ * LOCK rq(1)->lock // orders against CPU2 ++ * sched-out Z ++ * sched-in X ++ * UNLOCK rq(1)->lock ++ * ++ * ++ * BLOCKING -- aka. SLEEP + WAKEUP ++ * ++ * For blocking we (obviously) need to provide the same guarantee as for ++ * migration. However the means are completely different as there is no lock ++ * chain to provide order. Instead we do: ++ * ++ * 1) smp_store_release(X->on_cpu, 0) -- finish_task() ++ * 2) smp_cond_load_acquire(!X->on_cpu) -- try_to_wake_up() ++ * ++ * Example: ++ * ++ * CPU0 (schedule) CPU1 (try_to_wake_up) CPU2 (schedule) ++ * ++ * LOCK rq(0)->lock LOCK X->pi_lock ++ * dequeue X ++ * sched-out X ++ * smp_store_release(X->on_cpu, 0); ++ * ++ * smp_cond_load_acquire(&X->on_cpu, !VAL); ++ * X->state = WAKING ++ * set_task_cpu(X,2) ++ * ++ * LOCK rq(2)->lock ++ * enqueue X ++ * X->state = RUNNING ++ * UNLOCK rq(2)->lock ++ * ++ * LOCK rq(2)->lock // orders against CPU1 ++ * sched-out Z ++ * sched-in X ++ * UNLOCK rq(2)->lock ++ * ++ * UNLOCK X->pi_lock ++ * UNLOCK rq(0)->lock ++ * ++ * ++ * However; for wakeups there is a second guarantee we must provide, namely we ++ * must observe the state that lead to our wakeup. That is, not only must our ++ * task observe its own prior state, it must also observe the stores prior to ++ * its wakeup. ++ * ++ * This means that any means of doing remote wakeups must order the CPU doing ++ * the wakeup against the CPU the task is going to end up running on. This, ++ * however, is already required for the regular Program-Order guarantee above, ++ * since the waking CPU is the one issueing the ACQUIRE (smp_cond_load_acquire). ++ * ++ */ ++ ++/** ++ * try_to_wake_up - wake up a thread ++ * @p: the thread to be awakened ++ * @state: the mask of task states that can be woken ++ * @wake_flags: wake modifier flags (WF_*) ++ * ++ * Conceptually does: ++ * ++ * If (@state & @p->state) @p->state = TASK_RUNNING. ++ * ++ * If the task was not queued/runnable, also place it back on a runqueue. ++ * ++ * This function is atomic against schedule() which would dequeue the task. ++ * ++ * It issues a full memory barrier before accessing @p->state, see the comment ++ * with set_current_state(). ++ * ++ * Uses p->pi_lock to serialize against concurrent wake-ups. ++ * ++ * Relies on p->pi_lock stabilizing: ++ * - p->sched_class ++ * - p->cpus_ptr ++ * - p->sched_task_group ++ * in order to do migration, see its use of select_task_rq()/set_task_cpu(). ++ * ++ * Tries really hard to only take one task_rq(p)->lock for performance. ++ * Takes rq->lock in: ++ * - ttwu_runnable() -- old rq, unavoidable, see comment there; ++ * - ttwu_queue() -- new rq, for enqueue of the task; ++ * - psi_ttwu_dequeue() -- much sadness :-( accounting will kill us. 
++ * ++ * As a consequence we race really badly with just about everything. See the ++ * many memory barriers and their comments for details. ++ * ++ * Return: %true if @p->state changes (an actual wakeup was done), ++ * %false otherwise. ++ */ ++static int try_to_wake_up(struct task_struct *p, unsigned int state, ++ int wake_flags) ++{ ++ unsigned long flags; ++ int cpu, success = 0; ++ ++ preempt_disable(); ++ if (p == current) { ++ /* ++ * We're waking current, this means 'p->on_rq' and 'task_cpu(p) ++ * == smp_processor_id()'. Together this means we can special ++ * case the whole 'p->on_rq && ttwu_runnable()' case below ++ * without taking any locks. ++ * ++ * In particular: ++ * - we rely on Program-Order guarantees for all the ordering, ++ * - we're serialized against set_special_state() by virtue of ++ * it disabling IRQs (this allows not taking ->pi_lock). ++ */ ++ if (!ttwu_state_match(p, state, &success)) ++ goto out; ++ ++ trace_sched_waking(p); ++ WRITE_ONCE(p->__state, TASK_RUNNING); ++ trace_sched_wakeup(p); ++ goto out; ++ } ++ ++ /* ++ * If we are going to wake up a thread waiting for CONDITION we ++ * need to ensure that CONDITION=1 done by the caller can not be ++ * reordered with p->state check below. This pairs with smp_store_mb() ++ * in set_current_state() that the waiting thread does. ++ */ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ smp_mb__after_spinlock(); ++ if (!ttwu_state_match(p, state, &success)) ++ goto unlock; ++ ++ trace_sched_waking(p); ++ ++ /* ++ * Ensure we load p->on_rq _after_ p->state, otherwise it would ++ * be possible to, falsely, observe p->on_rq == 0 and get stuck ++ * in smp_cond_load_acquire() below. ++ * ++ * sched_ttwu_pending() try_to_wake_up() ++ * STORE p->on_rq = 1 LOAD p->state ++ * UNLOCK rq->lock ++ * ++ * __schedule() (switch to task 'p') ++ * LOCK rq->lock smp_rmb(); ++ * smp_mb__after_spinlock(); ++ * UNLOCK rq->lock ++ * ++ * [task p] ++ * STORE p->state = UNINTERRUPTIBLE LOAD p->on_rq ++ * ++ * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in ++ * __schedule(). See the comment for smp_mb__after_spinlock(). ++ * ++ * A similar smb_rmb() lives in try_invoke_on_locked_down_task(). ++ */ ++ smp_rmb(); ++ if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags)) ++ goto unlock; ++ ++#ifdef CONFIG_SMP ++ /* ++ * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be ++ * possible to, falsely, observe p->on_cpu == 0. ++ * ++ * One must be running (->on_cpu == 1) in order to remove oneself ++ * from the runqueue. ++ * ++ * __schedule() (switch to task 'p') try_to_wake_up() ++ * STORE p->on_cpu = 1 LOAD p->on_rq ++ * UNLOCK rq->lock ++ * ++ * __schedule() (put 'p' to sleep) ++ * LOCK rq->lock smp_rmb(); ++ * smp_mb__after_spinlock(); ++ * STORE p->on_rq = 0 LOAD p->on_cpu ++ * ++ * Pairs with the LOCK+smp_mb__after_spinlock() on rq->lock in ++ * __schedule(). See the comment for smp_mb__after_spinlock(). ++ * ++ * Form a control-dep-acquire with p->on_rq == 0 above, to ensure ++ * schedule()'s deactivate_task() has 'happened' and p will no longer ++ * care about it's own p->state. See the comment in __schedule(). ++ */ ++ smp_acquire__after_ctrl_dep(); ++ ++ /* ++ * We're doing the wakeup (@success == 1), they did a dequeue (p->on_rq ++ * == 0), which means we need to do an enqueue, change p->state to ++ * TASK_WAKING such that we can unlock p->pi_lock before doing the ++ * enqueue, such as ttwu_queue_wakelist(). 
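++	 *
++	 * Informal sketch of the state progression on this path (the
++	 * sleeping state is just an example):
++	 *
++	 *	TASK_UNINTERRUPTIBLE   (sleeper, before schedule())
++	 *	  -> TASK_WAKING       (here, under p->pi_lock)
++	 *	  -> TASK_RUNNING      (ttwu_do_wakeup(), under rq->lock)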
++ */ ++ WRITE_ONCE(p->__state, TASK_WAKING); ++ ++ /* ++ * If the owning (remote) CPU is still in the middle of schedule() with ++ * this task as prev, considering queueing p on the remote CPUs wake_list ++ * which potentially sends an IPI instead of spinning on p->on_cpu to ++ * let the waker make forward progress. This is safe because IRQs are ++ * disabled and the IPI will deliver after on_cpu is cleared. ++ * ++ * Ensure we load task_cpu(p) after p->on_cpu: ++ * ++ * set_task_cpu(p, cpu); ++ * STORE p->cpu = @cpu ++ * __schedule() (switch to task 'p') ++ * LOCK rq->lock ++ * smp_mb__after_spin_lock() smp_cond_load_acquire(&p->on_cpu) ++ * STORE p->on_cpu = 1 LOAD p->cpu ++ * ++ * to ensure we observe the correct CPU on which the task is currently ++ * scheduling. ++ */ ++ if (smp_load_acquire(&p->on_cpu) && ++ ttwu_queue_wakelist(p, task_cpu(p), wake_flags | WF_ON_CPU)) ++ goto unlock; ++ ++ /* ++ * If the owning (remote) CPU is still in the middle of schedule() with ++ * this task as prev, wait until it's done referencing the task. ++ * ++ * Pairs with the smp_store_release() in finish_task(). ++ * ++ * This ensures that tasks getting woken will be fully ordered against ++ * their previous state and preserve Program Order. ++ */ ++ smp_cond_load_acquire(&p->on_cpu, !VAL); ++ ++ sched_task_ttwu(p); ++ ++ cpu = select_task_rq(p); ++ ++ if (cpu != task_cpu(p)) { ++ if (p->in_iowait) { ++ delayacct_blkio_end(p); ++ atomic_dec(&task_rq(p)->nr_iowait); ++ } ++ ++ wake_flags |= WF_MIGRATED; ++ psi_ttwu_dequeue(p); ++ set_task_cpu(p, cpu); ++ } ++#else ++ cpu = task_cpu(p); ++#endif /* CONFIG_SMP */ ++ ++ ttwu_queue(p, cpu, wake_flags); ++unlock: ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++out: ++ if (success) ++ ttwu_stat(p, task_cpu(p), wake_flags); ++ preempt_enable(); ++ ++ return success; ++} ++ ++/** ++ * task_call_func - Invoke a function on task in fixed state ++ * @p: Process for which the function is to be invoked, can be @current. ++ * @func: Function to invoke. ++ * @arg: Argument to function. ++ * ++ * Fix the task in it's current state by avoiding wakeups and or rq operations ++ * and call @func(@arg) on it. This function can use ->on_rq and task_curr() ++ * to work out what the state is, if required. Given that @func can be invoked ++ * with a runqueue lock held, it had better be quite lightweight. ++ * ++ * Returns: ++ * Whatever @func returns ++ */ ++int task_call_func(struct task_struct *p, task_call_f func, void *arg) ++{ ++ struct rq *rq = NULL; ++ unsigned int state; ++ struct rq_flags rf; ++ int ret; ++ ++ raw_spin_lock_irqsave(&p->pi_lock, rf.flags); ++ ++ state = READ_ONCE(p->__state); ++ ++ /* ++ * Ensure we load p->on_rq after p->__state, otherwise it would be ++ * possible to, falsely, observe p->on_rq == 0. ++ * ++ * See try_to_wake_up() for a longer comment. ++ */ ++ smp_rmb(); ++ ++ /* ++ * Since pi->lock blocks try_to_wake_up(), we don't need rq->lock when ++ * the task is blocked. Make sure to check @state since ttwu() can drop ++ * locks at the end, see ttwu_queue_wakelist(). 
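++	 *
++	 * A suitably lightweight @func, purely for illustration
++	 * (hypothetical, not defined in this patch):
++	 *
++	 *	static int read_nvcsw(struct task_struct *t, void *arg)
++	 *	{
++	 *		*(unsigned long *)arg = t->nvcsw;
++	 *		return 0;
++	 *	}
++	 *
++	 *	task_call_func(p, read_nvcsw, &nvcsw);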
++ */ ++ if (state == TASK_RUNNING || state == TASK_WAKING || p->on_rq) ++ rq = __task_rq_lock(p, &rf); ++ ++ /* ++ * At this point the task is pinned; either: ++ * - blocked and we're holding off wakeups (pi->lock) ++ * - woken, and we're holding off enqueue (rq->lock) ++ * - queued, and we're holding off schedule (rq->lock) ++ * - running, and we're holding off de-schedule (rq->lock) ++ * ++ * The called function (@func) can use: task_curr(), p->on_rq and ++ * p->__state to differentiate between these states. ++ */ ++ ret = func(p, arg); ++ ++ if (rq) ++ __task_rq_unlock(rq, &rf); ++ ++ raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags); ++ return ret; ++} ++ ++/** ++ * wake_up_process - Wake up a specific process ++ * @p: The process to be woken up. ++ * ++ * Attempt to wake up the nominated process and move it to the set of runnable ++ * processes. ++ * ++ * Return: 1 if the process was woken up, 0 if it was already running. ++ * ++ * This function executes a full memory barrier before accessing the task state. ++ */ ++int wake_up_process(struct task_struct *p) ++{ ++ return try_to_wake_up(p, TASK_NORMAL, 0); ++} ++EXPORT_SYMBOL(wake_up_process); ++ ++int wake_up_state(struct task_struct *p, unsigned int state) ++{ ++ return try_to_wake_up(p, state, 0); ++} ++ ++/* ++ * Perform scheduler related setup for a newly forked process p. ++ * p is forked by current. ++ * ++ * __sched_fork() is basic setup used by init_idle() too: ++ */ ++static inline void __sched_fork(unsigned long clone_flags, struct task_struct *p) ++{ ++ p->on_rq = 0; ++ p->on_cpu = 0; ++ p->utime = 0; ++ p->stime = 0; ++ p->sched_time = 0; ++ ++#ifdef CONFIG_SCHEDSTATS ++ /* Even if schedstat is disabled, there should not be garbage */ ++ memset(&p->stats, 0, sizeof(p->stats)); ++#endif ++ ++#ifdef CONFIG_PREEMPT_NOTIFIERS ++ INIT_HLIST_HEAD(&p->preempt_notifiers); ++#endif ++ ++#ifdef CONFIG_COMPACTION ++ p->capture_control = NULL; ++#endif ++#ifdef CONFIG_SMP ++ p->wake_entry.u_flags = CSD_TYPE_TTWU; ++#endif ++} ++ ++/* ++ * fork()/clone()-time setup: ++ */ ++int sched_fork(unsigned long clone_flags, struct task_struct *p) ++{ ++ __sched_fork(clone_flags, p); ++ /* ++ * We mark the process as NEW here. This guarantees that ++ * nobody will actually run it, and a signal or other external ++ * event cannot wake it up and insert it on the runqueue either. ++ */ ++ p->__state = TASK_NEW; ++ ++ /* ++ * Make sure we do not leak PI boosting priority to the child. ++ */ ++ p->prio = current->normal_prio; ++ ++ /* ++ * Revert to default priority/policy on fork if requested. ++ */ ++ if (unlikely(p->sched_reset_on_fork)) { ++ if (task_has_rt_policy(p)) { ++ p->policy = SCHED_NORMAL; ++ p->static_prio = NICE_TO_PRIO(0); ++ p->rt_priority = 0; ++ } else if (PRIO_TO_NICE(p->static_prio) < 0) ++ p->static_prio = NICE_TO_PRIO(0); ++ ++ p->prio = p->normal_prio = p->static_prio; ++ ++ /* ++ * We don't need the reset flag anymore after the fork. It has ++ * fulfilled its duty: ++ */ ++ p->sched_reset_on_fork = 0; ++ } ++ ++#ifdef CONFIG_SCHED_INFO ++ if (unlikely(sched_info_on())) ++ memset(&p->sched_info, 0, sizeof(p->sched_info)); ++#endif ++ init_task_preempt_count(p); ++ ++ return 0; ++} ++ ++void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs) ++{ ++ unsigned long flags; ++ struct rq *rq; ++ ++ /* ++ * Because we're not yet on the pid-hash, p->pi_lock isn't strictly ++ * required yet, but lockdep gets upset if rules are violated. 
++ */ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ /* ++ * Share the timeslice between parent and child, thus the ++ * total amount of pending timeslices in the system doesn't change, ++ * resulting in more scheduling fairness. ++ */ ++ rq = this_rq(); ++ raw_spin_lock(&rq->lock); ++ ++ rq->curr->time_slice /= 2; ++ p->time_slice = rq->curr->time_slice; ++#ifdef CONFIG_SCHED_HRTICK ++ hrtick_start(rq, rq->curr->time_slice); ++#endif ++ ++ if (p->time_slice < RESCHED_NS) { ++ p->time_slice = sched_timeslice_ns; ++ resched_curr(rq); ++ } ++ sched_task_fork(p, rq); ++ raw_spin_unlock(&rq->lock); ++ ++ rseq_migrate(p); ++ /* ++ * We're setting the CPU for the first time, we don't migrate, ++ * so use __set_task_cpu(). ++ */ ++ __set_task_cpu(p, smp_processor_id()); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++} ++ ++void sched_post_fork(struct task_struct *p) ++{ ++} ++ ++#ifdef CONFIG_SCHEDSTATS ++ ++DEFINE_STATIC_KEY_FALSE(sched_schedstats); ++ ++static void set_schedstats(bool enabled) ++{ ++ if (enabled) ++ static_branch_enable(&sched_schedstats); ++ else ++ static_branch_disable(&sched_schedstats); ++} ++ ++void force_schedstat_enabled(void) ++{ ++ if (!schedstat_enabled()) { ++ pr_info("kernel profiling enabled schedstats, disable via kernel.sched_schedstats.\n"); ++ static_branch_enable(&sched_schedstats); ++ } ++} ++ ++static int __init setup_schedstats(char *str) ++{ ++ int ret = 0; ++ if (!str) ++ goto out; ++ ++ if (!strcmp(str, "enable")) { ++ set_schedstats(true); ++ ret = 1; ++ } else if (!strcmp(str, "disable")) { ++ set_schedstats(false); ++ ret = 1; ++ } ++out: ++ if (!ret) ++ pr_warn("Unable to parse schedstats=\n"); ++ ++ return ret; ++} ++__setup("schedstats=", setup_schedstats); ++ ++#ifdef CONFIG_PROC_SYSCTL ++static int sysctl_schedstats(struct ctl_table *table, int write, void *buffer, ++ size_t *lenp, loff_t *ppos) ++{ ++ struct ctl_table t; ++ int err; ++ int state = static_branch_likely(&sched_schedstats); ++ ++ if (write && !capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++ ++ t = *table; ++ t.data = &state; ++ err = proc_dointvec_minmax(&t, write, buffer, lenp, ppos); ++ if (err < 0) ++ return err; ++ if (write) ++ set_schedstats(state); ++ return err; ++} ++ ++static struct ctl_table sched_core_sysctls[] = { ++ { ++ .procname = "sched_schedstats", ++ .data = NULL, ++ .maxlen = sizeof(unsigned int), ++ .mode = 0644, ++ .proc_handler = sysctl_schedstats, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_ONE, ++ }, ++ {} ++}; ++static int __init sched_core_sysctl_init(void) ++{ ++ register_sysctl_init("kernel", sched_core_sysctls); ++ return 0; ++} ++late_initcall(sched_core_sysctl_init); ++#endif /* CONFIG_PROC_SYSCTL */ ++#endif /* CONFIG_SCHEDSTATS */ ++ ++/* ++ * wake_up_new_task - wake up a newly created task for the first time. ++ * ++ * This function will do some initial scheduler statistics housekeeping ++ * that must be done for every newly created context, then puts the task ++ * on the runqueue and wakes it. ++ */ ++void wake_up_new_task(struct task_struct *p) ++{ ++ unsigned long flags; ++ struct rq *rq; ++ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ WRITE_ONCE(p->__state, TASK_RUNNING); ++ rq = cpu_rq(select_task_rq(p)); ++#ifdef CONFIG_SMP ++ rseq_migrate(p); ++ /* ++ * Fork balancing, do it here and not earlier because: ++ * - cpus_ptr can change in the fork path ++ * - any previously selected CPU might disappear through hotplug ++ * ++ * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, ++ * as we're not fully set-up yet. 
++ */ ++ __set_task_cpu(p, cpu_of(rq)); ++#endif ++ ++ raw_spin_lock(&rq->lock); ++ update_rq_clock(rq); ++ ++ activate_task(p, rq); ++ trace_sched_wakeup_new(p); ++ check_preempt_curr(rq); ++ ++ raw_spin_unlock(&rq->lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++} ++ ++#ifdef CONFIG_PREEMPT_NOTIFIERS ++ ++static DEFINE_STATIC_KEY_FALSE(preempt_notifier_key); ++ ++void preempt_notifier_inc(void) ++{ ++ static_branch_inc(&preempt_notifier_key); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_inc); ++ ++void preempt_notifier_dec(void) ++{ ++ static_branch_dec(&preempt_notifier_key); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_dec); ++ ++/** ++ * preempt_notifier_register - tell me when current is being preempted & rescheduled ++ * @notifier: notifier struct to register ++ */ ++void preempt_notifier_register(struct preempt_notifier *notifier) ++{ ++ if (!static_branch_unlikely(&preempt_notifier_key)) ++ WARN(1, "registering preempt_notifier while notifiers disabled\n"); ++ ++ hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_register); ++ ++/** ++ * preempt_notifier_unregister - no longer interested in preemption notifications ++ * @notifier: notifier struct to unregister ++ * ++ * This is *not* safe to call from within a preemption notifier. ++ */ ++void preempt_notifier_unregister(struct preempt_notifier *notifier) ++{ ++ hlist_del(¬ifier->link); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_unregister); ++ ++static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) ++{ ++ struct preempt_notifier *notifier; ++ ++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) ++ notifier->ops->sched_in(notifier, raw_smp_processor_id()); ++} ++ ++static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) ++{ ++ if (static_branch_unlikely(&preempt_notifier_key)) ++ __fire_sched_in_preempt_notifiers(curr); ++} ++ ++static void ++__fire_sched_out_preempt_notifiers(struct task_struct *curr, ++ struct task_struct *next) ++{ ++ struct preempt_notifier *notifier; ++ ++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) ++ notifier->ops->sched_out(notifier, next); ++} ++ ++static __always_inline void ++fire_sched_out_preempt_notifiers(struct task_struct *curr, ++ struct task_struct *next) ++{ ++ if (static_branch_unlikely(&preempt_notifier_key)) ++ __fire_sched_out_preempt_notifiers(curr, next); ++} ++ ++#else /* !CONFIG_PREEMPT_NOTIFIERS */ ++ ++static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) ++{ ++} ++ ++static inline void ++fire_sched_out_preempt_notifiers(struct task_struct *curr, ++ struct task_struct *next) ++{ ++} ++ ++#endif /* CONFIG_PREEMPT_NOTIFIERS */ ++ ++static inline void prepare_task(struct task_struct *next) ++{ ++ /* ++ * Claim the task as running, we do this before switching to it ++ * such that any running task will have this set. ++ * ++ * See the ttwu() WF_ON_CPU case and its ordering comment. ++ */ ++ WRITE_ONCE(next->on_cpu, 1); ++} ++ ++static inline void finish_task(struct task_struct *prev) ++{ ++#ifdef CONFIG_SMP ++ /* ++ * This must be the very last reference to @prev from this CPU. After ++ * p->on_cpu is cleared, the task can be moved to a different CPU. We ++ * must ensure this doesn't happen until the switch is completely ++ * finished. ++ * ++ * In particular, the load of prev->state in finish_task_switch() must ++ * happen before this. ++ * ++ * Pairs with the smp_cond_load_acquire() in try_to_wake_up(). 
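++	 *
++	 * The pairing, spelled out (illustrative):
++	 *
++	 *	CPU0 (finish_task)                CPU1 (try_to_wake_up)
++	 *
++	 *	[all prior accesses to prev]
++	 *	smp_store_release(&prev->on_cpu, 0);
++	 *	                                  smp_cond_load_acquire(&p->on_cpu, !VAL);
++	 *	                                  [safe to migrate/run p elsewhere]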
++ */ ++ smp_store_release(&prev->on_cpu, 0); ++#else ++ prev->on_cpu = 0; ++#endif ++} ++ ++#ifdef CONFIG_SMP ++ ++static void do_balance_callbacks(struct rq *rq, struct callback_head *head) ++{ ++ void (*func)(struct rq *rq); ++ struct callback_head *next; ++ ++ lockdep_assert_held(&rq->lock); ++ ++ while (head) { ++ func = (void (*)(struct rq *))head->func; ++ next = head->next; ++ head->next = NULL; ++ head = next; ++ ++ func(rq); ++ } ++} ++ ++static void balance_push(struct rq *rq); ++ ++/* ++ * balance_push_callback is a right abuse of the callback interface and plays ++ * by significantly different rules. ++ * ++ * Where the normal balance_callback's purpose is to be ran in the same context ++ * that queued it (only later, when it's safe to drop rq->lock again), ++ * balance_push_callback is specifically targeted at __schedule(). ++ * ++ * This abuse is tolerated because it places all the unlikely/odd cases behind ++ * a single test, namely: rq->balance_callback == NULL. ++ */ ++struct callback_head balance_push_callback = { ++ .next = NULL, ++ .func = (void (*)(struct callback_head *))balance_push, ++}; ++ ++static inline struct callback_head * ++__splice_balance_callbacks(struct rq *rq, bool split) ++{ ++ struct callback_head *head = rq->balance_callback; ++ ++ if (likely(!head)) ++ return NULL; ++ ++ lockdep_assert_rq_held(rq); ++ /* ++ * Must not take balance_push_callback off the list when ++ * splice_balance_callbacks() and balance_callbacks() are not ++ * in the same rq->lock section. ++ * ++ * In that case it would be possible for __schedule() to interleave ++ * and observe the list empty. ++ */ ++ if (split && head == &balance_push_callback) ++ head = NULL; ++ else ++ rq->balance_callback = NULL; ++ ++ return head; ++} ++ ++static inline struct callback_head *splice_balance_callbacks(struct rq *rq) ++{ ++ return __splice_balance_callbacks(rq, true); ++} ++ ++static void __balance_callbacks(struct rq *rq) ++{ ++ do_balance_callbacks(rq, __splice_balance_callbacks(rq, false)); ++} ++ ++static inline void balance_callbacks(struct rq *rq, struct callback_head *head) ++{ ++ unsigned long flags; ++ ++ if (unlikely(head)) { ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ do_balance_callbacks(rq, head); ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++ } ++} ++ ++#else ++ ++static inline void __balance_callbacks(struct rq *rq) ++{ ++} ++ ++static inline struct callback_head *splice_balance_callbacks(struct rq *rq) ++{ ++ return NULL; ++} ++ ++static inline void balance_callbacks(struct rq *rq, struct callback_head *head) ++{ ++} ++ ++#endif ++ ++static inline void ++prepare_lock_switch(struct rq *rq, struct task_struct *next) ++{ ++ /* ++ * Since the runqueue lock will be released by the next ++ * task (which is an invalid locking op but in the case ++ * of the scheduler it's an obvious special-case), so we ++ * do an early lockdep release here: ++ */ ++ spin_release(&rq->lock.dep_map, _THIS_IP_); ++#ifdef CONFIG_DEBUG_SPINLOCK ++ /* this is a valid case when another task releases the spinlock */ ++ rq->lock.owner = next; ++#endif ++} ++ ++static inline void finish_lock_switch(struct rq *rq) ++{ ++ /* ++ * If we are tracking spinlock dependencies then we have to ++ * fix up the runqueue lock - which gets 'carried over' from ++ * prev into current: ++ */ ++ spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_); ++ __balance_callbacks(rq); ++ raw_spin_unlock_irq(&rq->lock); ++} ++ ++/* ++ * NOP if the arch has not defined these: ++ */ ++ ++#ifndef prepare_arch_switch ++# define 
prepare_arch_switch(next) do { } while (0)
++#endif
++
++#ifndef finish_arch_post_lock_switch
++# define finish_arch_post_lock_switch() do { } while (0)
++#endif
++
++static inline void kmap_local_sched_out(void)
++{
++#ifdef CONFIG_KMAP_LOCAL
++	if (unlikely(current->kmap_ctrl.idx))
++		__kmap_local_sched_out();
++#endif
++}
++
++static inline void kmap_local_sched_in(void)
++{
++#ifdef CONFIG_KMAP_LOCAL
++	if (unlikely(current->kmap_ctrl.idx))
++		__kmap_local_sched_in();
++#endif
++}
++
++/**
++ * prepare_task_switch - prepare to switch tasks
++ * @rq: the runqueue preparing to switch
++ * @prev: the current task that is being switched out
++ * @next: the task we are going to switch to.
++ *
++ * This is called with the rq lock held and interrupts off. It must
++ * be paired with a subsequent finish_task_switch after the context
++ * switch.
++ *
++ * prepare_task_switch sets up locking and calls architecture specific
++ * hooks.
++ */
++static inline void
++prepare_task_switch(struct rq *rq, struct task_struct *prev,
++		    struct task_struct *next)
++{
++	kcov_prepare_switch(prev);
++	sched_info_switch(rq, prev, next);
++	perf_event_task_sched_out(prev, next);
++	rseq_preempt(prev);
++	fire_sched_out_preempt_notifiers(prev, next);
++	kmap_local_sched_out();
++	prepare_task(next);
++	prepare_arch_switch(next);
++}
++
++/**
++ * finish_task_switch - clean up after a task-switch
++ * @prev: the thread we just switched away from.
++ *
++ * finish_task_switch must be called after the context switch, paired
++ * with a prepare_task_switch call before the context switch.
++ * finish_task_switch will reconcile locking set up by prepare_task_switch,
++ * and do any other architecture-specific cleanup actions.
++ *
++ * Note that we may have delayed dropping an mm in context_switch(). If
++ * so, we finish that here outside of the runqueue lock. (Doing it
++ * with the lock held can cause deadlocks; see schedule() for
++ * details.)
++ *
++ * The context switch has flipped the stack from under us and restored the
++ * local variables which were saved when this task called schedule() in the
++ * past. prev == current is still correct but we need to recalculate this_rq
++ * because prev may have moved to another CPU.
++ */
++static struct rq *finish_task_switch(struct task_struct *prev)
++	__releases(rq->lock)
++{
++	struct rq *rq = this_rq();
++	struct mm_struct *mm = rq->prev_mm;
++	unsigned int prev_state;
++
++	/*
++	 * The previous task will have left us with a preempt_count of 2
++	 * because it left us after:
++	 *
++	 *	schedule()
++	 *	  preempt_disable();			// 1
++	 *	  __schedule()
++	 *	    raw_spin_lock_irq(&rq->lock)	// 2
++	 *
++	 * Also, see FORK_PREEMPT_COUNT.
++	 */
++	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
++		      "corrupted preempt_count: %s/%d/0x%x\n",
++		      current->comm, current->pid, preempt_count()))
++		preempt_count_set(FORK_PREEMPT_COUNT);
++
++	rq->prev_mm = NULL;
++
++	/*
++	 * A task struct has one reference for the use as "current".
++	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
++	 * schedule one last time. The schedule call will never return, and
++	 * the scheduled task must drop that reference.
++	 *
++	 * We must observe prev->state before clearing prev->on_cpu (in
++	 * finish_task), otherwise a concurrent wakeup can get prev
++	 * running on another CPU and we could race with its RUNNING -> DEAD
++	 * transition, resulting in a double drop.
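++	 *
++	 * Illustrative interleaving of the bug this ordering prevents:
++	 *
++	 *	CPU0 (finish_task_switch)       CPU1
++	 *
++	 *	finish_task(prev) // too early
++	 *	                                wakes prev; prev runs,
++	 *	                                sets TASK_DEAD, schedules
++	 *	prev_state == TASK_DEAD // both CPUs now observe DEAD
++	 *	put_task_struct_rcu_user()      put_task_struct_rcu_user()
++	 *	                                => double drop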
++ */ ++ prev_state = READ_ONCE(prev->__state); ++ vtime_task_switch(prev); ++ perf_event_task_sched_in(prev, current); ++ finish_task(prev); ++ tick_nohz_task_switch(); ++ finish_lock_switch(rq); ++ finish_arch_post_lock_switch(); ++ kcov_finish_switch(current); ++ /* ++ * kmap_local_sched_out() is invoked with rq::lock held and ++ * interrupts disabled. There is no requirement for that, but the ++ * sched out code does not have an interrupt enabled section. ++ * Restoring the maps on sched in does not require interrupts being ++ * disabled either. ++ */ ++ kmap_local_sched_in(); ++ ++ fire_sched_in_preempt_notifiers(current); ++ /* ++ * When switching through a kernel thread, the loop in ++ * membarrier_{private,global}_expedited() may have observed that ++ * kernel thread and not issued an IPI. It is therefore possible to ++ * schedule between user->kernel->user threads without passing though ++ * switch_mm(). Membarrier requires a barrier after storing to ++ * rq->curr, before returning to userspace, so provide them here: ++ * ++ * - a full memory barrier for {PRIVATE,GLOBAL}_EXPEDITED, implicitly ++ * provided by mmdrop(), ++ * - a sync_core for SYNC_CORE. ++ */ ++ if (mm) { ++ membarrier_mm_sync_core_before_usermode(mm); ++ mmdrop_sched(mm); ++ } ++ if (unlikely(prev_state == TASK_DEAD)) { ++ /* Task is done with its stack. */ ++ put_task_stack(prev); ++ ++ put_task_struct_rcu_user(prev); ++ } ++ ++ return rq; ++} ++ ++/** ++ * schedule_tail - first thing a freshly forked thread must call. ++ * @prev: the thread we just switched away from. ++ */ ++asmlinkage __visible void schedule_tail(struct task_struct *prev) ++ __releases(rq->lock) ++{ ++ /* ++ * New tasks start with FORK_PREEMPT_COUNT, see there and ++ * finish_task_switch() for details. ++ * ++ * finish_task_switch() will drop rq->lock() and lower preempt_count ++ * and the preempt_enable() will end up enabling preemption (on ++ * PREEMPT_COUNT kernels). ++ */ ++ ++ finish_task_switch(prev); ++ preempt_enable(); ++ ++ if (current->set_child_tid) ++ put_user(task_pid_vnr(current), current->set_child_tid); ++ ++ calculate_sigpending(); ++} ++ ++/* ++ * context_switch - switch to the new MM and the new thread's register state. ++ */ ++static __always_inline struct rq * ++context_switch(struct rq *rq, struct task_struct *prev, ++ struct task_struct *next) ++{ ++ prepare_task_switch(rq, prev, next); ++ ++ /* ++ * For paravirt, this is coupled with an exit in switch_to to ++ * combine the page table reload and the switch backend into ++ * one hypercall. ++ */ ++ arch_start_context_switch(prev); ++ ++ /* ++ * kernel -> kernel lazy + transfer active ++ * user -> kernel lazy + mmgrab() active ++ * ++ * kernel -> user switch + mmdrop() active ++ * user -> user switch ++ */ ++ if (!next->mm) { // to kernel ++ enter_lazy_tlb(prev->active_mm, next); ++ ++ next->active_mm = prev->active_mm; ++ if (prev->mm) // from user ++ mmgrab(prev->active_mm); ++ else ++ prev->active_mm = NULL; ++ } else { // to user ++ membarrier_switch_mm(rq, prev->active_mm, next->mm); ++ /* ++ * sys_membarrier() requires an smp_mb() between setting ++ * rq->curr / membarrier_switch_mm() and returning to userspace. ++ * ++ * The below provides this either through switch_mm(), or in ++ * case 'prev->active_mm == next->mm' through ++ * finish_task_switch()'s mmdrop(). ++ */ ++ switch_mm_irqs_off(prev->active_mm, next->mm, next); ++ ++ if (!prev->mm) { // from kernel ++ /* will mmdrop() in finish_task_switch(). 
*/
++			rq->prev_mm = prev->active_mm;
++			prev->active_mm = NULL;
++		}
++	}
++
++	prepare_lock_switch(rq, next);
++
++	/* Here we just switch the register state and the stack. */
++	switch_to(prev, next, prev);
++	barrier();
++
++	return finish_task_switch(prev);
++}
++
++/*
++ * nr_running, nr_uninterruptible and nr_context_switches:
++ *
++ * externally visible scheduler statistics: current number of runnable
++ * threads, total number of context switches performed since bootup.
++ */
++unsigned int nr_running(void)
++{
++	unsigned int i, sum = 0;
++
++	for_each_online_cpu(i)
++		sum += cpu_rq(i)->nr_running;
++
++	return sum;
++}
++
++/*
++ * Check if only the current task is running on the CPU.
++ *
++ * Caution: this function does not check that the caller has disabled
++ * preemption, thus the result might have a time-of-check-to-time-of-use
++ * race. The caller is responsible for using it correctly, for example:
++ *
++ * - from a non-preemptible section (of course)
++ *
++ * - from a thread that is bound to a single CPU
++ *
++ * - in a loop with very short iterations (e.g. a polling loop)
++ */
++bool single_task_running(void)
++{
++	return raw_rq()->nr_running == 1;
++}
++EXPORT_SYMBOL(single_task_running);
++
++unsigned long long nr_context_switches(void)
++{
++	int i;
++	unsigned long long sum = 0;
++
++	for_each_possible_cpu(i)
++		sum += cpu_rq(i)->nr_switches;
++
++	return sum;
++}
++
++/*
++ * Consumers of these two interfaces, like for example the cpuidle menu
++ * governor, are using nonsensical data, preferring shallow idle state
++ * selection for a CPU that has IO-wait which might not even end up
++ * running the task when it does become runnable.
++ */
++
++unsigned int nr_iowait_cpu(int cpu)
++{
++	return atomic_read(&cpu_rq(cpu)->nr_iowait);
++}
++
++/*
++ * IO-wait accounting, and how it's mostly bollocks (on SMP).
++ *
++ * The idea behind IO-wait accounting is to account the idle time that we could
++ * have spent running if it were not for IO. That is, if we were to improve the
++ * storage performance, we'd have a proportional reduction in IO-wait time.
++ *
++ * This all works nicely on UP, where, when a task blocks on IO, we account
++ * idle time as IO-wait, because if the storage were faster, it could've been
++ * running and we'd not be idle.
++ *
++ * This has been extended to SMP, by doing the same for each CPU. This however
++ * is broken.
++ *
++ * Imagine for instance the case where two tasks block on one CPU, only the one
++ * CPU will have IO-wait accounted, while the other has regular idle. Even
++ * though, if the storage were faster, both could've run at the same time,
++ * utilising both CPUs.
++ *
++ * This means, that when looking globally, the current IO-wait accounting on
++ * SMP is a lower bound, by reason of under accounting.
++ *
++ * Worse, since the numbers are provided per CPU, they are sometimes
++ * interpreted per CPU, and that is nonsensical. A blocked task isn't strictly
++ * associated with any one particular CPU, it can wake to another CPU than it
++ * blocked on. This means the per CPU IO-wait number is meaningless.
++ *
++ * Task CPU affinities can make all that even more 'interesting'.
++ */
++
++unsigned int nr_iowait(void)
++{
++	unsigned int i, sum = 0;
++
++	for_each_possible_cpu(i)
++		sum += nr_iowait_cpu(i);
++
++	return sum;
++}
++
++#ifdef CONFIG_SMP
++
++/*
++ * sched_exec - execve() is a valuable balancing opportunity, because at
++ * this point the task has the smallest effective memory and cache
++ * footprint.
++ */
++void sched_exec(void)
++{
++}
++
++#endif
++
++DEFINE_PER_CPU(struct kernel_stat, kstat);
++DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
++
++EXPORT_PER_CPU_SYMBOL(kstat);
++EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
++
++static inline void update_curr(struct rq *rq, struct task_struct *p)
++{
++	s64 ns = rq->clock_task - p->last_ran;
++
++	p->sched_time += ns;
++	cgroup_account_cputime(p, ns);
++	account_group_exec_runtime(p, ns);
++
++	p->time_slice -= ns;
++	p->last_ran = rq->clock_task;
++}
++
++/*
++ * Return accounted runtime for the task.
++ * Return separately the current's pending runtime that has not been
++ * accounted yet.
++ */
++unsigned long long task_sched_runtime(struct task_struct *p)
++{
++	unsigned long flags;
++	struct rq *rq;
++	raw_spinlock_t *lock;
++	u64 ns;
++
++#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
++	/*
++	 * 64-bit doesn't need locks to atomically read a 64-bit value.
++	 * So we have an optimization chance when the task's delta_exec is 0.
++	 * Reading ->on_cpu is racy, but this is ok.
++	 *
++	 * If we race with it leaving CPU, we'll take a lock. So we're correct.
++	 * If we race with it entering CPU, unaccounted time is 0. This is
++	 * indistinguishable from the read occurring a few cycles earlier.
++	 * If we see ->on_cpu without ->on_rq, the task is leaving, and has
++	 * been accounted, so we're correct here as well.
++	 */
++	if (!p->on_cpu || !task_on_rq_queued(p))
++		return tsk_seruntime(p);
++#endif
++
++	rq = task_access_lock_irqsave(p, &lock, &flags);
++	/*
++	 * Must be ->curr _and_ ->on_rq. If dequeued, we would
++	 * project cycles that may never be accounted to this
++	 * thread, breaking clock_gettime().
++	 */
++	if (p == rq->curr && task_on_rq_queued(p)) {
++		update_rq_clock(rq);
++		update_curr(rq, p);
++	}
++	ns = tsk_seruntime(p);
++	task_access_unlock_irqrestore(p, lock, &flags);
++
++	return ns;
++}
++
++/* This manages tasks that have run out of timeslice during a scheduler_tick */
++static inline void scheduler_task_tick(struct rq *rq)
++{
++	struct task_struct *p = rq->curr;
++
++	if (is_idle_task(p))
++		return;
++
++	update_curr(rq, p);
++	cpufreq_update_util(rq, 0);
++
++	/*
++	 * Tasks that have less than RESCHED_NS of time slice left will be
++	 * rescheduled.
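++	 *
++	 * A worked example with purely illustrative numbers (not values
++	 * taken from this patch): with a 4ms slice and RESCHED_NS at 2ms,
++	 * a task that has consumed 2.5ms is left with 1.5ms < RESCHED_NS,
++	 * so this tick marks it for rescheduling; update_curr() above is
++	 * what decremented p->time_slice by the runtime delta.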
++ */ ++ if (p->time_slice >= RESCHED_NS) ++ return; ++ set_tsk_need_resched(p); ++ set_preempt_need_resched(); ++} ++ ++#ifdef CONFIG_SCHED_DEBUG ++static u64 cpu_resched_latency(struct rq *rq) ++{ ++ int latency_warn_ms = READ_ONCE(sysctl_resched_latency_warn_ms); ++ u64 resched_latency, now = rq_clock(rq); ++ static bool warned_once; ++ ++ if (sysctl_resched_latency_warn_once && warned_once) ++ return 0; ++ ++ if (!need_resched() || !latency_warn_ms) ++ return 0; ++ ++ if (system_state == SYSTEM_BOOTING) ++ return 0; ++ ++ if (!rq->last_seen_need_resched_ns) { ++ rq->last_seen_need_resched_ns = now; ++ rq->ticks_without_resched = 0; ++ return 0; ++ } ++ ++ rq->ticks_without_resched++; ++ resched_latency = now - rq->last_seen_need_resched_ns; ++ if (resched_latency <= latency_warn_ms * NSEC_PER_MSEC) ++ return 0; ++ ++ warned_once = true; ++ ++ return resched_latency; ++} ++ ++static int __init setup_resched_latency_warn_ms(char *str) ++{ ++ long val; ++ ++ if ((kstrtol(str, 0, &val))) { ++ pr_warn("Unable to set resched_latency_warn_ms\n"); ++ return 1; ++ } ++ ++ sysctl_resched_latency_warn_ms = val; ++ return 1; ++} ++__setup("resched_latency_warn_ms=", setup_resched_latency_warn_ms); ++#else ++static inline u64 cpu_resched_latency(struct rq *rq) { return 0; } ++#endif /* CONFIG_SCHED_DEBUG */ ++ ++/* ++ * This function gets called by the timer code, with HZ frequency. ++ * We call it with interrupts disabled. ++ */ ++void scheduler_tick(void) ++{ ++ int cpu __maybe_unused = smp_processor_id(); ++ struct rq *rq = cpu_rq(cpu); ++ u64 resched_latency; ++ ++ arch_scale_freq_tick(); ++ sched_clock_tick(); ++ ++ raw_spin_lock(&rq->lock); ++ update_rq_clock(rq); ++ ++ scheduler_task_tick(rq); ++ if (sched_feat(LATENCY_WARN)) ++ resched_latency = cpu_resched_latency(rq); ++ calc_global_load_tick(rq); ++ ++ rq->last_tick = rq->clock; ++ raw_spin_unlock(&rq->lock); ++ ++ if (sched_feat(LATENCY_WARN) && resched_latency) ++ resched_latency_warn(cpu, resched_latency); ++ ++ perf_event_task_tick(); ++} ++ ++#ifdef CONFIG_SCHED_SMT ++static inline int sg_balance_cpu_stop(void *data) ++{ ++ struct rq *rq = this_rq(); ++ struct task_struct *p = data; ++ cpumask_t tmp; ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ ++ raw_spin_lock(&p->pi_lock); ++ raw_spin_lock(&rq->lock); ++ ++ rq->active_balance = 0; ++ /* _something_ may have changed the task, double check again */ ++ if (task_on_rq_queued(p) && task_rq(p) == rq && ++ cpumask_and(&tmp, p->cpus_ptr, &sched_sg_idle_mask) && ++ !is_migration_disabled(p)) { ++ int cpu = cpu_of(rq); ++ int dcpu = __best_mask_cpu(&tmp, per_cpu(sched_cpu_llc_mask, cpu)); ++ rq = move_queued_task(rq, p, dcpu); ++ } ++ ++ raw_spin_unlock(&rq->lock); ++ raw_spin_unlock(&p->pi_lock); ++ ++ local_irq_restore(flags); ++ ++ return 0; ++} ++ ++/* sg_balance_trigger - trigger slibing group balance for @cpu */ ++static inline int sg_balance_trigger(const int cpu) ++{ ++ struct rq *rq= cpu_rq(cpu); ++ unsigned long flags; ++ struct task_struct *curr; ++ int res; ++ ++ if (!raw_spin_trylock_irqsave(&rq->lock, flags)) ++ return 0; ++ curr = rq->curr; ++ res = (!is_idle_task(curr)) && (1 == rq->nr_running) &&\ ++ cpumask_intersects(curr->cpus_ptr, &sched_sg_idle_mask) &&\ ++ !is_migration_disabled(curr) && (!rq->active_balance); ++ ++ if (res) ++ rq->active_balance = 1; ++ ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++ ++ if (res) ++ stop_one_cpu_nowait(cpu, sg_balance_cpu_stop, curr, ++ &rq->active_balance_work); ++ return res; ++} ++ ++/* ++ * sg_balance - slibing 
group balance check for run queue @rq ++ */ ++static inline void sg_balance(struct rq *rq) ++{ ++ cpumask_t chk; ++ int cpu = cpu_of(rq); ++ ++ /* exit when cpu is offline */ ++ if (unlikely(!rq->online)) ++ return; ++ ++ /* ++ * Only cpu in slibing idle group will do the checking and then ++ * find potential cpus which can migrate the current running task ++ */ ++ if (cpumask_test_cpu(cpu, &sched_sg_idle_mask) && ++ cpumask_andnot(&chk, cpu_online_mask, sched_rq_watermark) && ++ cpumask_andnot(&chk, &chk, &sched_rq_pending_mask)) { ++ int i; ++ ++ for_each_cpu_wrap(i, &chk, cpu) { ++ if (cpumask_subset(cpu_smt_mask(i), &chk) && ++ sg_balance_trigger(i)) ++ return; ++ } ++ } ++} ++#endif /* CONFIG_SCHED_SMT */ ++ ++#ifdef CONFIG_NO_HZ_FULL ++ ++struct tick_work { ++ int cpu; ++ atomic_t state; ++ struct delayed_work work; ++}; ++/* Values for ->state, see diagram below. */ ++#define TICK_SCHED_REMOTE_OFFLINE 0 ++#define TICK_SCHED_REMOTE_OFFLINING 1 ++#define TICK_SCHED_REMOTE_RUNNING 2 ++ ++/* ++ * State diagram for ->state: ++ * ++ * ++ * TICK_SCHED_REMOTE_OFFLINE ++ * | ^ ++ * | | ++ * | | sched_tick_remote() ++ * | | ++ * | | ++ * +--TICK_SCHED_REMOTE_OFFLINING ++ * | ^ ++ * | | ++ * sched_tick_start() | | sched_tick_stop() ++ * | | ++ * V | ++ * TICK_SCHED_REMOTE_RUNNING ++ * ++ * ++ * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote() ++ * and sched_tick_start() are happy to leave the state in RUNNING. ++ */ ++ ++static struct tick_work __percpu *tick_work_cpu; ++ ++static void sched_tick_remote(struct work_struct *work) ++{ ++ struct delayed_work *dwork = to_delayed_work(work); ++ struct tick_work *twork = container_of(dwork, struct tick_work, work); ++ int cpu = twork->cpu; ++ struct rq *rq = cpu_rq(cpu); ++ struct task_struct *curr; ++ unsigned long flags; ++ u64 delta; ++ int os; ++ ++ /* ++ * Handle the tick only if it appears the remote CPU is running in full ++ * dynticks mode. The check is racy by nature, but missing a tick or ++ * having one too much is no big deal because the scheduler tick updates ++ * statistics and checks timeslices in a time-independent way, regardless ++ * of when exactly it is running. ++ */ ++ if (!tick_nohz_tick_stopped_cpu(cpu)) ++ goto out_requeue; ++ ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ curr = rq->curr; ++ if (cpu_is_offline(cpu)) ++ goto out_unlock; ++ ++ update_rq_clock(rq); ++ if (!is_idle_task(curr)) { ++ /* ++ * Make sure the next tick runs within a reasonable ++ * amount of time. ++ */ ++ delta = rq_clock_task(rq) - curr->last_ran; ++ WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3); ++ } ++ scheduler_task_tick(rq); ++ ++ calc_load_nohz_remote(rq); ++out_unlock: ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++ ++out_requeue: ++ /* ++ * Run the remote tick once per second (1Hz). This arbitrary ++ * frequency is large enough to avoid overload but short enough ++ * to keep scheduler internal stats reasonably up to date. But ++ * first update state to reflect hotplug activity if required. 
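++	 * (Note on the atomic below: atomic_fetch_add_unless(&state, -1,
++	 * TICK_SCHED_REMOTE_RUNNING) leaves RUNNING (2) untouched but steps
++	 * OFFLINING (1) down to OFFLINE (0), so a running remote tick
++	 * re-queues itself while an offlining one quietly stops.)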
++ */ ++ os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING); ++ WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE); ++ if (os == TICK_SCHED_REMOTE_RUNNING) ++ queue_delayed_work(system_unbound_wq, dwork, HZ); ++} ++ ++static void sched_tick_start(int cpu) ++{ ++ int os; ++ struct tick_work *twork; ++ ++ if (housekeeping_cpu(cpu, HK_TYPE_TICK)) ++ return; ++ ++ WARN_ON_ONCE(!tick_work_cpu); ++ ++ twork = per_cpu_ptr(tick_work_cpu, cpu); ++ os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING); ++ WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING); ++ if (os == TICK_SCHED_REMOTE_OFFLINE) { ++ twork->cpu = cpu; ++ INIT_DELAYED_WORK(&twork->work, sched_tick_remote); ++ queue_delayed_work(system_unbound_wq, &twork->work, HZ); ++ } ++} ++ ++#ifdef CONFIG_HOTPLUG_CPU ++static void sched_tick_stop(int cpu) ++{ ++ struct tick_work *twork; ++ ++ if (housekeeping_cpu(cpu, HK_TYPE_TICK)) ++ return; ++ ++ WARN_ON_ONCE(!tick_work_cpu); ++ ++ twork = per_cpu_ptr(tick_work_cpu, cpu); ++ cancel_delayed_work_sync(&twork->work); ++} ++#endif /* CONFIG_HOTPLUG_CPU */ ++ ++int __init sched_tick_offload_init(void) ++{ ++ tick_work_cpu = alloc_percpu(struct tick_work); ++ BUG_ON(!tick_work_cpu); ++ return 0; ++} ++ ++#else /* !CONFIG_NO_HZ_FULL */ ++static inline void sched_tick_start(int cpu) { } ++static inline void sched_tick_stop(int cpu) { } ++#endif ++ ++#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \ ++ defined(CONFIG_PREEMPT_TRACER)) ++/* ++ * If the value passed in is equal to the current preempt count ++ * then we just disabled preemption. Start timing the latency. ++ */ ++static inline void preempt_latency_start(int val) ++{ ++ if (preempt_count() == val) { ++ unsigned long ip = get_lock_parent_ip(); ++#ifdef CONFIG_DEBUG_PREEMPT ++ current->preempt_disable_ip = ip; ++#endif ++ trace_preempt_off(CALLER_ADDR0, ip); ++ } ++} ++ ++void preempt_count_add(int val) ++{ ++#ifdef CONFIG_DEBUG_PREEMPT ++ /* ++ * Underflow? ++ */ ++ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) ++ return; ++#endif ++ __preempt_count_add(val); ++#ifdef CONFIG_DEBUG_PREEMPT ++ /* ++ * Spinlock count overflowing soon? ++ */ ++ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= ++ PREEMPT_MASK - 10); ++#endif ++ preempt_latency_start(val); ++} ++EXPORT_SYMBOL(preempt_count_add); ++NOKPROBE_SYMBOL(preempt_count_add); ++ ++/* ++ * If the value passed in equals to the current preempt count ++ * then we just enabled preemption. Stop timing the latency. ++ */ ++static inline void preempt_latency_stop(int val) ++{ ++ if (preempt_count() == val) ++ trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); ++} ++ ++void preempt_count_sub(int val) ++{ ++#ifdef CONFIG_DEBUG_PREEMPT ++ /* ++ * Underflow? ++ */ ++ if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) ++ return; ++ /* ++ * Is the spinlock portion underflowing? 
++ */ ++ if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && ++ !(preempt_count() & PREEMPT_MASK))) ++ return; ++#endif ++ ++ preempt_latency_stop(val); ++ __preempt_count_sub(val); ++} ++EXPORT_SYMBOL(preempt_count_sub); ++NOKPROBE_SYMBOL(preempt_count_sub); ++ ++#else ++static inline void preempt_latency_start(int val) { } ++static inline void preempt_latency_stop(int val) { } ++#endif ++ ++static inline unsigned long get_preempt_disable_ip(struct task_struct *p) ++{ ++#ifdef CONFIG_DEBUG_PREEMPT ++ return p->preempt_disable_ip; ++#else ++ return 0; ++#endif ++} ++ ++/* ++ * Print scheduling while atomic bug: ++ */ ++static noinline void __schedule_bug(struct task_struct *prev) ++{ ++ /* Save this before calling printk(), since that will clobber it */ ++ unsigned long preempt_disable_ip = get_preempt_disable_ip(current); ++ ++ if (oops_in_progress) ++ return; ++ ++ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", ++ prev->comm, prev->pid, preempt_count()); ++ ++ debug_show_held_locks(prev); ++ print_modules(); ++ if (irqs_disabled()) ++ print_irqtrace_events(prev); ++ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT) ++ && in_atomic_preempt_off()) { ++ pr_err("Preemption disabled at:"); ++ print_ip_sym(KERN_ERR, preempt_disable_ip); ++ } ++ if (panic_on_warn) ++ panic("scheduling while atomic\n"); ++ ++ dump_stack(); ++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); ++} ++ ++/* ++ * Various schedule()-time debugging checks and statistics: ++ */ ++static inline void schedule_debug(struct task_struct *prev, bool preempt) ++{ ++#ifdef CONFIG_SCHED_STACK_END_CHECK ++ if (task_stack_end_corrupted(prev)) ++ panic("corrupted stack end detected inside scheduler\n"); ++ ++ if (task_scs_end_corrupted(prev)) ++ panic("corrupted shadow stack detected inside scheduler\n"); ++#endif ++ ++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP ++ if (!preempt && READ_ONCE(prev->__state) && prev->non_block_count) { ++ printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n", ++ prev->comm, prev->pid, prev->non_block_count); ++ dump_stack(); ++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); ++ } ++#endif ++ ++ if (unlikely(in_atomic_preempt_off())) { ++ __schedule_bug(prev); ++ preempt_count_set(PREEMPT_DISABLED); ++ } ++ rcu_sleep_check(); ++ SCHED_WARN_ON(ct_state() == CONTEXT_USER); ++ ++ profile_hit(SCHED_PROFILING, __builtin_return_address(0)); ++ ++ schedstat_inc(this_rq()->sched_count); ++} ++ ++/* ++ * Compile time debug macro ++ * #define ALT_SCHED_DEBUG ++ */ ++ ++#ifdef ALT_SCHED_DEBUG ++void alt_sched_debug(void) ++{ ++ printk(KERN_INFO "sched: pending: 0x%04lx, idle: 0x%04lx, sg_idle: 0x%04lx\n", ++ sched_rq_pending_mask.bits[0], ++ sched_rq_watermark[0].bits[0], ++ sched_sg_idle_mask.bits[0]); ++} ++#else ++inline void alt_sched_debug(void) {} ++#endif ++ ++#ifdef CONFIG_SMP ++ ++#define SCHED_RQ_NR_MIGRATION (32U) ++/* ++ * Migrate pending tasks in @rq to @dest_cpu ++ * Will try to migrate mininal of half of @rq nr_running tasks and ++ * SCHED_RQ_NR_MIGRATION to @dest_cpu ++ */ ++static inline int ++migrate_pending_tasks(struct rq *rq, struct rq *dest_rq, const int dest_cpu) ++{ ++ struct task_struct *p, *skip = rq->curr; ++ int nr_migrated = 0; ++ int nr_tries = min(rq->nr_running / 2, SCHED_RQ_NR_MIGRATION); ++ ++ while (skip != rq->idle && nr_tries && ++ (p = sched_rq_next_task(skip, rq)) != rq->idle) { ++ skip = sched_rq_next_task(p, rq); ++ if (cpumask_test_cpu(dest_cpu, p->cpus_ptr)) { ++ __SCHED_DEQUEUE_TASK(p, rq, 0); ++ set_task_cpu(p, dest_cpu); ++ sched_task_sanity_check(p, dest_rq); ++ 
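++			/* cpu switched above; enqueue on the destination rq */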
__SCHED_ENQUEUE_TASK(p, dest_rq, 0); ++ nr_migrated++; ++ } ++ nr_tries--; ++ } ++ ++ return nr_migrated; ++} ++ ++static inline int take_other_rq_tasks(struct rq *rq, int cpu) ++{ ++ struct cpumask *topo_mask, *end_mask; ++ ++ if (unlikely(!rq->online)) ++ return 0; ++ ++ if (cpumask_empty(&sched_rq_pending_mask)) ++ return 0; ++ ++ topo_mask = per_cpu(sched_cpu_topo_masks, cpu) + 1; ++ end_mask = per_cpu(sched_cpu_topo_end_mask, cpu); ++ do { ++ int i; ++ for_each_cpu_and(i, &sched_rq_pending_mask, topo_mask) { ++ int nr_migrated; ++ struct rq *src_rq; ++ ++ src_rq = cpu_rq(i); ++ if (!do_raw_spin_trylock(&src_rq->lock)) ++ continue; ++ spin_acquire(&src_rq->lock.dep_map, ++ SINGLE_DEPTH_NESTING, 1, _RET_IP_); ++ ++ if ((nr_migrated = migrate_pending_tasks(src_rq, rq, cpu))) { ++ src_rq->nr_running -= nr_migrated; ++ if (src_rq->nr_running < 2) ++ cpumask_clear_cpu(i, &sched_rq_pending_mask); ++ ++ rq->nr_running += nr_migrated; ++ if (rq->nr_running > 1) ++ cpumask_set_cpu(cpu, &sched_rq_pending_mask); ++ ++ cpufreq_update_util(rq, 0); ++ ++ spin_release(&src_rq->lock.dep_map, _RET_IP_); ++ do_raw_spin_unlock(&src_rq->lock); ++ ++ return 1; ++ } ++ ++ spin_release(&src_rq->lock.dep_map, _RET_IP_); ++ do_raw_spin_unlock(&src_rq->lock); ++ } ++ } while (++topo_mask < end_mask); ++ ++ return 0; ++} ++#endif ++ ++/* ++ * Timeslices below RESCHED_NS are considered as good as expired as there's no ++ * point rescheduling when there's so little time left. ++ */ ++static inline void check_curr(struct task_struct *p, struct rq *rq) ++{ ++ if (unlikely(rq->idle == p)) ++ return; ++ ++ update_curr(rq, p); ++ ++ if (p->time_slice < RESCHED_NS) ++ time_slice_expired(p, rq); ++} ++ ++static inline struct task_struct * ++choose_next_task(struct rq *rq, int cpu, struct task_struct *prev) ++{ ++ struct task_struct *next; ++ ++ if (unlikely(rq->skip)) { ++ next = rq_runnable_task(rq); ++ if (next == rq->idle) { ++#ifdef CONFIG_SMP ++ if (!take_other_rq_tasks(rq, cpu)) { ++#endif ++ rq->skip = NULL; ++ schedstat_inc(rq->sched_goidle); ++ return next; ++#ifdef CONFIG_SMP ++ } ++ next = rq_runnable_task(rq); ++#endif ++ } ++ rq->skip = NULL; ++#ifdef CONFIG_HIGH_RES_TIMERS ++ hrtick_start(rq, next->time_slice); ++#endif ++ return next; ++ } ++ ++ next = sched_rq_first_task(rq); ++ if (next == rq->idle) { ++#ifdef CONFIG_SMP ++ if (!take_other_rq_tasks(rq, cpu)) { ++#endif ++ schedstat_inc(rq->sched_goidle); ++ /*printk(KERN_INFO "sched: choose_next_task(%d) idle %px\n", cpu, next);*/ ++ return next; ++#ifdef CONFIG_SMP ++ } ++ next = sched_rq_first_task(rq); ++#endif ++ } ++#ifdef CONFIG_HIGH_RES_TIMERS ++ hrtick_start(rq, next->time_slice); ++#endif ++ /*printk(KERN_INFO "sched: choose_next_task(%d) next %px\n", cpu, ++ * next);*/ ++ return next; ++} ++ ++/* ++ * Constants for the sched_mode argument of __schedule(). ++ * ++ * The mode argument allows RT enabled kernels to differentiate a ++ * preemption from blocking on an 'sleeping' spin/rwlock. Note that ++ * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to ++ * optimize the AND operation out and just check for zero. ++ */ ++#define SM_NONE 0x0 ++#define SM_PREEMPT 0x1 ++#define SM_RTLOCK_WAIT 0x2 ++ ++#ifndef CONFIG_PREEMPT_RT ++# define SM_MASK_PREEMPT (~0U) ++#else ++# define SM_MASK_PREEMPT SM_PREEMPT ++#endif ++ ++/* ++ * schedule() is the main scheduler function. ++ * ++ * The main means of driving the scheduler and thus entering this function are: ++ * ++ * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. ++ * ++ * 2. 
TIF_NEED_RESCHED flag is checked on interrupt and userspace return ++ * paths. For example, see arch/x86/entry_64.S. ++ * ++ * To drive preemption between tasks, the scheduler sets the flag in timer ++ * interrupt handler scheduler_tick(). ++ * ++ * 3. Wakeups don't really cause entry into schedule(). They add a ++ * task to the run-queue and that's it. ++ * ++ * Now, if the new task added to the run-queue preempts the current ++ * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets ++ * called on the nearest possible occasion: ++ * ++ * - If the kernel is preemptible (CONFIG_PREEMPTION=y): ++ * ++ * - in syscall or exception context, at the next outmost ++ * preempt_enable(). (this might be as soon as the wake_up()'s ++ * spin_unlock()!) ++ * ++ * - in IRQ context, return from interrupt-handler to ++ * preemptible context ++ * ++ * - If the kernel is not preemptible (CONFIG_PREEMPTION is not set) ++ * then at the next: ++ * ++ * - cond_resched() call ++ * - explicit schedule() call ++ * - return from syscall or exception to user-space ++ * - return from interrupt-handler to user-space ++ * ++ * WARNING: must be called with preemption disabled! ++ */ ++static void __sched notrace __schedule(unsigned int sched_mode) ++{ ++ struct task_struct *prev, *next; ++ unsigned long *switch_count; ++ unsigned long prev_state; ++ struct rq *rq; ++ int cpu; ++ int deactivated = 0; ++ ++ cpu = smp_processor_id(); ++ rq = cpu_rq(cpu); ++ prev = rq->curr; ++ ++ schedule_debug(prev, !!sched_mode); ++ ++ /* by passing sched_feat(HRTICK) checking which Alt schedule FW doesn't support */ ++ hrtick_clear(rq); ++ ++ local_irq_disable(); ++ rcu_note_context_switch(!!sched_mode); ++ ++ /* ++ * Make sure that signal_pending_state()->signal_pending() below ++ * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) ++ * done by the caller to avoid the race with signal_wake_up(): ++ * ++ * __set_current_state(@state) signal_wake_up() ++ * schedule() set_tsk_thread_flag(p, TIF_SIGPENDING) ++ * wake_up_state(p, state) ++ * LOCK rq->lock LOCK p->pi_state ++ * smp_mb__after_spinlock() smp_mb__after_spinlock() ++ * if (signal_pending_state()) if (p->state & @state) ++ * ++ * Also, the membarrier system call requires a full memory barrier ++ * after coming from user-space, before storing to rq->curr. ++ */ ++ raw_spin_lock(&rq->lock); ++ smp_mb__after_spinlock(); ++ ++ update_rq_clock(rq); ++ ++ switch_count = &prev->nivcsw; ++ /* ++ * We must load prev->state once (task_struct::state is volatile), such ++ * that we form a control dependency vs deactivate_task() below. ++ */ ++ prev_state = READ_ONCE(prev->__state); ++ if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) { ++ if (signal_pending_state(prev_state, prev)) { ++ WRITE_ONCE(prev->__state, TASK_RUNNING); ++ } else { ++ prev->sched_contributes_to_load = ++ (prev_state & TASK_UNINTERRUPTIBLE) && ++ !(prev_state & TASK_NOLOAD) && ++ !(prev->flags & PF_FROZEN); ++ ++ if (prev->sched_contributes_to_load) ++ rq->nr_uninterruptible++; ++ ++ /* ++ * __schedule() ttwu() ++ * prev_state = prev->state; if (p->on_rq && ...) ++ * if (prev_state) goto out; ++ * p->on_rq = 0; smp_acquire__after_ctrl_dep(); ++ * p->state = TASK_WAKING ++ * ++ * Where __schedule() and ttwu() have matching control dependencies. ++ * ++ * After this, schedule() must not care about p->state any more. 
++ */ ++ sched_task_deactivate(prev, rq); ++ deactivate_task(prev, rq); ++ deactivated = 1; ++ ++ if (prev->in_iowait) { ++ atomic_inc(&rq->nr_iowait); ++ delayacct_blkio_start(); ++ } ++ } ++ switch_count = &prev->nvcsw; ++ } ++ ++ check_curr(prev, rq); ++ ++ next = choose_next_task(rq, cpu, prev); ++ clear_tsk_need_resched(prev); ++ clear_preempt_need_resched(); ++#ifdef CONFIG_SCHED_DEBUG ++ rq->last_seen_need_resched_ns = 0; ++#endif ++ ++ if (likely(prev != next)) { ++ if (deactivated) ++ update_sched_rq_watermark(rq); ++ next->last_ran = rq->clock_task; ++ rq->last_ts_switch = rq->clock; ++ ++ rq->nr_switches++; ++ /* ++ * RCU users of rcu_dereference(rq->curr) may not see ++ * changes to task_struct made by pick_next_task(). ++ */ ++ RCU_INIT_POINTER(rq->curr, next); ++ /* ++ * The membarrier system call requires each architecture ++ * to have a full memory barrier after updating ++ * rq->curr, before returning to user-space. ++ * ++ * Here are the schemes providing that barrier on the ++ * various architectures: ++ * - mm ? switch_mm() : mmdrop() for x86, s390, sparc, PowerPC. ++ * switch_mm() rely on membarrier_arch_switch_mm() on PowerPC. ++ * - finish_lock_switch() for weakly-ordered ++ * architectures where spin_unlock is a full barrier, ++ * - switch_to() for arm64 (weakly-ordered, spin_unlock ++ * is a RELEASE barrier), ++ */ ++ ++*switch_count; ++ ++ psi_sched_switch(prev, next, !task_on_rq_queued(prev)); ++ ++ trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state); ++ ++ /* Also unlocks the rq: */ ++ rq = context_switch(rq, prev, next); ++ } else { ++ __balance_callbacks(rq); ++ raw_spin_unlock_irq(&rq->lock); ++ } ++ ++#ifdef CONFIG_SCHED_SMT ++ sg_balance(rq); ++#endif ++} ++ ++void __noreturn do_task_dead(void) ++{ ++ /* Causes final put_task_struct in finish_task_switch(): */ ++ set_special_state(TASK_DEAD); ++ ++ /* Tell freezer to ignore us: */ ++ current->flags |= PF_NOFREEZE; ++ ++ __schedule(SM_NONE); ++ BUG(); ++ ++ /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */ ++ for (;;) ++ cpu_relax(); ++} ++ ++static inline void sched_submit_work(struct task_struct *tsk) ++{ ++ unsigned int task_flags; ++ ++ if (task_is_running(tsk)) ++ return; ++ ++ task_flags = tsk->flags; ++ /* ++ * If a worker goes to sleep, notify and ask workqueue whether it ++ * wants to wake up a task to maintain concurrency. ++ */ ++ if (task_flags & (PF_WQ_WORKER | PF_IO_WORKER)) { ++ if (task_flags & PF_WQ_WORKER) ++ wq_worker_sleeping(tsk); ++ else ++ io_wq_worker_sleeping(tsk); ++ } ++ ++ if (tsk_is_pi_blocked(tsk)) ++ return; ++ ++ /* ++ * If we are going to sleep and we have plugged IO queued, ++ * make sure to submit it to avoid deadlocks. ++ */ ++ blk_flush_plug(tsk->plug, true); ++} ++ ++static void sched_update_worker(struct task_struct *tsk) ++{ ++ if (tsk->flags & (PF_WQ_WORKER | PF_IO_WORKER)) { ++ if (tsk->flags & PF_WQ_WORKER) ++ wq_worker_running(tsk); ++ else ++ io_wq_worker_running(tsk); ++ } ++} ++ ++asmlinkage __visible void __sched schedule(void) ++{ ++ struct task_struct *tsk = current; ++ ++ sched_submit_work(tsk); ++ do { ++ preempt_disable(); ++ __schedule(SM_NONE); ++ sched_preempt_enable_no_resched(); ++ } while (need_resched()); ++ sched_update_worker(tsk); ++} ++EXPORT_SYMBOL(schedule); ++ ++/* ++ * synchronize_rcu_tasks() makes sure that no task is stuck in preempted ++ * state (have scheduled out non-voluntarily) by making sure that all ++ * tasks have either left the run queue or have gone into user space. 
++ * As idle tasks do not do either, they must not ever be preempted
++ * (schedule out non-voluntarily).
++ *
++ * schedule_idle() is similar to schedule_preempt_disabled() except that it
++ * never enables preemption because it does not call sched_submit_work().
++ */
++void __sched schedule_idle(void)
++{
++	/*
++	 * As this skips calling sched_submit_work(), which the idle task does
++	 * regardless because that function is a nop when the task is in a
++	 * TASK_RUNNING state, make sure this isn't used someplace that the
++	 * current task can be in any other state. Note, idle is always in the
++	 * TASK_RUNNING state.
++	 */
++	WARN_ON_ONCE(current->__state);
++	do {
++		__schedule(SM_NONE);
++	} while (need_resched());
++}
++
++#if defined(CONFIG_CONTEXT_TRACKING) && !defined(CONFIG_HAVE_CONTEXT_TRACKING_OFFSTACK)
++asmlinkage __visible void __sched schedule_user(void)
++{
++	/*
++	 * If we come here after a random call to set_need_resched(),
++	 * or we have been woken up remotely but the IPI has not yet arrived,
++	 * we haven't yet exited the RCU idle mode. Do it here manually until
++	 * we find a better solution.
++	 *
++	 * NB: There are buggy callers of this function.  Ideally we
++	 * should warn if prev_state != CONTEXT_USER, but that will trigger
++	 * too frequently to make sense yet.
++	 */
++	enum ctx_state prev_state = exception_enter();
++	schedule();
++	exception_exit(prev_state);
++}
++#endif
++
++/**
++ * schedule_preempt_disabled - called with preemption disabled
++ *
++ * Returns with preemption disabled. Note: preempt_count must be 1
++ */
++void __sched schedule_preempt_disabled(void)
++{
++	sched_preempt_enable_no_resched();
++	schedule();
++	preempt_disable();
++}
++
++#ifdef CONFIG_PREEMPT_RT
++void __sched notrace schedule_rtlock(void)
++{
++	do {
++		preempt_disable();
++		__schedule(SM_RTLOCK_WAIT);
++		sched_preempt_enable_no_resched();
++	} while (need_resched());
++}
++NOKPROBE_SYMBOL(schedule_rtlock);
++#endif
++
++static void __sched notrace preempt_schedule_common(void)
++{
++	do {
++		/*
++		 * Because the function tracer can trace preempt_count_sub()
++		 * and it also uses preempt_enable/disable_notrace(), if
++		 * NEED_RESCHED is set, the preempt_enable_notrace() called
++		 * by the function tracer will call this function again and
++		 * cause infinite recursion.
++		 *
++		 * Preemption must be disabled here before the function
++		 * tracer can trace. Break up preempt_disable() into two
++		 * calls. One to disable preemption without fear of being
++		 * traced. The other to still record the preemption latency,
++		 * which can also be traced by the function tracer.
++		 */
++		preempt_disable_notrace();
++		preempt_latency_start(1);
++		__schedule(SM_PREEMPT);
++		preempt_latency_stop(1);
++		preempt_enable_no_resched_notrace();
++
++		/*
++		 * Check again in case we missed a preemption opportunity
++		 * between schedule and now.
++		 */
++	} while (need_resched());
++}
++
++#ifdef CONFIG_PREEMPTION
++/*
++ * This is the entry point to schedule() from in-kernel preemption
++ * off of preempt_enable.
++ */
++asmlinkage __visible void __sched notrace preempt_schedule(void)
++{
++	/*
++	 * If there is a non-zero preempt_count or interrupts are disabled,
++	 * we do not want to preempt the current task. Just return.
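++	 * (preemptible() expands to "preempt_count() == 0 && !irqs_disabled()",
++	 * so both conditions are covered by the single likely() test below.)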
++ */ ++ if (likely(!preemptible())) ++ return; ++ ++ preempt_schedule_common(); ++} ++NOKPROBE_SYMBOL(preempt_schedule); ++EXPORT_SYMBOL(preempt_schedule); ++ ++#ifdef CONFIG_PREEMPT_DYNAMIC ++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) ++#ifndef preempt_schedule_dynamic_enabled ++#define preempt_schedule_dynamic_enabled preempt_schedule ++#define preempt_schedule_dynamic_disabled NULL ++#endif ++DEFINE_STATIC_CALL(preempt_schedule, preempt_schedule_dynamic_enabled); ++EXPORT_STATIC_CALL_TRAMP(preempt_schedule); ++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) ++static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule); ++void __sched notrace dynamic_preempt_schedule(void) ++{ ++ if (!static_branch_unlikely(&sk_dynamic_preempt_schedule)) ++ return; ++ preempt_schedule(); ++} ++NOKPROBE_SYMBOL(dynamic_preempt_schedule); ++EXPORT_SYMBOL(dynamic_preempt_schedule); ++#endif ++#endif ++ ++/** ++ * preempt_schedule_notrace - preempt_schedule called by tracing ++ * ++ * The tracing infrastructure uses preempt_enable_notrace to prevent ++ * recursion and tracing preempt enabling caused by the tracing ++ * infrastructure itself. But as tracing can happen in areas coming ++ * from userspace or just about to enter userspace, a preempt enable ++ * can occur before user_exit() is called. This will cause the scheduler ++ * to be called when the system is still in usermode. ++ * ++ * To prevent this, the preempt_enable_notrace will use this function ++ * instead of preempt_schedule() to exit user context if needed before ++ * calling the scheduler. ++ */ ++asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) ++{ ++ enum ctx_state prev_ctx; ++ ++ if (likely(!preemptible())) ++ return; ++ ++ do { ++ /* ++ * Because the function tracer can trace preempt_count_sub() ++ * and it also uses preempt_enable/disable_notrace(), if ++ * NEED_RESCHED is set, the preempt_enable_notrace() called ++ * by the function tracer will call this function again and ++ * cause infinite recursion. ++ * ++ * Preemption must be disabled here before the function ++ * tracer can trace. Break up preempt_disable() into two ++ * calls. One to disable preemption without fear of being ++ * traced. The other to still record the preemption latency, ++ * which can also be traced by the function tracer. ++ */ ++ preempt_disable_notrace(); ++ preempt_latency_start(1); ++ /* ++ * Needs preempt disabled in case user_exit() is traced ++ * and the tracer calls preempt_enable_notrace() causing ++ * an infinite recursion. 
++ */ ++ prev_ctx = exception_enter(); ++ __schedule(SM_PREEMPT); ++ exception_exit(prev_ctx); ++ ++ preempt_latency_stop(1); ++ preempt_enable_no_resched_notrace(); ++ } while (need_resched()); ++} ++EXPORT_SYMBOL_GPL(preempt_schedule_notrace); ++ ++#ifdef CONFIG_PREEMPT_DYNAMIC ++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) ++#ifndef preempt_schedule_notrace_dynamic_enabled ++#define preempt_schedule_notrace_dynamic_enabled preempt_schedule_notrace ++#define preempt_schedule_notrace_dynamic_disabled NULL ++#endif ++DEFINE_STATIC_CALL(preempt_schedule_notrace, preempt_schedule_notrace_dynamic_enabled); ++EXPORT_STATIC_CALL_TRAMP(preempt_schedule_notrace); ++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) ++static DEFINE_STATIC_KEY_TRUE(sk_dynamic_preempt_schedule_notrace); ++void __sched notrace dynamic_preempt_schedule_notrace(void) ++{ ++ if (!static_branch_unlikely(&sk_dynamic_preempt_schedule_notrace)) ++ return; ++ preempt_schedule_notrace(); ++} ++NOKPROBE_SYMBOL(dynamic_preempt_schedule_notrace); ++EXPORT_SYMBOL(dynamic_preempt_schedule_notrace); ++#endif ++#endif ++ ++#endif /* CONFIG_PREEMPTION */ ++ ++/* ++ * This is the entry point to schedule() from kernel preemption ++ * off of irq context. ++ * Note, that this is called and return with irqs disabled. This will ++ * protect us against recursive calling from irq. ++ */ ++asmlinkage __visible void __sched preempt_schedule_irq(void) ++{ ++ enum ctx_state prev_state; ++ ++ /* Catch callers which need to be fixed */ ++ BUG_ON(preempt_count() || !irqs_disabled()); ++ ++ prev_state = exception_enter(); ++ ++ do { ++ preempt_disable(); ++ local_irq_enable(); ++ __schedule(SM_PREEMPT); ++ local_irq_disable(); ++ sched_preempt_enable_no_resched(); ++ } while (need_resched()); ++ ++ exception_exit(prev_state); ++} ++ ++int default_wake_function(wait_queue_entry_t *curr, unsigned mode, int wake_flags, ++ void *key) ++{ ++ WARN_ON_ONCE(IS_ENABLED(CONFIG_SCHED_DEBUG) && wake_flags & ~WF_SYNC); ++ return try_to_wake_up(curr->private, mode, wake_flags); ++} ++EXPORT_SYMBOL(default_wake_function); ++ ++static inline void check_task_changed(struct task_struct *p, struct rq *rq) ++{ ++ int idx; ++ ++ /* Trigger resched if task sched_prio has been modified. */ ++ if (task_on_rq_queued(p) && (idx = task_sched_prio_idx(p, rq)) != p->sq_idx) { ++ requeue_task(p, rq, idx); ++ check_preempt_curr(rq); ++ } ++} ++ ++static void __setscheduler_prio(struct task_struct *p, int prio) ++{ ++ p->prio = prio; ++} ++ ++#ifdef CONFIG_RT_MUTEXES ++ ++static inline int __rt_effective_prio(struct task_struct *pi_task, int prio) ++{ ++ if (pi_task) ++ prio = min(prio, pi_task->prio); ++ ++ return prio; ++} ++ ++static inline int rt_effective_prio(struct task_struct *p, int prio) ++{ ++ struct task_struct *pi_task = rt_mutex_get_top_task(p); ++ ++ return __rt_effective_prio(pi_task, prio); ++} ++ ++/* ++ * rt_mutex_setprio - set the current priority of a task ++ * @p: task to boost ++ * @pi_task: donor task ++ * ++ * This function changes the 'effective' priority of a task. It does ++ * not touch ->normal_prio like __setscheduler(). ++ * ++ * Used by the rt_mutex code to implement priority inheritance ++ * logic. Call site only calls if the priority of the task changed. 
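++ * (Illustrative example: when a SCHED_NORMAL lock holder blocks a
++ * SCHED_FIFO waiter on an rt_mutex, __rt_effective_prio() above clamps
++ * the holder's prio to min(normal_prio, waiter task's prio), boosting
++ * it until it is de-boosted again at unlock time.)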
++ */ ++void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) ++{ ++ int prio; ++ struct rq *rq; ++ raw_spinlock_t *lock; ++ ++ /* XXX used to be waiter->prio, not waiter->task->prio */ ++ prio = __rt_effective_prio(pi_task, p->normal_prio); ++ ++ /* ++ * If nothing changed; bail early. ++ */ ++ if (p->pi_top_task == pi_task && prio == p->prio) ++ return; ++ ++ rq = __task_access_lock(p, &lock); ++ /* ++ * Set under pi_lock && rq->lock, such that the value can be used under ++ * either lock. ++ * ++ * Note that there is loads of tricky to make this pointer cache work ++ * right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to ++ * ensure a task is de-boosted (pi_task is set to NULL) before the ++ * task is allowed to run again (and can exit). This ensures the pointer ++ * points to a blocked task -- which guarantees the task is present. ++ */ ++ p->pi_top_task = pi_task; ++ ++ /* ++ * For FIFO/RR we only need to set prio, if that matches we're done. ++ */ ++ if (prio == p->prio) ++ goto out_unlock; ++ ++ /* ++ * Idle task boosting is a nono in general. There is one ++ * exception, when PREEMPT_RT and NOHZ is active: ++ * ++ * The idle task calls get_next_timer_interrupt() and holds ++ * the timer wheel base->lock on the CPU and another CPU wants ++ * to access the timer (probably to cancel it). We can safely ++ * ignore the boosting request, as the idle CPU runs this code ++ * with interrupts disabled and will complete the lock ++ * protected section without being interrupted. So there is no ++ * real need to boost. ++ */ ++ if (unlikely(p == rq->idle)) { ++ WARN_ON(p != rq->curr); ++ WARN_ON(p->pi_blocked_on); ++ goto out_unlock; ++ } ++ ++ trace_sched_pi_setprio(p, pi_task); ++ ++ __setscheduler_prio(p, prio); ++ ++ check_task_changed(p, rq); ++out_unlock: ++ /* Avoid rq from going away on us: */ ++ preempt_disable(); ++ ++ __balance_callbacks(rq); ++ __task_access_unlock(p, lock); ++ ++ preempt_enable(); ++} ++#else ++static inline int rt_effective_prio(struct task_struct *p, int prio) ++{ ++ return prio; ++} ++#endif ++ ++void set_user_nice(struct task_struct *p, long nice) ++{ ++ unsigned long flags; ++ struct rq *rq; ++ raw_spinlock_t *lock; ++ ++ if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE) ++ return; ++ /* ++ * We have to be careful, if called from sys_setpriority(), ++ * the task might be in the middle of scheduling on another CPU. ++ */ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ rq = __task_access_lock(p, &lock); ++ ++ p->static_prio = NICE_TO_PRIO(nice); ++ /* ++ * The RT priorities are set via sched_setscheduler(), but we still ++ * allow the 'normal' nice value to be set - but as expected ++ * it won't have any effect on scheduling until the task is ++ * not SCHED_NORMAL/SCHED_BATCH: ++ */ ++ if (task_has_rt_policy(p)) ++ goto out_unlock; ++ ++ p->prio = effective_prio(p); ++ ++ check_task_changed(p, rq); ++out_unlock: ++ __task_access_unlock(p, lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++} ++EXPORT_SYMBOL(set_user_nice); ++ ++/* ++ * can_nice - check if a task can reduce its nice value ++ * @p: task ++ * @nice: nice value ++ */ ++int can_nice(const struct task_struct *p, const int nice) ++{ ++ /* Convert nice value [19,-20] to rlimit style value [1,40] */ ++ int nice_rlim = nice_to_rlimit(nice); ++ ++ return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || ++ capable(CAP_SYS_NICE)); ++} ++ ++#ifdef __ARCH_WANT_SYS_NICE ++ ++/* ++ * sys_nice - change the priority of the current process. 
++ * @increment: priority increment ++ * ++ * sys_setpriority is a more generic, but much slower function that ++ * does similar things. ++ */ ++SYSCALL_DEFINE1(nice, int, increment) ++{ ++ long nice, retval; ++ ++ /* ++ * Setpriority might change our priority at the same moment. ++ * We don't have to worry. Conceptually one call occurs first ++ * and we have a single winner. ++ */ ++ ++ increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH); ++ nice = task_nice(current) + increment; ++ ++ nice = clamp_val(nice, MIN_NICE, MAX_NICE); ++ if (increment < 0 && !can_nice(current, nice)) ++ return -EPERM; ++ ++ retval = security_task_setnice(current, nice); ++ if (retval) ++ return retval; ++ ++ set_user_nice(current, nice); ++ return 0; ++} ++ ++#endif ++ ++/** ++ * task_prio - return the priority value of a given task. ++ * @p: the task in question. ++ * ++ * Return: The priority value as seen by users in /proc. ++ * ++ * sched policy return value kernel prio user prio/nice ++ * ++ * (BMQ)normal, batch, idle[0 ... 53] [100 ... 139] 0/[-20 ... 19]/[-7 ... 7] ++ * (PDS)normal, batch, idle[0 ... 39] 100 0/[-20 ... 19] ++ * fifo, rr [-1 ... -100] [99 ... 0] [0 ... 99] ++ */ ++int task_prio(const struct task_struct *p) ++{ ++ return (p->prio < MAX_RT_PRIO) ? p->prio - MAX_RT_PRIO : ++ task_sched_prio_normal(p, task_rq(p)); ++} ++ ++/** ++ * idle_cpu - is a given CPU idle currently? ++ * @cpu: the processor in question. ++ * ++ * Return: 1 if the CPU is currently idle. 0 otherwise. ++ */ ++int idle_cpu(int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ ++ if (rq->curr != rq->idle) ++ return 0; ++ ++ if (rq->nr_running) ++ return 0; ++ ++#ifdef CONFIG_SMP ++ if (rq->ttwu_pending) ++ return 0; ++#endif ++ ++ return 1; ++} ++ ++/** ++ * idle_task - return the idle task for a given CPU. ++ * @cpu: the processor in question. ++ * ++ * Return: The idle task for the cpu @cpu. ++ */ ++struct task_struct *idle_task(int cpu) ++{ ++ return cpu_rq(cpu)->idle; ++} ++ ++/** ++ * find_process_by_pid - find a process with a matching PID value. ++ * @pid: the pid in question. ++ * ++ * The task of @pid, if found. %NULL otherwise. ++ */ ++static inline struct task_struct *find_process_by_pid(pid_t pid) ++{ ++ return pid ? find_task_by_vpid(pid) : current; ++} ++ ++/* ++ * sched_setparam() passes in -1 for its policy, to let the functions ++ * it calls know not to change it. ++ */ ++#define SETPARAM_POLICY -1 ++ ++static void __setscheduler_params(struct task_struct *p, ++ const struct sched_attr *attr) ++{ ++ int policy = attr->sched_policy; ++ ++ if (policy == SETPARAM_POLICY) ++ policy = p->policy; ++ ++ p->policy = policy; ++ ++ /* ++ * allow normal nice value to be set, but will not have any ++ * effect on scheduling until the task not SCHED_NORMAL/ ++ * SCHED_BATCH ++ */ ++ p->static_prio = NICE_TO_PRIO(attr->sched_nice); ++ ++ /* ++ * __sched_setscheduler() ensures attr->sched_priority == 0 when ++ * !rt_policy. Always setting this ensures that things like ++ * getparam()/getattr() don't report silly values for !rt tasks. 
++ */ ++ p->rt_priority = attr->sched_priority; ++ p->normal_prio = normal_prio(p); ++} ++ ++/* ++ * check the target process has a UID that matches the current process's ++ */ ++static bool check_same_owner(struct task_struct *p) ++{ ++ const struct cred *cred = current_cred(), *pcred; ++ bool match; ++ ++ rcu_read_lock(); ++ pcred = __task_cred(p); ++ match = (uid_eq(cred->euid, pcred->euid) || ++ uid_eq(cred->euid, pcred->uid)); ++ rcu_read_unlock(); ++ return match; ++} ++ ++static int __sched_setscheduler(struct task_struct *p, ++ const struct sched_attr *attr, ++ bool user, bool pi) ++{ ++ const struct sched_attr dl_squash_attr = { ++ .size = sizeof(struct sched_attr), ++ .sched_policy = SCHED_FIFO, ++ .sched_nice = 0, ++ .sched_priority = 99, ++ }; ++ int oldpolicy = -1, policy = attr->sched_policy; ++ int retval, newprio; ++ struct callback_head *head; ++ unsigned long flags; ++ struct rq *rq; ++ int reset_on_fork; ++ raw_spinlock_t *lock; ++ ++ /* The pi code expects interrupts enabled */ ++ BUG_ON(pi && in_interrupt()); ++ ++ /* ++ * Alt schedule FW supports SCHED_DEADLINE by squash it as prio 0 SCHED_FIFO ++ */ ++ if (unlikely(SCHED_DEADLINE == policy)) { ++ attr = &dl_squash_attr; ++ policy = attr->sched_policy; ++ } ++recheck: ++ /* Double check policy once rq lock held */ ++ if (policy < 0) { ++ reset_on_fork = p->sched_reset_on_fork; ++ policy = oldpolicy = p->policy; ++ } else { ++ reset_on_fork = !!(attr->sched_flags & SCHED_RESET_ON_FORK); ++ ++ if (policy > SCHED_IDLE) ++ return -EINVAL; ++ } ++ ++ if (attr->sched_flags & ~(SCHED_FLAG_ALL)) ++ return -EINVAL; ++ ++ /* ++ * Valid priorities for SCHED_FIFO and SCHED_RR are ++ * 1..MAX_RT_PRIO-1, valid priority for SCHED_NORMAL and ++ * SCHED_BATCH and SCHED_IDLE is 0. ++ */ ++ if (attr->sched_priority < 0 || ++ (p->mm && attr->sched_priority > MAX_RT_PRIO - 1) || ++ (!p->mm && attr->sched_priority > MAX_RT_PRIO - 1)) ++ return -EINVAL; ++ if ((SCHED_RR == policy || SCHED_FIFO == policy) != ++ (attr->sched_priority != 0)) ++ return -EINVAL; ++ ++ /* ++ * Allow unprivileged RT tasks to decrease priority: ++ */ ++ if (user && !capable(CAP_SYS_NICE)) { ++ if (SCHED_FIFO == policy || SCHED_RR == policy) { ++ unsigned long rlim_rtprio = ++ task_rlimit(p, RLIMIT_RTPRIO); ++ ++ /* Can't set/change the rt policy */ ++ if (policy != p->policy && !rlim_rtprio) ++ return -EPERM; ++ ++ /* Can't increase priority */ ++ if (attr->sched_priority > p->rt_priority && ++ attr->sched_priority > rlim_rtprio) ++ return -EPERM; ++ } ++ ++ /* Can't change other user's priorities */ ++ if (!check_same_owner(p)) ++ return -EPERM; ++ ++ /* Normal users shall not reset the sched_reset_on_fork flag */ ++ if (p->sched_reset_on_fork && !reset_on_fork) ++ return -EPERM; ++ } ++ ++ if (user) { ++ retval = security_task_setscheduler(p); ++ if (retval) ++ return retval; ++ } ++ ++ if (pi) ++ cpuset_read_lock(); ++ ++ /* ++ * Make sure no PI-waiters arrive (or leave) while we are ++ * changing the priority of the task: ++ */ ++ raw_spin_lock_irqsave(&p->pi_lock, flags); ++ ++ /* ++ * To be able to change p->policy safely, task_access_lock() ++ * must be called. ++ * IF use task_access_lock() here: ++ * For the task p which is not running, reading rq->stop is ++ * racy but acceptable as ->stop doesn't change much. ++ * An enhancemnet can be made to read rq->stop saftly. 
++ */ ++ rq = __task_access_lock(p, &lock); ++ ++ /* ++ * Changing the policy of the stop threads its a very bad idea ++ */ ++ if (p == rq->stop) { ++ retval = -EINVAL; ++ goto unlock; ++ } ++ ++ /* ++ * If not changing anything there's no need to proceed further: ++ */ ++ if (unlikely(policy == p->policy)) { ++ if (rt_policy(policy) && attr->sched_priority != p->rt_priority) ++ goto change; ++ if (!rt_policy(policy) && ++ NICE_TO_PRIO(attr->sched_nice) != p->static_prio) ++ goto change; ++ ++ p->sched_reset_on_fork = reset_on_fork; ++ retval = 0; ++ goto unlock; ++ } ++change: ++ ++ /* Re-check policy now with rq lock held */ ++ if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { ++ policy = oldpolicy = -1; ++ __task_access_unlock(p, lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ if (pi) ++ cpuset_read_unlock(); ++ goto recheck; ++ } ++ ++ p->sched_reset_on_fork = reset_on_fork; ++ ++ newprio = __normal_prio(policy, attr->sched_priority, NICE_TO_PRIO(attr->sched_nice)); ++ if (pi) { ++ /* ++ * Take priority boosted tasks into account. If the new ++ * effective priority is unchanged, we just store the new ++ * normal parameters and do not touch the scheduler class and ++ * the runqueue. This will be done when the task deboost ++ * itself. ++ */ ++ newprio = rt_effective_prio(p, newprio); ++ } ++ ++ if (!(attr->sched_flags & SCHED_FLAG_KEEP_PARAMS)) { ++ __setscheduler_params(p, attr); ++ __setscheduler_prio(p, newprio); ++ } ++ ++ check_task_changed(p, rq); ++ ++ /* Avoid rq from going away on us: */ ++ preempt_disable(); ++ head = splice_balance_callbacks(rq); ++ __task_access_unlock(p, lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ ++ if (pi) { ++ cpuset_read_unlock(); ++ rt_mutex_adjust_pi(p); ++ } ++ ++ /* Run balance callbacks after we've adjusted the PI chain: */ ++ balance_callbacks(rq, head); ++ preempt_enable(); ++ ++ return 0; ++ ++unlock: ++ __task_access_unlock(p, lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, flags); ++ if (pi) ++ cpuset_read_unlock(); ++ return retval; ++} ++ ++static int _sched_setscheduler(struct task_struct *p, int policy, ++ const struct sched_param *param, bool check) ++{ ++ struct sched_attr attr = { ++ .sched_policy = policy, ++ .sched_priority = param->sched_priority, ++ .sched_nice = PRIO_TO_NICE(p->static_prio), ++ }; ++ ++ /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ ++ if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { ++ attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; ++ policy &= ~SCHED_RESET_ON_FORK; ++ attr.sched_policy = policy; ++ } ++ ++ return __sched_setscheduler(p, &attr, check, true); ++} ++ ++/** ++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. ++ * @p: the task in question. ++ * @policy: new policy. ++ * @param: structure containing the new RT priority. ++ * ++ * Use sched_set_fifo(), read its comment. ++ * ++ * Return: 0 on success. An error code otherwise. ++ * ++ * NOTE that the task may be already dead. 
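++ * (In-kernel callers normally use the wrappers further down instead of
++ * passing a raw priority, e.g.:
++ *
++ *	sched_set_fifo(tsk);
++ *
++ * which picks the fixed MAX_RT_PRIO / 2 priority for them.)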
++ */ ++int sched_setscheduler(struct task_struct *p, int policy, ++ const struct sched_param *param) ++{ ++ return _sched_setscheduler(p, policy, param, true); ++} ++ ++int sched_setattr(struct task_struct *p, const struct sched_attr *attr) ++{ ++ return __sched_setscheduler(p, attr, true, true); ++} ++ ++int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr) ++{ ++ return __sched_setscheduler(p, attr, false, true); ++} ++EXPORT_SYMBOL_GPL(sched_setattr_nocheck); ++ ++/** ++ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. ++ * @p: the task in question. ++ * @policy: new policy. ++ * @param: structure containing the new RT priority. ++ * ++ * Just like sched_setscheduler, only don't bother checking if the ++ * current context has permission. For example, this is needed in ++ * stop_machine(): we create temporary high priority worker threads, ++ * but our caller might not have that capability. ++ * ++ * Return: 0 on success. An error code otherwise. ++ */ ++int sched_setscheduler_nocheck(struct task_struct *p, int policy, ++ const struct sched_param *param) ++{ ++ return _sched_setscheduler(p, policy, param, false); ++} ++ ++/* ++ * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally ++ * incapable of resource management, which is the one thing an OS really should ++ * be doing. ++ * ++ * This is of course the reason it is limited to privileged users only. ++ * ++ * Worse still; it is fundamentally impossible to compose static priority ++ * workloads. You cannot take two correctly working static prio workloads ++ * and smash them together and still expect them to work. ++ * ++ * For this reason 'all' FIFO tasks the kernel creates are basically at: ++ * ++ * MAX_RT_PRIO / 2 ++ * ++ * The administrator _MUST_ configure the system, the kernel simply doesn't ++ * know enough information to make a sensible choice. ++ */ ++void sched_set_fifo(struct task_struct *p) ++{ ++ struct sched_param sp = { .sched_priority = MAX_RT_PRIO / 2 }; ++ WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); ++} ++EXPORT_SYMBOL_GPL(sched_set_fifo); ++ ++/* ++ * For when you don't much care about FIFO, but want to be above SCHED_NORMAL. ++ */ ++void sched_set_fifo_low(struct task_struct *p) ++{ ++ struct sched_param sp = { .sched_priority = 1 }; ++ WARN_ON_ONCE(sched_setscheduler_nocheck(p, SCHED_FIFO, &sp) != 0); ++} ++EXPORT_SYMBOL_GPL(sched_set_fifo_low); ++ ++void sched_set_normal(struct task_struct *p, int nice) ++{ ++ struct sched_attr attr = { ++ .sched_policy = SCHED_NORMAL, ++ .sched_nice = nice, ++ }; ++ WARN_ON_ONCE(sched_setattr_nocheck(p, &attr) != 0); ++} ++EXPORT_SYMBOL_GPL(sched_set_normal); ++ ++static int ++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) ++{ ++ struct sched_param lparam; ++ struct task_struct *p; ++ int retval; ++ ++ if (!param || pid < 0) ++ return -EINVAL; ++ if (copy_from_user(&lparam, param, sizeof(struct sched_param))) ++ return -EFAULT; ++ ++ rcu_read_lock(); ++ retval = -ESRCH; ++ p = find_process_by_pid(pid); ++ if (likely(p)) ++ get_task_struct(p); ++ rcu_read_unlock(); ++ ++ if (likely(p)) { ++ retval = sched_setscheduler(p, policy, &lparam); ++ put_task_struct(p); ++ } ++ ++ return retval; ++} ++ ++/* ++ * Mimics kernel/events/core.c perf_copy_attr(). 
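++ * User-space passes its own sizeof(struct sched_attr) in uattr->size; a
++ * zero size is treated as SCHED_ATTR_SIZE_VER0, and any trailing bytes
++ * the kernel does not know about must be zero, otherwise the copy fails
++ * with -E2BIG (that check is done by copy_struct_from_user() below).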
++ */
++static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *attr)
++{
++	u32 size;
++	int ret;
++
++	/* Zero the full structure, so that a short copy will be nice: */
++	memset(attr, 0, sizeof(*attr));
++
++	ret = get_user(size, &uattr->size);
++	if (ret)
++		return ret;
++
++	/* ABI compatibility quirk: */
++	if (!size)
++		size = SCHED_ATTR_SIZE_VER0;
++
++	if (size < SCHED_ATTR_SIZE_VER0 || size > PAGE_SIZE)
++		goto err_size;
++
++	ret = copy_struct_from_user(attr, sizeof(*attr), uattr, size);
++	if (ret) {
++		if (ret == -E2BIG)
++			goto err_size;
++		return ret;
++	}
++
++	/*
++	 * XXX: Do we want to be lenient like existing syscalls; or do we want
++	 * to be strict and return an error on out-of-bounds values?
++	 */
++	attr->sched_nice = clamp(attr->sched_nice, -20, 19);
++
++	/* sched/core.c uses zero here but we already know ret is zero */
++	return 0;
++
++err_size:
++	put_user(sizeof(*attr), &uattr->size);
++	return -E2BIG;
++}
++
++/**
++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
++ * @pid: the pid in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, struct sched_param __user *, param)
++{
++	if (policy < 0)
++		return -EINVAL;
++
++	return do_sched_setscheduler(pid, policy, param);
++}
++
++/**
++ * sys_sched_setparam - set/change the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
++{
++	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
++}
++
++/**
++ * sys_sched_setattr - same as above, but with extended sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
++			       unsigned int, flags)
++{
++	struct sched_attr attr;
++	struct task_struct *p;
++	int retval;
++
++	if (!uattr || pid < 0 || flags)
++		return -EINVAL;
++
++	retval = sched_copy_attr(uattr, &attr);
++	if (retval)
++		return retval;
++
++	if ((int)attr.sched_policy < 0)
++		return -EINVAL;
++
++	rcu_read_lock();
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (likely(p))
++		get_task_struct(p);
++	rcu_read_unlock();
++
++	if (likely(p)) {
++		retval = sched_setattr(p, &attr);
++		put_task_struct(p);
++	}
++
++	return retval;
++}
++
++/**
++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
++ * @pid: the pid in question.
++ *
++ * Return: On success, the policy of the thread. Otherwise, a negative error
++ * code.
++ */
++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
++{
++	struct task_struct *p;
++	int retval = -EINVAL;
++
++	if (pid < 0)
++		goto out_nounlock;
++
++	retval = -ESRCH;
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	if (p) {
++		retval = security_task_getscheduler(p);
++		if (!retval)
++			retval = p->policy;
++	}
++	rcu_read_unlock();
++
++out_nounlock:
++	return retval;
++}
++
++/**
++ * sys_sched_getparam - get the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the RT priority.
++ *
++ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
++ * code.
++ */ ++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) ++{ ++ struct sched_param lp = { .sched_priority = 0 }; ++ struct task_struct *p; ++ int retval = -EINVAL; ++ ++ if (!param || pid < 0) ++ goto out_nounlock; ++ ++ rcu_read_lock(); ++ p = find_process_by_pid(pid); ++ retval = -ESRCH; ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ if (task_has_rt_policy(p)) ++ lp.sched_priority = p->rt_priority; ++ rcu_read_unlock(); ++ ++ /* ++ * This one might sleep, we cannot do it with a spinlock held ... ++ */ ++ retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0; ++ ++out_nounlock: ++ return retval; ++ ++out_unlock: ++ rcu_read_unlock(); ++ return retval; ++} ++ ++/* ++ * Copy the kernel size attribute structure (which might be larger ++ * than what user-space knows about) to user-space. ++ * ++ * Note that all cases are valid: user-space buffer can be larger or ++ * smaller than the kernel-space buffer. The usual case is that both ++ * have the same size. ++ */ ++static int ++sched_attr_copy_to_user(struct sched_attr __user *uattr, ++ struct sched_attr *kattr, ++ unsigned int usize) ++{ ++ unsigned int ksize = sizeof(*kattr); ++ ++ if (!access_ok(uattr, usize)) ++ return -EFAULT; ++ ++ /* ++ * sched_getattr() ABI forwards and backwards compatibility: ++ * ++ * If usize == ksize then we just copy everything to user-space and all is good. ++ * ++ * If usize < ksize then we only copy as much as user-space has space for, ++ * this keeps ABI compatibility as well. We skip the rest. ++ * ++ * If usize > ksize then user-space is using a newer version of the ABI, ++ * which part the kernel doesn't know about. Just ignore it - tooling can ++ * detect the kernel's knowledge of attributes from the attr->size value ++ * which is set to ksize in this case. ++ */ ++ kattr->size = min(usize, ksize); ++ ++ if (copy_to_user(uattr, kattr, kattr->size)) ++ return -EFAULT; ++ ++ return 0; ++} ++ ++/** ++ * sys_sched_getattr - similar to sched_getparam, but with sched_attr ++ * @pid: the pid in question. ++ * @uattr: structure containing the extended parameters. ++ * @usize: sizeof(attr) for fwd/bwd comp. ++ * @flags: for future extension. 
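++ * (Illustrative user-space call, assuming no libc wrapper is available,
++ * so it goes through syscall(2):
++ *
++ *	struct sched_attr a = { .size = sizeof(a) };
++ *	syscall(SYS_sched_getattr, pid, &a, sizeof(a), 0);
++ * )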
++ */ ++SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr, ++ unsigned int, usize, unsigned int, flags) ++{ ++ struct sched_attr kattr = { }; ++ struct task_struct *p; ++ int retval; ++ ++ if (!uattr || pid < 0 || usize > PAGE_SIZE || ++ usize < SCHED_ATTR_SIZE_VER0 || flags) ++ return -EINVAL; ++ ++ rcu_read_lock(); ++ p = find_process_by_pid(pid); ++ retval = -ESRCH; ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ kattr.sched_policy = p->policy; ++ if (p->sched_reset_on_fork) ++ kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; ++ if (task_has_rt_policy(p)) ++ kattr.sched_priority = p->rt_priority; ++ else ++ kattr.sched_nice = task_nice(p); ++ kattr.sched_flags &= SCHED_FLAG_ALL; ++ ++#ifdef CONFIG_UCLAMP_TASK ++ kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value; ++ kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value; ++#endif ++ ++ rcu_read_unlock(); ++ ++ return sched_attr_copy_to_user(uattr, &kattr, usize); ++ ++out_unlock: ++ rcu_read_unlock(); ++ return retval; ++} ++ ++static int ++__sched_setaffinity(struct task_struct *p, const struct cpumask *mask) ++{ ++ int retval; ++ cpumask_var_t cpus_allowed, new_mask; ++ ++ if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) ++ return -ENOMEM; ++ ++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { ++ retval = -ENOMEM; ++ goto out_free_cpus_allowed; ++ } ++ ++ cpuset_cpus_allowed(p, cpus_allowed); ++ cpumask_and(new_mask, mask, cpus_allowed); ++again: ++ retval = __set_cpus_allowed_ptr(p, new_mask, SCA_CHECK | SCA_USER); ++ if (retval) ++ goto out_free_new_mask; ++ ++ cpuset_cpus_allowed(p, cpus_allowed); ++ if (!cpumask_subset(new_mask, cpus_allowed)) { ++ /* ++ * We must have raced with a concurrent cpuset ++ * update. Just reset the cpus_allowed to the ++ * cpuset's cpus_allowed ++ */ ++ cpumask_copy(new_mask, cpus_allowed); ++ goto again; ++ } ++ ++out_free_new_mask: ++ free_cpumask_var(new_mask); ++out_free_cpus_allowed: ++ free_cpumask_var(cpus_allowed); ++ return retval; ++} ++ ++long sched_setaffinity(pid_t pid, const struct cpumask *in_mask) ++{ ++ struct task_struct *p; ++ int retval; ++ ++ rcu_read_lock(); ++ ++ p = find_process_by_pid(pid); ++ if (!p) { ++ rcu_read_unlock(); ++ return -ESRCH; ++ } ++ ++ /* Prevent p going away */ ++ get_task_struct(p); ++ rcu_read_unlock(); ++ ++ if (p->flags & PF_NO_SETAFFINITY) { ++ retval = -EINVAL; ++ goto out_put_task; ++ } ++ ++ if (!check_same_owner(p)) { ++ rcu_read_lock(); ++ if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) { ++ rcu_read_unlock(); ++ retval = -EPERM; ++ goto out_put_task; ++ } ++ rcu_read_unlock(); ++ } ++ ++ retval = security_task_setscheduler(p); ++ if (retval) ++ goto out_put_task; ++ ++ retval = __sched_setaffinity(p, in_mask); ++out_put_task: ++ put_task_struct(p); ++ return retval; ++} ++ ++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, ++ struct cpumask *new_mask) ++{ ++ if (len < cpumask_size()) ++ cpumask_clear(new_mask); ++ else if (len > cpumask_size()) ++ len = cpumask_size(); ++ ++ return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; ++} ++ ++/** ++ * sys_sched_setaffinity - set the CPU affinity of a process ++ * @pid: pid of the process ++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr ++ * @user_mask_ptr: user-space pointer to the new CPU mask ++ * ++ * Return: 0 on success. An error code otherwise. 
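++ * (Illustrative user-space counterpart via the glibc wrapper; a pid of 0
++ * means the calling thread:
++ *
++ *	cpu_set_t set;
++ *	CPU_ZERO(&set);
++ *	CPU_SET(1, &set);
++ *	sched_setaffinity(0, sizeof(set), &set);
++ * )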
++ */ ++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, ++ unsigned long __user *, user_mask_ptr) ++{ ++ cpumask_var_t new_mask; ++ int retval; ++ ++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) ++ return -ENOMEM; ++ ++ retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); ++ if (retval == 0) ++ retval = sched_setaffinity(pid, new_mask); ++ free_cpumask_var(new_mask); ++ return retval; ++} ++ ++long sched_getaffinity(pid_t pid, cpumask_t *mask) ++{ ++ struct task_struct *p; ++ raw_spinlock_t *lock; ++ unsigned long flags; ++ int retval; ++ ++ rcu_read_lock(); ++ ++ retval = -ESRCH; ++ p = find_process_by_pid(pid); ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ task_access_lock_irqsave(p, &lock, &flags); ++ cpumask_and(mask, &p->cpus_mask, cpu_active_mask); ++ task_access_unlock_irqrestore(p, lock, &flags); ++ ++out_unlock: ++ rcu_read_unlock(); ++ ++ return retval; ++} ++ ++/** ++ * sys_sched_getaffinity - get the CPU affinity of a process ++ * @pid: pid of the process ++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr ++ * @user_mask_ptr: user-space pointer to hold the current CPU mask ++ * ++ * Return: size of CPU mask copied to user_mask_ptr on success. An ++ * error code otherwise. ++ */ ++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, ++ unsigned long __user *, user_mask_ptr) ++{ ++ int ret; ++ cpumask_var_t mask; ++ ++ if ((len * BITS_PER_BYTE) < nr_cpu_ids) ++ return -EINVAL; ++ if (len & (sizeof(unsigned long)-1)) ++ return -EINVAL; ++ ++ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) ++ return -ENOMEM; ++ ++ ret = sched_getaffinity(pid, mask); ++ if (ret == 0) { ++ unsigned int retlen = min_t(size_t, len, cpumask_size()); ++ ++ if (copy_to_user(user_mask_ptr, mask, retlen)) ++ ret = -EFAULT; ++ else ++ ret = retlen; ++ } ++ free_cpumask_var(mask); ++ ++ return ret; ++} ++ ++static void do_sched_yield(void) ++{ ++ struct rq *rq; ++ struct rq_flags rf; ++ ++ if (!sched_yield_type) ++ return; ++ ++ rq = this_rq_lock_irq(&rf); ++ ++ schedstat_inc(rq->yld_count); ++ ++ if (1 == sched_yield_type) { ++ if (!rt_task(current)) ++ do_sched_yield_type_1(current, rq); ++ } else if (2 == sched_yield_type) { ++ if (rq->nr_running > 1) ++ rq->skip = current; ++ } ++ ++ preempt_disable(); ++ raw_spin_unlock_irq(&rq->lock); ++ sched_preempt_enable_no_resched(); ++ ++ schedule(); ++} ++ ++/** ++ * sys_sched_yield - yield the current processor to other threads. ++ * ++ * This function yields the current CPU to other tasks. If there are no ++ * other threads running on this CPU then this function will return. ++ * ++ * Return: 0. ++ */ ++SYSCALL_DEFINE0(sched_yield) ++{ ++ do_sched_yield(); ++ return 0; ++} ++ ++#if !defined(CONFIG_PREEMPTION) || defined(CONFIG_PREEMPT_DYNAMIC) ++int __sched __cond_resched(void) ++{ ++ if (should_resched(0)) { ++ preempt_schedule_common(); ++ return 1; ++ } ++ /* ++ * In preemptible kernels, ->rcu_read_lock_nesting tells the tick ++ * whether the current CPU is in an RCU read-side critical section, ++ * so the tick can report quiescent states even for CPUs looping ++ * in kernel context. In contrast, in non-preemptible kernels, ++ * RCU readers leave no in-memory hints, which means that CPU-bound ++ * processes executing in kernel context might never report an ++ * RCU quiescent state. Therefore, the following code causes ++ * cond_resched() to report a quiescent state, but only when RCU ++ * is in urgent need of one. 
++ */ ++#ifndef CONFIG_PREEMPT_RCU ++ rcu_all_qs(); ++#endif ++ return 0; ++} ++EXPORT_SYMBOL(__cond_resched); ++#endif ++ ++#ifdef CONFIG_PREEMPT_DYNAMIC ++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) ++#define cond_resched_dynamic_enabled __cond_resched ++#define cond_resched_dynamic_disabled ((void *)&__static_call_return0) ++DEFINE_STATIC_CALL_RET0(cond_resched, __cond_resched); ++EXPORT_STATIC_CALL_TRAMP(cond_resched); ++ ++#define might_resched_dynamic_enabled __cond_resched ++#define might_resched_dynamic_disabled ((void *)&__static_call_return0) ++DEFINE_STATIC_CALL_RET0(might_resched, __cond_resched); ++EXPORT_STATIC_CALL_TRAMP(might_resched); ++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) ++static DEFINE_STATIC_KEY_FALSE(sk_dynamic_cond_resched); ++int __sched dynamic_cond_resched(void) ++{ ++ if (!static_branch_unlikely(&sk_dynamic_cond_resched)) ++ return 0; ++ return __cond_resched(); ++} ++EXPORT_SYMBOL(dynamic_cond_resched); ++ ++static DEFINE_STATIC_KEY_FALSE(sk_dynamic_might_resched); ++int __sched dynamic_might_resched(void) ++{ ++ if (!static_branch_unlikely(&sk_dynamic_might_resched)) ++ return 0; ++ return __cond_resched(); ++} ++EXPORT_SYMBOL(dynamic_might_resched); ++#endif ++#endif ++ ++/* ++ * __cond_resched_lock() - if a reschedule is pending, drop the given lock, ++ * call schedule, and on return reacquire the lock. ++ * ++ * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level ++ * operations here to prevent schedule() from being called twice (once via ++ * spin_unlock(), once by hand). ++ */ ++int __cond_resched_lock(spinlock_t *lock) ++{ ++ int resched = should_resched(PREEMPT_LOCK_OFFSET); ++ int ret = 0; ++ ++ lockdep_assert_held(lock); ++ ++ if (spin_needbreak(lock) || resched) { ++ spin_unlock(lock); ++ if (!_cond_resched()) ++ cpu_relax(); ++ ret = 1; ++ spin_lock(lock); ++ } ++ return ret; ++} ++EXPORT_SYMBOL(__cond_resched_lock); ++ ++int __cond_resched_rwlock_read(rwlock_t *lock) ++{ ++ int resched = should_resched(PREEMPT_LOCK_OFFSET); ++ int ret = 0; ++ ++ lockdep_assert_held_read(lock); ++ ++ if (rwlock_needbreak(lock) || resched) { ++ read_unlock(lock); ++ if (!_cond_resched()) ++ cpu_relax(); ++ ret = 1; ++ read_lock(lock); ++ } ++ return ret; ++} ++EXPORT_SYMBOL(__cond_resched_rwlock_read); ++ ++int __cond_resched_rwlock_write(rwlock_t *lock) ++{ ++ int resched = should_resched(PREEMPT_LOCK_OFFSET); ++ int ret = 0; ++ ++ lockdep_assert_held_write(lock); ++ ++ if (rwlock_needbreak(lock) || resched) { ++ write_unlock(lock); ++ if (!_cond_resched()) ++ cpu_relax(); ++ ret = 1; ++ write_lock(lock); ++ } ++ return ret; ++} ++EXPORT_SYMBOL(__cond_resched_rwlock_write); ++ ++#ifdef CONFIG_PREEMPT_DYNAMIC ++ ++#ifdef CONFIG_GENERIC_ENTRY ++#include ++#endif ++ ++/* ++ * SC:cond_resched ++ * SC:might_resched ++ * SC:preempt_schedule ++ * SC:preempt_schedule_notrace ++ * SC:irqentry_exit_cond_resched ++ * ++ * ++ * NONE: ++ * cond_resched <- __cond_resched ++ * might_resched <- RET0 ++ * preempt_schedule <- NOP ++ * preempt_schedule_notrace <- NOP ++ * irqentry_exit_cond_resched <- NOP ++ * ++ * VOLUNTARY: ++ * cond_resched <- __cond_resched ++ * might_resched <- __cond_resched ++ * preempt_schedule <- NOP ++ * preempt_schedule_notrace <- NOP ++ * irqentry_exit_cond_resched <- NOP ++ * ++ * FULL: ++ * cond_resched <- RET0 ++ * might_resched <- RET0 ++ * preempt_schedule <- preempt_schedule ++ * preempt_schedule_notrace <- preempt_schedule_notrace ++ * irqentry_exit_cond_resched <- irqentry_exit_cond_resched ++ */ ++ ++enum { ++ 
preempt_dynamic_undefined = -1, ++ preempt_dynamic_none, ++ preempt_dynamic_voluntary, ++ preempt_dynamic_full, ++}; ++ ++int preempt_dynamic_mode = preempt_dynamic_undefined; ++ ++int sched_dynamic_mode(const char *str) ++{ ++ if (!strcmp(str, "none")) ++ return preempt_dynamic_none; ++ ++ if (!strcmp(str, "voluntary")) ++ return preempt_dynamic_voluntary; ++ ++ if (!strcmp(str, "full")) ++ return preempt_dynamic_full; ++ ++ return -EINVAL; ++} ++ ++#if defined(CONFIG_HAVE_PREEMPT_DYNAMIC_CALL) ++#define preempt_dynamic_enable(f) static_call_update(f, f##_dynamic_enabled) ++#define preempt_dynamic_disable(f) static_call_update(f, f##_dynamic_disabled) ++#elif defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY) ++#define preempt_dynamic_enable(f) static_key_enable(&sk_dynamic_##f.key) ++#define preempt_dynamic_disable(f) static_key_disable(&sk_dynamic_##f.key) ++#else ++#error "Unsupported PREEMPT_DYNAMIC mechanism" ++#endif ++ ++void sched_dynamic_update(int mode) ++{ ++ /* ++ * Avoid {NONE,VOLUNTARY} -> FULL transitions from ever ending up in ++ * the ZERO state, which is invalid. ++ */ ++ preempt_dynamic_enable(cond_resched); ++ preempt_dynamic_enable(might_resched); ++ preempt_dynamic_enable(preempt_schedule); ++ preempt_dynamic_enable(preempt_schedule_notrace); ++ preempt_dynamic_enable(irqentry_exit_cond_resched); ++ ++ switch (mode) { ++ case preempt_dynamic_none: ++ preempt_dynamic_enable(cond_resched); ++ preempt_dynamic_disable(might_resched); ++ preempt_dynamic_disable(preempt_schedule); ++ preempt_dynamic_disable(preempt_schedule_notrace); ++ preempt_dynamic_disable(irqentry_exit_cond_resched); ++ pr_info("Dynamic Preempt: none\n"); ++ break; ++ ++ case preempt_dynamic_voluntary: ++ preempt_dynamic_enable(cond_resched); ++ preempt_dynamic_enable(might_resched); ++ preempt_dynamic_disable(preempt_schedule); ++ preempt_dynamic_disable(preempt_schedule_notrace); ++ preempt_dynamic_disable(irqentry_exit_cond_resched); ++ pr_info("Dynamic Preempt: voluntary\n"); ++ break; ++ ++ case preempt_dynamic_full: ++ preempt_dynamic_disable(cond_resched); ++ preempt_dynamic_disable(might_resched); ++ preempt_dynamic_enable(preempt_schedule); ++ preempt_dynamic_enable(preempt_schedule_notrace); ++ preempt_dynamic_enable(irqentry_exit_cond_resched); ++ pr_info("Dynamic Preempt: full\n"); ++ break; ++ } ++ ++ preempt_dynamic_mode = mode; ++} ++ ++static int __init setup_preempt_mode(char *str) ++{ ++ int mode = sched_dynamic_mode(str); ++ if (mode < 0) { ++ pr_warn("Dynamic Preempt: unsupported mode: %s\n", str); ++ return 0; ++ } ++ ++ sched_dynamic_update(mode); ++ return 1; ++} ++__setup("preempt=", setup_preempt_mode); ++ ++static void __init preempt_dynamic_init(void) ++{ ++ if (preempt_dynamic_mode == preempt_dynamic_undefined) { ++ if (IS_ENABLED(CONFIG_PREEMPT_NONE)) { ++ sched_dynamic_update(preempt_dynamic_none); ++ } else if (IS_ENABLED(CONFIG_PREEMPT_VOLUNTARY)) { ++ sched_dynamic_update(preempt_dynamic_voluntary); ++ } else { ++ /* Default static call setting, nothing to do */ ++ WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT)); ++ preempt_dynamic_mode = preempt_dynamic_full; ++ pr_info("Dynamic Preempt: full\n"); ++ } ++ } ++} ++ ++#define PREEMPT_MODEL_ACCESSOR(mode) \ ++ bool preempt_model_##mode(void) \ ++ { \ ++ WARN_ON_ONCE(preempt_dynamic_mode == preempt_dynamic_undefined); \ ++ return preempt_dynamic_mode == preempt_dynamic_##mode; \ ++ } \ ++ EXPORT_SYMBOL_GPL(preempt_model_##mode) ++ ++PREEMPT_MODEL_ACCESSOR(none); ++PREEMPT_MODEL_ACCESSOR(voluntary); ++PREEMPT_MODEL_ACCESSOR(full); ++ 
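++/*
++ * Illustrative only (not part of the original patch): the accessors
++ * generated above let other code query the currently active preemption
++ * model, e.g.
++ *
++ *	if (!preempt_model_full())
++ *		cond_resched();
++ *
++ * which yields explicitly unless involuntary preemption is active.
++ */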
++#else /* !CONFIG_PREEMPT_DYNAMIC */ ++ ++static inline void preempt_dynamic_init(void) { } ++ ++#endif /* #ifdef CONFIG_PREEMPT_DYNAMIC */ ++ ++/** ++ * yield - yield the current processor to other threads. ++ * ++ * Do not ever use this function, there's a 99% chance you're doing it wrong. ++ * ++ * The scheduler is at all times free to pick the calling task as the most ++ * eligible task to run, if removing the yield() call from your code breaks ++ * it, it's already broken. ++ * ++ * Typical broken usage is: ++ * ++ * while (!event) ++ * yield(); ++ * ++ * where one assumes that yield() will let 'the other' process run that will ++ * make event true. If the current task is a SCHED_FIFO task that will never ++ * happen. Never use yield() as a progress guarantee!! ++ * ++ * If you want to use yield() to wait for something, use wait_event(). ++ * If you want to use yield() to be 'nice' for others, use cond_resched(). ++ * If you still want to use yield(), do not! ++ */ ++void __sched yield(void) ++{ ++ set_current_state(TASK_RUNNING); ++ do_sched_yield(); ++} ++EXPORT_SYMBOL(yield); ++ ++/** ++ * yield_to - yield the current processor to another thread in ++ * your thread group, or accelerate that thread toward the ++ * processor it's on. ++ * @p: target task ++ * @preempt: whether task preemption is allowed or not ++ * ++ * It's the caller's job to ensure that the target task struct ++ * can't go away on us before we can do any checks. ++ * ++ * In Alt schedule FW, yield_to is not supported. ++ * ++ * Return: ++ * true (>0) if we indeed boosted the target task. ++ * false (0) if we failed to boost the target. ++ * -ESRCH if there's no task to yield to. ++ */ ++int __sched yield_to(struct task_struct *p, bool preempt) ++{ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(yield_to); ++ ++int io_schedule_prepare(void) ++{ ++ int old_iowait = current->in_iowait; ++ ++ current->in_iowait = 1; ++ blk_flush_plug(current->plug, true); ++ return old_iowait; ++} ++ ++void io_schedule_finish(int token) ++{ ++ current->in_iowait = token; ++} ++ ++/* ++ * This task is about to go to sleep on IO. Increment rq->nr_iowait so ++ * that process accounting knows that this is a task in IO wait state. ++ * ++ * But don't do that if it is a deliberate, throttling IO wait (this task ++ * has set its backing_dev_info: the queue against which it should throttle) ++ */ ++ ++long __sched io_schedule_timeout(long timeout) ++{ ++ int token; ++ long ret; ++ ++ token = io_schedule_prepare(); ++ ret = schedule_timeout(timeout); ++ io_schedule_finish(token); ++ ++ return ret; ++} ++EXPORT_SYMBOL(io_schedule_timeout); ++ ++void __sched io_schedule(void) ++{ ++ int token; ++ ++ token = io_schedule_prepare(); ++ schedule(); ++ io_schedule_finish(token); ++} ++EXPORT_SYMBOL(io_schedule); ++ ++/** ++ * sys_sched_get_priority_max - return maximum RT priority. ++ * @policy: scheduling class. ++ * ++ * Return: On success, this syscall returns the maximum ++ * rt_priority that can be used by a given scheduling class. ++ * On failure, a negative error code is returned. ++ */ ++SYSCALL_DEFINE1(sched_get_priority_max, int, policy) ++{ ++ int ret = -EINVAL; ++ ++ switch (policy) { ++ case SCHED_FIFO: ++ case SCHED_RR: ++ ret = MAX_RT_PRIO - 1; ++ break; ++ case SCHED_NORMAL: ++ case SCHED_BATCH: ++ case SCHED_IDLE: ++ ret = 0; ++ break; ++ } ++ return ret; ++} ++ ++/** ++ * sys_sched_get_priority_min - return minimum RT priority. ++ * @policy: scheduling class. 
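++ *
++ * Together with sys_sched_get_priority_max() above this reports the
++ * static priority range visible to user space: 1..MAX_RT_PRIO-1 for
++ * SCHED_FIFO/SCHED_RR, and 0 for the normal policies.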
++ * ++ * Return: On success, this syscall returns the minimum ++ * rt_priority that can be used by a given scheduling class. ++ * On failure, a negative error code is returned. ++ */ ++SYSCALL_DEFINE1(sched_get_priority_min, int, policy) ++{ ++ int ret = -EINVAL; ++ ++ switch (policy) { ++ case SCHED_FIFO: ++ case SCHED_RR: ++ ret = 1; ++ break; ++ case SCHED_NORMAL: ++ case SCHED_BATCH: ++ case SCHED_IDLE: ++ ret = 0; ++ break; ++ } ++ return ret; ++} ++ ++static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) ++{ ++ struct task_struct *p; ++ int retval; ++ ++ alt_sched_debug(); ++ ++ if (pid < 0) ++ return -EINVAL; ++ ++ retval = -ESRCH; ++ rcu_read_lock(); ++ p = find_process_by_pid(pid); ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ rcu_read_unlock(); ++ ++ *t = ns_to_timespec64(sched_timeslice_ns); ++ return 0; ++ ++out_unlock: ++ rcu_read_unlock(); ++ return retval; ++} ++ ++/** ++ * sys_sched_rr_get_interval - return the default timeslice of a process. ++ * @pid: pid of the process. ++ * @interval: userspace pointer to the timeslice value. ++ * ++ * ++ * Return: On success, 0 and the timeslice is in @interval. Otherwise, ++ * an error code. ++ */ ++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, ++ struct __kernel_timespec __user *, interval) ++{ ++ struct timespec64 t; ++ int retval = sched_rr_get_interval(pid, &t); ++ ++ if (retval == 0) ++ retval = put_timespec64(&t, interval); ++ ++ return retval; ++} ++ ++#ifdef CONFIG_COMPAT_32BIT_TIME ++SYSCALL_DEFINE2(sched_rr_get_interval_time32, pid_t, pid, ++ struct old_timespec32 __user *, interval) ++{ ++ struct timespec64 t; ++ int retval = sched_rr_get_interval(pid, &t); ++ ++ if (retval == 0) ++ retval = put_old_timespec32(&t, interval); ++ return retval; ++} ++#endif ++ ++void sched_show_task(struct task_struct *p) ++{ ++ unsigned long free = 0; ++ int ppid; ++ ++ if (!try_get_task_stack(p)) ++ return; ++ ++ pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p)); ++ ++ if (task_is_running(p)) ++ pr_cont(" running task "); ++#ifdef CONFIG_DEBUG_STACK_USAGE ++ free = stack_not_used(p); ++#endif ++ ppid = 0; ++ rcu_read_lock(); ++ if (pid_alive(p)) ++ ppid = task_pid_nr(rcu_dereference(p->real_parent)); ++ rcu_read_unlock(); ++ pr_cont(" stack:%5lu pid:%5d ppid:%6d flags:0x%08lx\n", ++ free, task_pid_nr(p), ppid, ++ read_task_thread_flags(p)); ++ ++ print_worker_info(KERN_INFO, p); ++ print_stop_info(KERN_INFO, p); ++ show_stack(p, NULL, KERN_INFO); ++ put_task_stack(p); ++} ++EXPORT_SYMBOL_GPL(sched_show_task); ++ ++static inline bool ++state_filter_match(unsigned long state_filter, struct task_struct *p) ++{ ++ unsigned int state = READ_ONCE(p->__state); ++ ++ /* no filter, everything matches */ ++ if (!state_filter) ++ return true; ++ ++ /* filter, but doesn't match */ ++ if (!(state & state_filter)) ++ return false; ++ ++ /* ++ * When looking for TASK_UNINTERRUPTIBLE skip TASK_IDLE (allows ++ * TASK_KILLABLE). ++ */ ++ if (state_filter == TASK_UNINTERRUPTIBLE && state == TASK_IDLE) ++ return false; ++ ++ return true; ++} ++ ++ ++void show_state_filter(unsigned int state_filter) ++{ ++ struct task_struct *g, *p; ++ ++ rcu_read_lock(); ++ for_each_process_thread(g, p) { ++ /* ++ * reset the NMI-timeout, listing all files on a slow ++ * console might take a lot of time: ++ * Also, reset softlockup watchdogs on all CPUs, because ++ * another CPU might be blocked waiting for us to process ++ * an IPI. 
++ */ ++ touch_nmi_watchdog(); ++ touch_all_softlockup_watchdogs(); ++ if (state_filter_match(state_filter, p)) ++ sched_show_task(p); ++ } ++ ++#ifdef CONFIG_SCHED_DEBUG ++ /* TODO: Alt schedule FW should support this ++ if (!state_filter) ++ sysrq_sched_debug_show(); ++ */ ++#endif ++ rcu_read_unlock(); ++ /* ++ * Only show locks if all tasks are dumped: ++ */ ++ if (!state_filter) ++ debug_show_all_locks(); ++} ++ ++void dump_cpu_task(int cpu) ++{ ++ pr_info("Task dump for CPU %d:\n", cpu); ++ sched_show_task(cpu_curr(cpu)); ++} ++ ++/** ++ * init_idle - set up an idle thread for a given CPU ++ * @idle: task in question ++ * @cpu: CPU the idle task belongs to ++ * ++ * NOTE: this function does not set the idle thread's NEED_RESCHED ++ * flag, to make booting more robust. ++ */ ++void __init init_idle(struct task_struct *idle, int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ ++ __sched_fork(0, idle); ++ ++ raw_spin_lock_irqsave(&idle->pi_lock, flags); ++ raw_spin_lock(&rq->lock); ++ update_rq_clock(rq); ++ ++ idle->last_ran = rq->clock_task; ++ idle->__state = TASK_RUNNING; ++ /* ++ * PF_KTHREAD should already be set at this point; regardless, make it ++ * look like a proper per-CPU kthread. ++ */ ++ idle->flags |= PF_IDLE | PF_KTHREAD | PF_NO_SETAFFINITY; ++ kthread_set_per_cpu(idle, cpu); ++ ++ sched_queue_init_idle(&rq->queue, idle); ++ ++#ifdef CONFIG_SMP ++ /* ++ * It's possible that init_idle() gets called multiple times on a task, ++ * in that case do_set_cpus_allowed() will not do the right thing. ++ * ++ * And since this is boot we can forgo the serialisation. ++ */ ++ set_cpus_allowed_common(idle, cpumask_of(cpu)); ++#endif ++ ++ /* Silence PROVE_RCU */ ++ rcu_read_lock(); ++ __set_task_cpu(idle, cpu); ++ rcu_read_unlock(); ++ ++ rq->idle = idle; ++ rcu_assign_pointer(rq->curr, idle); ++ idle->on_cpu = 1; ++ ++ raw_spin_unlock(&rq->lock); ++ raw_spin_unlock_irqrestore(&idle->pi_lock, flags); ++ ++ /* Set the preempt count _outside_ the spinlocks! */ ++ init_idle_preempt_count(idle, cpu); ++ ++ ftrace_graph_init_idle_task(idle, cpu); ++ vtime_init_idle(idle, cpu); ++#ifdef CONFIG_SMP ++ sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); ++#endif ++} ++ ++#ifdef CONFIG_SMP ++ ++int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur, ++ const struct cpumask __maybe_unused *trial) ++{ ++ return 1; ++} ++ ++int task_can_attach(struct task_struct *p, ++ const struct cpumask *cs_cpus_allowed) ++{ ++ int ret = 0; ++ ++ /* ++ * Kthreads which disallow setaffinity shouldn't be moved ++ * to a new cpuset; we don't want to change their CPU ++ * affinity and isolating such threads by their set of ++ * allowed nodes is unnecessary. Thus, cpusets are not ++ * applicable for such threads. This prevents checking for ++ * success of set_cpus_allowed_ptr() on all attached tasks ++ * before cpus_mask may be changed. ++ */ ++ if (p->flags & PF_NO_SETAFFINITY) ++ ret = -EINVAL; ++ ++ return ret; ++} ++ ++bool sched_smp_initialized __read_mostly; ++ ++#ifdef CONFIG_HOTPLUG_CPU ++/* ++ * Ensures that the idle task is using init_mm right before its CPU goes ++ * offline. 
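++ * (The mm is not dropped here: finish_cpu(), running on the BP, cleans
++ * up the active_mm state, as the comment inside the function notes.)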
++ */ ++void idle_task_exit(void) ++{ ++ struct mm_struct *mm = current->active_mm; ++ ++ BUG_ON(current != this_rq()->idle); ++ ++ if (mm != &init_mm) { ++ switch_mm(mm, &init_mm, current); ++ finish_arch_post_lock_switch(); ++ } ++ ++ /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ ++} ++ ++static int __balance_push_cpu_stop(void *arg) ++{ ++ struct task_struct *p = arg; ++ struct rq *rq = this_rq(); ++ struct rq_flags rf; ++ int cpu; ++ ++ raw_spin_lock_irq(&p->pi_lock); ++ rq_lock(rq, &rf); ++ ++ update_rq_clock(rq); ++ ++ if (task_rq(p) == rq && task_on_rq_queued(p)) { ++ cpu = select_fallback_rq(rq->cpu, p); ++ rq = __migrate_task(rq, p, cpu); ++ } ++ ++ rq_unlock(rq, &rf); ++ raw_spin_unlock_irq(&p->pi_lock); ++ ++ put_task_struct(p); ++ ++ return 0; ++} ++ ++static DEFINE_PER_CPU(struct cpu_stop_work, push_work); ++ ++/* ++ * This is enabled below SCHED_AP_ACTIVE; when !cpu_active(), but only ++ * effective when the hotplug motion is down. ++ */ ++static void balance_push(struct rq *rq) ++{ ++ struct task_struct *push_task = rq->curr; ++ ++ lockdep_assert_held(&rq->lock); ++ ++ /* ++ * Ensure the thing is persistent until balance_push_set(.on = false); ++ */ ++ rq->balance_callback = &balance_push_callback; ++ ++ /* ++ * Only active while going offline and when invoked on the outgoing ++ * CPU. ++ */ ++ if (!cpu_dying(rq->cpu) || rq != this_rq()) ++ return; ++ ++ /* ++ * Both the cpu-hotplug and stop task are in this case and are ++ * required to complete the hotplug process. ++ */ ++ if (kthread_is_per_cpu(push_task) || ++ is_migration_disabled(push_task)) { ++ ++ /* ++ * If this is the idle task on the outgoing CPU try to wake ++ * up the hotplug control thread which might wait for the ++ * last task to vanish. The rcuwait_active() check is ++ * accurate here because the waiter is pinned on this CPU ++ * and can't obviously be running in parallel. ++ * ++ * On RT kernels this also has to check whether there are ++ * pinned and scheduled out tasks on the runqueue. They ++ * need to leave the migrate disabled section first. ++ */ ++ if (!rq->nr_running && !rq_has_pinned_tasks(rq) && ++ rcuwait_active(&rq->hotplug_wait)) { ++ raw_spin_unlock(&rq->lock); ++ rcuwait_wake_up(&rq->hotplug_wait); ++ raw_spin_lock(&rq->lock); ++ } ++ return; ++ } ++ ++ get_task_struct(push_task); ++ /* ++ * Temporarily drop rq->lock such that we can wake-up the stop task. ++ * Both preemption and IRQs are still disabled. ++ */ ++ raw_spin_unlock(&rq->lock); ++ stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task, ++ this_cpu_ptr(&push_work)); ++ /* ++ * At this point need_resched() is true and we'll take the loop in ++ * schedule(). The next pick is obviously going to be the stop task ++ * which kthread_is_per_cpu() and will push this task away. ++ */ ++ raw_spin_lock(&rq->lock); ++} ++ ++static void balance_push_set(int cpu, bool on) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ struct rq_flags rf; ++ ++ rq_lock_irqsave(rq, &rf); ++ if (on) { ++ WARN_ON_ONCE(rq->balance_callback); ++ rq->balance_callback = &balance_push_callback; ++ } else if (rq->balance_callback == &balance_push_callback) { ++ rq->balance_callback = NULL; ++ } ++ rq_unlock_irqrestore(rq, &rf); ++} ++ ++/* ++ * Invoked from a CPUs hotplug control thread after the CPU has been marked ++ * inactive. All tasks which are not per CPU kernel threads are either ++ * pushed off this CPU now via balance_push() or placed on a different CPU ++ * during wakeup. Wait until the CPU is quiescent. 
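++ * "Quiescent" is spelled out by the rcuwait condition below: only the
++ * idle task remains (nr_running == 1) and no migrate-disabled task is
++ * pinned to this runqueue.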
++ */ ++static void balance_hotplug_wait(void) ++{ ++ struct rq *rq = this_rq(); ++ ++ rcuwait_wait_event(&rq->hotplug_wait, ++ rq->nr_running == 1 && !rq_has_pinned_tasks(rq), ++ TASK_UNINTERRUPTIBLE); ++} ++ ++#else ++ ++static void balance_push(struct rq *rq) ++{ ++} ++ ++static void balance_push_set(int cpu, bool on) ++{ ++} ++ ++static inline void balance_hotplug_wait(void) ++{ ++} ++#endif /* CONFIG_HOTPLUG_CPU */ ++ ++static void set_rq_offline(struct rq *rq) ++{ ++ if (rq->online) ++ rq->online = false; ++} ++ ++static void set_rq_online(struct rq *rq) ++{ ++ if (!rq->online) ++ rq->online = true; ++} ++ ++/* ++ * used to mark begin/end of suspend/resume: ++ */ ++static int num_cpus_frozen; ++ ++/* ++ * Update cpusets according to cpu_active mask. If cpusets are ++ * disabled, cpuset_update_active_cpus() becomes a simple wrapper ++ * around partition_sched_domains(). ++ * ++ * If we come here as part of a suspend/resume, don't touch cpusets because we ++ * want to restore it back to its original state upon resume anyway. ++ */ ++static void cpuset_cpu_active(void) ++{ ++ if (cpuhp_tasks_frozen) { ++ /* ++ * num_cpus_frozen tracks how many CPUs are involved in suspend ++ * resume sequence. As long as this is not the last online ++ * operation in the resume sequence, just build a single sched ++ * domain, ignoring cpusets. ++ */ ++ partition_sched_domains(1, NULL, NULL); ++ if (--num_cpus_frozen) ++ return; ++ /* ++ * This is the last CPU online operation. So fall through and ++ * restore the original sched domains by considering the ++ * cpuset configurations. ++ */ ++ cpuset_force_rebuild(); ++ } ++ ++ cpuset_update_active_cpus(); ++} ++ ++static int cpuset_cpu_inactive(unsigned int cpu) ++{ ++ if (!cpuhp_tasks_frozen) { ++ cpuset_update_active_cpus(); ++ } else { ++ num_cpus_frozen++; ++ partition_sched_domains(1, NULL, NULL); ++ } ++ return 0; ++} ++ ++int sched_cpu_activate(unsigned int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ ++ /* ++ * Clear the balance_push callback and prepare to schedule ++ * regular tasks. ++ */ ++ balance_push_set(cpu, false); ++ ++#ifdef CONFIG_SCHED_SMT ++ /* ++ * When going up, increment the number of cores with SMT present. ++ */ ++ if (cpumask_weight(cpu_smt_mask(cpu)) == 2) ++ static_branch_inc_cpuslocked(&sched_smt_present); ++#endif ++ set_cpu_active(cpu, true); ++ ++ if (sched_smp_initialized) ++ cpuset_cpu_active(); ++ ++ /* ++ * Put the rq online, if not already. This happens: ++ * ++ * 1) In the early boot process, because we build the real domains ++ * after all cpus have been brought up. ++ * ++ * 2) At runtime, if cpuset_cpu_active() fails to rebuild the ++ * domains. ++ */ ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ set_rq_online(rq); ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++ ++ return 0; ++} ++ ++int sched_cpu_deactivate(unsigned int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ int ret; ++ ++ set_cpu_active(cpu, false); ++ ++ /* ++ * From this point forward, this CPU will refuse to run any task that ++ * is not: migrate_disable() or KTHREAD_IS_PER_CPU, and will actively ++ * push those tasks away until this gets cleared, see ++ * sched_cpu_dying(). ++ */ ++ balance_push_set(cpu, true); ++ ++ /* ++ * We've cleared cpu_active_mask, wait for all preempt-disabled and RCU ++ * users of this state to go away such that all new such users will ++ * observe it. ++ * ++ * Specifically, we rely on ttwu to no longer target this CPU, see ++ * ttwu_queue_cond() and is_cpu_allowed(). 
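++ * (The synchronize_rcu() below guarantees that every such user has
++ * observed cpu_active_mask with this CPU cleared.)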
++ *
++ * Do the synchronization before parking the smpboot threads to take
++ * care of the RCU boost case.
++ */
++	synchronize_rcu();
++
++	raw_spin_lock_irqsave(&rq->lock, flags);
++	update_rq_clock(rq);
++	set_rq_offline(rq);
++	raw_spin_unlock_irqrestore(&rq->lock, flags);
++
++#ifdef CONFIG_SCHED_SMT
++	/*
++	 * When going down, decrement the number of cores with SMT present.
++	 */
++	if (cpumask_weight(cpu_smt_mask(cpu)) == 2) {
++		static_branch_dec_cpuslocked(&sched_smt_present);
++		if (!static_branch_likely(&sched_smt_present))
++			cpumask_clear(&sched_sg_idle_mask);
++	}
++#endif
++
++	if (!sched_smp_initialized)
++		return 0;
++
++	ret = cpuset_cpu_inactive(cpu);
++	if (ret) {
++		balance_push_set(cpu, false);
++		set_cpu_active(cpu, true);
++		return ret;
++	}
++
++	return 0;
++}
++
++static void sched_rq_cpu_starting(unsigned int cpu)
++{
++	struct rq *rq = cpu_rq(cpu);
++
++	rq->calc_load_update = calc_load_update;
++}
++
++int sched_cpu_starting(unsigned int cpu)
++{
++	sched_rq_cpu_starting(cpu);
++	sched_tick_start(cpu);
++	return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++/*
++ * Invoked immediately before the stopper thread is invoked to bring the
++ * CPU down completely. At this point all per CPU kthreads except the
++ * hotplug thread (current) and the stopper thread (inactive) have been
++ * either parked or have been unbound from the outgoing CPU. Ensure that
++ * any of those which might be on the way out are gone.
++ *
++ * If after this point a bound task is being woken on this CPU then the
++ * responsible hotplug callback has failed to do its job.
++ * sched_cpu_dying() will catch it with the appropriate fireworks.
++ */
++int sched_cpu_wait_empty(unsigned int cpu)
++{
++	balance_hotplug_wait();
++	return 0;
++}
++
++/*
++ * Since this CPU is going 'away' for a while, fold any nr_active delta we
++ * might have. Called from the CPU stopper task after ensuring that the
++ * stopper is the last running task on the CPU, so nr_active count is
++ * stable. We need to take the teardown thread which is calling this into
++ * account, so we hand in adjust = 1 to the load calculation.
++ *
++ * Also see the comment "Global load-average calculations".
++ */ ++static void calc_load_migrate(struct rq *rq) ++{ ++ long delta = calc_load_fold_active(rq, 1); ++ ++ if (delta) ++ atomic_long_add(delta, &calc_load_tasks); ++} ++ ++static void dump_rq_tasks(struct rq *rq, const char *loglvl) ++{ ++ struct task_struct *g, *p; ++ int cpu = cpu_of(rq); ++ ++ lockdep_assert_held(&rq->lock); ++ ++ printk("%sCPU%d enqueued tasks (%u total):\n", loglvl, cpu, rq->nr_running); ++ for_each_process_thread(g, p) { ++ if (task_cpu(p) != cpu) ++ continue; ++ ++ if (!task_on_rq_queued(p)) ++ continue; ++ ++ printk("%s\tpid: %d, name: %s\n", loglvl, p->pid, p->comm); ++ } ++} ++ ++int sched_cpu_dying(unsigned int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ ++ /* Handle pending wakeups and then migrate everything off */ ++ sched_tick_stop(cpu); ++ ++ raw_spin_lock_irqsave(&rq->lock, flags); ++ if (rq->nr_running != 1 || rq_has_pinned_tasks(rq)) { ++ WARN(true, "Dying CPU not properly vacated!"); ++ dump_rq_tasks(rq, KERN_WARNING); ++ } ++ raw_spin_unlock_irqrestore(&rq->lock, flags); ++ ++ calc_load_migrate(rq); ++ hrtick_clear(rq); ++ return 0; ++} ++#endif ++ ++#ifdef CONFIG_SMP ++static void sched_init_topology_cpumask_early(void) ++{ ++ int cpu; ++ cpumask_t *tmp; ++ ++ for_each_possible_cpu(cpu) { ++ /* init topo masks */ ++ tmp = per_cpu(sched_cpu_topo_masks, cpu); ++ ++ cpumask_copy(tmp, cpumask_of(cpu)); ++ tmp++; ++ cpumask_copy(tmp, cpu_possible_mask); ++ per_cpu(sched_cpu_llc_mask, cpu) = tmp; ++ per_cpu(sched_cpu_topo_end_mask, cpu) = ++tmp; ++ /*per_cpu(sd_llc_id, cpu) = cpu;*/ ++ } ++} ++ ++#define TOPOLOGY_CPUMASK(name, mask, last)\ ++ if (cpumask_and(topo, topo, mask)) { \ ++ cpumask_copy(topo, mask); \ ++ printk(KERN_INFO "sched: cpu#%02d topo: 0x%08lx - "#name, \ ++ cpu, (topo++)->bits[0]); \ ++ } \ ++ if (!last) \ ++ cpumask_complement(topo, mask) ++ ++static void sched_init_topology_cpumask(void) ++{ ++ int cpu; ++ cpumask_t *topo; ++ ++ for_each_online_cpu(cpu) { ++ /* take chance to reset time slice for idle tasks */ ++ cpu_rq(cpu)->idle->time_slice = sched_timeslice_ns; ++ ++ topo = per_cpu(sched_cpu_topo_masks, cpu) + 1; ++ ++ cpumask_complement(topo, cpumask_of(cpu)); ++#ifdef CONFIG_SCHED_SMT ++ TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false); ++#endif ++ per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu)); ++ per_cpu(sched_cpu_llc_mask, cpu) = topo; ++ TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false); ++ ++ TOPOLOGY_CPUMASK(core, topology_core_cpumask(cpu), false); ++ ++ TOPOLOGY_CPUMASK(others, cpu_online_mask, true); ++ ++ per_cpu(sched_cpu_topo_end_mask, cpu) = topo; ++ printk(KERN_INFO "sched: cpu#%02d llc_id = %d, llc_mask idx = %d\n", ++ cpu, per_cpu(sd_llc_id, cpu), ++ (int) (per_cpu(sched_cpu_llc_mask, cpu) - ++ per_cpu(sched_cpu_topo_masks, cpu))); ++ } ++} ++#endif ++ ++void __init sched_init_smp(void) ++{ ++ /* Move init over to a non-isolated CPU */ ++ if (set_cpus_allowed_ptr(current, housekeeping_cpumask(HK_TYPE_DOMAIN)) < 0) ++ BUG(); ++ current->flags &= ~PF_NO_SETAFFINITY; ++ ++ sched_init_topology_cpumask(); ++ ++ sched_smp_initialized = true; ++} ++#else ++void __init sched_init_smp(void) ++{ ++ cpu_rq(0)->idle->time_slice = sched_timeslice_ns; ++} ++#endif /* CONFIG_SMP */ ++ ++int in_sched_functions(unsigned long addr) ++{ ++ return in_lock_functions(addr) || ++ (addr >= (unsigned long)__sched_text_start ++ && addr < (unsigned long)__sched_text_end); ++} ++ ++#ifdef CONFIG_CGROUP_SCHED ++/* task group related information */ ++struct task_group { ++ struct 
cgroup_subsys_state css; ++ ++ struct rcu_head rcu; ++ struct list_head list; ++ ++ struct task_group *parent; ++ struct list_head siblings; ++ struct list_head children; ++#ifdef CONFIG_FAIR_GROUP_SCHED ++ unsigned long shares; ++#endif ++}; ++ ++/* ++ * Default task group. ++ * Every task in system belongs to this group at bootup. ++ */ ++struct task_group root_task_group; ++LIST_HEAD(task_groups); ++ ++/* Cacheline aligned slab cache for task_group */ ++static struct kmem_cache *task_group_cache __read_mostly; ++#endif /* CONFIG_CGROUP_SCHED */ ++ ++void __init sched_init(void) ++{ ++ int i; ++ struct rq *rq; ++ ++ printk(KERN_INFO ALT_SCHED_VERSION_MSG); ++ ++ wait_bit_init(); ++ ++#ifdef CONFIG_SMP ++ for (i = 0; i < SCHED_QUEUE_BITS; i++) ++ cpumask_copy(sched_rq_watermark + i, cpu_present_mask); ++#endif ++ ++#ifdef CONFIG_CGROUP_SCHED ++ task_group_cache = KMEM_CACHE(task_group, 0); ++ ++ list_add(&root_task_group.list, &task_groups); ++ INIT_LIST_HEAD(&root_task_group.children); ++ INIT_LIST_HEAD(&root_task_group.siblings); ++#endif /* CONFIG_CGROUP_SCHED */ ++ for_each_possible_cpu(i) { ++ rq = cpu_rq(i); ++ ++ sched_queue_init(&rq->queue); ++ rq->watermark = IDLE_TASK_SCHED_PRIO; ++ rq->skip = NULL; ++ ++ raw_spin_lock_init(&rq->lock); ++ rq->nr_running = rq->nr_uninterruptible = 0; ++ rq->calc_load_active = 0; ++ rq->calc_load_update = jiffies + LOAD_FREQ; ++#ifdef CONFIG_SMP ++ rq->online = false; ++ rq->cpu = i; ++ ++#ifdef CONFIG_SCHED_SMT ++ rq->active_balance = 0; ++#endif ++ ++#ifdef CONFIG_NO_HZ_COMMON ++ INIT_CSD(&rq->nohz_csd, nohz_csd_func, rq); ++#endif ++ rq->balance_callback = &balance_push_callback; ++#ifdef CONFIG_HOTPLUG_CPU ++ rcuwait_init(&rq->hotplug_wait); ++#endif ++#endif /* CONFIG_SMP */ ++ rq->nr_switches = 0; ++ ++ hrtick_rq_init(rq); ++ atomic_set(&rq->nr_iowait, 0); ++ } ++#ifdef CONFIG_SMP ++ /* Set rq->online for cpu 0 */ ++ cpu_rq(0)->online = true; ++#endif ++ /* ++ * The boot idle thread does lazy MMU switching as well: ++ */ ++ mmgrab(&init_mm); ++ enter_lazy_tlb(&init_mm, current); ++ ++ /* ++ * The idle task doesn't need the kthread struct to function, but it ++ * is dressed up as a per-CPU kthread and thus needs to play the part ++ * if we want to avoid special-casing it in code that deals with per-CPU ++ * kthreads. ++ */ ++ WARN_ON(!set_kthread_struct(current)); ++ ++ /* ++ * Make us the idle thread. Technically, schedule() should not be ++ * called from this thread, however somewhere below it might be, ++ * but because we are the idle thread, we just pick up running again ++ * when this runqueue becomes "idle". ++ */ ++ init_idle(current, smp_processor_id()); ++ ++ calc_load_update = jiffies + LOAD_FREQ; ++ ++#ifdef CONFIG_SMP ++ idle_thread_set_boot_cpu(); ++ balance_push_set(smp_processor_id(), false); ++ ++ sched_init_topology_cpumask_early(); ++#endif /* SMP */ ++ ++ psi_init(); ++ ++ preempt_dynamic_init(); ++} ++ ++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP ++ ++void __might_sleep(const char *file, int line) ++{ ++ unsigned int state = get_current_state(); ++ /* ++ * Blocking primitives will set (and therefore destroy) current->state, ++ * since we will exit with TASK_RUNNING make sure we enter with it, ++ * otherwise we will destroy state. 
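++ * (That is what the WARN_ONCE below catches: a task that set a sleep
++ * state and then called a blocking primitive that would clobber it.)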
++ */ ++ WARN_ONCE(state != TASK_RUNNING && current->task_state_change, ++ "do not call blocking ops when !TASK_RUNNING; " ++ "state=%x set at [<%p>] %pS\n", state, ++ (void *)current->task_state_change, ++ (void *)current->task_state_change); ++ ++ __might_resched(file, line, 0); ++} ++EXPORT_SYMBOL(__might_sleep); ++ ++static void print_preempt_disable_ip(int preempt_offset, unsigned long ip) ++{ ++ if (!IS_ENABLED(CONFIG_DEBUG_PREEMPT)) ++ return; ++ ++ if (preempt_count() == preempt_offset) ++ return; ++ ++ pr_err("Preemption disabled at:"); ++ print_ip_sym(KERN_ERR, ip); ++} ++ ++static inline bool resched_offsets_ok(unsigned int offsets) ++{ ++ unsigned int nested = preempt_count(); ++ ++ nested += rcu_preempt_depth() << MIGHT_RESCHED_RCU_SHIFT; ++ ++ return nested == offsets; ++} ++ ++void __might_resched(const char *file, int line, unsigned int offsets) ++{ ++ /* Ratelimiting timestamp: */ ++ static unsigned long prev_jiffy; ++ ++ unsigned long preempt_disable_ip; ++ ++ /* WARN_ON_ONCE() by default, no rate limit required: */ ++ rcu_sleep_check(); ++ ++ if ((resched_offsets_ok(offsets) && !irqs_disabled() && ++ !is_idle_task(current) && !current->non_block_count) || ++ system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING || ++ oops_in_progress) ++ return; ++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) ++ return; ++ prev_jiffy = jiffies; ++ ++ /* Save this before calling printk(), since that will clobber it: */ ++ preempt_disable_ip = get_preempt_disable_ip(current); ++ ++ pr_err("BUG: sleeping function called from invalid context at %s:%d\n", ++ file, line); ++ pr_err("in_atomic(): %d, irqs_disabled(): %d, non_block: %d, pid: %d, name: %s\n", ++ in_atomic(), irqs_disabled(), current->non_block_count, ++ current->pid, current->comm); ++ pr_err("preempt_count: %x, expected: %x\n", preempt_count(), ++ offsets & MIGHT_RESCHED_PREEMPT_MASK); ++ ++ if (IS_ENABLED(CONFIG_PREEMPT_RCU)) { ++ pr_err("RCU nest depth: %d, expected: %u\n", ++ rcu_preempt_depth(), offsets >> MIGHT_RESCHED_RCU_SHIFT); ++ } ++ ++ if (task_stack_end_corrupted(current)) ++ pr_emerg("Thread overran stack, or stack corrupted\n"); ++ ++ debug_show_held_locks(current); ++ if (irqs_disabled()) ++ print_irqtrace_events(current); ++ ++ print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK, ++ preempt_disable_ip); ++ ++ dump_stack(); ++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); ++} ++EXPORT_SYMBOL(__might_resched); ++ ++void __cant_sleep(const char *file, int line, int preempt_offset) ++{ ++ static unsigned long prev_jiffy; ++ ++ if (irqs_disabled()) ++ return; ++ ++ if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) ++ return; ++ ++ if (preempt_count() > preempt_offset) ++ return; ++ ++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) ++ return; ++ prev_jiffy = jiffies; ++ ++ printk(KERN_ERR "BUG: assuming atomic context at %s:%d\n", file, line); ++ printk(KERN_ERR "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", ++ in_atomic(), irqs_disabled(), ++ current->pid, current->comm); ++ ++ debug_show_held_locks(current); ++ dump_stack(); ++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); ++} ++EXPORT_SYMBOL_GPL(__cant_sleep); ++ ++#ifdef CONFIG_SMP ++void __cant_migrate(const char *file, int line) ++{ ++ static unsigned long prev_jiffy; ++ ++ if (irqs_disabled()) ++ return; ++ ++ if (is_migration_disabled(current)) ++ return; ++ ++ if (!IS_ENABLED(CONFIG_PREEMPT_COUNT)) ++ return; ++ ++ if (preempt_count() > 0) ++ return; ++ ++ if (current->migration_flags & MDF_FORCE_ENABLED) ++ return; ++ ++ if 
(time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++		return;
++	prev_jiffy = jiffies;
++
++	pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
++	pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
++	       in_atomic(), irqs_disabled(), is_migration_disabled(current),
++	       current->pid, current->comm);
++
++	debug_show_held_locks(current);
++	dump_stack();
++	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL_GPL(__cant_migrate);
++#endif
++#endif
++
++#ifdef CONFIG_MAGIC_SYSRQ
++void normalize_rt_tasks(void)
++{
++	struct task_struct *g, *p;
++	struct sched_attr attr = {
++		.sched_policy = SCHED_NORMAL,
++	};
++
++	read_lock(&tasklist_lock);
++	for_each_process_thread(g, p) {
++		/*
++		 * Only normalize user tasks:
++		 */
++		if (p->flags & PF_KTHREAD)
++			continue;
++
++		schedstat_set(p->stats.wait_start, 0);
++		schedstat_set(p->stats.sleep_start, 0);
++		schedstat_set(p->stats.block_start, 0);
++
++		if (!rt_task(p)) {
++			/*
++			 * Renice negative nice level userspace
++			 * tasks back to 0:
++			 */
++			if (task_nice(p) < 0)
++				set_user_nice(p, 0);
++			continue;
++		}
++
++		__sched_setscheduler(p, &attr, false, false);
++	}
++	read_unlock(&tasklist_lock);
++}
++#endif /* CONFIG_MAGIC_SYSRQ */
++
++#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB)
++/*
++ * These functions are only useful for the IA64 MCA handling, or kdb.
++ *
++ * They can only be called when the whole system has been
++ * stopped - every CPU needs to be quiescent, and no scheduling
++ * activity can take place. Using them for anything else would
++ * be a serious bug, and as a result, they aren't even visible
++ * under any other configuration.
++ */
++
++/**
++ * curr_task - return the current task for a given CPU.
++ * @cpu: the processor in question.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ *
++ * Return: The current task for @cpu.
++ */
++struct task_struct *curr_task(int cpu)
++{
++	return cpu_curr(cpu);
++}
++
++#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */
++
++#ifdef CONFIG_IA64
++/**
++ * ia64_set_curr_task - set the current task for a given CPU.
++ * @cpu: the processor in question.
++ * @p: the task pointer to set.
++ *
++ * Description: This function must only be used when non-maskable interrupts
++ * are serviced on a separate stack. It allows the architecture to switch the
++ * notion of the current task on a CPU in a non-blocking manner. This function
++ * must be called with all CPUs synchronised, and interrupts disabled, and
++ * the caller must save the original value of the current task (see
++ * curr_task() above) and restore that value before reenabling interrupts and
++ * re-starting the system.
++ *
++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
++ */
++void ia64_set_curr_task(int cpu, struct task_struct *p)
++{
++	cpu_curr(cpu) = p;
++}
++
++#endif
++
++#ifdef CONFIG_CGROUP_SCHED
++static void sched_free_group(struct task_group *tg)
++{
++	kmem_cache_free(task_group_cache, tg);
++}
++
++static void sched_free_group_rcu(struct rcu_head *rhp)
++{
++	sched_free_group(container_of(rhp, struct task_group, rcu));
++}
++
++static void sched_unregister_group(struct task_group *tg)
++{
++	/*
++	 * We have to wait for yet another RCU grace period to expire, as
++	 * print_cfs_stats() might run concurrently.
++ */ ++ call_rcu(&tg->rcu, sched_free_group_rcu); ++} ++ ++/* allocate runqueue etc for a new task group */ ++struct task_group *sched_create_group(struct task_group *parent) ++{ ++ struct task_group *tg; ++ ++ tg = kmem_cache_alloc(task_group_cache, GFP_KERNEL | __GFP_ZERO); ++ if (!tg) ++ return ERR_PTR(-ENOMEM); ++ ++ return tg; ++} ++ ++void sched_online_group(struct task_group *tg, struct task_group *parent) ++{ ++} ++ ++/* rcu callback to free various structures associated with a task group */ ++static void sched_unregister_group_rcu(struct rcu_head *rhp) ++{ ++ /* Now it should be safe to free those cfs_rqs: */ ++ sched_unregister_group(container_of(rhp, struct task_group, rcu)); ++} ++ ++void sched_destroy_group(struct task_group *tg) ++{ ++ /* Wait for possible concurrent references to cfs_rqs complete: */ ++ call_rcu(&tg->rcu, sched_unregister_group_rcu); ++} ++ ++void sched_release_group(struct task_group *tg) ++{ ++} ++ ++static inline struct task_group *css_tg(struct cgroup_subsys_state *css) ++{ ++ return css ? container_of(css, struct task_group, css) : NULL; ++} ++ ++static struct cgroup_subsys_state * ++cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) ++{ ++ struct task_group *parent = css_tg(parent_css); ++ struct task_group *tg; ++ ++ if (!parent) { ++ /* This is early initialization for the top cgroup */ ++ return &root_task_group.css; ++ } ++ ++ tg = sched_create_group(parent); ++ if (IS_ERR(tg)) ++ return ERR_PTR(-ENOMEM); ++ return &tg->css; ++} ++ ++/* Expose task group only after completing cgroup initialization */ ++static int cpu_cgroup_css_online(struct cgroup_subsys_state *css) ++{ ++ struct task_group *tg = css_tg(css); ++ struct task_group *parent = css_tg(css->parent); ++ ++ if (parent) ++ sched_online_group(tg, parent); ++ return 0; ++} ++ ++static void cpu_cgroup_css_released(struct cgroup_subsys_state *css) ++{ ++ struct task_group *tg = css_tg(css); ++ ++ sched_release_group(tg); ++} ++ ++static void cpu_cgroup_css_free(struct cgroup_subsys_state *css) ++{ ++ struct task_group *tg = css_tg(css); ++ ++ /* ++ * Relies on the RCU grace period between css_released() and this. ++ */ ++ sched_unregister_group(tg); ++} ++ ++static void cpu_cgroup_fork(struct task_struct *task) ++{ ++} ++ ++static int cpu_cgroup_can_attach(struct cgroup_taskset *tset) ++{ ++ return 0; ++} ++ ++static void cpu_cgroup_attach(struct cgroup_taskset *tset) ++{ ++} ++ ++#ifdef CONFIG_FAIR_GROUP_SCHED ++static DEFINE_MUTEX(shares_mutex); ++ ++int sched_group_set_shares(struct task_group *tg, unsigned long shares) ++{ ++ /* ++ * We can't change the weight of the root cgroup. 
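++ * (Hence the -EINVAL for root_task_group below; every other group just
++ * has its shares clamped to [MIN_SHARES, MAX_SHARES] and stored.)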
++ */
++	if (&root_task_group == tg)
++		return -EINVAL;
++
++	shares = clamp(shares, scale_load(MIN_SHARES), scale_load(MAX_SHARES));
++
++	mutex_lock(&shares_mutex);
++	if (tg->shares == shares)
++		goto done;
++
++	tg->shares = shares;
++done:
++	mutex_unlock(&shares_mutex);
++	return 0;
++}
++
++static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
++				struct cftype *cftype, u64 shareval)
++{
++	if (shareval > scale_load_down(ULONG_MAX))
++		shareval = MAX_SHARES;
++	return sched_group_set_shares(css_tg(css), scale_load(shareval));
++}
++
++static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
++			       struct cftype *cft)
++{
++	struct task_group *tg = css_tg(css);
++
++	return (u64) scale_load_down(tg->shares);
++}
++#endif
++
++static struct cftype cpu_legacy_files[] = {
++#ifdef CONFIG_FAIR_GROUP_SCHED
++	{
++		.name = "shares",
++		.read_u64 = cpu_shares_read_u64,
++		.write_u64 = cpu_shares_write_u64,
++	},
++#endif
++	{ }	/* Terminate */
++};
++
++
++static struct cftype cpu_files[] = {
++	{ }	/* terminate */
++};
++
++static int cpu_extra_stat_show(struct seq_file *sf,
++			       struct cgroup_subsys_state *css)
++{
++	return 0;
++}
++
++struct cgroup_subsys cpu_cgrp_subsys = {
++	.css_alloc	= cpu_cgroup_css_alloc,
++	.css_online	= cpu_cgroup_css_online,
++	.css_released	= cpu_cgroup_css_released,
++	.css_free	= cpu_cgroup_css_free,
++	.css_extra_stat_show = cpu_extra_stat_show,
++	.fork		= cpu_cgroup_fork,
++	.can_attach	= cpu_cgroup_can_attach,
++	.attach		= cpu_cgroup_attach,
++	.legacy_cftypes	= cpu_legacy_files,
++	.dfl_cftypes	= cpu_files,
++	.early_init	= true,
++	.threaded	= true,
++};
++#endif /* CONFIG_CGROUP_SCHED */
++
++#undef CREATE_TRACE_POINTS
+diff --git a/kernel/sched/alt_debug.c b/kernel/sched/alt_debug.c
+new file mode 100644
+index 000000000000..1212a031700e
+--- /dev/null
++++ b/kernel/sched/alt_debug.c
+@@ -0,0 +1,31 @@
++/*
++ * kernel/sched/alt_debug.c
++ *
++ * Print the alt scheduler debugging details
++ *
++ * Author: Alfred Chen
++ * Date : 2020
++ */
++#include "sched.h"
++
++/*
++ * This allows printing both to /proc/sched_debug and
++ * to the console
++ */
++#define SEQ_printf(m, x...) 
\ ++ do { \ ++ if (m) \ ++ seq_printf(m, x); \ ++ else \ ++ pr_cont(x); \ ++ } while (0) ++ ++void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, ++ struct seq_file *m) ++{ ++ SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr_ns(p, ns), ++ get_nr_threads(p)); ++} ++ ++void proc_sched_set_task(struct task_struct *p) ++{} +diff --git a/kernel/sched/alt_sched.h b/kernel/sched/alt_sched.h +new file mode 100644 +index 000000000000..a181bf9ce57d +--- /dev/null ++++ b/kernel/sched/alt_sched.h +@@ -0,0 +1,645 @@ ++#ifndef ALT_SCHED_H ++#define ALT_SCHED_H ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include "../workqueue_internal.h" ++ ++#include "cpupri.h" ++ ++#ifdef CONFIG_SCHED_BMQ ++/* bits: ++ * RT(0-99), (Low prio adj range, nice width, high prio adj range) / 2, cpu idle task */ ++#define SCHED_BITS (MAX_RT_PRIO + NICE_WIDTH / 2 + MAX_PRIORITY_ADJ + 1) ++#endif ++ ++#ifdef CONFIG_SCHED_PDS ++/* bits: RT(0-99), reserved(100-127), NORMAL_PRIO_NUM, cpu idle task */ ++#define SCHED_BITS (MIN_NORMAL_PRIO + NORMAL_PRIO_NUM + 1) ++#endif /* CONFIG_SCHED_PDS */ ++ ++#define IDLE_TASK_SCHED_PRIO (SCHED_BITS - 1) ++ ++#ifdef CONFIG_SCHED_DEBUG ++# define SCHED_WARN_ON(x) WARN_ONCE(x, #x) ++extern void resched_latency_warn(int cpu, u64 latency); ++#else ++# define SCHED_WARN_ON(x) ({ (void)(x), 0; }) ++static inline void resched_latency_warn(int cpu, u64 latency) {} ++#endif ++ ++/* ++ * Increase resolution of nice-level calculations for 64-bit architectures. ++ * The extra resolution improves shares distribution and load balancing of ++ * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup ++ * hierarchies, especially on larger systems. This is not a user-visible change ++ * and does not change the user-interface for setting shares/weights. ++ * ++ * We increase resolution only if we have enough bits to allow this increased ++ * resolution (i.e. 64-bit). The costs for increasing resolution when 32-bit ++ * are pretty high and the returns do not justify the increased costs. ++ * ++ * Really only required when CONFIG_FAIR_GROUP_SCHED=y is also set, but to ++ * increase coverage and consistency always enable it on 64-bit platforms. ++ */ ++#ifdef CONFIG_64BIT ++# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT) ++# define scale_load(w) ((w) << SCHED_FIXEDPOINT_SHIFT) ++# define scale_load_down(w) \ ++({ \ ++ unsigned long __w = (w); \ ++ if (__w) \ ++ __w = max(2UL, __w >> SCHED_FIXEDPOINT_SHIFT); \ ++ __w; \ ++}) ++#else ++# define NICE_0_LOAD_SHIFT (SCHED_FIXEDPOINT_SHIFT) ++# define scale_load(w) (w) ++# define scale_load_down(w) (w) ++#endif ++ ++#ifdef CONFIG_FAIR_GROUP_SCHED ++#define ROOT_TASK_GROUP_LOAD NICE_0_LOAD ++ ++/* ++ * A weight of 0 or 1 can cause arithmetics problems. ++ * A weight of a cfs_rq is the sum of weights of which entities ++ * are queued on this cfs_rq, so a weight of a entity should not be ++ * too large, so as the shares value of a task group. ++ * (The default weight is 1024 - so there's no practical ++ * limitation from this.) 
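++ *
++ * MIN_SHARES and MAX_SHARES below bound the value accepted by
++ * sched_group_set_shares(): 2..2^18 in user-visible units, applied
++ * after scale_load() scaling.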
++ */
++#define MIN_SHARES		(1UL << 1)
++#define MAX_SHARES		(1UL << 18)
++#endif
++
++/* task_struct::on_rq states: */
++#define TASK_ON_RQ_QUEUED	1
++#define TASK_ON_RQ_MIGRATING	2
++
++static inline int task_on_rq_queued(struct task_struct *p)
++{
++	return p->on_rq == TASK_ON_RQ_QUEUED;
++}
++
++static inline int task_on_rq_migrating(struct task_struct *p)
++{
++	return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
++}
++
++/*
++ * wake flags
++ */
++#define WF_SYNC		0x01	/* waker goes to sleep after wakeup */
++#define WF_FORK		0x02	/* child wakeup after fork */
++#define WF_MIGRATED	0x04	/* internal use, task got migrated */
++#define WF_ON_CPU	0x08	/* Wakee is on_rq */
++
++#define SCHED_QUEUE_BITS	(SCHED_BITS - 1)
++
++struct sched_queue {
++	DECLARE_BITMAP(bitmap, SCHED_QUEUE_BITS);
++	struct list_head heads[SCHED_BITS];
++};
++
++/*
++ * This is the main, per-CPU runqueue data structure.
++ * This data should only be modified by the local cpu.
++ */
++struct rq {
++	/* runqueue lock: */
++	raw_spinlock_t lock;
++
++	struct task_struct __rcu *curr;
++	struct task_struct *idle, *stop, *skip;
++	struct mm_struct *prev_mm;
++
++	struct sched_queue queue;
++#ifdef CONFIG_SCHED_PDS
++	u64 time_edge;
++#endif
++	unsigned long watermark;
++
++	/* switch count */
++	u64 nr_switches;
++
++	atomic_t nr_iowait;
++
++#ifdef CONFIG_SCHED_DEBUG
++	u64 last_seen_need_resched_ns;
++	int ticks_without_resched;
++#endif
++
++#ifdef CONFIG_MEMBARRIER
++	int membarrier_state;
++#endif
++
++#ifdef CONFIG_SMP
++	int cpu;		/* cpu of this runqueue */
++	bool online;
++
++	unsigned int ttwu_pending;
++	unsigned char nohz_idle_balance;
++	unsigned char idle_balance;
++
++#ifdef CONFIG_HAVE_SCHED_AVG_IRQ
++	struct sched_avg avg_irq;
++#endif
++
++#ifdef CONFIG_SCHED_SMT
++	int active_balance;
++	struct cpu_stop_work active_balance_work;
++#endif
++	struct callback_head *balance_callback;
++#ifdef CONFIG_HOTPLUG_CPU
++	struct rcuwait hotplug_wait;
++#endif
++	unsigned int nr_pinned;
++
++#endif /* CONFIG_SMP */
++#ifdef CONFIG_IRQ_TIME_ACCOUNTING
++	u64 prev_irq_time;
++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
++#ifdef CONFIG_PARAVIRT
++	u64 prev_steal_time;
++#endif /* CONFIG_PARAVIRT */
++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
++	u64 prev_steal_time_rq;
++#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */
++
++	/* For general cpu load util */
++	s32 load_history;
++	u64 load_block;
++	u64 load_stamp;
++
++	/* calc_load related fields */
++	unsigned long calc_load_update;
++	long calc_load_active;
++
++	u64 clock, last_tick;
++	u64 last_ts_switch;
++	u64 clock_task;
++
++	unsigned int nr_running;
++	unsigned long nr_uninterruptible;
++
++#ifdef CONFIG_SCHED_HRTICK
++#ifdef CONFIG_SMP
++	call_single_data_t hrtick_csd;
++#endif
++	struct hrtimer hrtick_timer;
++	ktime_t hrtick_time;
++#endif
++
++#ifdef CONFIG_SCHEDSTATS
++
++	/* latency stats */
++	struct sched_info rq_sched_info;
++	unsigned long long rq_cpu_time;
++	/* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
++
++	/* sys_sched_yield() stats */
++	unsigned int yld_count;
++
++	/* schedule() stats */
++	unsigned int sched_switch;
++	unsigned int sched_count;
++	unsigned int sched_goidle;
++
++	/* try_to_wake_up() stats */
++	unsigned int ttwu_count;
++	unsigned int ttwu_local;
++#endif /* CONFIG_SCHEDSTATS */
++
++#ifdef CONFIG_CPU_IDLE
++	/* Must be inspected within an RCU lock section */
++	struct cpuidle_state *idle_state;
++#endif
++
++#ifdef CONFIG_NO_HZ_COMMON
++#ifdef CONFIG_SMP
++	call_single_data_t nohz_csd;
++#endif
++	atomic_t nohz_flags;
++#endif /* CONFIG_NO_HZ_COMMON */
++};
++
++extern unsigned long rq_load_util(struct rq *rq, unsigned long max);
++
++extern unsigned long calc_load_update;
++extern atomic_long_t calc_load_tasks;
++
++extern void calc_global_load_tick(struct rq *this_rq);
++extern long calc_load_fold_active(struct rq *this_rq, long adjust);
++
++DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
++#define cpu_rq(cpu)	(&per_cpu(runqueues, (cpu)))
++#define this_rq()	this_cpu_ptr(&runqueues)
++#define task_rq(p)	cpu_rq(task_cpu(p))
++#define cpu_curr(cpu)	(cpu_rq(cpu)->curr)
++#define raw_rq()	raw_cpu_ptr(&runqueues)
++
++#ifdef CONFIG_SMP
++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
++void register_sched_domain_sysctl(void);
++void unregister_sched_domain_sysctl(void);
++#else
++static inline void register_sched_domain_sysctl(void)
++{
++}
++static inline void unregister_sched_domain_sysctl(void)
++{
++}
++#endif
++
++extern bool sched_smp_initialized;
++
++enum {
++	ITSELF_LEVEL_SPACE_HOLDER,
++#ifdef CONFIG_SCHED_SMT
++	SMT_LEVEL_SPACE_HOLDER,
++#endif
++	COREGROUP_LEVEL_SPACE_HOLDER,
++	CORE_LEVEL_SPACE_HOLDER,
++	OTHER_LEVEL_SPACE_HOLDER,
++	NR_CPU_AFFINITY_LEVELS
++};
++
++DECLARE_PER_CPU(cpumask_t [NR_CPU_AFFINITY_LEVELS], sched_cpu_topo_masks);
++DECLARE_PER_CPU(cpumask_t *, sched_cpu_llc_mask);
++
++static inline int
++__best_mask_cpu(const cpumask_t *cpumask, const cpumask_t *mask)
++{
++	int cpu;
++
++	while ((cpu = cpumask_any_and(cpumask, mask)) >= nr_cpu_ids)
++		mask++;
++
++	return cpu;
++}
++
++static inline int best_mask_cpu(int cpu, const cpumask_t *mask)
++{
++	return __best_mask_cpu(mask, per_cpu(sched_cpu_topo_masks, cpu));
++}
++
++extern void flush_smp_call_function_queue(void);
++
++#else  /* !CONFIG_SMP */
++static inline void flush_smp_call_function_queue(void) { }
++#endif
++
++#ifndef arch_scale_freq_tick
++static __always_inline
++void arch_scale_freq_tick(void)
++{
++}
++#endif
++
++#ifndef arch_scale_freq_capacity
++static __always_inline
++unsigned long arch_scale_freq_capacity(int cpu)
++{
++	return SCHED_CAPACITY_SCALE;
++}
++#endif
++
++static inline u64 __rq_clock_broken(struct rq *rq)
++{
++	return READ_ONCE(rq->clock);
++}
++
++static inline u64 rq_clock(struct rq *rq)
++{
++	/*
++	 * Relax lockdep_assert_held() checking as in VRQ: callers of
++	 * sched_info_xxxx() may not hold rq->lock
++	 * lockdep_assert_held(&rq->lock);
++	 */
++	return rq->clock;
++}
++
++static inline u64 rq_clock_task(struct rq *rq)
++{
++	/*
++	 * Relax lockdep_assert_held() checking as in VRQ: callers of
++	 * sched_info_xxxx() may not hold rq->lock
++	 * lockdep_assert_held(&rq->lock);
++	 */
++	return rq->clock_task;
++}
++
++/*
++ * {de,en}queue flags:
++ *
++ * DEQUEUE_SLEEP  - task is no longer runnable
++ * ENQUEUE_WAKEUP - task just became runnable
++ *
++ */
++
++#define DEQUEUE_SLEEP		0x01
++
++#define ENQUEUE_WAKEUP		0x01
++
++
++/*
++ * Below are scheduler APIs used by other kernel code.
++ * They use a dummy rq_flags.
++ * TODO: BMQ will 
need to support these APIs for compatibility with mainline ++ * scheduler code. ++ */ ++struct rq_flags { ++ unsigned long flags; ++}; ++ ++struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf) ++ __acquires(rq->lock); ++ ++struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf) ++ __acquires(p->pi_lock) ++ __acquires(rq->lock); ++ ++static inline void __task_rq_unlock(struct rq *rq, struct rq_flags *rf) ++ __releases(rq->lock) ++{ ++ raw_spin_unlock(&rq->lock); ++} ++ ++static inline void ++task_rq_unlock(struct rq *rq, struct task_struct *p, struct rq_flags *rf) ++ __releases(rq->lock) ++ __releases(p->pi_lock) ++{ ++ raw_spin_unlock(&rq->lock); ++ raw_spin_unlock_irqrestore(&p->pi_lock, rf->flags); ++} ++ ++static inline void ++rq_lock(struct rq *rq, struct rq_flags *rf) ++ __acquires(rq->lock) ++{ ++ raw_spin_lock(&rq->lock); ++} ++ ++static inline void ++rq_unlock_irq(struct rq *rq, struct rq_flags *rf) ++ __releases(rq->lock) ++{ ++ raw_spin_unlock_irq(&rq->lock); ++} ++ ++static inline void ++rq_unlock(struct rq *rq, struct rq_flags *rf) ++ __releases(rq->lock) ++{ ++ raw_spin_unlock(&rq->lock); ++} ++ ++static inline struct rq * ++this_rq_lock_irq(struct rq_flags *rf) ++ __acquires(rq->lock) ++{ ++ struct rq *rq; ++ ++ local_irq_disable(); ++ rq = this_rq(); ++ raw_spin_lock(&rq->lock); ++ ++ return rq; ++} ++ ++static inline raw_spinlock_t *__rq_lockp(struct rq *rq) ++{ ++ return &rq->lock; ++} ++ ++static inline raw_spinlock_t *rq_lockp(struct rq *rq) ++{ ++ return __rq_lockp(rq); ++} ++ ++static inline void lockdep_assert_rq_held(struct rq *rq) ++{ ++ lockdep_assert_held(__rq_lockp(rq)); ++} ++ ++extern void raw_spin_rq_lock_nested(struct rq *rq, int subclass); ++extern void raw_spin_rq_unlock(struct rq *rq); ++ ++static inline void raw_spin_rq_lock(struct rq *rq) ++{ ++ raw_spin_rq_lock_nested(rq, 0); ++} ++ ++static inline void raw_spin_rq_lock_irq(struct rq *rq) ++{ ++ local_irq_disable(); ++ raw_spin_rq_lock(rq); ++} ++ ++static inline void raw_spin_rq_unlock_irq(struct rq *rq) ++{ ++ raw_spin_rq_unlock(rq); ++ local_irq_enable(); ++} ++ ++static inline int task_current(struct rq *rq, struct task_struct *p) ++{ ++ return rq->curr == p; ++} ++ ++static inline bool task_running(struct task_struct *p) ++{ ++ return p->on_cpu; ++} ++ ++extern int task_running_nice(struct task_struct *p); ++ ++extern struct static_key_false sched_schedstats; ++ ++#ifdef CONFIG_CPU_IDLE ++static inline void idle_set_state(struct rq *rq, ++ struct cpuidle_state *idle_state) ++{ ++ rq->idle_state = idle_state; ++} ++ ++static inline struct cpuidle_state *idle_get_state(struct rq *rq) ++{ ++ WARN_ON(!rcu_read_lock_held()); ++ return rq->idle_state; ++} ++#else ++static inline void idle_set_state(struct rq *rq, ++ struct cpuidle_state *idle_state) ++{ ++} ++ ++static inline struct cpuidle_state *idle_get_state(struct rq *rq) ++{ ++ return NULL; ++} ++#endif ++ ++static inline int cpu_of(const struct rq *rq) ++{ ++#ifdef CONFIG_SMP ++ return rq->cpu; ++#else ++ return 0; ++#endif ++} ++ ++#include "stats.h" ++ ++#ifdef CONFIG_NO_HZ_COMMON ++#define NOHZ_BALANCE_KICK_BIT 0 ++#define NOHZ_STATS_KICK_BIT 1 ++ ++#define NOHZ_BALANCE_KICK BIT(NOHZ_BALANCE_KICK_BIT) ++#define NOHZ_STATS_KICK BIT(NOHZ_STATS_KICK_BIT) ++ ++#define NOHZ_KICK_MASK (NOHZ_BALANCE_KICK | NOHZ_STATS_KICK) ++ ++#define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) ++ ++/* TODO: needed? 
++extern void nohz_balance_exit_idle(struct rq *rq); ++#else ++static inline void nohz_balance_exit_idle(struct rq *rq) { } ++*/ ++#endif ++ ++#ifdef CONFIG_IRQ_TIME_ACCOUNTING ++struct irqtime { ++ u64 total; ++ u64 tick_delta; ++ u64 irq_start_time; ++ struct u64_stats_sync sync; ++}; ++ ++DECLARE_PER_CPU(struct irqtime, cpu_irqtime); ++ ++/* ++ * Returns the irqtime minus the softirq time computed by ksoftirqd. ++ * Otherwise ksoftirqd's sum_exec_runtime is substracted its own runtime ++ * and never move forward. ++ */ ++static inline u64 irq_time_read(int cpu) ++{ ++ struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu); ++ unsigned int seq; ++ u64 total; ++ ++ do { ++ seq = __u64_stats_fetch_begin(&irqtime->sync); ++ total = irqtime->total; ++ } while (__u64_stats_fetch_retry(&irqtime->sync, seq)); ++ ++ return total; ++} ++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ ++ ++#ifdef CONFIG_CPU_FREQ ++DECLARE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data); ++#endif /* CONFIG_CPU_FREQ */ ++ ++#ifdef CONFIG_NO_HZ_FULL ++extern int __init sched_tick_offload_init(void); ++#else ++static inline int sched_tick_offload_init(void) { return 0; } ++#endif ++ ++#ifdef arch_scale_freq_capacity ++#ifndef arch_scale_freq_invariant ++#define arch_scale_freq_invariant() (true) ++#endif ++#else /* arch_scale_freq_capacity */ ++#define arch_scale_freq_invariant() (false) ++#endif ++ ++extern void schedule_idle(void); ++ ++#define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT) ++ ++/* ++ * !! For sched_setattr_nocheck() (kernel) only !! ++ * ++ * This is actually gross. :( ++ * ++ * It is used to make schedutil kworker(s) higher priority than SCHED_DEADLINE ++ * tasks, but still be able to sleep. We need this on platforms that cannot ++ * atomically change clock frequency. Remove once fast switching will be ++ * available on such platforms. ++ * ++ * SUGOV stands for SchedUtil GOVernor. ++ */ ++#define SCHED_FLAG_SUGOV 0x10000000 ++ ++#ifdef CONFIG_MEMBARRIER ++/* ++ * The scheduler provides memory barriers required by membarrier between: ++ * - prior user-space memory accesses and store to rq->membarrier_state, ++ * - store to rq->membarrier_state and following user-space memory accesses. ++ * In the same way it provides those guarantees around store to rq->curr. 
++ */ ++static inline void membarrier_switch_mm(struct rq *rq, ++ struct mm_struct *prev_mm, ++ struct mm_struct *next_mm) ++{ ++ int membarrier_state; ++ ++ if (prev_mm == next_mm) ++ return; ++ ++ membarrier_state = atomic_read(&next_mm->membarrier_state); ++ if (READ_ONCE(rq->membarrier_state) == membarrier_state) ++ return; ++ ++ WRITE_ONCE(rq->membarrier_state, membarrier_state); ++} ++#else ++static inline void membarrier_switch_mm(struct rq *rq, ++ struct mm_struct *prev_mm, ++ struct mm_struct *next_mm) ++{ ++} ++#endif ++ ++#ifdef CONFIG_NUMA ++extern int sched_numa_find_closest(const struct cpumask *cpus, int cpu); ++#else ++static inline int sched_numa_find_closest(const struct cpumask *cpus, int cpu) ++{ ++ return nr_cpu_ids; ++} ++#endif ++ ++extern void swake_up_all_locked(struct swait_queue_head *q); ++extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); ++ ++#ifdef CONFIG_PREEMPT_DYNAMIC ++extern int preempt_dynamic_mode; ++extern int sched_dynamic_mode(const char *str); ++extern void sched_dynamic_update(int mode); ++#endif ++ ++static inline void nohz_run_idle_balance(int cpu) { } ++ ++static inline ++unsigned long uclamp_rq_util_with(struct rq *rq, unsigned long util, ++ struct task_struct *p) ++{ ++ return util; ++} ++ ++static inline bool uclamp_rq_is_capped(struct rq *rq) { return false; } ++ ++#endif /* ALT_SCHED_H */ +diff --git a/kernel/sched/bmq.h b/kernel/sched/bmq.h +new file mode 100644 +index 000000000000..66b77291b9d0 +--- /dev/null ++++ b/kernel/sched/bmq.h +@@ -0,0 +1,110 @@ ++#define ALT_SCHED_VERSION_MSG "sched/bmq: BMQ CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n" ++ ++/* ++ * BMQ only routines ++ */ ++#define rq_switch_time(rq) ((rq)->clock - (rq)->last_ts_switch) ++#define boost_threshold(p) (sched_timeslice_ns >>\ ++ (15 - MAX_PRIORITY_ADJ - (p)->boost_prio)) ++ ++static inline void boost_task(struct task_struct *p) ++{ ++ int limit; ++ ++ switch (p->policy) { ++ case SCHED_NORMAL: ++ limit = -MAX_PRIORITY_ADJ; ++ break; ++ case SCHED_BATCH: ++ case SCHED_IDLE: ++ limit = 0; ++ break; ++ default: ++ return; ++ } ++ ++ if (p->boost_prio > limit) ++ p->boost_prio--; ++} ++ ++static inline void deboost_task(struct task_struct *p) ++{ ++ if (p->boost_prio < MAX_PRIORITY_ADJ) ++ p->boost_prio++; ++} ++ ++/* ++ * Common interfaces ++ */ ++static inline void sched_timeslice_imp(const int timeslice_ms) {} ++ ++static inline int ++task_sched_prio_normal(const struct task_struct *p, const struct rq *rq) ++{ ++ return p->prio + p->boost_prio - MAX_RT_PRIO; ++} ++ ++static inline int task_sched_prio(const struct task_struct *p) ++{ ++ return (p->prio < MAX_RT_PRIO)? 
p->prio : MAX_RT_PRIO / 2 + (p->prio + p->boost_prio) / 2; ++} ++ ++static inline int ++task_sched_prio_idx(const struct task_struct *p, const struct rq *rq) ++{ ++ return task_sched_prio(p); ++} ++ ++static inline int sched_prio2idx(int prio, struct rq *rq) ++{ ++ return prio; ++} ++ ++static inline int sched_idx2prio(int idx, struct rq *rq) ++{ ++ return idx; ++} ++ ++static inline void time_slice_expired(struct task_struct *p, struct rq *rq) ++{ ++ p->time_slice = sched_timeslice_ns; ++ ++ if (SCHED_FIFO != p->policy && task_on_rq_queued(p)) { ++ if (SCHED_RR != p->policy) ++ deboost_task(p); ++ requeue_task(p, rq, task_sched_prio_idx(p, rq)); ++ } ++} ++ ++static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq) {} ++ ++inline int task_running_nice(struct task_struct *p) ++{ ++ return (p->prio + p->boost_prio > DEFAULT_PRIO + MAX_PRIORITY_ADJ); ++} ++ ++static void sched_task_fork(struct task_struct *p, struct rq *rq) ++{ ++ p->boost_prio = MAX_PRIORITY_ADJ; ++} ++ ++static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq) ++{ ++ p->boost_prio = MAX_PRIORITY_ADJ; ++} ++ ++#ifdef CONFIG_SMP ++static inline void sched_task_ttwu(struct task_struct *p) ++{ ++ if(this_rq()->clock_task - p->last_ran > sched_timeslice_ns) ++ boost_task(p); ++} ++#endif ++ ++static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq) ++{ ++ if (rq_switch_time(rq) < boost_threshold(p)) ++ boost_task(p); ++} ++ ++static inline void update_rq_time_edge(struct rq *rq) {} +diff --git a/kernel/sched/build_policy.c b/kernel/sched/build_policy.c +index d9dc9ab3773f..71a25540d65e 100644 +--- a/kernel/sched/build_policy.c ++++ b/kernel/sched/build_policy.c +@@ -42,13 +42,19 @@ + + #include "idle.c" + ++#ifndef CONFIG_SCHED_ALT + #include "rt.c" ++#endif + + #ifdef CONFIG_SMP ++#ifndef CONFIG_SCHED_ALT + # include "cpudeadline.c" ++#endif + # include "pelt.c" + #endif + + #include "cputime.c" +-#include "deadline.c" + ++#ifndef CONFIG_SCHED_ALT ++#include "deadline.c" ++#endif +diff --git a/kernel/sched/build_utility.c b/kernel/sched/build_utility.c +index 99bdd96f454f..23f80a86d2d7 100644 +--- a/kernel/sched/build_utility.c ++++ b/kernel/sched/build_utility.c +@@ -85,7 +85,9 @@ + + #ifdef CONFIG_SMP + # include "cpupri.c" ++#ifndef CONFIG_SCHED_ALT + # include "stop_task.c" ++#endif + # include "topology.c" + #endif + +diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c +index 3dbf351d12d5..b2590f961139 100644 +--- a/kernel/sched/cpufreq_schedutil.c ++++ b/kernel/sched/cpufreq_schedutil.c +@@ -160,9 +160,14 @@ static void sugov_get_util(struct sugov_cpu *sg_cpu) + unsigned long max = arch_scale_cpu_capacity(sg_cpu->cpu); + + sg_cpu->max = max; ++#ifndef CONFIG_SCHED_ALT + sg_cpu->bw_dl = cpu_bw_dl(rq); + sg_cpu->util = effective_cpu_util(sg_cpu->cpu, cpu_util_cfs(sg_cpu->cpu), max, + FREQUENCY_UTIL, NULL); ++#else ++ sg_cpu->bw_dl = 0; ++ sg_cpu->util = rq_load_util(rq, max); ++#endif /* CONFIG_SCHED_ALT */ + } + + /** +@@ -306,8 +311,10 @@ static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; } + */ + static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu) + { ++#ifndef CONFIG_SCHED_ALT + if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl) + sg_cpu->sg_policy->limits_changed = true; ++#endif + } + + static inline bool sugov_update_single_common(struct sugov_cpu *sg_cpu, +@@ -607,6 +614,7 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy) + } + + ret = 
sched_setattr_nocheck(thread, &attr); ++ + if (ret) { + kthread_stop(thread); + pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__); +@@ -839,7 +847,9 @@ cpufreq_governor_init(schedutil_gov); + #ifdef CONFIG_ENERGY_MODEL + static void rebuild_sd_workfn(struct work_struct *work) + { ++#ifndef CONFIG_SCHED_ALT + rebuild_sched_domains_energy(); ++#endif /* CONFIG_SCHED_ALT */ + } + static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn); + +diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c +index 78a233d43757..b3bbc87d4352 100644 +--- a/kernel/sched/cputime.c ++++ b/kernel/sched/cputime.c +@@ -122,7 +122,7 @@ void account_user_time(struct task_struct *p, u64 cputime) + p->utime += cputime; + account_group_user_time(p, cputime); + +- index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER; ++ index = task_running_nice(p) ? CPUTIME_NICE : CPUTIME_USER; + + /* Add user time to cpustat. */ + task_group_account_field(p, index, cputime); +@@ -146,7 +146,7 @@ void account_guest_time(struct task_struct *p, u64 cputime) + p->gtime += cputime; + + /* Add guest time to cpustat. */ +- if (task_nice(p) > 0) { ++ if (task_running_nice(p)) { + task_group_account_field(p, CPUTIME_NICE, cputime); + cpustat[CPUTIME_GUEST_NICE] += cputime; + } else { +@@ -269,7 +269,7 @@ static inline u64 account_other_time(u64 max) + #ifdef CONFIG_64BIT + static inline u64 read_sum_exec_runtime(struct task_struct *t) + { +- return t->se.sum_exec_runtime; ++ return tsk_seruntime(t); + } + #else + static u64 read_sum_exec_runtime(struct task_struct *t) +@@ -279,7 +279,7 @@ static u64 read_sum_exec_runtime(struct task_struct *t) + struct rq *rq; + + rq = task_rq_lock(t, &rf); +- ns = t->se.sum_exec_runtime; ++ ns = tsk_seruntime(t); + task_rq_unlock(rq, t, &rf); + + return ns; +@@ -611,7 +611,7 @@ void cputime_adjust(struct task_cputime *curr, struct prev_cputime *prev, + void task_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st) + { + struct task_cputime cputime = { +- .sum_exec_runtime = p->se.sum_exec_runtime, ++ .sum_exec_runtime = tsk_seruntime(p), + }; + + if (task_cputime(p, &cputime.utime, &cputime.stime)) +diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c +index bb3d63bdf4ae..4e1680785704 100644 +--- a/kernel/sched/debug.c ++++ b/kernel/sched/debug.c +@@ -7,6 +7,7 @@ + * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar + */ + ++#ifndef CONFIG_SCHED_ALT + /* + * This allows printing both to /proc/sched_debug and + * to the console +@@ -215,6 +216,7 @@ static const struct file_operations sched_scaling_fops = { + }; + + #endif /* SMP */ ++#endif /* !CONFIG_SCHED_ALT */ + + #ifdef CONFIG_PREEMPT_DYNAMIC + +@@ -278,6 +280,7 @@ static const struct file_operations sched_dynamic_fops = { + + #endif /* CONFIG_PREEMPT_DYNAMIC */ + ++#ifndef CONFIG_SCHED_ALT + __read_mostly bool sched_debug_verbose; + + static const struct seq_operations sched_debug_sops; +@@ -293,6 +296,7 @@ static const struct file_operations sched_debug_fops = { + .llseek = seq_lseek, + .release = seq_release, + }; ++#endif /* !CONFIG_SCHED_ALT */ + + static struct dentry *debugfs_sched; + +@@ -302,12 +306,15 @@ static __init int sched_init_debug(void) + + debugfs_sched = debugfs_create_dir("sched", NULL); + ++#ifndef CONFIG_SCHED_ALT + debugfs_create_file("features", 0644, debugfs_sched, NULL, &sched_feat_fops); + debugfs_create_bool("verbose", 0644, debugfs_sched, &sched_debug_verbose); ++#endif /* !CONFIG_SCHED_ALT */ + #ifdef CONFIG_PREEMPT_DYNAMIC + debugfs_create_file("preempt", 0644, debugfs_sched, NULL, &sched_dynamic_fops); 
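The cputime.c hunks above reroute the CPUTIME_NICE/CPUTIME_USER accounting split from mainline's static `task_nice(p) > 0` test to the scheduler-provided task_running_nice(), which in BMQ also folds in the dynamic boost_prio adjustment. Below is a minimal userspace sketch of the difference; DEFAULT_PRIO uses its mainline value of 120, while MAX_PRIORITY_ADJ is given an assumed value of 4 purely for illustration (the real constant is defined elsewhere in this patch):

#include <stdio.h>

/* DEFAULT_PRIO = 120 as in mainline include/linux/sched/prio.h. */
#define DEFAULT_PRIO     120
/* Assumed value for illustration only; the patch defines the real one. */
#define MAX_PRIORITY_ADJ 4

struct fake_task {
	const char *name;
	int prio;       /* stand-in for the task priority, DEFAULT_PRIO + nice */
	int boost_prio; /* BMQ boost: negative = boosted, positive = deboosted */
};

/* Mainline-style test used by account_user_time() before this patch
 * (the kernel actually derives nice from static_prio; prio is a proxy here). */
static int nice_mainline(const struct fake_task *p)
{
	return p->prio - DEFAULT_PRIO > 0; /* task_nice(p) > 0 */
}

/* BMQ's task_running_nice() from the bmq.h hunk above. */
static int nice_bmq(const struct fake_task *p)
{
	return p->prio + p->boost_prio > DEFAULT_PRIO + MAX_PRIORITY_ADJ;
}

int main(void)
{
	struct fake_task tasks[] = {
		{ "nice 0, no boost",   DEFAULT_PRIO,      0 },
		{ "nice +5, boosted",   DEFAULT_PRIO + 5, -2 },
		{ "nice +5, deboosted", DEFAULT_PRIO + 5,  4 },
	};

	for (unsigned i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
		printf("%-20s mainline=%d bmq=%d\n", tasks[i].name,
		       nice_mainline(&tasks[i]), nice_bmq(&tasks[i]));
	return 0;
}

The practical effect is that a positively niced task that BMQ has temporarily boosted may be accounted as ordinary user time, and a deboosted one as nice time, rather than the classification being fixed by the nice value alone.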
+ #endif + ++#ifndef CONFIG_SCHED_ALT + debugfs_create_u32("latency_ns", 0644, debugfs_sched, &sysctl_sched_latency); + debugfs_create_u32("min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_min_granularity); + debugfs_create_u32("idle_min_granularity_ns", 0644, debugfs_sched, &sysctl_sched_idle_min_granularity); +@@ -336,11 +343,13 @@ static __init int sched_init_debug(void) + #endif + + debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops); ++#endif /* !CONFIG_SCHED_ALT */ + + return 0; + } + late_initcall(sched_init_debug); + ++#ifndef CONFIG_SCHED_ALT + #ifdef CONFIG_SMP + + static cpumask_var_t sd_sysctl_cpus; +@@ -1067,6 +1076,7 @@ void proc_sched_set_task(struct task_struct *p) + memset(&p->stats, 0, sizeof(p->stats)); + #endif + } ++#endif /* !CONFIG_SCHED_ALT */ + + void resched_latency_warn(int cpu, u64 latency) + { +diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c +index 328cccbee444..aef991facc79 100644 +--- a/kernel/sched/idle.c ++++ b/kernel/sched/idle.c +@@ -400,6 +400,7 @@ void cpu_startup_entry(enum cpuhp_state state) + do_idle(); + } + ++#ifndef CONFIG_SCHED_ALT + /* + * idle-task scheduling class. + */ +@@ -521,3 +522,4 @@ DEFINE_SCHED_CLASS(idle) = { + .switched_to = switched_to_idle, + .update_curr = update_curr_idle, + }; ++#endif +diff --git a/kernel/sched/pds.h b/kernel/sched/pds.h +new file mode 100644 +index 000000000000..56a649d02e49 +--- /dev/null ++++ b/kernel/sched/pds.h +@@ -0,0 +1,127 @@ ++#define ALT_SCHED_VERSION_MSG "sched/pds: PDS CPU Scheduler "ALT_SCHED_VERSION" by Alfred Chen.\n" ++ ++static int sched_timeslice_shift = 22; ++ ++#define NORMAL_PRIO_MOD(x) ((x) & (NORMAL_PRIO_NUM - 1)) ++ ++/* ++ * Common interfaces ++ */ ++static inline void sched_timeslice_imp(const int timeslice_ms) ++{ ++ if (2 == timeslice_ms) ++ sched_timeslice_shift = 21; ++} ++ ++static inline int ++task_sched_prio_normal(const struct task_struct *p, const struct rq *rq) ++{ ++ s64 delta = p->deadline - rq->time_edge + NORMAL_PRIO_NUM - NICE_WIDTH; ++ ++ if (WARN_ONCE(delta > NORMAL_PRIO_NUM - 1, ++ "pds: task_sched_prio_normal() delta %lld\n", delta)) ++ return NORMAL_PRIO_NUM - 1; ++ ++ return (delta < 0) ? 0 : delta; ++} ++ ++static inline int task_sched_prio(const struct task_struct *p) ++{ ++ return (p->prio < MAX_RT_PRIO) ? p->prio : ++ MIN_NORMAL_PRIO + task_sched_prio_normal(p, task_rq(p)); ++} ++ ++static inline int ++task_sched_prio_idx(const struct task_struct *p, const struct rq *rq) ++{ ++ return (p->prio < MAX_RT_PRIO) ? p->prio : MIN_NORMAL_PRIO + ++ NORMAL_PRIO_MOD(task_sched_prio_normal(p, rq) + rq->time_edge); ++} ++ ++static inline int sched_prio2idx(int prio, struct rq *rq) ++{ ++ return (IDLE_TASK_SCHED_PRIO == prio || prio < MAX_RT_PRIO) ? prio : ++ MIN_NORMAL_PRIO + NORMAL_PRIO_MOD((prio - MIN_NORMAL_PRIO) + ++ rq->time_edge); ++} ++ ++static inline int sched_idx2prio(int idx, struct rq *rq) ++{ ++ return (idx < MAX_RT_PRIO) ? 
idx : MIN_NORMAL_PRIO + ++ NORMAL_PRIO_MOD((idx - MIN_NORMAL_PRIO) + NORMAL_PRIO_NUM - ++ NORMAL_PRIO_MOD(rq->time_edge)); ++} ++ ++static inline void sched_renew_deadline(struct task_struct *p, const struct rq *rq) ++{ ++ if (p->prio >= MAX_RT_PRIO) ++ p->deadline = (rq->clock >> sched_timeslice_shift) + ++ p->static_prio - (MAX_PRIO - NICE_WIDTH); ++} ++ ++int task_running_nice(struct task_struct *p) ++{ ++ return (p->prio > DEFAULT_PRIO); ++} ++ ++static inline void update_rq_time_edge(struct rq *rq) ++{ ++ struct list_head head; ++ u64 old = rq->time_edge; ++ u64 now = rq->clock >> sched_timeslice_shift; ++ u64 prio, delta; ++ ++ if (now == old) ++ return; ++ ++ delta = min_t(u64, NORMAL_PRIO_NUM, now - old); ++ INIT_LIST_HEAD(&head); ++ ++ for_each_set_bit(prio, &rq->queue.bitmap[2], delta) ++ list_splice_tail_init(rq->queue.heads + MIN_NORMAL_PRIO + ++ NORMAL_PRIO_MOD(prio + old), &head); ++ ++ rq->queue.bitmap[2] = (NORMAL_PRIO_NUM == delta) ? 0UL : ++ rq->queue.bitmap[2] >> delta; ++ rq->time_edge = now; ++ if (!list_empty(&head)) { ++ u64 idx = MIN_NORMAL_PRIO + NORMAL_PRIO_MOD(now); ++ struct task_struct *p; ++ ++ list_for_each_entry(p, &head, sq_node) ++ p->sq_idx = idx; ++ ++ list_splice(&head, rq->queue.heads + idx); ++ rq->queue.bitmap[2] |= 1UL; ++ } ++} ++ ++static inline void time_slice_expired(struct task_struct *p, struct rq *rq) ++{ ++ p->time_slice = sched_timeslice_ns; ++ sched_renew_deadline(p, rq); ++ if (SCHED_FIFO != p->policy && task_on_rq_queued(p)) ++ requeue_task(p, rq, task_sched_prio_idx(p, rq)); ++} ++ ++static inline void sched_task_sanity_check(struct task_struct *p, struct rq *rq) ++{ ++ u64 max_dl = rq->time_edge + NICE_WIDTH - 1; ++ if (unlikely(p->deadline > max_dl)) ++ p->deadline = max_dl; ++} ++ ++static void sched_task_fork(struct task_struct *p, struct rq *rq) ++{ ++ sched_renew_deadline(p, rq); ++} ++ ++static inline void do_sched_yield_type_1(struct task_struct *p, struct rq *rq) ++{ ++ time_slice_expired(p, rq); ++} ++ ++#ifdef CONFIG_SMP ++static inline void sched_task_ttwu(struct task_struct *p) {} ++#endif ++static inline void sched_task_deactivate(struct task_struct *p, struct rq *rq) {} +diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c +index 0f310768260c..bd38bf738fe9 100644 +--- a/kernel/sched/pelt.c ++++ b/kernel/sched/pelt.c +@@ -266,6 +266,7 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load) + WRITE_ONCE(sa->util_avg, sa->util_sum / divider); + } + ++#ifndef CONFIG_SCHED_ALT + /* + * sched_entity: + * +@@ -383,8 +384,9 @@ int update_dl_rq_load_avg(u64 now, struct rq *rq, int running) + + return 0; + } ++#endif + +-#ifdef CONFIG_SCHED_THERMAL_PRESSURE ++#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT) + /* + * thermal: + * +diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h +index 4ff2ed4f8fa1..226eeed61318 100644 +--- a/kernel/sched/pelt.h ++++ b/kernel/sched/pelt.h +@@ -1,13 +1,15 @@ + #ifdef CONFIG_SMP + #include "sched-pelt.h" + ++#ifndef CONFIG_SCHED_ALT + int __update_load_avg_blocked_se(u64 now, struct sched_entity *se); + int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se); + int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq); + int update_rt_rq_load_avg(u64 now, struct rq *rq, int running); + int update_dl_rq_load_avg(u64 now, struct rq *rq, int running); ++#endif + +-#ifdef CONFIG_SCHED_THERMAL_PRESSURE ++#if defined(CONFIG_SCHED_THERMAL_PRESSURE) && !defined(CONFIG_SCHED_ALT) + int update_thermal_load_avg(u64 now, struct rq *rq, u64 
capacity); + + static inline u64 thermal_load_avg(struct rq *rq) +@@ -44,6 +46,7 @@ static inline u32 get_pelt_divider(struct sched_avg *avg) + return PELT_MIN_DIVIDER + avg->period_contrib; + } + ++#ifndef CONFIG_SCHED_ALT + static inline void cfs_se_util_change(struct sched_avg *avg) + { + unsigned int enqueued; +@@ -155,9 +158,11 @@ static inline u64 cfs_rq_clock_pelt(struct cfs_rq *cfs_rq) + return rq_clock_pelt(rq_of(cfs_rq)); + } + #endif ++#endif /* CONFIG_SCHED_ALT */ + + #else + ++#ifndef CONFIG_SCHED_ALT + static inline int + update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) + { +@@ -175,6 +180,7 @@ update_dl_rq_load_avg(u64 now, struct rq *rq, int running) + { + return 0; + } ++#endif + + static inline int + update_thermal_load_avg(u64 now, struct rq *rq, u64 capacity) +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index 47b89a0fc6e5..de2641a32c22 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -5,6 +5,10 @@ + #ifndef _KERNEL_SCHED_SCHED_H + #define _KERNEL_SCHED_SCHED_H + ++#ifdef CONFIG_SCHED_ALT ++#include "alt_sched.h" ++#else ++ + #include + #include + #include +@@ -3116,4 +3120,9 @@ extern int sched_dynamic_mode(const char *str); + extern void sched_dynamic_update(int mode); + #endif + ++static inline int task_running_nice(struct task_struct *p) ++{ ++ return (task_nice(p) > 0); ++} ++#endif /* !CONFIG_SCHED_ALT */ + #endif /* _KERNEL_SCHED_SCHED_H */ +diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c +index 857f837f52cb..5486c63e4790 100644 +--- a/kernel/sched/stats.c ++++ b/kernel/sched/stats.c +@@ -125,8 +125,10 @@ static int show_schedstat(struct seq_file *seq, void *v) + } else { + struct rq *rq; + #ifdef CONFIG_SMP ++#ifndef CONFIG_SCHED_ALT + struct sched_domain *sd; + int dcount = 0; ++#endif + #endif + cpu = (unsigned long)(v - 2); + rq = cpu_rq(cpu); +@@ -143,6 +145,7 @@ static int show_schedstat(struct seq_file *seq, void *v) + seq_printf(seq, "\n"); + + #ifdef CONFIG_SMP ++#ifndef CONFIG_SCHED_ALT + /* domain-specific stats */ + rcu_read_lock(); + for_each_domain(cpu, sd) { +@@ -171,6 +174,7 @@ static int show_schedstat(struct seq_file *seq, void *v) + sd->ttwu_move_balance); + } + rcu_read_unlock(); ++#endif + #endif + } + return 0; +diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h +index baa839c1ba96..15238be0581b 100644 +--- a/kernel/sched/stats.h ++++ b/kernel/sched/stats.h +@@ -89,6 +89,7 @@ static inline void rq_sched_info_depart (struct rq *rq, unsigned long long delt + + #endif /* CONFIG_SCHEDSTATS */ + ++#ifndef CONFIG_SCHED_ALT + #ifdef CONFIG_FAIR_GROUP_SCHED + struct sched_entity_stats { + struct sched_entity se; +@@ -105,6 +106,7 @@ __schedstats_from_se(struct sched_entity *se) + #endif + return &task_of(se)->stats; + } ++#endif /* CONFIG_SCHED_ALT */ + + #ifdef CONFIG_PSI + /* +diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c +index 05b6c2ad90b9..480ef393b3c9 100644 +--- a/kernel/sched/topology.c ++++ b/kernel/sched/topology.c +@@ -3,6 +3,7 @@ + * Scheduler topology setup/handling methods + */ + ++#ifndef CONFIG_SCHED_ALT + DEFINE_MUTEX(sched_domains_mutex); + + /* Protected by sched_domains_mutex: */ +@@ -1413,8 +1414,10 @@ static void asym_cpu_capacity_scan(void) + */ + + static int default_relax_domain_level = -1; ++#endif /* CONFIG_SCHED_ALT */ + int sched_domain_level_max; + ++#ifndef CONFIG_SCHED_ALT + static int __init setup_relax_domain_level(char *str) + { + if (kstrtoint(str, 0, &default_relax_domain_level)) +@@ -1647,6 +1650,7 @@ sd_init(struct sched_domain_topology_level 
*tl, + + return sd; + } ++#endif /* CONFIG_SCHED_ALT */ + + /* + * Topology list, bottom-up. +@@ -1683,6 +1687,7 @@ void set_sched_topology(struct sched_domain_topology_level *tl) + sched_domain_topology_saved = NULL; + } + ++#ifndef CONFIG_SCHED_ALT + #ifdef CONFIG_NUMA + + static const struct cpumask *sd_numa_mask(int cpu) +@@ -2638,3 +2643,15 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], + partition_sched_domains_locked(ndoms_new, doms_new, dattr_new); + mutex_unlock(&sched_domains_mutex); + } ++#else /* CONFIG_SCHED_ALT */ ++void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], ++ struct sched_domain_attr *dattr_new) ++{} ++ ++#ifdef CONFIG_NUMA ++int sched_numa_find_closest(const struct cpumask *cpus, int cpu) ++{ ++ return best_mask_cpu(cpu, cpus); ++} ++#endif /* CONFIG_NUMA */ ++#endif +diff --git a/kernel/sysctl.c b/kernel/sysctl.c +index 35d034219513..23719c728677 100644 +--- a/kernel/sysctl.c ++++ b/kernel/sysctl.c +@@ -86,6 +86,10 @@ + + /* Constants used for minimum and maximum */ + ++#ifdef CONFIG_SCHED_ALT ++extern int sched_yield_type; ++#endif ++ + #ifdef CONFIG_PERF_EVENTS + static const int six_hundred_forty_kb = 640 * 1024; + #endif +@@ -1590,6 +1594,7 @@ int proc_do_static_key(struct ctl_table *table, int write, + } + + static struct ctl_table kern_table[] = { ++#ifndef CONFIG_SCHED_ALT + #ifdef CONFIG_NUMA_BALANCING + { + .procname = "numa_balancing", +@@ -1601,6 +1606,7 @@ static struct ctl_table kern_table[] = { + .extra2 = SYSCTL_FOUR, + }, + #endif /* CONFIG_NUMA_BALANCING */ ++#endif /* !CONFIG_SCHED_ALT */ + { + .procname = "panic", + .data = &panic_timeout, +@@ -1902,6 +1908,17 @@ static struct ctl_table kern_table[] = { + .proc_handler = proc_dointvec, + }, + #endif ++#ifdef CONFIG_SCHED_ALT ++ { ++ .procname = "yield_type", ++ .data = &sched_yield_type, ++ .maxlen = sizeof (int), ++ .mode = 0644, ++ .proc_handler = &proc_dointvec_minmax, ++ .extra1 = SYSCTL_ZERO, ++ .extra2 = SYSCTL_TWO, ++ }, ++#endif + #if defined(CONFIG_S390) && defined(CONFIG_SMP) + { + .procname = "spin_retry", +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c +index 0ea8702eb516..a27a0f3a654d 100644 +--- a/kernel/time/hrtimer.c ++++ b/kernel/time/hrtimer.c +@@ -2088,8 +2088,10 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode, + int ret = 0; + u64 slack; + ++#ifndef CONFIG_SCHED_ALT + slack = current->timer_slack_ns; + if (dl_task(current) || rt_task(current)) ++#endif + slack = 0; + + hrtimer_init_sleeper_on_stack(&t, clockid, mode); +diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c +index cb925e8ef9a8..67d823510f5c 100644 +--- a/kernel/time/posix-cpu-timers.c ++++ b/kernel/time/posix-cpu-timers.c +@@ -223,7 +223,7 @@ static void task_sample_cputime(struct task_struct *p, u64 *samples) + u64 stime, utime; + + task_cputime(p, &utime, &stime); +- store_samples(samples, stime, utime, p->se.sum_exec_runtime); ++ store_samples(samples, stime, utime, tsk_seruntime(p)); + } + + static void proc_sample_cputime_atomic(struct task_cputime_atomic *at, +@@ -866,6 +866,7 @@ static void collect_posix_cputimers(struct posix_cputimers *pct, u64 *samples, + } + } + ++#ifndef CONFIG_SCHED_ALT + static inline void check_dl_overrun(struct task_struct *tsk) + { + if (tsk->dl.dl_overrun) { +@@ -873,6 +874,7 @@ static inline void check_dl_overrun(struct task_struct *tsk) + send_signal_locked(SIGXCPU, SEND_SIG_PRIV, tsk, PIDTYPE_TGID); + } + } ++#endif + + static bool check_rlimit(u64 time, u64 limit, int 
signo, bool rt, bool hard) + { +@@ -900,8 +902,10 @@ static void check_thread_timers(struct task_struct *tsk, + u64 samples[CPUCLOCK_MAX]; + unsigned long soft; + ++#ifndef CONFIG_SCHED_ALT + if (dl_task(tsk)) + check_dl_overrun(tsk); ++#endif + + if (expiry_cache_is_inactive(pct)) + return; +@@ -915,7 +919,7 @@ static void check_thread_timers(struct task_struct *tsk, + soft = task_rlimit(tsk, RLIMIT_RTTIME); + if (soft != RLIM_INFINITY) { + /* Task RT timeout is accounted in jiffies. RTTIME is usec */ +- unsigned long rttime = tsk->rt.timeout * (USEC_PER_SEC / HZ); ++ unsigned long rttime = tsk_rttimeout(tsk) * (USEC_PER_SEC / HZ); + unsigned long hard = task_rlimit_max(tsk, RLIMIT_RTTIME); + + /* At the hard limit, send SIGKILL. No further action. */ +@@ -1151,8 +1155,10 @@ static inline bool fastpath_timer_check(struct task_struct *tsk) + return true; + } + ++#ifndef CONFIG_SCHED_ALT + if (dl_task(tsk) && tsk->dl.dl_overrun) + return true; ++#endif + + return false; + } +diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c +index a2d301f58ced..2ccdede8585c 100644 +--- a/kernel/trace/trace_selftest.c ++++ b/kernel/trace/trace_selftest.c +@@ -1143,10 +1143,15 @@ static int trace_wakeup_test_thread(void *data) + { + /* Make this a -deadline thread */ + static const struct sched_attr attr = { ++#ifdef CONFIG_SCHED_ALT ++ /* No deadline on BMQ/PDS, use RR */ ++ .sched_policy = SCHED_RR, ++#else + .sched_policy = SCHED_DEADLINE, + .sched_runtime = 100000ULL, + .sched_deadline = 10000000ULL, + .sched_period = 10000000ULL ++#endif + }; + struct wakeup_test_data *x = data; + diff --git a/sys-kernel/pinephone-pro-sources/files/5021_BMQ-and-PDS-gentoo-defaults.patch b/sys-kernel/pinephone-pro-sources/files/5021_BMQ-and-PDS-gentoo-defaults.patch new file mode 100644 index 0000000..6b2049d --- /dev/null +++ b/sys-kernel/pinephone-pro-sources/files/5021_BMQ-and-PDS-gentoo-defaults.patch @@ -0,0 +1,13 @@ +--- a/init/Kconfig 2022-07-07 13:22:00.698439887 -0400 ++++ b/init/Kconfig 2022-07-07 13:23:45.152333576 -0400 +@@ -874,8 +874,9 @@ config UCLAMP_BUCKETS_COUNT + If in doubt, use the default value. 
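Further up, the kernel/sysctl.c hunk registers a new /proc/sys/kernel/yield_type entry for CONFIG_SCHED_ALT kernels: an integer clamped to the range 0-2 via proc_dointvec_minmax. Here is a small sketch of how a userspace tool might probe it; the path and bounds come straight from that hunk, but the semantics of the individual values are not spelled out in the patch, so the program only reports the raw number:

#include <stdio.h>

/* Path added by the kern_table hunk above (CONFIG_SCHED_ALT kernels only). */
#define YIELD_TYPE_PATH "/proc/sys/kernel/yield_type"

int main(void)
{
	FILE *f = fopen(YIELD_TYPE_PATH, "r");
	int val;

	if (!f) {
		perror(YIELD_TYPE_PATH); /* absent on non-SCHED_ALT kernels */
		return 1;
	}
	if (fscanf(f, "%d", &val) == 1)
		printf("yield_type = %d (sysctl bounds allow 0-2)\n", val);
	fclose(f);
	return 0;
}

Writing a value back (as root, e.g. echo 2 > /proc/sys/kernel/yield_type) goes through the same minmax handler, so out-of-range values are rejected.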
+ + menuconfig SCHED_ALT ++ depends on X86_64 + bool "Alternative CPU Schedulers" +- default y ++ default n + help + This feature enable alternative CPU scheduler" + diff --git a/sys-kernel/pinephone-pro-sources/pinephone-pro-sources-5.18.6.ebuild b/sys-kernel/pinephone-pro-sources/pinephone-pro-sources-5.18.6.ebuild deleted file mode 100644 index e5be7c1..0000000 --- a/sys-kernel/pinephone-pro-sources/pinephone-pro-sources-5.18.6.ebuild +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright 1999-2021 Gentoo Authors -# Distributed under the terms of the GNU General Public License v2 - -EAPI="8" -ETYPE="sources" -K_NOUSENAME="yes" -K_NOSETEXTRAVERSION="yes" -K_SECURITY_UNSUPPORTED="1" -K_WANT_GENPATCHES="base extras experimental" -K_GENPATCHES_VER="9" - -inherit kernel-2 -detect_version - -KEYWORDS="~arm64" - -DEPEND="${RDEPEND} - >=sys-devel/patch-2.7.5" - -DESCRIPTION="Full sources for the Linux kernel with gentoo patchset and patches for the PinePhone Pro" - -MEGI_TAG="orange-pi-5.18-20220615-1100" -SRC_URI="https://github.com/megous/linux/archive/${MEGI_TAG}.tar.gz ${GENPATCHES_URI}" - -PATCHES=( - -) - -S="${WORKDIR}/linux-${MEGI_TAG}" - -src_unpack() { - default -} - -src_prepare() { - default - eapply_user -} - -pkg_postinst() { - kernel-2_pkg_postinst - einfo "To build and install the kernel use the following commands:" - einfo "# make Image modules" - einfo "# make DTC_FLAGS="-@" dtbs" - einfo "# cp arch/arm64/boot/Image /boot" - einfo "# make INSTALL_MOD_PATH=/usr modules_install" - einfo "# make INSTALL_DTBS_PATH=/boot/dtbs dtbs_install" - einfo "You will need to create and initramfs afterwards." - einfo "If you use dracut you can run:" - einfo "# dracut -m \"rootfs-block base\" --host-only --kver 5.18.3-pinehone-gentoo-arm64" - einfo "Change 5.18.2-pinehone-gentoo-arm64 to your kernel version installed in /lib/modules" -} - -pkg_postrm() { - kernel-2_pkg_postrm -} diff --git a/sys-kernel/pinephone-pro-sources/pinephone-pro-sources-5.19.0.ebuild b/sys-kernel/pinephone-pro-sources/pinephone-pro-sources-5.19.0.ebuild new file mode 100644 index 0000000..199770b --- /dev/null +++ b/sys-kernel/pinephone-pro-sources/pinephone-pro-sources-5.19.0.ebuild @@ -0,0 +1,90 @@ +# Copyright 1999-2021 Gentoo Authors +# Distributed under the terms of the GNU General Public License v2 + +EAPI="8" +K_NOUSENAME="yes" +K_NOSETEXTRAVERSION="yes" +K_SECURITY_UNSUPPORTED="1" +K_GENPATCHES_VER="1" +ETYPE="sources" +inherit kernel-2 +detect_version + +KEYWORDS="~arm64" + +DEPEND="${RDEPEND} + >=sys-devel/patch-2.7.5" + +DESCRIPTION="Full sources for the Linux kernel, with megi's patch for pinephone and gentoo patchset" + +MEGI_TAG="orange-pi-5.19-20220802-0940" +SRC_URI="https://github.com/megous/linux/archive/${MEGI_TAG}.tar.gz" + +PATCHES=( + #Gentoo Patches + ${FILESDIR}/1500_XATTR_USER_PREFIX.patch + ${FILESDIR}/1510_fs-enable-link-security-restrictions-by-default.patch + ${FILESDIR}/1700_sparc-address-warray-bound-warnings.patch + ${FILESDIR}/2000_BT-Check-key-sizes-only-if-Secure-Simple-Pairing-enabled.patch + ${FILESDIR}/2900_tmp513-Fix-build-issue-by-selecting-CONFIG_REG.patch + ${FILESDIR}/2920_sign-file-patch-for-libressl.patch + ${FILESDIR}/3000_Support-printing-firmware-info.patch + ${FILESDIR}/4567_distro-Gentoo-Kconfig.patch + ${FILESDIR}/5010_enable-cpu-optimizations-universal.patch + ${FILESDIR}/5020_BMQ-and-PDS-io-scheduler-v5.19-r0.patch + ${FILESDIR}/5021_BMQ-and-PDS-gentoo-defaults.patch + + #PinePhone Patches + ${FILESDIR}/0101-arm64-dts-pinephone-drop-modem-power-node.patch + 
+	${FILESDIR}/0102-arm64-dts-pinephone-pro-remove-modem-node.patch
+	${FILESDIR}/0103-arm64-dts-rk3399-pinephone-pro-add-modem-RI-pin.patch
+	${FILESDIR}/0104-rk818_charger-use-type-battery-again.patch
+	${FILESDIR}/0201-revert-fbcon-remove-now-unusued-softback_lines-cursor-argument.patch
+	${FILESDIR}/0202-revert-fbcon-remove-no-op-fbcon_set_origin.patch
+	${FILESDIR}/0203-revert-fbcon-remove-soft-scrollback-code.patch
+
+	#Bootsplash
+	${FILESDIR}/0301-bootsplash.patch
+	${FILESDIR}/0302-bootsplash.patch
+	${FILESDIR}/0303-bootsplash.patch
+	${FILESDIR}/0304-bootsplash.patch
+	${FILESDIR}/0305-bootsplash.patch
+	${FILESDIR}/0306-bootsplash.patch
+	${FILESDIR}/0307-bootsplash.patch
+	${FILESDIR}/0308-bootsplash.patch
+	${FILESDIR}/0309-bootsplash.patch
+	${FILESDIR}/0310-bootsplash.patch
+	${FILESDIR}/0311-bootsplash.patch
+	${FILESDIR}/0312-bootsplash.patch
+)
+
+S="${WORKDIR}/linux-${MEGI_TAG}"
+
+src_unpack() {
+	default
+}
+
+src_prepare() {
+	default
+	eapply_user
+}
+
+pkg_postinst() {
+	kernel-2_pkg_postinst
+	einfo "For more info on this patchset, and how to report problems, see:"
+	einfo "${HOMEPAGE}"
+	einfo "To build and install the kernel, use the following commands:"
+	einfo "make Image Image.gz modules"
+	einfo "make DTC_FLAGS=\"-@\" dtbs"
+	einfo "make install; make modules_install; make dtbs_install"
+	einfo "If you use the kernel config shipped with this ebuild, don't forget to also copy dracut-pp.conf to /etc/dracut.conf.d/"
+	einfo "so that the proper kernel modules are loaded into the initramfs."
+	einfo "If you want to cross-compile the PinePhone kernel on an amd64 host, follow https://wiki.gentoo.org/wiki/Cross_build_environment"
+	einfo "to set up a cross-toolchain environment, then create an xmake wrapper like the following and replace make with xmake in the commands above:"
+	einfo '#!/bin/sh'
+	einfo 'exec make ARCH="arm64" CROSS_COMPILE="aarch64-unknown-linux-gnu-" INSTALL_MOD_PATH="${SYSROOT}" "$@"'
+}
+
+pkg_postrm() {
+	kernel-2_pkg_postrm
+}
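As a closing note on the scheduler patch itself: the PDS queue rotation in pds.h (sched_prio2idx()/sched_idx2prio() combined with rq->time_edge) is a plain modular rotation of the normal-priority slots, so mapping a priority to a queue index and back is lossless at any time_edge. The following standalone model checks that round trip under assumed constants; MAX_RT_PRIO, MIN_NORMAL_PRIO and NORMAL_PRIO_NUM are given illustrative values here, since the real definitions live in alt_sched.h outside the hunks shown, and the idle-task special case is omitted:

#include <assert.h>
#include <stdio.h>

/* Illustrative values; the real constants come from alt_sched.h. */
#define MAX_RT_PRIO     100
#define MIN_NORMAL_PRIO 100
#define NORMAL_PRIO_NUM 64   /* must stay a power of two for the masking below */

#define NORMAL_PRIO_MOD(x) ((x) & (NORMAL_PRIO_NUM - 1))

/* sched_prio2idx() from pds.h, minus the idle-task special case. */
static int prio2idx(int prio, unsigned long long time_edge)
{
	return (prio < MAX_RT_PRIO) ? prio :
		MIN_NORMAL_PRIO +
		NORMAL_PRIO_MOD((prio - MIN_NORMAL_PRIO) + time_edge);
}

/* sched_idx2prio() from pds.h. */
static int idx2prio(int idx, unsigned long long time_edge)
{
	return (idx < MAX_RT_PRIO) ? idx :
		MIN_NORMAL_PRIO +
		NORMAL_PRIO_MOD((idx - MIN_NORMAL_PRIO) + NORMAL_PRIO_NUM -
				NORMAL_PRIO_MOD(time_edge));
}

int main(void)
{
	/* The rotation must round-trip for every normal priority at any edge. */
	for (unsigned long long edge = 0; edge < 3 * NORMAL_PRIO_NUM; edge++)
		for (int prio = MIN_NORMAL_PRIO;
		     prio < MIN_NORMAL_PRIO + NORMAL_PRIO_NUM; prio++)
			assert(idx2prio(prio2idx(prio, edge), edge) == prio);

	printf("prio <-> idx round-trip holds for all time_edge values tested\n");
	return 0;
}

Keeping NORMAL_PRIO_NUM a power of two is what lets NORMAL_PRIO_MOD() be a bit mask rather than a division, matching how update_rq_time_edge() in the hunk above rotates queue.bitmap[2] by the same modulus.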