This is the 6.6.45 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAma4lw4ACgkQONu9yGCS
aT6AwBAAp54rory1tzRSXkA2XyBm9be5fu2y0NgaqXTVxY0iOMy0Ic8hEOi9QLcK
Z0jQnaxtWTSwo+kzVL3fmd3vvvwt/0X2SClGYriRHnCRVze67k3Fu7Nm3LAfSGTc
SiHYe4s+r+GRlHVn9Nryjf3sVD4Snc7Bl3dvG1WeKyceJVWYb7e5qICaB2IFtdXb
z21pUn9Be1TBQmQCEUYciA19gbS3itcIIeVSrzKBfQOCQp/vNLIzwaXNXvRVuPE0
eUrVSGarOFZNNBojySOXLz3E7wdPAAYZtHxtxFr0FVoeiSSm2SHFQ+f7hhKoCNE3
LJOyy5+NBjXnZ1fjwiAgyf6osuGoo5pY5uVGg56dBGBNkjfJqDWxXudksop3VkKX
qDo4LkUi60ZyurdOqemGzls1S/0lpEmEWm9Dla1vgGWiAuwsE+QHcSjllH7d1m2L
JOTCW0918P/+uLDW45xxFDTzEMhISjg0J75ZH2tgrUr6ZsSl6FfPCZ2EwkcF3L37
VBVMnFMoGqHRhb91QMdUxiIJ+/1KSoJdo5tYmvd7puBNpuFyug44KaH41s70iBrM
nk/axSewu9iTbejc7GSEir/W5WDlOooxE4hOBhj0QjOksKF+zpOkyLso7FDFPxQb
ikTJx4bxWIuhtlnFzcXmSJDInxgbjj8yFDA3kofNX6wLxejg7dQ=
=F4sT
-----END PGP SIGNATURE-----

Merge v6.6.45

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in commit cf0fc152b7.

Makefile (2 lines changed)
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
PATCHLEVEL = 6
SUBLEVEL = 44
SUBLEVEL = 45
EXTRAVERSION =
NAME = Hurr durr I'ma ninja sloth

@@ -85,8 +85,7 @@ static bool
callchain_trace(void *data, unsigned long pc)
{
struct perf_callchain_entry_ctx *entry = data;
perf_callchain_store(entry, pc);
return true;
return perf_callchain_store(entry, pc) == 0;
}

void

@@ -641,6 +641,7 @@
interrupts = <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>;
phys = <&qusb_phy_0>, <&usb0_ssphy>;
phy-names = "usb2-phy", "usb3-phy";
snps,parkmode-disable-ss-quirk;
snps,is-utmi-l1-suspend;
snps,hird-threshold = /bits/ 8 <0x0>;
snps,dis_u2_susphy_quirk;
@@ -683,6 +684,7 @@
interrupts = <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
phys = <&qusb_phy_1>, <&usb1_ssphy>;
phy-names = "usb2-phy", "usb3-phy";
snps,parkmode-disable-ss-quirk;
snps,is-utmi-l1-suspend;
snps,hird-threshold = /bits/ 8 <0x0>;
snps,dis_u2_susphy_quirk;

@@ -2159,7 +2159,8 @@
interrupts = <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
phys = <&qusb2phy>, <&usb1_ssphy>;
snps,parkmode-disable-ss-quirk;
phys = <&qusb2phy>, <&usb3phy>;
phy-names = "usb2-phy", "usb3-phy";
snps,has-lpm-erratum;
snps,hird-threshold = /bits/ 8 <0x10>;
@@ -2168,33 +2169,26 @@

usb3phy: phy@c010000 {
compatible = "qcom,msm8998-qmp-usb3-phy";
reg = <0x0c010000 0x18c>;
status = "disabled";
#address-cells = <1>;
#size-cells = <1>;
ranges;
reg = <0x0c010000 0x1000>;

clocks = <&gcc GCC_USB3_PHY_AUX_CLK>,
<&gcc GCC_USB3_CLKREF_CLK>,
<&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
<&gcc GCC_USB3_CLKREF_CLK>;
clock-names = "aux", "cfg_ahb", "ref";
<&gcc GCC_USB3_PHY_PIPE_CLK>;
clock-names = "aux",
"ref",
"cfg_ahb",
"pipe";
clock-output-names = "usb3_phy_pipe_clk_src";
#clock-cells = <0>;
#phy-cells = <0>;

resets = <&gcc GCC_USB3_PHY_BCR>,
<&gcc GCC_USB3PHY_PHY_BCR>;
reset-names = "phy", "common";
reset-names = "phy",
"phy_phy";

usb1_ssphy: phy@c010200 {
reg = <0xc010200 0x128>,
<0xc010400 0x200>,
<0xc010c00 0x20c>,
<0xc010600 0x128>,
<0xc010800 0x200>;
#phy-cells = <0>;
#clock-cells = <0>;
clocks = <&gcc GCC_USB3_PHY_PIPE_CLK>;
clock-names = "pipe0";
clock-output-names = "usb3_phy_pipe_clk_src";
};
status = "disabled";
};

qusb2phy: phy@c012000 {

@@ -15,6 +15,7 @@
#include <dt-bindings/interconnect/qcom,osm-l3.h>
#include <dt-bindings/interconnect/qcom,sc7180.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/phy/phy-qcom-qmp.h>
#include <dt-bindings/phy/phy-qcom-qusb2.h>
#include <dt-bindings/power/qcom-rpmpd.h>
#include <dt-bindings/reset/qcom,sdm845-aoss.h>

@@ -2795,49 +2796,28 @@
nvmem-cells = <&qusb2p_hstx_trim>;
};

usb_1_qmpphy: phy-wrapper@88e9000 {
usb_1_qmpphy: phy@88e8000 {
compatible = "qcom,sc7180-qmp-usb3-dp-phy";
reg = <0 0x088e9000 0 0x18c>,
<0 0x088e8000 0 0x3c>,
<0 0x088ea000 0 0x18c>;
reg = <0 0x088e8000 0 0x3000>;
status = "disabled";
#address-cells = <2>;
#size-cells = <2>;
ranges;

clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
<&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
<&gcc GCC_USB3_PRIM_CLKREF_CLK>,
<&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>;
clock-names = "aux", "cfg_ahb", "ref", "com_aux";
<&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>,
<&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
<&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
clock-names = "aux",
"ref",
"com_aux",
"usb3_pipe",
"cfg_ahb";

resets = <&gcc GCC_USB3_PHY_PRIM_BCR>,
<&gcc GCC_USB3_DP_PHY_PRIM_BCR>;
reset-names = "phy", "common";

usb_1_ssphy: usb3-phy@88e9200 {
reg = <0 0x088e9200 0 0x128>,
<0 0x088e9400 0 0x200>,
<0 0x088e9c00 0 0x218>,
<0 0x088e9600 0 0x128>,
<0 0x088e9800 0 0x200>,
<0 0x088e9a00 0 0x18>;
#clock-cells = <0>;
#phy-cells = <0>;
clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
clock-names = "pipe0";
clock-output-names = "usb3_phy_pipe_clk_src";
};

dp_phy: dp-phy@88ea200 {
reg = <0 0x088ea200 0 0x200>,
<0 0x088ea400 0 0x200>,
<0 0x088eaa00 0 0x200>,
<0 0x088ea600 0 0x200>,
<0 0x088ea800 0 0x200>;
#clock-cells = <1>;
#phy-cells = <0>;
};
#clock-cells = <1>;
#phy-cells = <1>;
};

pmu@90b6300 {

@@ -3001,7 +2981,8 @@
iommus = <&apps_smmu 0x540 0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
phys = <&usb_1_hsphy>, <&usb_1_ssphy>;
snps,parkmode-disable-ss-quirk;
phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
maximum-speed = "super-speed";
};

@@ -3307,8 +3288,9 @@
"ctrl_link_iface", "stream_pixel";
assigned-clocks = <&dispcc DISP_CC_MDSS_DP_LINK_CLK_SRC>,
<&dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>;
assigned-clock-parents = <&dp_phy 0>, <&dp_phy 1>;
phys = <&dp_phy>;
assigned-clock-parents = <&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>,
<&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
phys = <&usb_1_qmpphy QMP_USB43DP_DP_PHY>;
phy-names = "dp";

operating-points-v2 = <&dp_opp_table>;

@@ -3365,8 +3347,8 @@
<&gcc GCC_DISP_GPLL0_CLK_SRC>,
<&mdss_dsi0_phy 0>,
<&mdss_dsi0_phy 1>,
<&dp_phy 0>,
<&dp_phy 1>;
<&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>,
<&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
clock-names = "bi_tcxo",
"gcc_disp_gpll0_clk_src",
"dsi0_phy_pll_out_byteclk",

@@ -18,6 +18,7 @@
#include <dt-bindings/interconnect/qcom,sc7280.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/mailbox/qcom-ipcc.h>
#include <dt-bindings/phy/phy-qcom-qmp.h>
#include <dt-bindings/power/qcom-rpmpd.h>
#include <dt-bindings/reset/qcom,sdm845-aoss.h>
#include <dt-bindings/reset/qcom,sdm845-pdc.h>

@@ -858,7 +859,7 @@
<&rpmhcc RPMH_CXO_CLK_A>, <&sleep_clk>,
<0>, <&pcie1_lane>,
<0>, <0>, <0>,
<&usb_1_ssphy>;
<&usb_1_qmpphy QMP_USB43DP_USB3_PIPE_CLK>;
clock-names = "bi_tcxo", "bi_tcxo_ao", "sleep_clk",
"pcie_0_pipe_clk", "pcie_1_pipe_clk",
"ufs_phy_rx_symbol_0_clk", "ufs_phy_rx_symbol_1_clk",

@@ -3351,49 +3352,26 @@
resets = <&gcc GCC_QUSB2PHY_SEC_BCR>;
};

usb_1_qmpphy: phy-wrapper@88e9000 {
compatible = "qcom,sc7280-qmp-usb3-dp-phy",
"qcom,sm8250-qmp-usb3-dp-phy";
reg = <0 0x088e9000 0 0x200>,
<0 0x088e8000 0 0x40>,
<0 0x088ea000 0 0x200>;
usb_1_qmpphy: phy@88e8000 {
compatible = "qcom,sc7280-qmp-usb3-dp-phy";
reg = <0 0x088e8000 0 0x3000>;
status = "disabled";
#address-cells = <2>;
#size-cells = <2>;
ranges;

clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
<&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>;
clock-names = "aux", "ref_clk_src", "com_aux";
<&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>,
<&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
clock-names = "aux",
"ref",
"com_aux",
"usb3_pipe";

resets = <&gcc GCC_USB3_DP_PHY_PRIM_BCR>,
<&gcc GCC_USB3_PHY_PRIM_BCR>;
reset-names = "phy", "common";

usb_1_ssphy: usb3-phy@88e9200 {
reg = <0 0x088e9200 0 0x200>,
<0 0x088e9400 0 0x200>,
<0 0x088e9c00 0 0x400>,
<0 0x088e9600 0 0x200>,
<0 0x088e9800 0 0x200>,
<0 0x088e9a00 0 0x100>;
#clock-cells = <0>;
#phy-cells = <0>;
clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
clock-names = "pipe0";
clock-output-names = "usb3_phy_pipe_clk_src";
};

dp_phy: dp-phy@88ea200 {
reg = <0 0x088ea200 0 0x200>,
<0 0x088ea400 0 0x200>,
<0 0x088eaa00 0 0x200>,
<0 0x088ea600 0 0x200>,
<0 0x088ea800 0 0x200>;
#phy-cells = <0>;
#clock-cells = <1>;
};
#clock-cells = <1>;
#phy-cells = <1>;
};

usb_2: usb@8cf8800 {

@@ -3702,7 +3680,8 @@
iommus = <&apps_smmu 0xe0 0x0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
phys = <&usb_1_hsphy>, <&usb_1_ssphy>;
snps,parkmode-disable-ss-quirk;
phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
maximum-speed = "super-speed";
};

@@ -3807,8 +3786,8 @@
<&gcc GCC_DISP_GPLL0_CLK_SRC>,
<&mdss_dsi_phy 0>,
<&mdss_dsi_phy 1>,
<&dp_phy 0>,
<&dp_phy 1>,
<&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>,
<&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>,
<&mdss_edp_phy 0>,
<&mdss_edp_phy 1>;
clock-names = "bi_tcxo",

@@ -4144,8 +4123,9 @@
"stream_pixel";
assigned-clocks = <&dispcc DISP_CC_MDSS_DP_LINK_CLK_SRC>,
<&dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>;
assigned-clock-parents = <&dp_phy 0>, <&dp_phy 1>;
phys = <&dp_phy>;
assigned-clock-parents = <&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>,
<&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
phys = <&usb_1_qmpphy QMP_USB43DP_DP_PHY>;
phy-names = "dp";

operating-points-v2 = <&dp_opp_table>;

@@ -18,6 +18,7 @@
#include <dt-bindings/interconnect/qcom,osm-l3.h>
#include <dt-bindings/interconnect/qcom,sdm845.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/phy/phy-qcom-qmp.h>
#include <dt-bindings/phy/phy-qcom-qusb2.h>
#include <dt-bindings/power/qcom-rpmpd.h>
#include <dt-bindings/reset/qcom,sdm845-aoss.h>

@@ -3983,80 +3984,54 @@
nvmem-cells = <&qusb2s_hstx_trim>;
};

usb_1_qmpphy: phy@88e9000 {
usb_1_qmpphy: phy@88e8000 {
compatible = "qcom,sdm845-qmp-usb3-dp-phy";
reg = <0 0x088e9000 0 0x18c>,
<0 0x088e8000 0 0x38>,
<0 0x088ea000 0 0x40>;
reg = <0 0x088e8000 0 0x3000>;
status = "disabled";
#address-cells = <2>;
#size-cells = <2>;
ranges;

clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
<&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
<&gcc GCC_USB3_PRIM_CLKREF_CLK>,
<&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>;
clock-names = "aux", "cfg_ahb", "ref", "com_aux";
<&gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>,
<&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
<&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
clock-names = "aux",
"ref",
"com_aux",
"usb3_pipe",
"cfg_ahb";

resets = <&gcc GCC_USB3_PHY_PRIM_BCR>,
<&gcc GCC_USB3_DP_PHY_PRIM_BCR>;
reset-names = "phy", "common";

usb_1_ssphy: usb3-phy@88e9200 {
reg = <0 0x088e9200 0 0x128>,
<0 0x088e9400 0 0x200>,
<0 0x088e9c00 0 0x218>,
<0 0x088e9600 0 0x128>,
<0 0x088e9800 0 0x200>,
<0 0x088e9a00 0 0x100>;
#clock-cells = <0>;
#phy-cells = <0>;
clocks = <&gcc GCC_USB3_PRIM_PHY_PIPE_CLK>;
clock-names = "pipe0";
clock-output-names = "usb3_phy_pipe_clk_src";
};

dp_phy: dp-phy@88ea200 {
reg = <0 0x088ea200 0 0x200>,
<0 0x088ea400 0 0x200>,
<0 0x088eaa00 0 0x200>,
<0 0x088ea600 0 0x200>,
<0 0x088ea800 0 0x200>;
#clock-cells = <1>;
#phy-cells = <0>;
};
#clock-cells = <1>;
#phy-cells = <1>;
};

usb_2_qmpphy: phy@88eb000 {
compatible = "qcom,sdm845-qmp-usb3-uni-phy";
reg = <0 0x088eb000 0 0x18c>;
status = "disabled";
#address-cells = <2>;
#size-cells = <2>;
ranges;
reg = <0 0x088eb000 0 0x1000>;

clocks = <&gcc GCC_USB3_SEC_PHY_AUX_CLK>,
<&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
<&gcc GCC_USB3_SEC_CLKREF_CLK>,
<&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>;
clock-names = "aux", "cfg_ahb", "ref", "com_aux";
<&gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>,
<&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
clock-names = "aux",
"cfg_ahb",
"ref",
"com_aux",
"pipe";
clock-output-names = "usb3_uni_phy_pipe_clk_src";
#clock-cells = <0>;
#phy-cells = <0>;

resets = <&gcc GCC_USB3PHY_PHY_SEC_BCR>,
<&gcc GCC_USB3_PHY_SEC_BCR>;
reset-names = "phy", "common";
resets = <&gcc GCC_USB3_PHY_SEC_BCR>,
<&gcc GCC_USB3PHY_PHY_SEC_BCR>;
reset-names = "phy",
"phy_phy";

usb_2_ssphy: phy@88eb200 {
reg = <0 0x088eb200 0 0x128>,
<0 0x088eb400 0 0x1fc>,
<0 0x088eb800 0 0x218>,
<0 0x088eb600 0 0x70>;
#clock-cells = <0>;
#phy-cells = <0>;
clocks = <&gcc GCC_USB3_SEC_PHY_PIPE_CLK>;
clock-names = "pipe0";
clock-output-names = "usb3_uni_phy_pipe_clk_src";
};
status = "disabled";
};

usb_1: usb@a6f8800 {

@@ -4105,7 +4080,8 @@
iommus = <&apps_smmu 0x740 0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
phys = <&usb_1_hsphy>, <&usb_1_ssphy>;
snps,parkmode-disable-ss-quirk;
phys = <&usb_1_hsphy>, <&usb_1_qmpphy QMP_USB43DP_USB3_PHY>;
phy-names = "usb2-phy", "usb3-phy";
};
};
@@ -4156,7 +4132,8 @@
iommus = <&apps_smmu 0x760 0>;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
phys = <&usb_2_hsphy>, <&usb_2_ssphy>;
snps,parkmode-disable-ss-quirk;
phys = <&usb_2_hsphy>, <&usb_2_qmpphy>;
phy-names = "usb2-phy", "usb3-phy";
};
};

@@ -4573,8 +4550,9 @@
"ctrl_link_iface", "stream_pixel";
assigned-clocks = <&dispcc DISP_CC_MDSS_DP_LINK_CLK_SRC>,
<&dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>;
assigned-clock-parents = <&dp_phy 0>, <&dp_phy 1>;
phys = <&dp_phy>;
assigned-clock-parents = <&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>,
<&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
phys = <&usb_1_qmpphy QMP_USB43DP_DP_PHY>;
phy-names = "dp";

operating-points-v2 = <&dp_opp_table>;

@@ -4912,8 +4890,8 @@
<&mdss_dsi0_phy 1>,
<&mdss_dsi1_phy 0>,
<&mdss_dsi1_phy 1>,
<&dp_phy 0>,
<&dp_phy 1>;
<&usb_1_qmpphy QMP_USB43DP_DP_LINK_CLK>,
<&usb_1_qmpphy QMP_USB43DP_DP_VCO_DIV_CLK>;
clock-names = "bi_tcxo",
"gcc_disp_gpll0_clk_src",
"gcc_disp_gpll0_div_clk_src",

@@ -13,6 +13,7 @@
#include <linux/types.h>
#include <asm/insn.h>

#define HAVE_JUMP_LABEL_BATCH
#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE

static __always_inline bool arch_static_branch(struct static_key * const key,

@@ -7,11 +7,12 @@
*/
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <linux/smp.h>
#include <asm/insn.h>
#include <asm/patching.h>

void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
bool arch_jump_label_transform_queue(struct jump_entry *entry,
enum jump_label_type type)
{
void *addr = (void *)jump_entry_code(entry);
u32 insn;
@@ -25,4 +26,10 @@ void arch_jump_label_transform(struct jump_entry *entry,
}

aarch64_insn_patch_text_nosync(addr, insn);
return true;
}

void arch_jump_label_transform_apply(void)
{
kick_all_cpus_sync();
}

@ -99,8 +99,8 @@
|
||||
rtc0: rtc@1fe07800 {
|
||||
compatible = "loongson,ls2k1000-rtc";
|
||||
reg = <0 0x1fe07800 0 0x78>;
|
||||
interrupt-parent = <&liointc0>;
|
||||
interrupts = <60 IRQ_TYPE_LEVEL_LOW>;
|
||||
interrupt-parent = <&liointc1>;
|
||||
interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
|
||||
};
|
||||
|
||||
uart0: serial@1fe00000 {
|
||||
@ -108,7 +108,7 @@
|
||||
reg = <0 0x1fe00000 0 0x8>;
|
||||
clock-frequency = <125000000>;
|
||||
interrupt-parent = <&liointc0>;
|
||||
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
|
||||
interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
|
||||
no-loopback-test;
|
||||
};
|
||||
|
||||
@ -117,7 +117,6 @@
|
||||
device_type = "pci";
|
||||
#address-cells = <3>;
|
||||
#size-cells = <2>;
|
||||
#interrupt-cells = <2>;
|
||||
|
||||
reg = <0 0x1a000000 0 0x02000000>,
|
||||
<0xfe 0x00000000 0 0x20000000>;
|
||||
@ -132,8 +131,8 @@
|
||||
"pciclass0c03";
|
||||
|
||||
reg = <0x1800 0x0 0x0 0x0 0x0>;
|
||||
interrupts = <12 IRQ_TYPE_LEVEL_LOW>,
|
||||
<13 IRQ_TYPE_LEVEL_LOW>;
|
||||
interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
|
||||
<13 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-names = "macirq", "eth_lpi";
|
||||
interrupt-parent = <&liointc0>;
|
||||
phy-mode = "rgmii-id";
|
||||
@ -156,8 +155,8 @@
|
||||
"loongson, pci-gmac";
|
||||
|
||||
reg = <0x1900 0x0 0x0 0x0 0x0>;
|
||||
interrupts = <14 IRQ_TYPE_LEVEL_LOW>,
|
||||
<15 IRQ_TYPE_LEVEL_LOW>;
|
||||
interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
|
||||
<15 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-names = "macirq", "eth_lpi";
|
||||
interrupt-parent = <&liointc0>;
|
||||
phy-mode = "rgmii-id";
|
||||
@ -179,7 +178,7 @@
|
||||
"pciclass0c03";
|
||||
|
||||
reg = <0x2100 0x0 0x0 0x0 0x0>;
|
||||
interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
|
||||
interrupts = <18 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-parent = <&liointc1>;
|
||||
};
|
||||
|
||||
@ -190,7 +189,7 @@
|
||||
"pciclass0c03";
|
||||
|
||||
reg = <0x2200 0x0 0x0 0x0 0x0>;
|
||||
interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
|
||||
interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-parent = <&liointc1>;
|
||||
};
|
||||
|
||||
@ -201,97 +200,121 @@
|
||||
"pciclass0106";
|
||||
|
||||
reg = <0x4000 0x0 0x0 0x0 0x0>;
|
||||
interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
|
||||
interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-parent = <&liointc0>;
|
||||
};
|
||||
|
||||
pci_bridge@9,0 {
|
||||
pcie@9,0 {
|
||||
compatible = "pci0014,7a19.0",
|
||||
"pci0014,7a19",
|
||||
"pciclass060400",
|
||||
"pciclass0604";
|
||||
|
||||
reg = <0x4800 0x0 0x0 0x0 0x0>;
|
||||
#address-cells = <3>;
|
||||
#size-cells = <2>;
|
||||
device_type = "pci";
|
||||
#interrupt-cells = <1>;
|
||||
interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
|
||||
interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-parent = <&liointc1>;
|
||||
interrupt-map-mask = <0 0 0 0>;
|
||||
interrupt-map = <0 0 0 0 &liointc1 0 IRQ_TYPE_LEVEL_LOW>;
|
||||
interrupt-map = <0 0 0 0 &liointc1 0 IRQ_TYPE_LEVEL_HIGH>;
|
||||
ranges;
|
||||
external-facing;
|
||||
};
|
||||
|
||||
pci_bridge@a,0 {
|
||||
pcie@a,0 {
|
||||
compatible = "pci0014,7a09.0",
|
||||
"pci0014,7a09",
|
||||
"pciclass060400",
|
||||
"pciclass0604";
|
||||
|
||||
reg = <0x5000 0x0 0x0 0x0 0x0>;
|
||||
#address-cells = <3>;
|
||||
#size-cells = <2>;
|
||||
device_type = "pci";
|
||||
#interrupt-cells = <1>;
|
||||
interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
|
||||
interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-parent = <&liointc1>;
|
||||
interrupt-map-mask = <0 0 0 0>;
|
||||
interrupt-map = <0 0 0 0 &liointc1 1 IRQ_TYPE_LEVEL_LOW>;
|
||||
interrupt-map = <0 0 0 0 &liointc1 1 IRQ_TYPE_LEVEL_HIGH>;
|
||||
ranges;
|
||||
external-facing;
|
||||
};
|
||||
|
||||
pci_bridge@b,0 {
|
||||
pcie@b,0 {
|
||||
compatible = "pci0014,7a09.0",
|
||||
"pci0014,7a09",
|
||||
"pciclass060400",
|
||||
"pciclass0604";
|
||||
|
||||
reg = <0x5800 0x0 0x0 0x0 0x0>;
|
||||
#address-cells = <3>;
|
||||
#size-cells = <2>;
|
||||
device_type = "pci";
|
||||
#interrupt-cells = <1>;
|
||||
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
|
||||
interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-parent = <&liointc1>;
|
||||
interrupt-map-mask = <0 0 0 0>;
|
||||
interrupt-map = <0 0 0 0 &liointc1 2 IRQ_TYPE_LEVEL_LOW>;
|
||||
interrupt-map = <0 0 0 0 &liointc1 2 IRQ_TYPE_LEVEL_HIGH>;
|
||||
ranges;
|
||||
external-facing;
|
||||
};
|
||||
|
||||
pci_bridge@c,0 {
|
||||
pcie@c,0 {
|
||||
compatible = "pci0014,7a09.0",
|
||||
"pci0014,7a09",
|
||||
"pciclass060400",
|
||||
"pciclass0604";
|
||||
|
||||
reg = <0x6000 0x0 0x0 0x0 0x0>;
|
||||
#address-cells = <3>;
|
||||
#size-cells = <2>;
|
||||
device_type = "pci";
|
||||
#interrupt-cells = <1>;
|
||||
interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
|
||||
interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-parent = <&liointc1>;
|
||||
interrupt-map-mask = <0 0 0 0>;
|
||||
interrupt-map = <0 0 0 0 &liointc1 3 IRQ_TYPE_LEVEL_LOW>;
|
||||
interrupt-map = <0 0 0 0 &liointc1 3 IRQ_TYPE_LEVEL_HIGH>;
|
||||
ranges;
|
||||
external-facing;
|
||||
};
|
||||
|
||||
pci_bridge@d,0 {
|
||||
pcie@d,0 {
|
||||
compatible = "pci0014,7a19.0",
|
||||
"pci0014,7a19",
|
||||
"pciclass060400",
|
||||
"pciclass0604";
|
||||
|
||||
reg = <0x6800 0x0 0x0 0x0 0x0>;
|
||||
#address-cells = <3>;
|
||||
#size-cells = <2>;
|
||||
device_type = "pci";
|
||||
#interrupt-cells = <1>;
|
||||
interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
|
||||
interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-parent = <&liointc1>;
|
||||
interrupt-map-mask = <0 0 0 0>;
|
||||
interrupt-map = <0 0 0 0 &liointc1 4 IRQ_TYPE_LEVEL_LOW>;
|
||||
interrupt-map = <0 0 0 0 &liointc1 4 IRQ_TYPE_LEVEL_HIGH>;
|
||||
ranges;
|
||||
external-facing;
|
||||
};
|
||||
|
||||
pci_bridge@e,0 {
|
||||
pcie@e,0 {
|
||||
compatible = "pci0014,7a09.0",
|
||||
"pci0014,7a09",
|
||||
"pciclass060400",
|
||||
"pciclass0604";
|
||||
|
||||
reg = <0x7000 0x0 0x0 0x0 0x0>;
|
||||
#address-cells = <3>;
|
||||
#size-cells = <2>;
|
||||
device_type = "pci";
|
||||
#interrupt-cells = <1>;
|
||||
interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
|
||||
interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-parent = <&liointc1>;
|
||||
interrupt-map-mask = <0 0 0 0>;
|
||||
interrupt-map = <0 0 0 0 &liointc1 5 IRQ_TYPE_LEVEL_LOW>;
|
||||
interrupt-map = <0 0 0 0 &liointc1 5 IRQ_TYPE_LEVEL_HIGH>;
|
||||
ranges;
|
||||
external-facing;
|
||||
};
|
||||
|
||||
|
@ -151,51 +151,19 @@
|
||||
#define PRECISION_S 0
|
||||
#define PRECISION_D 1
|
||||
|
||||
#define DECLARE_UNPRIVILEGED_LOAD_FUNCTION(type, insn) \
|
||||
static inline type load_##type(const type *addr) \
|
||||
{ \
|
||||
type val; \
|
||||
asm (#insn " %0, %1" \
|
||||
: "=&r" (val) : "m" (*addr)); \
|
||||
return val; \
|
||||
}
|
||||
|
||||
#define DECLARE_UNPRIVILEGED_STORE_FUNCTION(type, insn) \
|
||||
static inline void store_##type(type *addr, type val) \
|
||||
{ \
|
||||
asm volatile (#insn " %0, %1\n" \
|
||||
: : "r" (val), "m" (*addr)); \
|
||||
}
|
||||
|
||||
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u8, lbu)
|
||||
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u16, lhu)
|
||||
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s8, lb)
|
||||
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s16, lh)
|
||||
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s32, lw)
|
||||
DECLARE_UNPRIVILEGED_STORE_FUNCTION(u8, sb)
|
||||
DECLARE_UNPRIVILEGED_STORE_FUNCTION(u16, sh)
|
||||
DECLARE_UNPRIVILEGED_STORE_FUNCTION(u32, sw)
|
||||
#if defined(CONFIG_64BIT)
|
||||
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lwu)
|
||||
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u64, ld)
|
||||
DECLARE_UNPRIVILEGED_STORE_FUNCTION(u64, sd)
|
||||
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, ld)
|
||||
#else
|
||||
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lw)
|
||||
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, lw)
|
||||
|
||||
static inline u64 load_u64(const u64 *addr)
|
||||
static inline u8 load_u8(const u8 *addr)
|
||||
{
|
||||
return load_u32((u32 *)addr)
|
||||
+ ((u64)load_u32((u32 *)addr + 1) << 32);
|
||||
u8 val;
|
||||
|
||||
asm volatile("lbu %0, %1" : "=&r" (val) : "m" (*addr));
|
||||
|
||||
return val;
|
||||
}
|
||||
|
||||
static inline void store_u64(u64 *addr, u64 val)
|
||||
static inline void store_u8(u8 *addr, u8 val)
|
||||
{
|
||||
store_u32((u32 *)addr, val);
|
||||
store_u32((u32 *)addr + 1, val >> 32);
|
||||
asm volatile ("sb %0, %1\n" : : "r" (val), "m" (*addr));
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline ulong get_insn(ulong mepc)
|
||||
{
|
||||
|
@ -61,26 +61,27 @@ static inline void no_context(struct pt_regs *regs, unsigned long addr)
|
||||
|
||||
static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
|
||||
{
|
||||
if (!user_mode(regs)) {
|
||||
no_context(regs, addr);
|
||||
return;
|
||||
}
|
||||
|
||||
if (fault & VM_FAULT_OOM) {
|
||||
/*
|
||||
* We ran out of memory, call the OOM killer, and return the userspace
|
||||
* (which will retry the fault, or kill us if we got oom-killed).
|
||||
*/
|
||||
if (!user_mode(regs)) {
|
||||
no_context(regs, addr);
|
||||
return;
|
||||
}
|
||||
pagefault_out_of_memory();
|
||||
return;
|
||||
} else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {
|
||||
/* Kernel mode? Handle exceptions or die */
|
||||
if (!user_mode(regs)) {
|
||||
no_context(regs, addr);
|
||||
return;
|
||||
}
|
||||
do_trap(regs, SIGBUS, BUS_ADRERR, addr);
|
||||
return;
|
||||
} else if (fault & VM_FAULT_SIGSEGV) {
|
||||
do_trap(regs, SIGSEGV, SEGV_MAPERR, addr);
|
||||
return;
|
||||
}
|
||||
|
||||
BUG();
|
||||
}
|
||||
|
||||
|
@ -217,8 +217,6 @@ static void __init setup_bootmem(void)
|
||||
*/
|
||||
memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
|
||||
|
||||
phys_ram_end = memblock_end_of_DRAM();
|
||||
|
||||
/*
|
||||
* Make sure we align the start of the memory on a PMD boundary so that
|
||||
* at worst, we map the linear mapping with PMD mappings.
|
||||
@ -233,6 +231,16 @@ static void __init setup_bootmem(void)
|
||||
if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU))
|
||||
kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
|
||||
|
||||
/*
|
||||
* The size of the linear page mapping may restrict the amount of
|
||||
* usable RAM.
|
||||
*/
|
||||
if (IS_ENABLED(CONFIG_64BIT)) {
|
||||
max_mapped_addr = __pa(PAGE_OFFSET) + KERN_VIRT_SIZE;
|
||||
memblock_cap_memory_range(phys_ram_base,
|
||||
max_mapped_addr - phys_ram_base);
|
||||
}
|
||||
|
||||
/*
|
||||
* Reserve physical address space that would be mapped to virtual
|
||||
* addresses greater than (void *)(-PAGE_SIZE) because:
|
||||
@ -249,6 +257,7 @@ static void __init setup_bootmem(void)
|
||||
memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);
|
||||
}
|
||||
|
||||
phys_ram_end = memblock_end_of_DRAM();
|
||||
min_low_pfn = PFN_UP(phys_ram_base);
|
||||
max_low_pfn = max_pfn = PFN_DOWN(phys_ram_end);
|
||||
high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
|
||||
@ -1269,8 +1278,6 @@ static void __init create_linear_mapping_page_table(void)
|
||||
if (start <= __pa(PAGE_OFFSET) &&
|
||||
__pa(PAGE_OFFSET) < end)
|
||||
start = __pa(PAGE_OFFSET);
|
||||
if (end >= __pa(PAGE_OFFSET) + memory_limit)
|
||||
end = __pa(PAGE_OFFSET) + memory_limit;
|
||||
|
||||
create_linear_mapping_range(start, end, 0);
|
||||
}
|
||||
|
arch/x86/include/asm/posted_intr.h (new file, 88 lines)
@@ -0,0 +1,88 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#ifndef _X86_POSTED_INTR_H
|
||||
#define _X86_POSTED_INTR_H
|
||||
|
||||
#define POSTED_INTR_ON 0
|
||||
#define POSTED_INTR_SN 1
|
||||
|
||||
#define PID_TABLE_ENTRY_VALID 1
|
||||
|
||||
/* Posted-Interrupt Descriptor */
|
||||
struct pi_desc {
|
||||
u32 pir[8]; /* Posted interrupt requested */
|
||||
union {
|
||||
struct {
|
||||
/* bit 256 - Outstanding Notification */
|
||||
u16 on : 1,
|
||||
/* bit 257 - Suppress Notification */
|
||||
sn : 1,
|
||||
/* bit 271:258 - Reserved */
|
||||
rsvd_1 : 14;
|
||||
/* bit 279:272 - Notification Vector */
|
||||
u8 nv;
|
||||
/* bit 287:280 - Reserved */
|
||||
u8 rsvd_2;
|
||||
/* bit 319:288 - Notification Destination */
|
||||
u32 ndst;
|
||||
};
|
||||
u64 control;
|
||||
};
|
||||
u32 rsvd[6];
|
||||
} __aligned(64);
|
||||
|
||||
static inline bool pi_test_and_set_on(struct pi_desc *pi_desc)
|
||||
{
|
||||
return test_and_set_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control);
|
||||
}
|
||||
|
||||
static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc)
|
||||
{
|
||||
return test_and_clear_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control);
|
||||
}
|
||||
|
||||
static inline bool pi_test_and_clear_sn(struct pi_desc *pi_desc)
|
||||
{
|
||||
return test_and_clear_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control);
|
||||
}
|
||||
|
||||
static inline bool pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
|
||||
{
|
||||
return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
|
||||
}
|
||||
|
||||
static inline bool pi_is_pir_empty(struct pi_desc *pi_desc)
|
||||
{
|
||||
return bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS);
|
||||
}
|
||||
|
||||
static inline void pi_set_sn(struct pi_desc *pi_desc)
|
||||
{
|
||||
set_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control);
|
||||
}
|
||||
|
||||
static inline void pi_set_on(struct pi_desc *pi_desc)
|
||||
{
|
||||
set_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control);
|
||||
}
|
||||
|
||||
static inline void pi_clear_on(struct pi_desc *pi_desc)
|
||||
{
|
||||
clear_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control);
|
||||
}
|
||||
|
||||
static inline void pi_clear_sn(struct pi_desc *pi_desc)
|
||||
{
|
||||
clear_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control);
|
||||
}
|
||||
|
||||
static inline bool pi_test_on(struct pi_desc *pi_desc)
|
||||
{
|
||||
return test_bit(POSTED_INTR_ON, (unsigned long *)&pi_desc->control);
|
||||
}
|
||||
|
||||
static inline bool pi_test_sn(struct pi_desc *pi_desc)
|
||||
{
|
||||
return test_bit(POSTED_INTR_SN, (unsigned long *)&pi_desc->control);
|
||||
}
|
||||
|
||||
#endif /* _X86_POSTED_INTR_H */
|
@ -26,6 +26,10 @@ kvm-intel-y += vmx/vmx.o vmx/vmenter.o vmx/pmu_intel.o vmx/vmcs12.o \
|
||||
vmx/hyperv.o vmx/nested.o vmx/posted_intr.o
|
||||
kvm-intel-$(CONFIG_X86_SGX_KVM) += vmx/sgx.o
|
||||
|
||||
ifdef CONFIG_HYPERV
|
||||
kvm-intel-y += vmx/vmx_onhyperv.o
|
||||
endif
|
||||
|
||||
kvm-amd-y += svm/svm.o svm/vmenter.o svm/pmu.o svm/nested.o svm/avic.o \
|
||||
svm/sev.o svm/hyperv.o
|
||||
|
||||
|
@ -13,111 +13,6 @@
|
||||
|
||||
#define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
|
||||
|
||||
/*
|
||||
* Enlightened VMCSv1 doesn't support these:
|
||||
*
|
||||
* POSTED_INTR_NV = 0x00000002,
|
||||
* GUEST_INTR_STATUS = 0x00000810,
|
||||
* APIC_ACCESS_ADDR = 0x00002014,
|
||||
* POSTED_INTR_DESC_ADDR = 0x00002016,
|
||||
* EOI_EXIT_BITMAP0 = 0x0000201c,
|
||||
* EOI_EXIT_BITMAP1 = 0x0000201e,
|
||||
* EOI_EXIT_BITMAP2 = 0x00002020,
|
||||
* EOI_EXIT_BITMAP3 = 0x00002022,
|
||||
* GUEST_PML_INDEX = 0x00000812,
|
||||
* PML_ADDRESS = 0x0000200e,
|
||||
* VM_FUNCTION_CONTROL = 0x00002018,
|
||||
* EPTP_LIST_ADDRESS = 0x00002024,
|
||||
* VMREAD_BITMAP = 0x00002026,
|
||||
* VMWRITE_BITMAP = 0x00002028,
|
||||
*
|
||||
* TSC_MULTIPLIER = 0x00002032,
|
||||
* PLE_GAP = 0x00004020,
|
||||
* PLE_WINDOW = 0x00004022,
|
||||
* VMX_PREEMPTION_TIMER_VALUE = 0x0000482E,
|
||||
*
|
||||
* Currently unsupported in KVM:
|
||||
* GUEST_IA32_RTIT_CTL = 0x00002814,
|
||||
*/
|
||||
#define EVMCS1_SUPPORTED_PINCTRL \
|
||||
(PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | \
|
||||
PIN_BASED_EXT_INTR_MASK | \
|
||||
PIN_BASED_NMI_EXITING | \
|
||||
PIN_BASED_VIRTUAL_NMIS)
|
||||
|
||||
#define EVMCS1_SUPPORTED_EXEC_CTRL \
|
||||
(CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | \
|
||||
CPU_BASED_HLT_EXITING | \
|
||||
CPU_BASED_CR3_LOAD_EXITING | \
|
||||
CPU_BASED_CR3_STORE_EXITING | \
|
||||
CPU_BASED_UNCOND_IO_EXITING | \
|
||||
CPU_BASED_MOV_DR_EXITING | \
|
||||
CPU_BASED_USE_TSC_OFFSETTING | \
|
||||
CPU_BASED_MWAIT_EXITING | \
|
||||
CPU_BASED_MONITOR_EXITING | \
|
||||
CPU_BASED_INVLPG_EXITING | \
|
||||
CPU_BASED_RDPMC_EXITING | \
|
||||
CPU_BASED_INTR_WINDOW_EXITING | \
|
||||
CPU_BASED_CR8_LOAD_EXITING | \
|
||||
CPU_BASED_CR8_STORE_EXITING | \
|
||||
CPU_BASED_RDTSC_EXITING | \
|
||||
CPU_BASED_TPR_SHADOW | \
|
||||
CPU_BASED_USE_IO_BITMAPS | \
|
||||
CPU_BASED_MONITOR_TRAP_FLAG | \
|
||||
CPU_BASED_USE_MSR_BITMAPS | \
|
||||
CPU_BASED_NMI_WINDOW_EXITING | \
|
||||
CPU_BASED_PAUSE_EXITING | \
|
||||
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
|
||||
|
||||
#define EVMCS1_SUPPORTED_2NDEXEC \
|
||||
(SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | \
|
||||
SECONDARY_EXEC_WBINVD_EXITING | \
|
||||
SECONDARY_EXEC_ENABLE_VPID | \
|
||||
SECONDARY_EXEC_ENABLE_EPT | \
|
||||
SECONDARY_EXEC_UNRESTRICTED_GUEST | \
|
||||
SECONDARY_EXEC_DESC | \
|
||||
SECONDARY_EXEC_ENABLE_RDTSCP | \
|
||||
SECONDARY_EXEC_ENABLE_INVPCID | \
|
||||
SECONDARY_EXEC_ENABLE_XSAVES | \
|
||||
SECONDARY_EXEC_RDSEED_EXITING | \
|
||||
SECONDARY_EXEC_RDRAND_EXITING | \
|
||||
SECONDARY_EXEC_TSC_SCALING | \
|
||||
SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | \
|
||||
SECONDARY_EXEC_PT_USE_GPA | \
|
||||
SECONDARY_EXEC_PT_CONCEAL_VMX | \
|
||||
SECONDARY_EXEC_BUS_LOCK_DETECTION | \
|
||||
SECONDARY_EXEC_NOTIFY_VM_EXITING | \
|
||||
SECONDARY_EXEC_ENCLS_EXITING)
|
||||
|
||||
#define EVMCS1_SUPPORTED_3RDEXEC (0ULL)
|
||||
|
||||
#define EVMCS1_SUPPORTED_VMEXIT_CTRL \
|
||||
(VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | \
|
||||
VM_EXIT_SAVE_DEBUG_CONTROLS | \
|
||||
VM_EXIT_ACK_INTR_ON_EXIT | \
|
||||
VM_EXIT_HOST_ADDR_SPACE_SIZE | \
|
||||
VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | \
|
||||
VM_EXIT_SAVE_IA32_PAT | \
|
||||
VM_EXIT_LOAD_IA32_PAT | \
|
||||
VM_EXIT_SAVE_IA32_EFER | \
|
||||
VM_EXIT_LOAD_IA32_EFER | \
|
||||
VM_EXIT_CLEAR_BNDCFGS | \
|
||||
VM_EXIT_PT_CONCEAL_PIP | \
|
||||
VM_EXIT_CLEAR_IA32_RTIT_CTL)
|
||||
|
||||
#define EVMCS1_SUPPORTED_VMENTRY_CTRL \
|
||||
(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | \
|
||||
VM_ENTRY_LOAD_DEBUG_CONTROLS | \
|
||||
VM_ENTRY_IA32E_MODE | \
|
||||
VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | \
|
||||
VM_ENTRY_LOAD_IA32_PAT | \
|
||||
VM_ENTRY_LOAD_IA32_EFER | \
|
||||
VM_ENTRY_LOAD_BNDCFGS | \
|
||||
VM_ENTRY_PT_CONCEAL_PIP | \
|
||||
VM_ENTRY_LOAD_IA32_RTIT_CTL)
|
||||
|
||||
#define EVMCS1_SUPPORTED_VMFUNC (0)
|
||||
|
||||
#define EVMCS1_OFFSET(x) offsetof(struct hv_enlightened_vmcs, x)
|
||||
#define EVMCS1_FIELD(number, name, clean_field)[ROL16(number, 6)] = \
|
||||
{EVMCS1_OFFSET(name), clean_field}
|
||||
@ -608,40 +503,6 @@ int nested_evmcs_check_controls(struct vmcs12 *vmcs12)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_HYPERV)
|
||||
DEFINE_STATIC_KEY_FALSE(__kvm_is_using_evmcs);
|
||||
|
||||
/*
|
||||
* KVM on Hyper-V always uses the latest known eVMCSv1 revision, the assumption
|
||||
* is: in case a feature has corresponding fields in eVMCS described and it was
|
||||
* exposed in VMX feature MSRs, KVM is free to use it. Warn if KVM meets a
|
||||
* feature which has no corresponding eVMCS field, this likely means that KVM
|
||||
* needs to be updated.
|
||||
*/
|
||||
#define evmcs_check_vmcs_conf(field, ctrl) \
|
||||
do { \
|
||||
typeof(vmcs_conf->field) unsupported; \
|
||||
\
|
||||
unsupported = vmcs_conf->field & ~EVMCS1_SUPPORTED_ ## ctrl; \
|
||||
if (unsupported) { \
|
||||
pr_warn_once(#field " unsupported with eVMCS: 0x%llx\n",\
|
||||
(u64)unsupported); \
|
||||
vmcs_conf->field &= EVMCS1_SUPPORTED_ ## ctrl; \
|
||||
} \
|
||||
} \
|
||||
while (0)
|
||||
|
||||
void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
|
||||
{
|
||||
evmcs_check_vmcs_conf(cpu_based_exec_ctrl, EXEC_CTRL);
|
||||
evmcs_check_vmcs_conf(pin_based_exec_ctrl, PINCTRL);
|
||||
evmcs_check_vmcs_conf(cpu_based_2nd_exec_ctrl, 2NDEXEC);
|
||||
evmcs_check_vmcs_conf(cpu_based_3rd_exec_ctrl, 3RDEXEC);
|
||||
evmcs_check_vmcs_conf(vmentry_ctrl, VMENTRY_CTRL);
|
||||
evmcs_check_vmcs_conf(vmexit_ctrl, VMEXIT_CTRL);
|
||||
}
|
||||
#endif
|
||||
|
||||
int nested_enable_evmcs(struct kvm_vcpu *vcpu,
|
||||
uint16_t *vmcs_version)
|
||||
{
|
||||
|
@ -14,12 +14,113 @@
|
||||
#include "vmcs.h"
|
||||
#include "vmcs12.h"
|
||||
|
||||
struct vmcs_config;
|
||||
|
||||
#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))
|
||||
|
||||
#define KVM_EVMCS_VERSION 1
|
||||
|
||||
/*
|
||||
* Enlightened VMCSv1 doesn't support these:
|
||||
*
|
||||
* POSTED_INTR_NV = 0x00000002,
|
||||
* GUEST_INTR_STATUS = 0x00000810,
|
||||
* APIC_ACCESS_ADDR = 0x00002014,
|
||||
* POSTED_INTR_DESC_ADDR = 0x00002016,
|
||||
* EOI_EXIT_BITMAP0 = 0x0000201c,
|
||||
* EOI_EXIT_BITMAP1 = 0x0000201e,
|
||||
* EOI_EXIT_BITMAP2 = 0x00002020,
|
||||
* EOI_EXIT_BITMAP3 = 0x00002022,
|
||||
* GUEST_PML_INDEX = 0x00000812,
|
||||
* PML_ADDRESS = 0x0000200e,
|
||||
* VM_FUNCTION_CONTROL = 0x00002018,
|
||||
* EPTP_LIST_ADDRESS = 0x00002024,
|
||||
* VMREAD_BITMAP = 0x00002026,
|
||||
* VMWRITE_BITMAP = 0x00002028,
|
||||
*
|
||||
* TSC_MULTIPLIER = 0x00002032,
|
||||
* PLE_GAP = 0x00004020,
|
||||
* PLE_WINDOW = 0x00004022,
|
||||
* VMX_PREEMPTION_TIMER_VALUE = 0x0000482E,
|
||||
*
|
||||
* Currently unsupported in KVM:
|
||||
* GUEST_IA32_RTIT_CTL = 0x00002814,
|
||||
*/
|
||||
#define EVMCS1_SUPPORTED_PINCTRL \
|
||||
(PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | \
|
||||
PIN_BASED_EXT_INTR_MASK | \
|
||||
PIN_BASED_NMI_EXITING | \
|
||||
PIN_BASED_VIRTUAL_NMIS)
|
||||
|
||||
#define EVMCS1_SUPPORTED_EXEC_CTRL \
|
||||
(CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | \
|
||||
CPU_BASED_HLT_EXITING | \
|
||||
CPU_BASED_CR3_LOAD_EXITING | \
|
||||
CPU_BASED_CR3_STORE_EXITING | \
|
||||
CPU_BASED_UNCOND_IO_EXITING | \
|
||||
CPU_BASED_MOV_DR_EXITING | \
|
||||
CPU_BASED_USE_TSC_OFFSETTING | \
|
||||
CPU_BASED_MWAIT_EXITING | \
|
||||
CPU_BASED_MONITOR_EXITING | \
|
||||
CPU_BASED_INVLPG_EXITING | \
|
||||
CPU_BASED_RDPMC_EXITING | \
|
||||
CPU_BASED_INTR_WINDOW_EXITING | \
|
||||
CPU_BASED_CR8_LOAD_EXITING | \
|
||||
CPU_BASED_CR8_STORE_EXITING | \
|
||||
CPU_BASED_RDTSC_EXITING | \
|
||||
CPU_BASED_TPR_SHADOW | \
|
||||
CPU_BASED_USE_IO_BITMAPS | \
|
||||
CPU_BASED_MONITOR_TRAP_FLAG | \
|
||||
CPU_BASED_USE_MSR_BITMAPS | \
|
||||
CPU_BASED_NMI_WINDOW_EXITING | \
|
||||
CPU_BASED_PAUSE_EXITING | \
|
||||
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
|
||||
|
||||
#define EVMCS1_SUPPORTED_2NDEXEC \
|
||||
(SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | \
|
||||
SECONDARY_EXEC_WBINVD_EXITING | \
|
||||
SECONDARY_EXEC_ENABLE_VPID | \
|
||||
SECONDARY_EXEC_ENABLE_EPT | \
|
||||
SECONDARY_EXEC_UNRESTRICTED_GUEST | \
|
||||
SECONDARY_EXEC_DESC | \
|
||||
SECONDARY_EXEC_ENABLE_RDTSCP | \
|
||||
SECONDARY_EXEC_ENABLE_INVPCID | \
|
||||
SECONDARY_EXEC_ENABLE_XSAVES | \
|
||||
SECONDARY_EXEC_RDSEED_EXITING | \
|
||||
SECONDARY_EXEC_RDRAND_EXITING | \
|
||||
SECONDARY_EXEC_TSC_SCALING | \
|
||||
SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE | \
|
||||
SECONDARY_EXEC_PT_USE_GPA | \
|
||||
SECONDARY_EXEC_PT_CONCEAL_VMX | \
|
||||
SECONDARY_EXEC_BUS_LOCK_DETECTION | \
|
||||
SECONDARY_EXEC_NOTIFY_VM_EXITING | \
|
||||
SECONDARY_EXEC_ENCLS_EXITING)
|
||||
|
||||
#define EVMCS1_SUPPORTED_3RDEXEC (0ULL)
|
||||
|
||||
#define EVMCS1_SUPPORTED_VMEXIT_CTRL \
|
||||
(VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR | \
|
||||
VM_EXIT_SAVE_DEBUG_CONTROLS | \
|
||||
VM_EXIT_ACK_INTR_ON_EXIT | \
|
||||
VM_EXIT_HOST_ADDR_SPACE_SIZE | \
|
||||
VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | \
|
||||
VM_EXIT_SAVE_IA32_PAT | \
|
||||
VM_EXIT_LOAD_IA32_PAT | \
|
||||
VM_EXIT_SAVE_IA32_EFER | \
|
||||
VM_EXIT_LOAD_IA32_EFER | \
|
||||
VM_EXIT_CLEAR_BNDCFGS | \
|
||||
VM_EXIT_PT_CONCEAL_PIP | \
|
||||
VM_EXIT_CLEAR_IA32_RTIT_CTL)
|
||||
|
||||
#define EVMCS1_SUPPORTED_VMENTRY_CTRL \
|
||||
(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | \
|
||||
VM_ENTRY_LOAD_DEBUG_CONTROLS | \
|
||||
VM_ENTRY_IA32E_MODE | \
|
||||
VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | \
|
||||
VM_ENTRY_LOAD_IA32_PAT | \
|
||||
VM_ENTRY_LOAD_IA32_EFER | \
|
||||
VM_ENTRY_LOAD_BNDCFGS | \
|
||||
VM_ENTRY_PT_CONCEAL_PIP | \
|
||||
VM_ENTRY_LOAD_IA32_RTIT_CTL)
|
||||
|
||||
#define EVMCS1_SUPPORTED_VMFUNC (0)
|
||||
|
||||
struct evmcs_field {
|
||||
u16 offset;
|
||||
u16 clean_field;
|
||||
@ -65,114 +166,6 @@ static inline u64 evmcs_read_any(struct hv_enlightened_vmcs *evmcs,
|
||||
return vmcs12_read_any((void *)evmcs, field, offset);
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_HYPERV)
|
||||
|
||||
DECLARE_STATIC_KEY_FALSE(__kvm_is_using_evmcs);
|
||||
|
||||
static __always_inline bool kvm_is_using_evmcs(void)
|
||||
{
|
||||
return static_branch_unlikely(&__kvm_is_using_evmcs);
|
||||
}
|
||||
|
||||
static __always_inline int get_evmcs_offset(unsigned long field,
|
||||
u16 *clean_field)
|
||||
{
|
||||
int offset = evmcs_field_offset(field, clean_field);
|
||||
|
||||
WARN_ONCE(offset < 0, "accessing unsupported EVMCS field %lx\n", field);
|
||||
return offset;
|
||||
}
|
||||
|
||||
static __always_inline void evmcs_write64(unsigned long field, u64 value)
|
||||
{
|
||||
u16 clean_field;
|
||||
int offset = get_evmcs_offset(field, &clean_field);
|
||||
|
||||
if (offset < 0)
|
||||
return;
|
||||
|
||||
*(u64 *)((char *)current_evmcs + offset) = value;
|
||||
|
||||
current_evmcs->hv_clean_fields &= ~clean_field;
|
||||
}
|
||||
|
||||
static __always_inline void evmcs_write32(unsigned long field, u32 value)
|
||||
{
|
||||
u16 clean_field;
|
||||
int offset = get_evmcs_offset(field, &clean_field);
|
||||
|
||||
if (offset < 0)
|
||||
return;
|
||||
|
||||
*(u32 *)((char *)current_evmcs + offset) = value;
|
||||
current_evmcs->hv_clean_fields &= ~clean_field;
|
||||
}
|
||||
|
||||
static __always_inline void evmcs_write16(unsigned long field, u16 value)
|
||||
{
|
||||
u16 clean_field;
|
||||
int offset = get_evmcs_offset(field, &clean_field);
|
||||
|
||||
if (offset < 0)
|
||||
return;
|
||||
|
||||
*(u16 *)((char *)current_evmcs + offset) = value;
|
||||
current_evmcs->hv_clean_fields &= ~clean_field;
|
||||
}
|
||||
|
||||
static __always_inline u64 evmcs_read64(unsigned long field)
|
||||
{
|
||||
int offset = get_evmcs_offset(field, NULL);
|
||||
|
||||
if (offset < 0)
|
||||
return 0;
|
||||
|
||||
return *(u64 *)((char *)current_evmcs + offset);
|
||||
}
|
||||
|
||||
static __always_inline u32 evmcs_read32(unsigned long field)
|
||||
{
|
||||
int offset = get_evmcs_offset(field, NULL);
|
||||
|
||||
if (offset < 0)
|
||||
return 0;
|
||||
|
||||
return *(u32 *)((char *)current_evmcs + offset);
|
||||
}
|
||||
|
||||
static __always_inline u16 evmcs_read16(unsigned long field)
|
||||
{
|
||||
int offset = get_evmcs_offset(field, NULL);
|
||||
|
||||
if (offset < 0)
|
||||
return 0;
|
||||
|
||||
return *(u16 *)((char *)current_evmcs + offset);
|
||||
}
|
||||
|
||||
static inline void evmcs_load(u64 phys_addr)
|
||||
{
|
||||
struct hv_vp_assist_page *vp_ap =
|
||||
hv_get_vp_assist_page(smp_processor_id());
|
||||
|
||||
if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall)
|
||||
vp_ap->nested_control.features.directhypercall = 1;
|
||||
vp_ap->current_nested_vmcs = phys_addr;
|
||||
vp_ap->enlighten_vmentry = 1;
|
||||
}
|
||||
|
||||
void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
|
||||
#else /* !IS_ENABLED(CONFIG_HYPERV) */
|
||||
static __always_inline bool kvm_is_using_evmcs(void) { return false; }
|
||||
static __always_inline void evmcs_write64(unsigned long field, u64 value) {}
|
||||
static __always_inline void evmcs_write32(unsigned long field, u32 value) {}
|
||||
static __always_inline void evmcs_write16(unsigned long field, u16 value) {}
|
||||
static __always_inline u64 evmcs_read64(unsigned long field) { return 0; }
|
||||
static __always_inline u32 evmcs_read32(unsigned long field) { return 0; }
|
||||
static __always_inline u16 evmcs_read16(unsigned long field) { return 0; }
|
||||
static inline void evmcs_load(u64 phys_addr) {}
|
||||
#endif /* IS_ENABLED(CONFIG_HYPERV) */
|
||||
|
||||
#define EVMPTR_INVALID (-1ULL)
|
||||
#define EVMPTR_MAP_PENDING (-2ULL)
|
||||
|
||||
|
@ -12,6 +12,7 @@
|
||||
#include "mmu.h"
|
||||
#include "nested.h"
|
||||
#include "pmu.h"
|
||||
#include "posted_intr.h"
|
||||
#include "sgx.h"
|
||||
#include "trace.h"
|
||||
#include "vmx.h"
|
||||
@ -3830,8 +3831,8 @@ static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
|
||||
if (!pi_test_and_clear_on(vmx->nested.pi_desc))
|
||||
return 0;
|
||||
|
||||
max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
|
||||
if (max_irr != 256) {
|
||||
max_irr = pi_find_highest_vector(vmx->nested.pi_desc);
|
||||
if (max_irr > 0) {
|
||||
vapic_page = vmx->nested.virtual_apic_map.hva;
|
||||
if (!vapic_page)
|
||||
goto mmio_needed;
|
||||
@ -3964,8 +3965,40 @@ static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
|
||||
|
||||
static bool vmx_has_nested_events(struct kvm_vcpu *vcpu, bool for_injection)
|
||||
{
|
||||
return nested_vmx_preemption_timer_pending(vcpu) ||
|
||||
to_vmx(vcpu)->nested.mtf_pending;
|
||||
struct vcpu_vmx *vmx = to_vmx(vcpu);
|
||||
void *vapic = vmx->nested.virtual_apic_map.hva;
|
||||
int max_irr, vppr;
|
||||
|
||||
if (nested_vmx_preemption_timer_pending(vcpu) ||
|
||||
vmx->nested.mtf_pending)
|
||||
return true;
|
||||
|
||||
/*
|
||||
* Virtual Interrupt Delivery doesn't require manual injection. Either
|
||||
* the interrupt is already in GUEST_RVI and will be recognized by CPU
|
||||
* at VM-Entry, or there is a KVM_REQ_EVENT pending and KVM will move
|
||||
* the interrupt from the PIR to RVI prior to entering the guest.
|
||||
*/
|
||||
if (for_injection)
|
||||
return false;
|
||||
|
||||
if (!nested_cpu_has_vid(get_vmcs12(vcpu)) ||
|
||||
__vmx_interrupt_blocked(vcpu))
|
||||
return false;
|
||||
|
||||
if (!vapic)
|
||||
return false;
|
||||
|
||||
vppr = *((u32 *)(vapic + APIC_PROCPRI));
|
||||
|
||||
if (vmx->nested.pi_pending && vmx->nested.pi_desc &&
|
||||
pi_test_on(vmx->nested.pi_desc)) {
|
||||
max_irr = pi_find_highest_vector(vmx->nested.pi_desc);
|
||||
if (max_irr > 0 && (max_irr & 0xf0) > (vppr & 0xf0))
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -2,97 +2,8 @@
|
||||
#ifndef __KVM_X86_VMX_POSTED_INTR_H
|
||||
#define __KVM_X86_VMX_POSTED_INTR_H
|
||||
|
||||
#define POSTED_INTR_ON 0
|
||||
#define POSTED_INTR_SN 1
|
||||
|
||||
#define PID_TABLE_ENTRY_VALID 1
|
||||
|
||||
/* Posted-Interrupt Descriptor */
|
||||
struct pi_desc {
|
||||
u32 pir[8]; /* Posted interrupt requested */
|
||||
union {
|
||||
struct {
|
||||
/* bit 256 - Outstanding Notification */
|
||||
u16 on : 1,
|
||||
/* bit 257 - Suppress Notification */
|
||||
sn : 1,
|
||||
/* bit 271:258 - Reserved */
|
||||
rsvd_1 : 14;
|
||||
/* bit 279:272 - Notification Vector */
|
||||
u8 nv;
|
||||
/* bit 287:280 - Reserved */
|
||||
u8 rsvd_2;
|
||||
/* bit 319:288 - Notification Destination */
|
||||
u32 ndst;
|
||||
};
|
||||
u64 control;
|
||||
};
|
||||
u32 rsvd[6];
|
||||
} __aligned(64);
|
||||
|
||||
static inline bool pi_test_and_set_on(struct pi_desc *pi_desc)
|
||||
{
|
||||
return test_and_set_bit(POSTED_INTR_ON,
|
||||
(unsigned long *)&pi_desc->control);
|
||||
}
|
||||
|
||||
static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc)
|
||||
{
|
||||
return test_and_clear_bit(POSTED_INTR_ON,
|
||||
(unsigned long *)&pi_desc->control);
|
||||
}
|
||||
|
||||
static inline bool pi_test_and_clear_sn(struct pi_desc *pi_desc)
|
||||
{
|
||||
return test_and_clear_bit(POSTED_INTR_SN,
|
||||
(unsigned long *)&pi_desc->control);
|
||||
}
|
||||
|
||||
static inline bool pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
|
||||
{
|
||||
return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
|
||||
}
|
||||
|
||||
static inline bool pi_is_pir_empty(struct pi_desc *pi_desc)
|
||||
{
|
||||
return bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS);
|
||||
}
|
||||
|
||||
static inline void pi_set_sn(struct pi_desc *pi_desc)
|
||||
{
|
||||
set_bit(POSTED_INTR_SN,
|
||||
(unsigned long *)&pi_desc->control);
|
||||
}
|
||||
|
||||
static inline void pi_set_on(struct pi_desc *pi_desc)
|
||||
{
|
||||
set_bit(POSTED_INTR_ON,
|
||||
(unsigned long *)&pi_desc->control);
|
||||
}
|
||||
|
||||
static inline void pi_clear_on(struct pi_desc *pi_desc)
|
||||
{
|
||||
clear_bit(POSTED_INTR_ON,
|
||||
(unsigned long *)&pi_desc->control);
|
||||
}
|
||||
|
||||
static inline void pi_clear_sn(struct pi_desc *pi_desc)
|
||||
{
|
||||
clear_bit(POSTED_INTR_SN,
|
||||
(unsigned long *)&pi_desc->control);
|
||||
}
|
||||
|
||||
static inline bool pi_test_on(struct pi_desc *pi_desc)
|
||||
{
|
||||
return test_bit(POSTED_INTR_ON,
|
||||
(unsigned long *)&pi_desc->control);
|
||||
}
|
||||
|
||||
static inline bool pi_test_sn(struct pi_desc *pi_desc)
|
||||
{
|
||||
return test_bit(POSTED_INTR_SN,
|
||||
(unsigned long *)&pi_desc->control);
|
||||
}
|
||||
#include <linux/find.h>
|
||||
#include <asm/posted_intr.h>
|
||||
|
||||
void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu);
|
||||
void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu);
|
||||
@ -103,4 +14,12 @@ int vmx_pi_update_irte(struct kvm *kvm, unsigned int host_irq,
|
||||
uint32_t guest_irq, bool set);
|
||||
void vmx_pi_start_assignment(struct kvm *kvm);
|
||||
|
||||
static inline int pi_find_highest_vector(struct pi_desc *pi_desc)
|
||||
{
|
||||
int vec;
|
||||
|
||||
vec = find_last_bit((unsigned long *)pi_desc->pir, 256);
|
||||
return vec < 256 ? vec : -1;
|
||||
}
|
||||
|
||||
#endif /* __KVM_X86_VMX_POSTED_INTR_H */
|
||||
|
@ -66,6 +66,8 @@
|
||||
#include "vmx.h"
|
||||
#include "x86.h"
|
||||
#include "smm.h"
|
||||
#include "vmx_onhyperv.h"
|
||||
#include "posted_intr.h"
|
||||
|
||||
MODULE_AUTHOR("Qumranet");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
@ -7,10 +7,10 @@
|
||||
#include <asm/kvm.h>
|
||||
#include <asm/intel_pt.h>
|
||||
#include <asm/perf_event.h>
|
||||
#include <asm/posted_intr.h>
|
||||
|
||||
#include "capabilities.h"
|
||||
#include "../kvm_cache_regs.h"
|
||||
#include "posted_intr.h"
|
||||
#include "vmcs.h"
|
||||
#include "vmx_ops.h"
|
||||
#include "../cpuid.h"
|
||||
|
arch/x86/kvm/vmx/vmx_onhyperv.c (new file, 36 lines)
@@ -0,0 +1,36 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
|
||||
#include "capabilities.h"
|
||||
#include "vmx_onhyperv.h"
|
||||
|
||||
DEFINE_STATIC_KEY_FALSE(__kvm_is_using_evmcs);
|
||||
|
||||
/*
|
||||
* KVM on Hyper-V always uses the latest known eVMCSv1 revision, the assumption
|
||||
* is: in case a feature has corresponding fields in eVMCS described and it was
|
||||
* exposed in VMX feature MSRs, KVM is free to use it. Warn if KVM meets a
|
||||
* feature which has no corresponding eVMCS field, this likely means that KVM
|
||||
* needs to be updated.
|
||||
*/
|
||||
#define evmcs_check_vmcs_conf(field, ctrl) \
|
||||
do { \
|
||||
typeof(vmcs_conf->field) unsupported; \
|
||||
\
|
||||
unsupported = vmcs_conf->field & ~EVMCS1_SUPPORTED_ ## ctrl; \
|
||||
if (unsupported) { \
|
||||
pr_warn_once(#field " unsupported with eVMCS: 0x%llx\n",\
|
||||
(u64)unsupported); \
|
||||
vmcs_conf->field &= EVMCS1_SUPPORTED_ ## ctrl; \
|
||||
} \
|
||||
} \
|
||||
while (0)
|
||||
|
||||
void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf)
|
||||
{
|
||||
evmcs_check_vmcs_conf(cpu_based_exec_ctrl, EXEC_CTRL);
|
||||
evmcs_check_vmcs_conf(pin_based_exec_ctrl, PINCTRL);
|
||||
evmcs_check_vmcs_conf(cpu_based_2nd_exec_ctrl, 2NDEXEC);
|
||||
evmcs_check_vmcs_conf(cpu_based_3rd_exec_ctrl, 3RDEXEC);
|
||||
evmcs_check_vmcs_conf(vmentry_ctrl, VMENTRY_CTRL);
|
||||
evmcs_check_vmcs_conf(vmexit_ctrl, VMEXIT_CTRL);
|
||||
}
|
arch/x86/kvm/vmx/vmx_onhyperv.h (new file, 124 lines)
@@ -0,0 +1,124 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
|
||||
#ifndef __ARCH_X86_KVM_VMX_ONHYPERV_H__
|
||||
#define __ARCH_X86_KVM_VMX_ONHYPERV_H__
|
||||
|
||||
#include <asm/hyperv-tlfs.h>
|
||||
|
||||
#include <linux/jump_label.h>
|
||||
|
||||
#include "capabilities.h"
|
||||
#include "hyperv.h"
|
||||
#include "vmcs12.h"
|
||||
|
||||
#define current_evmcs ((struct hv_enlightened_vmcs *)this_cpu_read(current_vmcs))
|
||||
|
||||
#if IS_ENABLED(CONFIG_HYPERV)
|
||||
|
||||
DECLARE_STATIC_KEY_FALSE(__kvm_is_using_evmcs);
|
||||
|
||||
static __always_inline bool kvm_is_using_evmcs(void)
|
||||
{
|
||||
return static_branch_unlikely(&__kvm_is_using_evmcs);
|
||||
}
|
||||
|
||||
static __always_inline int get_evmcs_offset(unsigned long field,
|
||||
u16 *clean_field)
|
||||
{
|
||||
int offset = evmcs_field_offset(field, clean_field);
|
||||
|
||||
WARN_ONCE(offset < 0, "accessing unsupported EVMCS field %lx\n", field);
|
||||
return offset;
|
||||
}
|
||||
|
||||
static __always_inline void evmcs_write64(unsigned long field, u64 value)
|
||||
{
|
||||
u16 clean_field;
|
||||
int offset = get_evmcs_offset(field, &clean_field);
|
||||
|
||||
if (offset < 0)
|
||||
return;
|
||||
|
||||
*(u64 *)((char *)current_evmcs + offset) = value;
|
||||
|
||||
current_evmcs->hv_clean_fields &= ~clean_field;
|
||||
}
|
||||
|
||||
static __always_inline void evmcs_write32(unsigned long field, u32 value)
|
||||
{
|
||||
u16 clean_field;
|
||||
int offset = get_evmcs_offset(field, &clean_field);
|
||||
|
||||
if (offset < 0)
|
||||
return;
|
||||
|
||||
*(u32 *)((char *)current_evmcs + offset) = value;
|
||||
current_evmcs->hv_clean_fields &= ~clean_field;
|
||||
}
|
||||
|
||||
static __always_inline void evmcs_write16(unsigned long field, u16 value)
|
||||
{
|
||||
u16 clean_field;
|
||||
int offset = get_evmcs_offset(field, &clean_field);
|
||||
|
||||
if (offset < 0)
|
||||
return;
|
||||
|
||||
*(u16 *)((char *)current_evmcs + offset) = value;
|
||||
current_evmcs->hv_clean_fields &= ~clean_field;
|
||||
}
|
||||
|
||||
static __always_inline u64 evmcs_read64(unsigned long field)
|
||||
{
|
||||
int offset = get_evmcs_offset(field, NULL);
|
||||
|
||||
if (offset < 0)
|
||||
return 0;
|
||||
|
||||
return *(u64 *)((char *)current_evmcs + offset);
|
||||
}
|
||||
|
||||
static __always_inline u32 evmcs_read32(unsigned long field)
|
||||
{
|
||||
int offset = get_evmcs_offset(field, NULL);
|
||||
|
||||
if (offset < 0)
|
||||
return 0;
|
||||
|
||||
return *(u32 *)((char *)current_evmcs + offset);
|
||||
}
|
||||
|
||||
static __always_inline u16 evmcs_read16(unsigned long field)
|
||||
{
|
||||
int offset = get_evmcs_offset(field, NULL);
|
||||
|
||||
if (offset < 0)
|
||||
return 0;
|
||||
|
||||
return *(u16 *)((char *)current_evmcs + offset);
|
||||
}
|
||||
|
||||
static inline void evmcs_load(u64 phys_addr)
|
||||
{
|
||||
struct hv_vp_assist_page *vp_ap =
|
||||
hv_get_vp_assist_page(smp_processor_id());
|
||||
|
||||
if (current_evmcs->hv_enlightenments_control.nested_flush_hypercall)
|
||||
vp_ap->nested_control.features.directhypercall = 1;
|
||||
vp_ap->current_nested_vmcs = phys_addr;
|
||||
vp_ap->enlighten_vmentry = 1;
|
||||
}
|
||||
|
||||
void evmcs_sanitize_exec_ctrls(struct vmcs_config *vmcs_conf);
|
||||
#else /* !IS_ENABLED(CONFIG_HYPERV) */
|
||||
static __always_inline bool kvm_is_using_evmcs(void) { return false; }
|
||||
static __always_inline void evmcs_write64(unsigned long field, u64 value) {}
|
||||
static __always_inline void evmcs_write32(unsigned long field, u32 value) {}
|
||||
static __always_inline void evmcs_write16(unsigned long field, u16 value) {}
|
||||
static __always_inline u64 evmcs_read64(unsigned long field) { return 0; }
|
||||
static __always_inline u32 evmcs_read32(unsigned long field) { return 0; }
|
||||
static __always_inline u16 evmcs_read16(unsigned long field) { return 0; }
|
||||
static inline void evmcs_load(u64 phys_addr) {}
|
||||
#endif /* IS_ENABLED(CONFIG_HYPERV) */
|
||||
|
||||
#endif /* __ARCH_X86_KVM_VMX_ONHYPERV_H__ */
|
@ -6,7 +6,7 @@
|
||||
|
||||
#include <asm/vmx.h>
|
||||
|
||||
#include "hyperv.h"
|
||||
#include "vmx_onhyperv.h"
|
||||
#include "vmcs.h"
|
||||
#include "../x86.h"
|
||||
|
||||
|
@ -2845,6 +2845,9 @@ static int btintel_setup_combined(struct hci_dev *hdev)
|
||||
btintel_set_dsm_reset_method(hdev, &ver_tlv);
|
||||
|
||||
err = btintel_bootloader_setup_tlv(hdev, &ver_tlv);
|
||||
if (err)
|
||||
goto exit_error;
|
||||
|
||||
btintel_register_devcoredump_support(hdev);
|
||||
break;
|
||||
default:
|
||||
|
@ -40,10 +40,14 @@ struct qcom_cpufreq_match_data {
|
||||
const char **genpd_names;
|
||||
};
|
||||
|
||||
struct qcom_cpufreq_drv_cpu {
|
||||
int opp_token;
|
||||
};
|
||||
|
||||
struct qcom_cpufreq_drv {
|
||||
int *opp_tokens;
|
||||
u32 versions;
|
||||
const struct qcom_cpufreq_match_data *data;
|
||||
struct qcom_cpufreq_drv_cpu cpus[];
|
||||
};
|
||||
|
||||
static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev;
|
||||
@ -243,42 +247,39 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
drv = kzalloc(sizeof(*drv), GFP_KERNEL);
|
||||
if (!drv)
|
||||
drv = devm_kzalloc(&pdev->dev, struct_size(drv, cpus, num_possible_cpus()),
|
||||
GFP_KERNEL);
|
||||
if (!drv) {
|
||||
of_node_put(np);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
match = pdev->dev.platform_data;
|
||||
drv->data = match->data;
|
||||
if (!drv->data) {
|
||||
ret = -ENODEV;
|
||||
goto free_drv;
|
||||
of_node_put(np);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (drv->data->get_version) {
|
||||
speedbin_nvmem = of_nvmem_cell_get(np, NULL);
|
||||
if (IS_ERR(speedbin_nvmem)) {
|
||||
ret = dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
|
||||
"Could not get nvmem cell\n");
|
||||
goto free_drv;
|
||||
of_node_put(np);
|
||||
return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem),
|
||||
"Could not get nvmem cell\n");
|
||||
}
|
||||
|
||||
ret = drv->data->get_version(cpu_dev,
|
||||
speedbin_nvmem, &pvs_name, drv);
|
||||
if (ret) {
|
||||
of_node_put(np);
|
||||
nvmem_cell_put(speedbin_nvmem);
|
||||
goto free_drv;
|
||||
return ret;
|
||||
}
|
||||
nvmem_cell_put(speedbin_nvmem);
|
||||
}
|
||||
of_node_put(np);
|
||||
|
||||
drv->opp_tokens = kcalloc(num_possible_cpus(), sizeof(*drv->opp_tokens),
|
||||
GFP_KERNEL);
|
||||
if (!drv->opp_tokens) {
|
||||
ret = -ENOMEM;
|
||||
goto free_drv;
|
||||
}
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct dev_pm_opp_config config = {
|
||||
.supported_hw = NULL,
|
||||
@ -304,9 +305,9 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
|
||||
}
|
||||
|
||||
if (config.supported_hw || config.genpd_names) {
|
||||
drv->opp_tokens[cpu] = dev_pm_opp_set_config(cpu_dev, &config);
|
||||
if (drv->opp_tokens[cpu] < 0) {
|
||||
ret = drv->opp_tokens[cpu];
|
||||
drv->cpus[cpu].opp_token = dev_pm_opp_set_config(cpu_dev, &config);
|
||||
if (drv->cpus[cpu].opp_token < 0) {
|
||||
ret = drv->cpus[cpu].opp_token;
|
||||
dev_err(cpu_dev, "Failed to set OPP config\n");
|
||||
goto free_opp;
|
||||
}
|
||||
@ -325,11 +326,7 @@ static int qcom_cpufreq_probe(struct platform_device *pdev)
|
||||
|
||||
free_opp:
|
||||
for_each_possible_cpu(cpu)
|
||||
dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
|
||||
kfree(drv->opp_tokens);
|
||||
free_drv:
|
||||
kfree(drv);
|
||||
|
||||
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -341,10 +338,7 @@ static void qcom_cpufreq_remove(struct platform_device *pdev)
|
||||
platform_device_unregister(cpufreq_dt_pdev);
|
||||
|
||||
for_each_possible_cpu(cpu)
|
||||
dev_pm_opp_clear_config(drv->opp_tokens[cpu]);
|
||||
|
||||
kfree(drv->opp_tokens);
|
||||
kfree(drv);
|
||||
dev_pm_opp_clear_config(drv->cpus[cpu].opp_token);
|
||||
}
|
||||
|
||||
static struct platform_driver qcom_cpufreq_driver = {
|
||||
|
@ -3,6 +3,7 @@
|
||||
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc
|
||||
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>
|
||||
|
||||
#include <linux/clk.h>
|
||||
#include <linux/dmapool.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
@ -74,18 +75,10 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
|
||||
|
||||
flags = fsl_edma_drvflags(fsl_chan);
|
||||
val = edma_readl_chreg(fsl_chan, ch_sbr);
|
||||
/* Remote/local swapped wrongly on iMX8 QM Audio edma */
|
||||
if (flags & FSL_EDMA_DRV_QUIRK_SWAPPED) {
|
||||
if (!fsl_chan->is_rxchan)
|
||||
val |= EDMA_V3_CH_SBR_RD;
|
||||
else
|
||||
val |= EDMA_V3_CH_SBR_WR;
|
||||
} else {
|
||||
if (fsl_chan->is_rxchan)
|
||||
val |= EDMA_V3_CH_SBR_RD;
|
||||
else
|
||||
val |= EDMA_V3_CH_SBR_WR;
|
||||
}
|
||||
if (fsl_chan->is_rxchan)
|
||||
val |= EDMA_V3_CH_SBR_RD;
|
||||
else
|
||||
val |= EDMA_V3_CH_SBR_WR;
|
||||
|
||||
if (fsl_chan->is_remote)
|
||||
val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR);
|
||||
@ -97,8 +90,8 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
|
||||
* ch_mux: With the exception of 0, attempts to write a value
|
||||
* already in use will be forced to 0.
|
||||
*/
|
||||
if (!edma_readl_chreg(fsl_chan, ch_mux))
|
||||
edma_writel_chreg(fsl_chan, fsl_chan->srcid, ch_mux);
|
||||
if (!edma_readl(fsl_chan->edma, fsl_chan->mux_addr))
|
||||
edma_writel(fsl_chan->edma, fsl_chan->srcid, fsl_chan->mux_addr);
|
||||
}
|
||||
|
||||
val = edma_readl_chreg(fsl_chan, ch_csr);
|
||||
@ -134,7 +127,7 @@ static void fsl_edma3_disable_request(struct fsl_edma_chan *fsl_chan)
|
||||
flags = fsl_edma_drvflags(fsl_chan);
|
||||
|
||||
if (flags & FSL_EDMA_DRV_HAS_CHMUX)
|
||||
edma_writel_chreg(fsl_chan, 0, ch_mux);
|
||||
edma_writel(fsl_chan->edma, 0, fsl_chan->mux_addr);
|
||||
|
||||
val &= ~EDMA_V3_CH_CSR_ERQ;
|
||||
edma_writel_chreg(fsl_chan, val, ch_csr);
|
||||
@ -754,6 +747,8 @@ struct dma_async_tx_descriptor *fsl_edma_prep_memcpy(struct dma_chan *chan,
|
||||
fsl_desc->iscyclic = false;
|
||||
|
||||
fsl_chan->is_sw = true;
|
||||
if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_MEM_REMOTE)
|
||||
fsl_chan->is_remote = true;
|
||||
|
||||
/* To match with copy_align and max_seg_size so 1 tcd is enough */
|
||||
fsl_edma_fill_tcd(fsl_chan, fsl_desc->tcd[0].vtcd, dma_src, dma_dst,
|
||||
@ -802,6 +797,9 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
|
||||
{
|
||||
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
|
||||
|
||||
if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
|
||||
clk_prepare_enable(fsl_chan->clk);
|
||||
|
||||
fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
|
||||
sizeof(struct fsl_edma_hw_tcd),
|
||||
32, 0);
|
||||
@ -829,6 +827,9 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
|
||||
fsl_chan->tcd_pool = NULL;
|
||||
fsl_chan->is_sw = false;
|
||||
fsl_chan->srcid = 0;
|
||||
fsl_chan->is_remote = false;
|
||||
if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
|
||||
clk_disable_unprepare(fsl_chan->clk);
|
||||
}
|
||||
|
||||
void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
|
||||
|
@ -146,6 +146,7 @@ struct fsl_edma_chan {
|
||||
enum dma_data_direction dma_dir;
|
||||
char chan_name[32];
|
||||
struct fsl_edma_hw_tcd __iomem *tcd;
|
||||
void __iomem *mux_addr;
|
||||
u32 real_count;
|
||||
struct work_struct issue_worker;
|
||||
struct platform_device *pdev;
|
||||
@ -177,8 +178,7 @@ struct fsl_edma_desc {
|
||||
#define FSL_EDMA_DRV_HAS_PD BIT(5)
|
||||
#define FSL_EDMA_DRV_HAS_CHCLK BIT(6)
|
||||
#define FSL_EDMA_DRV_HAS_CHMUX BIT(7)
|
||||
/* imx8 QM audio edma remote local swapped */
|
||||
#define FSL_EDMA_DRV_QUIRK_SWAPPED BIT(8)
|
||||
#define FSL_EDMA_DRV_MEM_REMOTE BIT(8)
|
||||
/* control and status register is in tcd address space, edma3 reg layout */
|
||||
#define FSL_EDMA_DRV_SPLIT_REG BIT(9)
|
||||
#define FSL_EDMA_DRV_BUS_8BYTE BIT(10)
|
||||
@ -207,6 +207,8 @@ struct fsl_edma_drvdata {
|
||||
u32 chreg_off;
|
||||
u32 chreg_space_sz;
|
||||
u32 flags;
|
||||
u32 mux_off; /* channel mux register offset */
|
||||
u32 mux_skip; /* how much skip for each channel */
|
||||
int (*setup_irq)(struct platform_device *pdev,
|
||||
struct fsl_edma_engine *fsl_edma);
|
||||
};
|
||||
|
@ -340,16 +340,19 @@ static struct fsl_edma_drvdata imx7ulp_data = {
|
||||
};
|
||||
|
||||
static struct fsl_edma_drvdata imx8qm_data = {
|
||||
.flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3,
|
||||
.flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MEM_REMOTE,
|
||||
.chreg_space_sz = 0x10000,
|
||||
.chreg_off = 0x10000,
|
||||
.setup_irq = fsl_edma3_irq_init,
|
||||
};
|
||||
|
||||
static struct fsl_edma_drvdata imx8qm_audio_data = {
|
||||
.flags = FSL_EDMA_DRV_QUIRK_SWAPPED | FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3,
|
||||
static struct fsl_edma_drvdata imx8ulp_data = {
|
||||
.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_CHCLK | FSL_EDMA_DRV_HAS_DMACLK |
|
||||
FSL_EDMA_DRV_EDMA3,
|
||||
.chreg_space_sz = 0x10000,
|
||||
.chreg_off = 0x10000,
|
||||
.mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux),
|
||||
.mux_skip = 0x10000,
|
||||
.setup_irq = fsl_edma3_irq_init,
|
||||
};
|
||||
|
||||
@ -364,6 +367,8 @@ static struct fsl_edma_drvdata imx93_data4 = {
|
||||
.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4,
|
||||
.chreg_space_sz = 0x8000,
|
||||
.chreg_off = 0x10000,
|
||||
.mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux),
|
||||
.mux_skip = 0x8000,
|
||||
.setup_irq = fsl_edma3_irq_init,
|
||||
};
|
||||
|
||||
@ -372,7 +377,7 @@ static const struct of_device_id fsl_edma_dt_ids[] = {
|
||||
{ .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
|
||||
{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
|
||||
{ .compatible = "fsl,imx8qm-edma", .data = &imx8qm_data},
|
||||
{ .compatible = "fsl,imx8qm-adma", .data = &imx8qm_audio_data},
|
||||
{ .compatible = "fsl,imx8ulp-edma", .data = &imx8ulp_data},
|
||||
{ .compatible = "fsl,imx93-edma3", .data = &imx93_data3},
|
||||
{ .compatible = "fsl,imx93-edma4", .data = &imx93_data4},
|
||||
{ /* sentinel */ }
|
||||
@ -427,6 +432,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
|
||||
struct fsl_edma_engine *fsl_edma;
|
||||
const struct fsl_edma_drvdata *drvdata = NULL;
|
||||
u32 chan_mask[2] = {0, 0};
|
||||
char clk_name[36];
|
||||
struct edma_regs *regs;
|
||||
int chans;
|
||||
int ret, i;
|
||||
@ -540,12 +546,23 @@ static int fsl_edma_probe(struct platform_device *pdev)
|
||||
offsetof(struct fsl_edma3_ch_reg, tcd) : 0;
|
||||
fsl_chan->tcd = fsl_edma->membase
|
||||
+ i * drvdata->chreg_space_sz + drvdata->chreg_off + len;
|
||||
fsl_chan->mux_addr = fsl_edma->membase + drvdata->mux_off + i * drvdata->mux_skip;
|
||||
|
||||
if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) {
|
||||
snprintf(clk_name, sizeof(clk_name), "ch%02d", i);
|
||||
fsl_chan->clk = devm_clk_get_enabled(&pdev->dev,
|
||||
(const char *)clk_name);
|
||||
|
||||
if (IS_ERR(fsl_chan->clk))
|
||||
return PTR_ERR(fsl_chan->clk);
|
||||
}
|
||||
fsl_chan->pdev = pdev;
|
||||
vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
|
||||
|
||||
edma_write_tcdreg(fsl_chan, 0, csr);
|
||||
fsl_edma_chan_mux(fsl_chan, 0, false);
|
||||
if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK)
|
||||
clk_disable_unprepare(fsl_chan->clk);
|
||||
}
|
||||
|
||||
ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma);
|
||||
|
@ -229,6 +229,7 @@ config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
|
||||
config SYSFB
|
||||
bool
|
||||
select BOOT_VESA_SUPPORT
|
||||
select SCREEN_INFO
|
||||
|
||||
config SYSFB_SIMPLEFB
|
||||
bool "Mark VGA/VBE/EFI FB as generic system framebuffer"
|
||||
|
@ -77,6 +77,8 @@ static __init int sysfb_init(void)
|
||||
bool compatible;
|
||||
int ret = 0;
|
||||
|
||||
screen_info_apply_fixups();
|
||||
|
||||
mutex_lock(&disable_lock);
|
||||
if (disabled)
|
||||
goto unlock_mutex;
|
||||
|
@ -1556,7 +1556,7 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
|
||||
}
|
||||
|
||||
static int
|
||||
skl_ddi_calculate_wrpll(int clock /* in Hz */,
|
||||
skl_ddi_calculate_wrpll(int clock,
|
||||
int ref_clock,
|
||||
struct skl_wrpll_params *wrpll_params)
|
||||
{
|
||||
@ -1581,7 +1581,7 @@ skl_ddi_calculate_wrpll(int clock /* in Hz */,
|
||||
};
|
||||
unsigned int dco, d, i;
|
||||
unsigned int p0, p1, p2;
|
||||
u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
|
||||
u64 afe_clock = (u64)clock * 1000 * 5; /* AFE Clock is 5x Pixel clock, in Hz */
|
||||
|
||||
for (d = 0; d < ARRAY_SIZE(dividers); d++) {
|
||||
for (dco = 0; dco < ARRAY_SIZE(dco_central_freq); dco++) {
|
||||
@ -1713,7 +1713,7 @@ static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
|
||||
|
||||
ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
|
||||
|
||||
ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
|
||||
ret = skl_ddi_calculate_wrpll(crtc_state->port_clock,
|
||||
i915->display.dpll.ref_clks.nssc, &wrpll_params);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -249,7 +249,7 @@
|
||||
#define HDCP2_STREAM_STATUS(dev_priv, trans, port) \
|
||||
(GRAPHICS_VER(dev_priv) >= 12 ? \
|
||||
TRANS_HDCP2_STREAM_STATUS(trans) : \
|
||||
PIPE_HDCP2_STREAM_STATUS(pipe))
|
||||
PIPE_HDCP2_STREAM_STATUS(port))
|
||||
|
||||
#define _PORTA_HDCP2_AUTH_STREAM 0x66F00
|
||||
#define _PORTB_HDCP2_AUTH_STREAM 0x66F04
|
||||
|
@ -2781,26 +2781,6 @@ oa_configure_all_contexts(struct i915_perf_stream *stream,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
gen12_configure_all_contexts(struct i915_perf_stream *stream,
|
||||
const struct i915_oa_config *oa_config,
|
||||
struct i915_active *active)
|
||||
{
|
||||
struct flex regs[] = {
|
||||
{
|
||||
GEN8_R_PWR_CLK_STATE(RENDER_RING_BASE),
|
||||
CTX_R_PWR_CLK_STATE,
|
||||
},
|
||||
};
|
||||
|
||||
if (stream->engine->class != RENDER_CLASS)
|
||||
return 0;
|
||||
|
||||
return oa_configure_all_contexts(stream,
|
||||
regs, ARRAY_SIZE(regs),
|
||||
active);
|
||||
}
|
||||
|
||||
static int
|
||||
lrc_configure_all_contexts(struct i915_perf_stream *stream,
|
||||
const struct i915_oa_config *oa_config,
|
||||
@ -2907,7 +2887,6 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
|
||||
{
|
||||
struct drm_i915_private *i915 = stream->perf->i915;
|
||||
struct intel_uncore *uncore = stream->uncore;
|
||||
struct i915_oa_config *oa_config = stream->oa_config;
|
||||
bool periodic = stream->periodic;
|
||||
u32 period_exponent = stream->period_exponent;
|
||||
u32 sqcnt1;
|
||||
@ -2951,15 +2930,6 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
|
||||
|
||||
intel_uncore_rmw(uncore, GEN12_SQCNT1, 0, sqcnt1);
|
||||
|
||||
/*
|
||||
* Update all contexts prior writing the mux configurations as we need
|
||||
* to make sure all slices/subslices are ON before writing to NOA
|
||||
* registers.
|
||||
*/
|
||||
ret = gen12_configure_all_contexts(stream, oa_config, active);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* For Gen12, performance counters are context
|
||||
* saved/restored. Only enable it for the context that
|
||||
@ -3014,9 +2984,6 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream)
|
||||
_MASKED_BIT_DISABLE(GEN12_DISABLE_DOP_GATING));
|
||||
}
|
||||
|
||||
/* Reset all contexts' slices/subslices configurations. */
|
||||
gen12_configure_all_contexts(stream, NULL, NULL);
|
||||
|
||||
/* disable the context save/restore or OAR counters */
|
||||
if (stream->ctx)
|
||||
gen12_configure_oar_context(stream, NULL);
|
||||
|
@ -64,7 +64,8 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
|
||||
* to the caller, instead of a normal nouveau_bo ttm reference. */
|
||||
ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
|
||||
if (ret) {
|
||||
nouveau_bo_ref(NULL, &nvbo);
|
||||
drm_gem_object_release(&nvbo->bo.base);
|
||||
kfree(nvbo);
|
||||
obj = ERR_PTR(-ENOMEM);
|
||||
goto unlock;
|
||||
}
|
||||
|
@ -48,7 +48,7 @@ struct virtio_gpu_submit {
|
||||
static int virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit,
|
||||
struct dma_fence *in_fence)
|
||||
{
|
||||
u32 context = submit->fence_ctx + submit->ring_idx;
|
||||
u64 context = submit->fence_ctx + submit->ring_idx;
|
||||
|
||||
if (dma_fence_match_context(in_fence, context))
|
||||
return 0;
|
||||
|
@ -32,7 +32,6 @@
|
||||
#define VMW_FENCE_WRAP (1 << 31)
|
||||
|
||||
struct vmw_fence_manager {
|
||||
int num_fence_objects;
|
||||
struct vmw_private *dev_priv;
|
||||
spinlock_t lock;
|
||||
struct list_head fence_list;
|
||||
@ -124,13 +123,13 @@ static void vmw_fence_obj_destroy(struct dma_fence *f)
|
||||
{
|
||||
struct vmw_fence_obj *fence =
|
||||
container_of(f, struct vmw_fence_obj, base);
|
||||
|
||||
struct vmw_fence_manager *fman = fman_from_fence(fence);
|
||||
|
||||
spin_lock(&fman->lock);
|
||||
list_del_init(&fence->head);
|
||||
--fman->num_fence_objects;
|
||||
spin_unlock(&fman->lock);
|
||||
if (!list_empty(&fence->head)) {
|
||||
spin_lock(&fman->lock);
|
||||
list_del_init(&fence->head);
|
||||
spin_unlock(&fman->lock);
|
||||
}
|
||||
fence->destroy(fence);
|
||||
}
|
||||
|
||||
@ -257,7 +256,6 @@ static const struct dma_fence_ops vmw_fence_ops = {
|
||||
.release = vmw_fence_obj_destroy,
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* Execute signal actions on fences recently signaled.
|
||||
* This is done from a workqueue so we don't have to execute
|
||||
@ -355,7 +353,6 @@ static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
|
||||
goto out_unlock;
|
||||
}
|
||||
list_add_tail(&fence->head, &fman->fence_list);
|
||||
++fman->num_fence_objects;
|
||||
|
||||
out_unlock:
|
||||
spin_unlock(&fman->lock);
|
||||
@ -403,7 +400,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
|
||||
u32 passed_seqno)
|
||||
{
|
||||
u32 goal_seqno;
|
||||
struct vmw_fence_obj *fence;
|
||||
struct vmw_fence_obj *fence, *next_fence;
|
||||
|
||||
if (likely(!fman->seqno_valid))
|
||||
return false;
|
||||
@ -413,7 +410,7 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
|
||||
return false;
|
||||
|
||||
fman->seqno_valid = false;
|
||||
list_for_each_entry(fence, &fman->fence_list, head) {
|
||||
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
|
||||
if (!list_empty(&fence->seq_passed_actions)) {
|
||||
fman->seqno_valid = true;
|
||||
vmw_fence_goal_write(fman->dev_priv,
|
||||
|
@ -92,7 +92,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
|
||||
{
|
||||
struct vmw_escape_video_flush *flush;
|
||||
size_t fifo_size;
|
||||
bool have_so = (dev_priv->active_display_unit == vmw_du_screen_object);
|
||||
bool have_so = (dev_priv->active_display_unit != vmw_du_legacy);
|
||||
int i, num_items;
|
||||
SVGAGuestPtr ptr;
|
||||
|
||||
|
@ -868,6 +868,32 @@ vmw_stdu_connector_mode_valid(struct drm_connector *connector,
|
||||
return MODE_OK;
|
||||
}
|
||||
|
||||
/*
|
||||
* Trigger a modeset if the X,Y position of the Screen Target changes.
|
||||
* This is needed when multi-mon is cycled. The original Screen Target will have
|
||||
* the same mode but its relative X,Y position in the topology will change.
|
||||
*/
|
||||
static int vmw_stdu_connector_atomic_check(struct drm_connector *conn,
|
||||
struct drm_atomic_state *state)
|
||||
{
|
||||
struct drm_connector_state *conn_state;
|
||||
struct vmw_screen_target_display_unit *du;
|
||||
struct drm_crtc_state *new_crtc_state;
|
||||
|
||||
conn_state = drm_atomic_get_connector_state(state, conn);
|
||||
du = vmw_connector_to_stdu(conn);
|
||||
|
||||
if (!conn_state->crtc)
|
||||
return 0;
|
||||
|
||||
new_crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
|
||||
if (du->base.gui_x != du->base.set_gui_x ||
|
||||
du->base.gui_y != du->base.set_gui_y)
|
||||
new_crtc_state->mode_changed = true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
|
||||
.dpms = vmw_du_connector_dpms,
|
||||
.detect = vmw_du_connector_detect,
|
||||
@ -882,7 +908,8 @@ static const struct drm_connector_funcs vmw_stdu_connector_funcs = {
|
||||
static const struct
|
||||
drm_connector_helper_funcs vmw_stdu_connector_helper_funcs = {
|
||||
.get_modes = vmw_connector_get_modes,
|
||||
.mode_valid = vmw_stdu_connector_mode_valid
|
||||
.mode_valid = vmw_stdu_connector_mode_valid,
|
||||
.atomic_check = vmw_stdu_connector_atomic_check,
|
||||
};
|
||||
|
||||
|
||||
|
@ -288,12 +288,22 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
|
||||
mp2_ops->start(privdata, info);
|
||||
cl_data->sensor_sts[i] = amd_sfh_wait_for_response
|
||||
(privdata, cl_data->sensor_idx[i], SENSOR_ENABLED);
|
||||
|
||||
if (cl_data->sensor_sts[i] == SENSOR_ENABLED)
|
||||
cl_data->is_any_sensor_enabled = true;
|
||||
}
|
||||
|
||||
if (!cl_data->is_any_sensor_enabled ||
|
||||
(mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) {
|
||||
dev_warn(dev, "Failed to discover, sensors not enabled is %d\n",
|
||||
cl_data->is_any_sensor_enabled);
|
||||
rc = -EOPNOTSUPP;
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
for (i = 0; i < cl_data->num_hid_devices; i++) {
|
||||
cl_data->cur_hid_dev = i;
|
||||
if (cl_data->sensor_sts[i] == SENSOR_ENABLED) {
|
||||
cl_data->is_any_sensor_enabled = true;
|
||||
rc = amdtp_hid_probe(i, cl_data);
|
||||
if (rc)
|
||||
goto cleanup;
|
||||
@ -305,12 +315,6 @@ int amd_sfh_hid_client_init(struct amd_mp2_dev *privdata)
|
||||
cl_data->sensor_sts[i]);
|
||||
}
|
||||
|
||||
if (!cl_data->is_any_sensor_enabled ||
|
||||
(mp2_ops->discovery_status && mp2_ops->discovery_status(privdata) == 0)) {
|
||||
dev_warn(dev, "Failed to discover, sensors not enabled is %d\n", cl_data->is_any_sensor_enabled);
|
||||
rc = -EOPNOTSUPP;
|
||||
goto cleanup;
|
||||
}
|
||||
schedule_delayed_work(&cl_data->work_buffer, msecs_to_jiffies(AMD_SFH_IDLE_LOOP));
|
||||
return 0;
|
||||
|
||||
|
@ -714,13 +714,12 @@ static int wacom_intuos_get_tool_type(int tool_id)
|
||||
case 0x8e2: /* IntuosHT2 pen */
|
||||
case 0x022:
|
||||
case 0x200: /* Pro Pen 3 */
|
||||
case 0x04200: /* Pro Pen 3 */
|
||||
case 0x10842: /* MobileStudio Pro Pro Pen slim */
|
||||
case 0x14802: /* Intuos4/5 13HD/24HD Classic Pen */
|
||||
case 0x16802: /* Cintiq 13HD Pro Pen */
|
||||
case 0x18802: /* DTH2242 Pen */
|
||||
case 0x10802: /* Intuos4/5 13HD/24HD General Pen */
|
||||
case 0x80842: /* Intuos Pro and Cintiq Pro 3D Pen */
|
||||
case 0x8842: /* Intuos Pro and Cintiq Pro 3D Pen */
|
||||
tool_type = BTN_TOOL_PEN;
|
||||
break;
|
||||
|
||||
|
@ -194,11 +194,24 @@ int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
|
||||
spin_unlock(&trig->leddev_list_lock);
|
||||
led_cdev->trigger = trig;
|
||||
|
||||
/*
|
||||
* Some activate() calls use led_trigger_event() to initialize
|
||||
* the brightness of the LED for which the trigger is being set.
|
||||
* Ensure the led_cdev is visible on trig->led_cdevs for this.
|
||||
*/
|
||||
synchronize_rcu();
|
||||
|
||||
/*
|
||||
* If "set brightness to 0" is pending in workqueue,
|
||||
* we don't want that to be reordered after ->activate()
|
||||
*/
|
||||
flush_work(&led_cdev->set_brightness_work);
|
||||
|
||||
ret = 0;
|
||||
if (trig->activate)
|
||||
ret = trig->activate(led_cdev);
|
||||
else
|
||||
ret = 0;
|
||||
|
||||
led_set_brightness(led_cdev, trig->brightness);
|
||||
if (ret)
|
||||
goto err_activate;
|
||||
|
||||
@ -269,19 +282,6 @@ void led_trigger_set_default(struct led_classdev *led_cdev)
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(led_trigger_set_default);
|
||||
|
||||
void led_trigger_rename_static(const char *name, struct led_trigger *trig)
|
||||
{
|
||||
/* new name must be on a temporary string to prevent races */
|
||||
BUG_ON(name == trig->name);
|
||||
|
||||
down_write(&triggers_list_lock);
|
||||
/* this assumes that trig->name was originaly allocated to
|
||||
* non constant storage */
|
||||
strcpy((char *)trig->name, name);
|
||||
up_write(&triggers_list_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(led_trigger_rename_static);
|
||||
|
||||
/* LED Trigger Interface */
|
||||
|
||||
int led_trigger_register(struct led_trigger *trig)
|
||||
@ -386,6 +386,8 @@ void led_trigger_event(struct led_trigger *trig,
|
||||
if (!trig)
|
||||
return;
|
||||
|
||||
trig->brightness = brightness;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry_rcu(led_cdev, &trig->led_cdevs, trig_list)
|
||||
led_set_brightness(led_cdev, brightness);
|
||||
|
@ -110,11 +110,6 @@ static int timer_trig_activate(struct led_classdev *led_cdev)
|
||||
led_cdev->flags &= ~LED_INIT_DEFAULT_TRIGGER;
|
||||
}
|
||||
|
||||
/*
|
||||
* If "set brightness to 0" is pending in workqueue, we don't
|
||||
* want that to be reordered after blink_set()
|
||||
*/
|
||||
flush_work(&led_cdev->set_brightness_work);
|
||||
led_blink_set(led_cdev, &led_cdev->blink_delay_on,
|
||||
&led_cdev->blink_delay_off);
|
||||
|
||||
|
@ -456,7 +456,7 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
|
||||
if (rx_ring->vsi->type == ICE_VSI_PF)
|
||||
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
|
||||
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
|
||||
rx_ring->xdp_prog = NULL;
|
||||
WRITE_ONCE(rx_ring->xdp_prog, NULL);
|
||||
if (rx_ring->xsk_pool) {
|
||||
kfree(rx_ring->xdp_buf);
|
||||
rx_ring->xdp_buf = NULL;
|
||||
|
@ -52,10 +52,8 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
|
||||
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
|
||||
{
|
||||
ice_clean_tx_ring(vsi->tx_rings[q_idx]);
|
||||
if (ice_is_xdp_ena_vsi(vsi)) {
|
||||
synchronize_rcu();
|
||||
if (ice_is_xdp_ena_vsi(vsi))
|
||||
ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
|
||||
}
|
||||
ice_clean_rx_ring(vsi->rx_rings[q_idx]);
|
||||
}
|
||||
|
||||
@ -180,11 +178,12 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
|
||||
usleep_range(1000, 2000);
|
||||
}
|
||||
|
||||
synchronize_net();
|
||||
netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
|
||||
|
||||
ice_qvec_dis_irq(vsi, rx_ring, q_vector);
|
||||
ice_qvec_toggle_napi(vsi, q_vector, false);
|
||||
|
||||
netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
|
||||
|
||||
ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
|
||||
err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
|
||||
if (err)
|
||||
@ -199,10 +198,8 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
|
||||
ice_qp_clean_rings(vsi, q_idx);
|
||||
ice_qp_reset_stats(vsi, q_idx);
|
||||
|
||||
@ -1068,6 +1065,10 @@ bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
|
||||
|
||||
ice_clean_xdp_irq_zc(xdp_ring);
|
||||
|
||||
if (!netif_carrier_ok(xdp_ring->vsi->netdev) ||
|
||||
!netif_running(xdp_ring->vsi->netdev))
|
||||
return true;
|
||||
|
||||
budget = ICE_DESC_UNUSED(xdp_ring);
|
||||
budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));
|
||||
|
||||
@ -1111,7 +1112,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
|
||||
struct ice_vsi *vsi = np->vsi;
|
||||
struct ice_tx_ring *ring;
|
||||
|
||||
if (test_bit(ICE_VSI_DOWN, vsi->state))
|
||||
if (test_bit(ICE_VSI_DOWN, vsi->state) || !netif_carrier_ok(netdev))
|
||||
return -ENETDOWN;
|
||||
|
||||
if (!ice_is_xdp_ena_vsi(vsi))
|
||||
|
@ -6208,21 +6208,6 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
|
||||
size_t n;
|
||||
int i;
|
||||
|
||||
switch (qopt->cmd) {
|
||||
case TAPRIO_CMD_REPLACE:
|
||||
break;
|
||||
case TAPRIO_CMD_DESTROY:
|
||||
return igc_tsn_clear_schedule(adapter);
|
||||
case TAPRIO_CMD_STATS:
|
||||
igc_taprio_stats(adapter->netdev, &qopt->stats);
|
||||
return 0;
|
||||
case TAPRIO_CMD_QUEUE_STATS:
|
||||
igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats);
|
||||
return 0;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
if (qopt->base_time < 0)
|
||||
return -ERANGE;
|
||||
|
||||
@ -6331,7 +6316,23 @@ static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
|
||||
if (hw->mac.type != igc_i225)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
err = igc_save_qbv_schedule(adapter, qopt);
|
||||
switch (qopt->cmd) {
|
||||
case TAPRIO_CMD_REPLACE:
|
||||
err = igc_save_qbv_schedule(adapter, qopt);
|
||||
break;
|
||||
case TAPRIO_CMD_DESTROY:
|
||||
err = igc_tsn_clear_schedule(adapter);
|
||||
break;
|
||||
case TAPRIO_CMD_STATS:
|
||||
igc_taprio_stats(adapter->netdev, &qopt->stats);
|
||||
return 0;
|
||||
case TAPRIO_CMD_QUEUE_STATS:
|
||||
igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats);
|
||||
return 0;
|
||||
default:
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
|
@ -953,13 +953,13 @@ static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
|
||||
static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
|
||||
{
|
||||
struct mvpp2_port *port;
|
||||
int i;
|
||||
int i, j;
|
||||
|
||||
for (i = 0; i < priv->port_count; i++) {
|
||||
port = priv->port_list[i];
|
||||
if (port->priv->percpu_pools) {
|
||||
for (i = 0; i < port->nrxqs; i++)
|
||||
mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i],
|
||||
for (j = 0; j < port->nrxqs; j++)
|
||||
mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
|
||||
port->tx_fc & en);
|
||||
} else {
|
||||
mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
|
||||
|
@ -920,6 +920,7 @@ err_rule:
|
||||
mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, mh);
|
||||
mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
|
||||
err_mod_hdr:
|
||||
*attr = *old_attr;
|
||||
kfree(old_attr);
|
||||
err_attr:
|
||||
kvfree(spec);
|
||||
|
@ -50,9 +50,10 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
|
||||
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
|
||||
caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
|
||||
|
||||
if ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
|
||||
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
|
||||
MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))
|
||||
if (IS_ENABLED(CONFIG_MLX5_CLS_ACT) &&
|
||||
((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
|
||||
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
|
||||
MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level)))
|
||||
caps |= MLX5_IPSEC_CAP_PRIO;
|
||||
|
||||
if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
|
||||
|
@ -1223,7 +1223,12 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
|
||||
if (!an_changes && link_modes == eproto.admin)
|
||||
goto out;
|
||||
|
||||
mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
|
||||
err = mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
|
||||
if (err) {
|
||||
netdev_err(priv->netdev, "%s: failed to set ptys reg: %d\n", __func__, err);
|
||||
goto out;
|
||||
}
|
||||
|
||||
mlx5_toggle_port_link(mdev);
|
||||
|
||||
out:
|
||||
|
@ -206,6 +206,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
|
||||
static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unloaded)
|
||||
{
|
||||
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
|
||||
struct devlink *devlink = priv_to_devlink(dev);
|
||||
|
||||
/* if this is the driver that initiated the fw reset, devlink completed the reload */
|
||||
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
|
||||
@ -217,9 +218,11 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unload
|
||||
mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
|
||||
else
|
||||
mlx5_load_one(dev, true);
|
||||
devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
|
||||
devl_lock(devlink);
|
||||
devlink_remote_reload_actions_performed(devlink, 0,
|
||||
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
|
||||
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
|
||||
devl_unlock(devlink);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -48,6 +48,7 @@ static struct mlx5_irq *
|
||||
irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
|
||||
{
|
||||
struct irq_affinity_desc auto_desc = {};
|
||||
struct mlx5_irq *irq;
|
||||
u32 irq_index;
|
||||
int err;
|
||||
|
||||
@ -64,9 +65,12 @@ irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_de
|
||||
else
|
||||
cpu_get(pool, cpumask_first(&af_desc->mask));
|
||||
}
|
||||
return mlx5_irq_alloc(pool, irq_index,
|
||||
cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
|
||||
NULL);
|
||||
irq = mlx5_irq_alloc(pool, irq_index,
|
||||
cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
|
||||
NULL);
|
||||
if (IS_ERR(irq))
|
||||
xa_erase(&pool->irqs, irq_index);
|
||||
return irq;
|
||||
}
|
||||
|
||||
/* Looking for the IRQ with the smallest refcount that fits req_mask.
|
||||
|
@ -1512,7 +1512,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
|
||||
goto unlock;
|
||||
|
||||
for (i = 0; i < ldev->ports; i++) {
|
||||
if (ldev->pf[MLX5_LAG_P1].netdev == slave) {
|
||||
if (ldev->pf[i].netdev == slave) {
|
||||
port = i;
|
||||
break;
|
||||
}
|
||||
|
@ -2130,7 +2130,6 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
|
||||
/* Panic tear down fw command will stop the PCI bus communication
|
||||
* with the HCA, so the health poll is no longer needed.
|
||||
*/
|
||||
mlx5_drain_health_wq(dev);
|
||||
mlx5_stop_health_poll(dev, false);
|
||||
|
||||
ret = mlx5_cmd_fast_teardown_hca(dev);
|
||||
@ -2165,6 +2164,7 @@ static void shutdown(struct pci_dev *pdev)
|
||||
|
||||
mlx5_core_info(dev, "Shutdown was called\n");
|
||||
set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
|
||||
mlx5_drain_health_wq(dev);
|
||||
err = mlx5_try_fast_unload(dev);
|
||||
if (err)
|
||||
mlx5_unload_one(dev, false);
|
||||
|
@ -90,6 +90,7 @@ static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
|
||||
struct mlx5_core_dev *mdev = sf_dev->mdev;
|
||||
|
||||
set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
|
||||
mlx5_drain_health_wq(mdev);
|
||||
mlx5_unload_one(mdev, false);
|
||||
}
|
||||
|
||||
|
@ -4256,7 +4256,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
|
||||
if (unlikely(!rtl_tx_slots_avail(tp))) {
|
||||
if (net_ratelimit())
|
||||
netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
|
||||
goto err_stop_0;
|
||||
netif_stop_queue(dev);
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
opts[1] = rtl8169_tx_vlan_tag(skb);
|
||||
@ -4312,11 +4313,6 @@ err_dma_0:
|
||||
dev_kfree_skb_any(skb);
|
||||
dev->stats.tx_dropped++;
|
||||
return NETDEV_TX_OK;
|
||||
|
||||
err_stop_0:
|
||||
netif_stop_queue(dev);
|
||||
dev->stats.tx_dropped++;
|
||||
return NETDEV_TX_BUSY;
|
||||
}
|
||||
|
||||
static unsigned int rtl_last_frag_len(struct sk_buff *skb)
|
||||
|
@ -1826,9 +1826,9 @@ static void axienet_dma_err_handler(struct work_struct *work)
|
||||
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
|
||||
axienet_set_mac_address(ndev, NULL);
|
||||
axienet_set_multicast_list(ndev);
|
||||
axienet_setoptions(ndev, lp->options);
|
||||
napi_enable(&lp->napi_rx);
|
||||
napi_enable(&lp->napi_tx);
|
||||
axienet_setoptions(ndev, lp->options);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1293,6 +1293,8 @@ static int ksz9131_config_init(struct phy_device *phydev)
|
||||
const struct device *dev_walker;
|
||||
int ret;
|
||||
|
||||
phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
|
||||
|
||||
dev_walker = &phydev->mdio.dev;
|
||||
do {
|
||||
of_node = dev_walker->of_node;
|
||||
@ -1342,28 +1344,30 @@ static int ksz9131_config_init(struct phy_device *phydev)
|
||||
#define MII_KSZ9131_AUTO_MDIX 0x1C
|
||||
#define MII_KSZ9131_AUTO_MDI_SET BIT(7)
|
||||
#define MII_KSZ9131_AUTO_MDIX_SWAP_OFF BIT(6)
|
||||
#define MII_KSZ9131_DIG_AXAN_STS 0x14
|
||||
#define MII_KSZ9131_DIG_AXAN_STS_LINK_DET BIT(14)
|
||||
#define MII_KSZ9131_DIG_AXAN_STS_A_SELECT BIT(12)
|
||||
|
||||
static int ksz9131_mdix_update(struct phy_device *phydev)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = phy_read(phydev, MII_KSZ9131_AUTO_MDIX);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (ret & MII_KSZ9131_AUTO_MDIX_SWAP_OFF) {
|
||||
if (ret & MII_KSZ9131_AUTO_MDI_SET)
|
||||
phydev->mdix_ctrl = ETH_TP_MDI;
|
||||
else
|
||||
phydev->mdix_ctrl = ETH_TP_MDI_X;
|
||||
if (phydev->mdix_ctrl != ETH_TP_MDI_AUTO) {
|
||||
phydev->mdix = phydev->mdix_ctrl;
|
||||
} else {
|
||||
phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
|
||||
}
|
||||
ret = phy_read(phydev, MII_KSZ9131_DIG_AXAN_STS);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
if (ret & MII_KSZ9131_AUTO_MDI_SET)
|
||||
phydev->mdix = ETH_TP_MDI;
|
||||
else
|
||||
phydev->mdix = ETH_TP_MDI_X;
|
||||
if (ret & MII_KSZ9131_DIG_AXAN_STS_LINK_DET) {
|
||||
if (ret & MII_KSZ9131_DIG_AXAN_STS_A_SELECT)
|
||||
phydev->mdix = ETH_TP_MDI;
|
||||
else
|
||||
phydev->mdix = ETH_TP_MDI_X;
|
||||
} else {
|
||||
phydev->mdix = ETH_TP_MDI_INVALID;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1083,6 +1083,13 @@ static struct phy_driver realtek_drvs[] = {
|
||||
.handle_interrupt = genphy_handle_interrupt_no_ack,
|
||||
.suspend = genphy_suspend,
|
||||
.resume = genphy_resume,
|
||||
}, {
|
||||
PHY_ID_MATCH_EXACT(0x001cc960),
|
||||
.name = "RTL8366S Gigabit Ethernet",
|
||||
.suspend = genphy_suspend,
|
||||
.resume = genphy_resume,
|
||||
.read_mmd = genphy_read_mmd_unsupported,
|
||||
.write_mmd = genphy_write_mmd_unsupported,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -179,6 +179,7 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
|
||||
struct usbnet *dev = netdev_priv(netdev);
|
||||
__le16 res;
|
||||
int rc = 0;
|
||||
int err;
|
||||
|
||||
if (phy_id) {
|
||||
netdev_dbg(netdev, "Only internal phy supported\n");
|
||||
@ -189,11 +190,17 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
|
||||
if (loc == MII_BMSR) {
|
||||
u8 value;
|
||||
|
||||
sr_read_reg(dev, SR_NSR, &value);
|
||||
err = sr_read_reg(dev, SR_NSR, &value);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (value & NSR_LINKST)
|
||||
rc = 1;
|
||||
}
|
||||
sr_share_read_word(dev, 1, loc, &res);
|
||||
err = sr_share_read_word(dev, 1, loc, &res);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
if (rc == 1)
|
||||
res = le16_to_cpu(res) | BMSR_LSTATUS;
|
||||
else
|
||||
|
@ -363,6 +363,37 @@ struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from)
|
||||
}
|
||||
EXPORT_SYMBOL(pci_get_class);
|
||||
|
||||
/**
|
||||
* pci_get_base_class - searching for a PCI device by matching against the base class code only
|
||||
* @class: search for a PCI device with this base class code
|
||||
* @from: Previous PCI device found in search, or %NULL for new search.
|
||||
*
|
||||
* Iterates through the list of known PCI devices. If a PCI device is found
|
||||
* with a matching base class code, the reference count to the device is
|
||||
* incremented. See pci_match_one_device() to figure out how does this works.
|
||||
* A new search is initiated by passing %NULL as the @from argument.
|
||||
* Otherwise if @from is not %NULL, searches continue from next device on the
|
||||
* global list. The reference count for @from is always decremented if it is
|
||||
* not %NULL.
|
||||
*
|
||||
* Returns:
|
||||
* A pointer to a matched PCI device, %NULL Otherwise.
|
||||
*/
|
||||
struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from)
|
||||
{
|
||||
struct pci_device_id id = {
|
||||
.vendor = PCI_ANY_ID,
|
||||
.device = PCI_ANY_ID,
|
||||
.subvendor = PCI_ANY_ID,
|
||||
.subdevice = PCI_ANY_ID,
|
||||
.class_mask = 0xFF0000,
|
||||
.class = class << 16,
|
||||
};
|
||||
|
||||
return pci_get_dev_by_id(&id, from);
|
||||
}
|
||||
EXPORT_SYMBOL(pci_get_base_class);
|
||||
|
||||
/**
|
||||
* pci_dev_present - Returns 1 if device matching the device list is present, 0 if not.
|
||||
* @ids: A pointer to a null terminated list of struct pci_device_id structures
|
||||
|
@ -476,12 +476,12 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
|
||||
hwc->idx = counter;
|
||||
hwc->state |= PERF_HES_STOPPED;
|
||||
|
||||
if (flags & PERF_EF_START)
|
||||
ddr_perf_event_start(event, flags);
|
||||
|
||||
/* read trans, write trans, read beat */
|
||||
ddr_perf_monitor_config(pmu, cfg, cfg1, cfg2);
|
||||
|
||||
if (flags & PERF_EF_START)
|
||||
ddr_perf_event_start(event, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -355,7 +355,7 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
|
||||
* but not in the user access mode as we want to use the other counters
|
||||
* that support sampling/filtering.
|
||||
*/
|
||||
if (hwc->flags & PERF_EVENT_FLAG_LEGACY) {
|
||||
if ((hwc->flags & PERF_EVENT_FLAG_LEGACY) && (event->attr.type == PERF_TYPE_HARDWARE)) {
|
||||
if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
|
||||
cflags |= SBI_PMU_CFG_FLAG_SKIP_MATCH;
|
||||
cmask = 1;
|
||||
|
@ -805,9 +805,11 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
|
||||
if (ret == -ENOPROTOOPT) {
|
||||
dev_dbg(ec_dev->dev,
|
||||
"GET_NEXT_EVENT returned invalid version error.\n");
|
||||
mutex_lock(&ec_dev->lock);
|
||||
ret = cros_ec_get_host_command_version_mask(ec_dev,
|
||||
EC_CMD_GET_NEXT_EVENT,
|
||||
&ver_mask);
|
||||
mutex_unlock(&ec_dev->lock);
|
||||
if (ret < 0 || ver_mask == 0)
|
||||
/*
|
||||
* Do not change the MKBP supported version if we can't
|
||||
|
@ -185,7 +185,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
|
||||
return err;
|
||||
}
|
||||
|
||||
data->clk = devm_clk_get(&pdev->dev, NULL);
|
||||
data->clk = devm_clk_get_enabled(&pdev->dev, NULL);
|
||||
if (IS_ERR(data->clk)) {
|
||||
err = PTR_ERR(data->clk);
|
||||
if (err != -EPROBE_DEFER)
|
||||
@ -193,10 +193,6 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
|
||||
return err;
|
||||
}
|
||||
|
||||
err = clk_prepare_enable(data->clk);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
rate = clk_get_rate(data->clk);
|
||||
if ((rate < 1920000) || (rate > 5000000))
|
||||
dev_warn(&pdev->dev,
|
||||
@ -211,7 +207,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
|
||||
dev_err(&pdev->dev,
|
||||
"Failed to register the thermal device: %d\n",
|
||||
err);
|
||||
goto err_clk;
|
||||
return err;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -236,7 +232,7 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
|
||||
dev_err(&pdev->dev,
|
||||
"Not able to read trip_temp: %d\n",
|
||||
err);
|
||||
goto err_tz;
|
||||
return err;
|
||||
}
|
||||
|
||||
/* set bandgap reference voltage and enable voltage regulator */
|
||||
@ -269,32 +265,23 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
|
||||
*/
|
||||
err = thermal_add_hwmon_sysfs(tz);
|
||||
if (err)
|
||||
goto err_tz;
|
||||
return err;
|
||||
|
||||
bcm2835_thermal_debugfs(pdev);
|
||||
|
||||
return 0;
|
||||
err_tz:
|
||||
devm_thermal_of_zone_unregister(&pdev->dev, tz);
|
||||
err_clk:
|
||||
clk_disable_unprepare(data->clk);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int bcm2835_thermal_remove(struct platform_device *pdev)
|
||||
static void bcm2835_thermal_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct bcm2835_thermal_data *data = platform_get_drvdata(pdev);
|
||||
|
||||
debugfs_remove_recursive(data->debugfsdir);
|
||||
clk_disable_unprepare(data->clk);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct platform_driver bcm2835_thermal_driver = {
|
||||
.probe = bcm2835_thermal_probe,
|
||||
.remove = bcm2835_thermal_remove,
|
||||
.remove_new = bcm2835_thermal_remove,
|
||||
.driver = {
|
||||
.name = "bcm2835_thermal",
|
||||
.of_match_table = bcm2835_thermal_of_match_table,
|
||||
|
@ -11,6 +11,10 @@ config APERTURE_HELPERS
|
||||
Support tracking and hand-over of aperture ownership. Required
|
||||
by graphics drivers for firmware-provided framebuffers.
|
||||
|
||||
config SCREEN_INFO
|
||||
bool
|
||||
default n
|
||||
|
||||
config STI_CORE
|
||||
bool
|
||||
depends on PARISC
|
||||
|
@ -1,12 +1,16 @@
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
obj-$(CONFIG_APERTURE_HELPERS) += aperture.o
|
||||
obj-$(CONFIG_SCREEN_INFO) += screen_info.o
|
||||
obj-$(CONFIG_STI_CORE) += sticore.o
|
||||
obj-$(CONFIG_VGASTATE) += vgastate.o
|
||||
obj-$(CONFIG_VIDEO_CMDLINE) += cmdline.o
|
||||
obj-$(CONFIG_VIDEO_NOMODESET) += nomodeset.o
|
||||
obj-$(CONFIG_HDMI) += hdmi.o
|
||||
|
||||
screen_info-y := screen_info_generic.o
|
||||
screen_info-$(CONFIG_PCI) += screen_info_pci.o
|
||||
|
||||
obj-$(CONFIG_VT) += console/
|
||||
obj-$(CONFIG_FB_STI) += console/
|
||||
obj-$(CONFIG_LOGO) += logo/
|
||||
|
@ -243,6 +243,7 @@ static int vesafb_setup(char *options)
|
||||
|
||||
static int vesafb_probe(struct platform_device *dev)
|
||||
{
|
||||
struct screen_info *si = &screen_info;
|
||||
struct fb_info *info;
|
||||
struct vesafb_par *par;
|
||||
int i, err;
|
||||
@ -255,17 +256,17 @@ static int vesafb_probe(struct platform_device *dev)
|
||||
fb_get_options("vesafb", &option);
|
||||
vesafb_setup(option);
|
||||
|
||||
if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
|
||||
if (si->orig_video_isVGA != VIDEO_TYPE_VLFB)
|
||||
return -ENODEV;
|
||||
|
||||
vga_compat = (screen_info.capabilities & 2) ? 0 : 1;
|
||||
vesafb_fix.smem_start = screen_info.lfb_base;
|
||||
vesafb_defined.bits_per_pixel = screen_info.lfb_depth;
|
||||
vga_compat = !__screen_info_vbe_mode_nonvga(si);
|
||||
vesafb_fix.smem_start = si->lfb_base;
|
||||
vesafb_defined.bits_per_pixel = si->lfb_depth;
|
||||
if (15 == vesafb_defined.bits_per_pixel)
|
||||
vesafb_defined.bits_per_pixel = 16;
|
||||
vesafb_defined.xres = screen_info.lfb_width;
|
||||
vesafb_defined.yres = screen_info.lfb_height;
|
||||
vesafb_fix.line_length = screen_info.lfb_linelength;
|
||||
vesafb_defined.xres = si->lfb_width;
|
||||
vesafb_defined.yres = si->lfb_height;
|
||||
vesafb_fix.line_length = si->lfb_linelength;
|
||||
vesafb_fix.visual = (vesafb_defined.bits_per_pixel == 8) ?
|
||||
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
|
||||
|
||||
@ -277,7 +278,7 @@ static int vesafb_probe(struct platform_device *dev)
|
||||
/* size_total -- all video memory we have. Used for mtrr
|
||||
* entries, resource allocation and bounds
|
||||
* checking. */
|
||||
size_total = screen_info.lfb_size * 65536;
|
||||
size_total = si->lfb_size * 65536;
|
||||
if (vram_total)
|
||||
size_total = vram_total * 1024 * 1024;
|
||||
if (size_total < size_vmode)
|
||||
@ -297,7 +298,7 @@ static int vesafb_probe(struct platform_device *dev)
|
||||
vesafb_fix.smem_len = size_remap;
|
||||
|
||||
#ifndef __i386__
|
||||
screen_info.vesapm_seg = 0;
|
||||
si->vesapm_seg = 0;
|
||||
#endif
|
||||
|
||||
if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
|
||||
@ -317,23 +318,26 @@ static int vesafb_probe(struct platform_device *dev)
|
||||
par = info->par;
|
||||
info->pseudo_palette = par->pseudo_palette;
|
||||
|
||||
par->base = screen_info.lfb_base;
|
||||
par->base = si->lfb_base;
|
||||
par->size = size_total;
|
||||
|
||||
printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
|
||||
vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
|
||||
vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel,
|
||||
vesafb_fix.line_length, si->pages);
|
||||
|
||||
if (screen_info.vesapm_seg) {
|
||||
if (si->vesapm_seg) {
|
||||
printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
|
||||
screen_info.vesapm_seg,screen_info.vesapm_off);
|
||||
si->vesapm_seg, si->vesapm_off);
|
||||
}
|
||||
|
||||
if (screen_info.vesapm_seg < 0xc000)
|
||||
if (si->vesapm_seg < 0xc000)
|
||||
ypan = pmi_setpal = 0; /* not available or some DOS TSR ... */
|
||||
|
||||
if (ypan || pmi_setpal) {
|
||||
unsigned long pmi_phys;
|
||||
unsigned short *pmi_base;
|
||||
pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
|
||||
pmi_phys = ((unsigned long)si->vesapm_seg << 4) + si->vesapm_off;
|
||||
pmi_base = (unsigned short *)phys_to_virt(pmi_phys);
|
||||
pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
|
||||
pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
|
||||
printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
|
||||
@ -377,14 +381,14 @@ static int vesafb_probe(struct platform_device *dev)
|
||||
vesafb_defined.left_margin = (vesafb_defined.xres / 8) & 0xf8;
|
||||
vesafb_defined.hsync_len = (vesafb_defined.xres / 8) & 0xf8;
|
||||
|
||||
vesafb_defined.red.offset = screen_info.red_pos;
|
||||
vesafb_defined.red.length = screen_info.red_size;
|
||||
vesafb_defined.green.offset = screen_info.green_pos;
|
||||
vesafb_defined.green.length = screen_info.green_size;
|
||||
vesafb_defined.blue.offset = screen_info.blue_pos;
|
||||
vesafb_defined.blue.length = screen_info.blue_size;
|
||||
vesafb_defined.transp.offset = screen_info.rsvd_pos;
|
||||
vesafb_defined.transp.length = screen_info.rsvd_size;
|
||||
vesafb_defined.red.offset = si->red_pos;
|
||||
vesafb_defined.red.length = si->red_size;
|
||||
vesafb_defined.green.offset = si->green_pos;
|
||||
vesafb_defined.green.length = si->green_size;
|
||||
vesafb_defined.blue.offset = si->blue_pos;
|
||||
vesafb_defined.blue.length = si->blue_size;
|
||||
vesafb_defined.transp.offset = si->rsvd_pos;
|
||||
vesafb_defined.transp.length = si->rsvd_size;
|
||||
|
||||
if (vesafb_defined.bits_per_pixel <= 8) {
|
||||
depth = vesafb_defined.green.length;
|
||||
@ -399,14 +403,14 @@ static int vesafb_probe(struct platform_device *dev)
|
||||
(vesafb_defined.bits_per_pixel > 8) ?
|
||||
"Truecolor" : (vga_compat || pmi_setpal) ?
|
||||
"Pseudocolor" : "Static Pseudocolor",
|
||||
screen_info.rsvd_size,
|
||||
screen_info.red_size,
|
||||
screen_info.green_size,
|
||||
screen_info.blue_size,
|
||||
screen_info.rsvd_pos,
|
||||
screen_info.red_pos,
|
||||
screen_info.green_pos,
|
||||
screen_info.blue_pos);
|
||||
si->rsvd_size,
|
||||
si->red_size,
|
||||
si->green_size,
|
||||
si->blue_size,
|
||||
si->rsvd_pos,
|
||||
si->red_pos,
|
||||
si->green_pos,
|
||||
si->blue_pos);
|
||||
|
||||
vesafb_fix.ypanstep = ypan ? 1 : 0;
|
||||
vesafb_fix.ywrapstep = (ypan>1) ? 1 : 0;
|
||||
|
146
drivers/video/screen_info_generic.c
Normal file
146
drivers/video/screen_info_generic.c
Normal file
@ -0,0 +1,146 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
#include <linux/export.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/screen_info.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
static void resource_init_named(struct resource *r,
|
||||
resource_size_t start, resource_size_t size,
|
||||
const char *name, unsigned int flags)
|
||||
{
|
||||
memset(r, 0, sizeof(*r));
|
||||
|
||||
r->start = start;
|
||||
r->end = start + size - 1;
|
||||
r->name = name;
|
||||
r->flags = flags;
|
||||
}
|
||||
|
||||
static void resource_init_io_named(struct resource *r,
|
||||
resource_size_t start, resource_size_t size,
|
||||
const char *name)
|
||||
{
|
||||
resource_init_named(r, start, size, name, IORESOURCE_IO);
|
||||
}
|
||||
|
||||
static void resource_init_mem_named(struct resource *r,
|
||||
resource_size_t start, resource_size_t size,
|
||||
const char *name)
|
||||
{
|
||||
resource_init_named(r, start, size, name, IORESOURCE_MEM);
|
||||
}
|
||||
|
||||
static inline bool __screen_info_has_ega_gfx(unsigned int mode)
|
||||
{
|
||||
switch (mode) {
|
||||
case 0x0d: /* 320x200-4 */
|
||||
case 0x0e: /* 640x200-4 */
|
||||
case 0x0f: /* 640x350-1 */
|
||||
case 0x10: /* 640x350-4 */
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static inline bool __screen_info_has_vga_gfx(unsigned int mode)
|
||||
{
|
||||
switch (mode) {
|
||||
case 0x10: /* 640x480-1 */
|
||||
case 0x12: /* 640x480-4 */
|
||||
case 0x13: /* 320-200-8 */
|
||||
case 0x6a: /* 800x600-4 (VESA) */
|
||||
return true;
|
||||
default:
|
||||
return __screen_info_has_ega_gfx(mode);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* screen_info_resources() - Get resources from screen_info structure
|
||||
* @si: the screen_info
|
||||
* @r: pointer to an array of resource structures
|
||||
* @num: number of elements in @r:
|
||||
*
|
||||
* Returns:
|
||||
* The number of resources stored in @r on success, or a negative errno code otherwise.
|
||||
*
|
||||
* A call to screen_info_resources() returns the resources consumed by the
|
||||
* screen_info's device or framebuffer. The result is stored in the caller-supplied
|
||||
* array @r with up to @num elements. The function returns the number of
|
||||
* initialized elements.
|
||||
*/
|
||||
ssize_t screen_info_resources(const struct screen_info *si, struct resource *r, size_t num)
|
||||
{
|
||||
struct resource *pos = r;
|
||||
unsigned int type = screen_info_video_type(si);
|
||||
u64 base, size;
|
||||
|
||||
switch (type) {
|
||||
case VIDEO_TYPE_MDA:
|
||||
if (num > 0)
|
||||
resource_init_io_named(pos++, 0x3b0, 12, "mda");
|
||||
if (num > 1)
|
||||
resource_init_io_named(pos++, 0x3bf, 0x01, "mda");
|
||||
if (num > 2)
|
||||
resource_init_mem_named(pos++, 0xb0000, 0x2000, "mda");
|
||||
break;
|
||||
case VIDEO_TYPE_CGA:
|
||||
if (num > 0)
|
||||
resource_init_io_named(pos++, 0x3d4, 0x02, "cga");
|
||||
if (num > 1)
|
||||
resource_init_mem_named(pos++, 0xb8000, 0x2000, "cga");
|
||||
break;
|
||||
case VIDEO_TYPE_EGAM:
|
||||
if (num > 0)
|
||||
resource_init_io_named(pos++, 0x3bf, 0x10, "ega");
|
||||
if (num > 1)
|
||||
resource_init_mem_named(pos++, 0xb0000, 0x8000, "ega");
|
||||
break;
|
||||
case VIDEO_TYPE_EGAC:
|
||||
if (num > 0)
|
||||
resource_init_io_named(pos++, 0x3c0, 0x20, "ega");
|
||||
if (num > 1) {
|
||||
if (__screen_info_has_ega_gfx(si->orig_video_mode))
|
||||
resource_init_mem_named(pos++, 0xa0000, 0x10000, "ega");
|
||||
else
|
||||
resource_init_mem_named(pos++, 0xb8000, 0x8000, "ega");
|
||||
}
|
||||
break;
|
||||
case VIDEO_TYPE_VGAC:
|
||||
if (num > 0)
|
||||
resource_init_io_named(pos++, 0x3c0, 0x20, "vga+");
|
||||
if (num > 1) {
|
||||
if (__screen_info_has_vga_gfx(si->orig_video_mode))
|
||||
resource_init_mem_named(pos++, 0xa0000, 0x10000, "vga+");
|
||||
else
|
||||
resource_init_mem_named(pos++, 0xb8000, 0x8000, "vga+");
|
||||
}
|
||||
break;
|
||||
case VIDEO_TYPE_VLFB:
|
||||
case VIDEO_TYPE_EFI:
|
||||
base = __screen_info_lfb_base(si);
|
||||
if (!base)
|
||||
break;
|
||||
size = __screen_info_lfb_size(si, type);
|
||||
if (!size)
|
||||
break;
|
||||
if (num > 0)
|
||||
resource_init_mem_named(pos++, base, size, "lfb");
|
||||
break;
|
||||
case VIDEO_TYPE_PICA_S3:
|
||||
case VIDEO_TYPE_MIPS_G364:
|
||||
case VIDEO_TYPE_SGI:
|
||||
case VIDEO_TYPE_TGAC:
|
||||
case VIDEO_TYPE_SUN:
|
||||
case VIDEO_TYPE_SUNPCI:
|
||||
case VIDEO_TYPE_PMAC:
|
||||
default:
|
||||
/* not supported */
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return pos - r;
|
||||
}
|
||||
EXPORT_SYMBOL(screen_info_resources);
|
drivers/video/screen_info_pci.c (new file, 136 lines)
@@ -0,0 +1,136 @@
// SPDX-License-Identifier: GPL-2.0

#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/screen_info.h>
#include <linux/string.h>

static struct pci_dev *screen_info_lfb_pdev;
static size_t screen_info_lfb_bar;
static resource_size_t screen_info_lfb_offset;
static struct resource screen_info_lfb_res = DEFINE_RES_MEM(0, 0);

static bool __screen_info_relocation_is_valid(const struct screen_info *si, struct resource *pr)
{
	u64 size = __screen_info_lfb_size(si, screen_info_video_type(si));

	if (screen_info_lfb_offset > resource_size(pr))
		return false;
	if (size > resource_size(pr))
		return false;
	if (resource_size(pr) - size < screen_info_lfb_offset)
		return false;

	return true;
}

void screen_info_apply_fixups(void)
{
	struct screen_info *si = &screen_info;

	if (screen_info_lfb_pdev) {
		struct resource *pr = &screen_info_lfb_pdev->resource[screen_info_lfb_bar];

		if (pr->start != screen_info_lfb_res.start) {
			if (__screen_info_relocation_is_valid(si, pr)) {
				/*
				 * Only update base if we have an actual
				 * relocation to a valid I/O range.
				 */
				__screen_info_set_lfb_base(si, pr->start + screen_info_lfb_offset);
				pr_info("Relocating firmware framebuffer to offset %pa[d] within %pr\n",
					&screen_info_lfb_offset, pr);
			} else {
				pr_warn("Invalid relocating, disabling firmware framebuffer\n");
			}
		}
	}
}

static void screen_info_fixup_lfb(struct pci_dev *pdev)
{
	unsigned int type;
	struct resource res[SCREEN_INFO_MAX_RESOURCES];
	size_t i, numres;
	int ret;
	const struct screen_info *si = &screen_info;

	if (screen_info_lfb_pdev)
		return; // already found

	type = screen_info_video_type(si);
	if (type != VIDEO_TYPE_EFI)
		return; // only applies to EFI

	ret = screen_info_resources(si, res, ARRAY_SIZE(res));
	if (ret < 0)
		return;
	numres = ret;

	for (i = 0; i < numres; ++i) {
		struct resource *r = &res[i];
		const struct resource *pr;

		if (!(r->flags & IORESOURCE_MEM))
			continue;
		pr = pci_find_resource(pdev, r);
		if (!pr)
			continue;

		/*
		 * We've found a PCI device with the framebuffer
		 * resource. Store away the parameters to track
		 * relocation of the framebuffer aperture.
		 */
		screen_info_lfb_pdev = pdev;
		screen_info_lfb_bar = pr - pdev->resource;
		screen_info_lfb_offset = r->start - pr->start;
		memcpy(&screen_info_lfb_res, r, sizeof(screen_info_lfb_res));
	}
}
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY, 16,
			       screen_info_fixup_lfb);

static struct pci_dev *__screen_info_pci_dev(struct resource *res)
{
	struct pci_dev *pdev = NULL;
	const struct resource *r = NULL;

	if (!(res->flags & IORESOURCE_MEM))
		return NULL;

	while (!r && (pdev = pci_get_base_class(PCI_BASE_CLASS_DISPLAY, pdev))) {
		r = pci_find_resource(pdev, res);
	}

	return pdev;
}

/**
 * screen_info_pci_dev() - Return PCI parent device that contains screen_info's framebuffer
 * @si: the screen_info
 *
 * Returns:
 * The screen_info's parent device or NULL on success, or a pointer-encoded
 * errno value otherwise. The value NULL is not an error. It signals that no
 * PCI device has been found.
 */
struct pci_dev *screen_info_pci_dev(const struct screen_info *si)
{
	struct resource res[SCREEN_INFO_MAX_RESOURCES];
	ssize_t i, numres;

	numres = screen_info_resources(si, res, ARRAY_SIZE(res));
	if (numres < 0)
		return ERR_PTR(numres);

	for (i = 0; i < numres; ++i) {
		struct pci_dev *pdev = __screen_info_pci_dev(&res[i]);

		if (pdev)
			return pdev;
	}

	return NULL;
}
EXPORT_SYMBOL(screen_info_pci_dev);
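As an aside, a minimal, hypothetical caller of the new screen_info_pci_dev() helper might look like the sketch below; the function name and the choice to drop the device reference immediately are illustrative assumptions, not part of this patch.

/* Illustrative sketch only, not part of the patch above. */
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/screen_info.h>

static int example_claim_firmware_fb(void)
{
	struct pci_dev *pdev = screen_info_pci_dev(&screen_info);

	if (IS_ERR(pdev))
		return PTR_ERR(pdev);	/* screen_info_resources() failed */
	if (!pdev)
		return 0;		/* framebuffer is not on a PCI device */

	pci_info(pdev, "firmware framebuffer found on this device\n");
	pci_dev_put(pdev);	/* the lookup took a reference via pci_get_base_class() */
	return 0;
}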
@@ -1214,8 +1214,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
	block_group->space_info->total_bytes -= block_group->length;
	block_group->space_info->bytes_readonly -=
		(block_group->length - block_group->zone_unusable);
	block_group->space_info->bytes_zone_unusable -=
		block_group->zone_unusable;
	btrfs_space_info_update_bytes_zone_unusable(fs_info, block_group->space_info,
						    -block_group->zone_unusable);
	block_group->space_info->disk_total -= block_group->length * factor;

	spin_unlock(&block_group->space_info->lock);
@@ -1399,7 +1399,8 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
	if (btrfs_is_zoned(cache->fs_info)) {
		/* Migrate zone_unusable bytes to readonly */
		sinfo->bytes_readonly += cache->zone_unusable;
		sinfo->bytes_zone_unusable -= cache->zone_unusable;
		btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo,
							    -cache->zone_unusable);
		cache->zone_unusable = 0;
	}
	cache->ro++;
@@ -3023,9 +3024,11 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
	if (btrfs_is_zoned(cache->fs_info)) {
		/* Migrate zone_unusable bytes back */
		cache->zone_unusable =
			(cache->alloc_offset - cache->used) +
			(cache->alloc_offset - cache->used - cache->pinned -
			 cache->reserved) +
			(cache->length - cache->zone_capacity);
		sinfo->bytes_zone_unusable += cache->zone_unusable;
		btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo,
							    cache->zone_unusable);
		sinfo->bytes_readonly -= cache->zone_unusable;
	}
	num_bytes = cache->length - cache->reserved -
@@ -2749,7 +2749,8 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
		readonly = true;
	} else if (btrfs_is_zoned(fs_info)) {
		/* Need reset before reusing in a zoned block group */
		space_info->bytes_zone_unusable += len;
		btrfs_space_info_update_bytes_zone_unusable(fs_info, space_info,
							    len);
		readonly = true;
	}
	spin_unlock(&cache->lock);
@@ -2721,8 +2721,10 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
	 * If the block group is read-only, we should account freed space into
	 * bytes_readonly.
	 */
	if (!block_group->ro)
	if (!block_group->ro) {
		block_group->zone_unusable += to_unusable;
		WARN_ON(block_group->zone_unusable > block_group->length);
	}
	spin_unlock(&ctl->tree_lock);
	if (!used) {
		spin_lock(&block_group->lock);
@@ -312,7 +312,7 @@ void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
	found->bytes_used += block_group->used;
	found->disk_used += block_group->used * factor;
	found->bytes_readonly += block_group->bytes_super;
	found->bytes_zone_unusable += block_group->zone_unusable;
	btrfs_space_info_update_bytes_zone_unusable(info, found, block_group->zone_unusable);
	if (block_group->length > 0)
		found->full = 0;
	btrfs_try_granting_tickets(info, found);
@@ -524,8 +524,7 @@ again:

	spin_lock(&cache->lock);
	avail = cache->length - cache->used - cache->pinned -
		cache->reserved - cache->delalloc_bytes -
		cache->bytes_super - cache->zone_unusable;
		cache->reserved - cache->bytes_super - cache->zone_unusable;
	btrfs_info(fs_info,
"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu delalloc %llu super %llu zone_unusable (%llu bytes available) %s",
		cache->start, cache->length, cache->used, cache->pinned,
@@ -197,6 +197,7 @@ btrfs_space_info_update_##name(struct btrfs_fs_info *fs_info, \

DECLARE_SPACE_INFO_UPDATE(bytes_may_use, "space_info");
DECLARE_SPACE_INFO_UPDATE(bytes_pinned, "pinned");
DECLARE_SPACE_INFO_UPDATE(bytes_zone_unusable, "zone_unusable");

int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
@@ -453,6 +453,35 @@ static void ext4_map_blocks_es_recheck(handle_t *handle,
}
#endif /* ES_AGGRESSIVE_TEST */

static int ext4_map_query_blocks(handle_t *handle, struct inode *inode,
				 struct ext4_map_blocks *map)
{
	unsigned int status;
	int retval;

	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		retval = ext4_ext_map_blocks(handle, inode, map, 0);
	else
		retval = ext4_ind_map_blocks(handle, inode, map, 0);

	if (retval <= 0)
		return retval;

	if (unlikely(retval != map->m_len)) {
		ext4_warning(inode->i_sb,
			     "ES len assertion failed for inode "
			     "%lu: retval %d != map->m_len %d",
			     inode->i_ino, retval, map->m_len);
		WARN_ON(1);
	}

	status = map->m_flags & EXT4_MAP_UNWRITTEN ?
			EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
	ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
			      map->m_pblk, status);
	return retval;
}

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
@@ -1705,12 +1734,10 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,

	/* Lookup extent status tree firstly */
	if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
		if (ext4_es_is_hole(&es)) {
			retval = 0;
			down_read(&EXT4_I(inode)->i_data_sem);
		if (ext4_es_is_hole(&es))
			goto add_delayed;
		}

found:
	/*
	 * Delayed extent could be allocated by fallocate.
	 * So we need to check it.
@@ -1747,49 +1774,42 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
	down_read(&EXT4_I(inode)->i_data_sem);
	if (ext4_has_inline_data(inode))
		retval = 0;
	else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		retval = ext4_ext_map_blocks(NULL, inode, map, 0);
	else
		retval = ext4_ind_map_blocks(NULL, inode, map, 0);
	retval = ext4_map_query_blocks(NULL, inode, map);
	up_read(&EXT4_I(inode)->i_data_sem);
	if (retval)
		return retval;

add_delayed:
	if (retval == 0) {
		int ret;

		/*
		 * XXX: __block_prepare_write() unmaps passed block,
		 * is it OK?
		 */

		ret = ext4_insert_delayed_block(inode, map->m_lblk);
		if (ret != 0) {
			retval = ret;
			goto out_unlock;
	down_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * Page fault path (ext4_page_mkwrite does not take i_rwsem)
	 * and fallocate path (no folio lock) can race. Make sure we
	 * lookup the extent status tree here again while i_data_sem
	 * is held in write mode, before inserting a new da entry in
	 * the extent status tree.
	 */
	if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
		if (!ext4_es_is_hole(&es)) {
			up_write(&EXT4_I(inode)->i_data_sem);
			goto found;
		}

		map_bh(bh, inode->i_sb, invalid_block);
		set_buffer_new(bh);
		set_buffer_delay(bh);
	} else if (retval > 0) {
		unsigned int status;

		if (unlikely(retval != map->m_len)) {
			ext4_warning(inode->i_sb,
				     "ES len assertion failed for inode "
				     "%lu: retval %d != map->m_len %d",
				     inode->i_ino, retval, map->m_len);
			WARN_ON(1);
	} else if (!ext4_has_inline_data(inode)) {
		retval = ext4_map_query_blocks(NULL, inode, map);
		if (retval) {
			up_write(&EXT4_I(inode)->i_data_sem);
			return retval;
		}

		status = map->m_flags & EXT4_MAP_UNWRITTEN ?
				EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
		ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
				      map->m_pblk, status);
	}

out_unlock:
	up_read((&EXT4_I(inode)->i_data_sem));
	retval = ext4_insert_delayed_block(inode, map->m_lblk);
	up_write(&EXT4_I(inode)->i_data_sem);
	if (retval)
		return retval;

	map_bh(bh, inode->i_sb, invalid_block);
	set_buffer_new(bh);
	set_buffer_delay(bh);
	return retval;
}

@@ -3350,7 +3350,9 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
		if (page_private_gcing(fio->page)) {
			if (fio->sbi->am.atgc_enabled &&
			    (fio->io_type == FS_DATA_IO) &&
			    (fio->sbi->gc_mode != GC_URGENT_HIGH))
			    (fio->sbi->gc_mode != GC_URGENT_HIGH) &&
			    __is_valid_data_blkaddr(fio->old_blkaddr) &&
			    !is_inode_flag_set(inode, FI_OPU_WRITE))
				return CURSEG_ALL_DATA_ATGC;
			else
				return CURSEG_COLD_DATA;
@@ -1124,6 +1124,7 @@ __releases(&files->file_lock)
	 * tables and this condition does not arise without those.
	 */
	fdt = files_fdtable(files);
	fd = array_index_nospec(fd, fdt->max_fds);
	tofree = fdt->fd[fd];
	if (!tofree && fd_is_open(fd, fdt))
		goto Ebusy;
@@ -480,12 +480,10 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
		make_empty_dir_inode(inode);
	}

	inode->i_uid = GLOBAL_ROOT_UID;
	inode->i_gid = GLOBAL_ROOT_GID;
	if (root->set_ownership)
		root->set_ownership(head, table, &inode->i_uid, &inode->i_gid);
	else {
		inode->i_uid = GLOBAL_ROOT_UID;
		inode->i_gid = GLOBAL_ROOT_GID;
	}
		root->set_ownership(head, &inode->i_uid, &inode->i_gid);

	return inode;
}
@@ -474,6 +474,9 @@ struct led_trigger {
	int (*activate)(struct led_classdev *led_cdev);
	void (*deactivate)(struct led_classdev *led_cdev);

	/* Brightness set by led_trigger_event */
	enum led_brightness brightness;

	/* LED-private triggers have this set */
	struct led_hw_trigger_type *trigger_type;

@@ -527,22 +530,11 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
	return led_cdev->trigger_data;
}

/**
 * led_trigger_rename_static - rename a trigger
 * @name: the new trigger name
 * @trig: the LED trigger to rename
 *
 * Change a LED trigger name by copying the string passed in
 * name into current trigger name, which MUST be large
 * enough for the new string.
 *
 * Note that name must NOT point to the same string used
 * during LED registration, as that could lead to races.
 *
 * This is meant to be used on triggers with statically
 * allocated name.
 */
void led_trigger_rename_static(const char *name, struct led_trigger *trig);
static inline enum led_brightness
led_trigger_get_brightness(const struct led_trigger *trigger)
{
	return trigger ? trigger->brightness : LED_OFF;
}

#define module_led_trigger(__led_trigger) \
	module_driver(__led_trigger, led_trigger_register, \
@@ -580,6 +572,12 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
	return NULL;
}

static inline enum led_brightness
led_trigger_get_brightness(const struct led_trigger *trigger)
{
	return LED_OFF;
}

#endif /* CONFIG_LEDS_TRIGGERS */

/* Trigger specific enum */
@@ -1182,6 +1182,8 @@ struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
					    unsigned int devfn);
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from);

int pci_dev_present(const struct pci_device_id *ids);

int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
@@ -1958,6 +1960,9 @@ static inline struct pci_dev *pci_get_class(unsigned int class,
					    struct pci_dev *from)
{ return NULL; }

static inline struct pci_dev *pci_get_base_class(unsigned int class,
						 struct pci_dev *from)
{ return NULL; }

static inline int pci_dev_present(const struct pci_device_id *ids)
{ return 0; }
@@ -4,6 +4,142 @@

#include <uapi/linux/screen_info.h>

#include <linux/bits.h>

/**
 * SCREEN_INFO_MAX_RESOURCES - maximum number of resources per screen_info
 */
#define SCREEN_INFO_MAX_RESOURCES	3

struct pci_dev;
struct resource;

static inline bool __screen_info_has_lfb(unsigned int type)
{
	return (type == VIDEO_TYPE_VLFB) || (type == VIDEO_TYPE_EFI);
}

static inline u64 __screen_info_lfb_base(const struct screen_info *si)
{
	u64 lfb_base = si->lfb_base;

	if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
		lfb_base |= (u64)si->ext_lfb_base << 32;

	return lfb_base;
}

static inline void __screen_info_set_lfb_base(struct screen_info *si, u64 lfb_base)
{
	si->lfb_base = lfb_base & GENMASK_ULL(31, 0);
	si->ext_lfb_base = (lfb_base & GENMASK_ULL(63, 32)) >> 32;

	if (si->ext_lfb_base)
		si->capabilities |= VIDEO_CAPABILITY_64BIT_BASE;
	else
		si->capabilities &= ~VIDEO_CAPABILITY_64BIT_BASE;
}

static inline u64 __screen_info_lfb_size(const struct screen_info *si, unsigned int type)
{
	u64 lfb_size = si->lfb_size;

	if (type == VIDEO_TYPE_VLFB)
		lfb_size <<= 16;
	return lfb_size;
}

static inline bool __screen_info_vbe_mode_nonvga(const struct screen_info *si)
{
	/*
	 * VESA modes typically run on VGA hardware. Set bit 5 signals that this
	 * is not the case. Drivers can then not make use of VGA resources. See
	 * Sec 4.4 of the VBE 2.0 spec.
	 */
	return si->vesa_attributes & BIT(5);
}

static inline unsigned int __screen_info_video_type(unsigned int type)
{
	switch (type) {
	case VIDEO_TYPE_MDA:
	case VIDEO_TYPE_CGA:
	case VIDEO_TYPE_EGAM:
	case VIDEO_TYPE_EGAC:
	case VIDEO_TYPE_VGAC:
	case VIDEO_TYPE_VLFB:
	case VIDEO_TYPE_PICA_S3:
	case VIDEO_TYPE_MIPS_G364:
	case VIDEO_TYPE_SGI:
	case VIDEO_TYPE_TGAC:
	case VIDEO_TYPE_SUN:
	case VIDEO_TYPE_SUNPCI:
	case VIDEO_TYPE_PMAC:
	case VIDEO_TYPE_EFI:
		return type;
	default:
		return 0;
	}
}

/**
 * screen_info_video_type() - Decodes the video type from struct screen_info
 * @si: an instance of struct screen_info
 *
 * Returns:
 * A VIDEO_TYPE_ constant representing si's type of video display, or 0 otherwise.
 */
static inline unsigned int screen_info_video_type(const struct screen_info *si)
{
	unsigned int type;

	// check if display output is on
	if (!si->orig_video_isVGA)
		return 0;

	// check for a known VIDEO_TYPE_ constant
	type = __screen_info_video_type(si->orig_video_isVGA);
	if (type)
		return si->orig_video_isVGA;

	// check if text mode has been initialized
	if (!si->orig_video_lines || !si->orig_video_cols)
		return 0;

	// 80x25 text, mono
	if (si->orig_video_mode == 0x07) {
		if ((si->orig_video_ega_bx & 0xff) != 0x10)
			return VIDEO_TYPE_EGAM;
		else
			return VIDEO_TYPE_MDA;
	}

	// EGA/VGA, 16 colors
	if ((si->orig_video_ega_bx & 0xff) != 0x10) {
		if (si->orig_video_isVGA)
			return VIDEO_TYPE_VGAC;
		else
			return VIDEO_TYPE_EGAC;
	}

	// the rest...
	return VIDEO_TYPE_CGA;
}

ssize_t screen_info_resources(const struct screen_info *si, struct resource *r, size_t num);

#if defined(CONFIG_PCI)
void screen_info_apply_fixups(void);
struct pci_dev *screen_info_pci_dev(const struct screen_info *si);
#else
static inline void screen_info_apply_fixups(void)
{ }
static inline struct pci_dev *screen_info_pci_dev(const struct screen_info *si)
{
	return NULL;
}
#endif

extern struct screen_info screen_info;

#endif /* _SCREEN_INFO_H */
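As a side note, the 32/32-bit split performed by __screen_info_set_lfb_base() and __screen_info_lfb_base() above can be exercised in isolation; the sketch below is a userspace approximation with stand-in struct, macro, and function names, not kernel code.

/* Userspace sketch of the 64-bit LFB base split; names are stand-ins. */
#include <assert.h>
#include <stdint.h>

struct fake_si { uint32_t lfb_base, ext_lfb_base, capabilities; };
#define FAKE_CAP_64BIT_BASE (1u << 1)	/* stand-in for VIDEO_CAPABILITY_64BIT_BASE */

static void set_lfb_base(struct fake_si *si, uint64_t base)
{
	si->lfb_base = (uint32_t)(base & 0xffffffffu);	/* low 32 bits */
	si->ext_lfb_base = (uint32_t)(base >> 32);	/* high 32 bits */
	if (si->ext_lfb_base)
		si->capabilities |= FAKE_CAP_64BIT_BASE;
	else
		si->capabilities &= ~FAKE_CAP_64BIT_BASE;
}

static uint64_t get_lfb_base(const struct fake_si *si)
{
	uint64_t base = si->lfb_base;

	if (si->capabilities & FAKE_CAP_64BIT_BASE)
		base |= (uint64_t)si->ext_lfb_base << 32;
	return base;
}

int main(void)
{
	struct fake_si si = { 0 };

	set_lfb_base(&si, 0x4000000000ull);	/* a framebuffer above 4 GiB */
	assert(get_lfb_base(&si) == 0x4000000000ull);
	return 0;
}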
@@ -205,7 +205,6 @@ struct ctl_table_root {
	struct ctl_table_set default_set;
	struct ctl_table_set *(*lookup)(struct ctl_table_root *root);
	void (*set_ownership)(struct ctl_table_header *head,
			      struct ctl_table *table,
			      kuid_t *uid, kgid_t *gid);
	int (*permissions)(struct ctl_table_header *head, struct ctl_table *table);
};
@@ -2430,6 +2430,14 @@ DEFINE_EVENT(btrfs__space_info_update, update_bytes_pinned,
	TP_ARGS(fs_info, sinfo, old, diff)
);

DEFINE_EVENT(btrfs__space_info_update, update_bytes_zone_unusable,

	TP_PROTO(const struct btrfs_fs_info *fs_info,
		 const struct btrfs_space_info *sinfo, u64 old, s64 diff),

	TP_ARGS(fs_info, sinfo, old, diff)
);

DECLARE_EVENT_CLASS(btrfs_raid56_bio,

	TP_PROTO(const struct btrfs_raid_bio *rbio,
@@ -34,7 +34,7 @@ TRACE_EVENT(mptcp_subflow_get_send,
		struct sock *ssk;

		__entry->active = mptcp_subflow_active(subflow);
		__entry->backup = subflow->backup;
		__entry->backup = subflow->backup || subflow->request_bkup;

		if (subflow->tcp_sock && sk_fullsock(subflow->tcp_sock))
			__entry->free = sk_stream_memory_free(subflow->tcp_sock);
@@ -1898,6 +1898,7 @@ config RUST
	depends on !MODVERSIONS
	depends on !GCC_PLUGINS
	depends on !RANDSTRUCT
	depends on !SHADOW_CALL_STACK
	depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE
	help
	  Enables Rust support in the kernel.
@@ -14,6 +14,7 @@
#include <linux/ipc_namespace.h>
#include <linux/msg.h>
#include <linux/slab.h>
#include <linux/cred.h>
#include "util.h"

static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
@@ -190,25 +191,56 @@ static int set_is_seen(struct ctl_table_set *set)
	return &current->nsproxy->ipc_ns->ipc_set == set;
}

static void ipc_set_ownership(struct ctl_table_header *head,
			      kuid_t *uid, kgid_t *gid)
{
	struct ipc_namespace *ns =
		container_of(head->set, struct ipc_namespace, ipc_set);

	kuid_t ns_root_uid = make_kuid(ns->user_ns, 0);
	kgid_t ns_root_gid = make_kgid(ns->user_ns, 0);

	*uid = uid_valid(ns_root_uid) ? ns_root_uid : GLOBAL_ROOT_UID;
	*gid = gid_valid(ns_root_gid) ? ns_root_gid : GLOBAL_ROOT_GID;
}

static int ipc_permissions(struct ctl_table_header *head, struct ctl_table *table)
{
	int mode = table->mode;

#ifdef CONFIG_CHECKPOINT_RESTORE
	struct ipc_namespace *ns = current->nsproxy->ipc_ns;
	struct ipc_namespace *ns =
		container_of(head->set, struct ipc_namespace, ipc_set);

	if (((table->data == &ns->ids[IPC_SEM_IDS].next_id) ||
	     (table->data == &ns->ids[IPC_MSG_IDS].next_id) ||
	     (table->data == &ns->ids[IPC_SHM_IDS].next_id)) &&
	    checkpoint_restore_ns_capable(ns->user_ns))
		mode = 0666;
	else
#endif
	return mode;
	{
		kuid_t ns_root_uid;
		kgid_t ns_root_gid;

		ipc_set_ownership(head, &ns_root_uid, &ns_root_gid);

		if (uid_eq(current_euid(), ns_root_uid))
			mode >>= 6;

		else if (in_egroup_p(ns_root_gid))
			mode >>= 3;
	}

	mode &= 7;

	return (mode << 6) | (mode << 3) | mode;
}

static struct ctl_table_root set_root = {
	.lookup = set_lookup,
	.permissions = ipc_permissions,
	.set_ownership = ipc_set_ownership,
};

bool setup_ipc_sysctls(struct ipc_namespace *ns)
@@ -12,6 +12,7 @@
#include <linux/stat.h>
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/cred.h>

static int msg_max_limit_min = MIN_MSGMAX;
static int msg_max_limit_max = HARD_MSGMAX;
@@ -76,8 +77,42 @@ static int set_is_seen(struct ctl_table_set *set)
	return &current->nsproxy->ipc_ns->mq_set == set;
}

static void mq_set_ownership(struct ctl_table_header *head,
			     kuid_t *uid, kgid_t *gid)
{
	struct ipc_namespace *ns =
		container_of(head->set, struct ipc_namespace, mq_set);

	kuid_t ns_root_uid = make_kuid(ns->user_ns, 0);
	kgid_t ns_root_gid = make_kgid(ns->user_ns, 0);

	*uid = uid_valid(ns_root_uid) ? ns_root_uid : GLOBAL_ROOT_UID;
	*gid = gid_valid(ns_root_gid) ? ns_root_gid : GLOBAL_ROOT_GID;
}

static int mq_permissions(struct ctl_table_header *head, struct ctl_table *table)
{
	int mode = table->mode;
	kuid_t ns_root_uid;
	kgid_t ns_root_gid;

	mq_set_ownership(head, &ns_root_uid, &ns_root_gid);

	if (uid_eq(current_euid(), ns_root_uid))
		mode >>= 6;

	else if (in_egroup_p(ns_root_gid))
		mode >>= 3;

	mode &= 7;

	return (mode << 6) | (mode << 3) | mode;
}

static struct ctl_table_root set_root = {
	.lookup = set_lookup,
	.permissions = mq_permissions,
	.set_ownership = mq_set_ownership,
};

bool setup_mq_sysctls(struct ipc_namespace *ns)

mm/Kconfig (11 lines changed)
@@ -704,6 +704,17 @@ config HUGETLB_PAGE_SIZE_VARIABLE
config CONTIG_ALLOC
	def_bool (MEMORY_ISOLATION && COMPACTION) || CMA

config PCP_BATCH_SCALE_MAX
	int "Maximum scale factor of PCP (Per-CPU pageset) batch allocate/free"
	default 5
	range 0 6
	help
	  In page allocator, PCP (Per-CPU pageset) is refilled and drained in
	  batches. The batch number is scaled automatically to improve page
	  allocation/free throughput. But too large scale factor may hurt
	  latency. This option sets the upper limit of scale factor to limit
	  the maximum latency.

config PHYS_ADDR_T_64BIT
	def_bool 64BIT

@@ -2185,14 +2185,21 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 */
static void drain_pages_zone(unsigned int cpu, struct zone *zone)
{
	struct per_cpu_pages *pcp;
	struct per_cpu_pages *pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
	int count;

	pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu);
	if (pcp->count) {
	do {
		spin_lock(&pcp->lock);
		free_pcppages_bulk(zone, pcp->count, pcp, 0);
		count = pcp->count;
		if (count) {
			int to_drain = min(count,
				pcp->batch << CONFIG_PCP_BATCH_SCALE_MAX);

			free_pcppages_bulk(zone, to_drain, pcp, 0);
			count -= to_drain;
		}
		spin_unlock(&pcp->lock);
	}
	} while (count);
}

/*
@@ -2343,7 +2350,7 @@ static int nr_pcp_free(struct per_cpu_pages *pcp, int high, bool free_high)
	 * freeing of pages without any allocation.
	 */
	batch <<= pcp->free_factor;
	if (batch < max_nr_free)
	if (batch < max_nr_free && pcp->free_factor < CONFIG_PCP_BATCH_SCALE_MAX)
		pcp->free_factor++;
	batch = clamp(batch, min_nr_free, max_nr_free);

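For intuition, the bounded-drain pattern introduced in drain_pages_zone() above can be sketched in plain C; BATCH_SCALE_MAX and drain_bounded() below are stand-ins for CONFIG_PCP_BATCH_SCALE_MAX and the real per_cpu_pages machinery, which additionally holds pcp->lock around each batch.

/* Userspace sketch of draining in bounded batches; names are stand-ins. */
#include <stdio.h>

#define BATCH_SCALE_MAX 5	/* stand-in for CONFIG_PCP_BATCH_SCALE_MAX */

static void drain_bounded(int count, int batch)
{
	do {
		/* the kernel takes pcp->lock here */
		if (count) {
			int limit = batch << BATCH_SCALE_MAX;
			int to_drain = count < limit ? count : limit;

			printf("freeing %d pages\n", to_drain);
			count -= to_drain;
		}
		/* the kernel drops pcp->lock here, bounding lock-hold time */
	} while (count);
}

int main(void)
{
	drain_bounded(10000, 63);	/* 63-page batch -> at most 2016 pages per hold */
	return 0;
}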
@@ -2862,6 +2862,27 @@ static int hci_passive_scan_sync(struct hci_dev *hdev)
	 */
	filter_policy = hci_update_accept_list_sync(hdev);

	/* If suspended and filter_policy set to 0x00 (no acceptlist) then
	 * passive scanning cannot be started since that would require the host
	 * to be woken up to process the reports.
	 */
	if (hdev->suspended && !filter_policy) {
		/* Check if accept list is empty then there is no need to scan
		 * while suspended.
		 */
		if (list_empty(&hdev->le_accept_list))
			return 0;

		/* If there are devices is the accept_list that means some
		 * devices could not be programmed which in non-suspended case
		 * means filter_policy needs to be set to 0x00 so the host needs
		 * to filter, but since this is treating suspended case we
		 * can ignore device needing host to filter to allow devices in
		 * the acceptlist to be able to wakeup the system.
		 */
		filter_policy = 0x01;
	}

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
@@ -3263,7 +3263,7 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
	if (ifm->ifi_index > 0)
		dev = __dev_get_by_index(tgt_net, ifm->ifi_index);
	else if (tb[IFLA_IFNAME] || tb[IFLA_ALT_IFNAME])
		dev = rtnl_dev_get(net, tb);
		dev = rtnl_dev_get(tgt_net, tb);
	else if (tb[IFLA_GROUP])
		err = rtnl_group_dellink(tgt_net, nla_get_u32(tb[IFLA_GROUP]));
	else
@@ -145,25 +145,27 @@ static struct pernet_operations iptable_nat_net_ops = {

static int __init iptable_nat_init(void)
{
	int ret = xt_register_template(&nf_nat_ipv4_table,
				       iptable_nat_table_init);
	int ret;

	/* net->gen->ptr[iptable_nat_net_id] must be allocated
	 * before calling iptable_nat_table_init().
	 */
	ret = register_pernet_subsys(&iptable_nat_net_ops);
	if (ret < 0)
		return ret;

	ret = register_pernet_subsys(&iptable_nat_net_ops);
	if (ret < 0) {
		xt_unregister_template(&nf_nat_ipv4_table);
		return ret;
	}
	ret = xt_register_template(&nf_nat_ipv4_table,
				   iptable_nat_table_init);
	if (ret < 0)
		unregister_pernet_subsys(&iptable_nat_net_ops);

	return ret;
}

static void __exit iptable_nat_exit(void)
{
	unregister_pernet_subsys(&iptable_nat_net_ops);
	xt_unregister_template(&nf_nat_ipv4_table);
	unregister_pernet_subsys(&iptable_nat_net_ops);
}

module_init(iptable_nat_init);
@@ -424,7 +424,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
	}

	/* Try to redo what tcp_v4_send_synack did. */
	req->rsk_window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW);
	req->rsk_window_clamp = READ_ONCE(tp->window_clamp) ? :
				dst_metric(&rt->dst, RTAX_WINDOW);
	/* limit the window selection if the user enforce a smaller rx buffer */
	full_space = tcp_full_space(sk);
	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
@@ -1723,7 +1723,7 @@ int tcp_set_rcvlowat(struct sock *sk, int val)
	space = tcp_space_from_win(sk, val);
	if (space > sk->sk_rcvbuf) {
		WRITE_ONCE(sk->sk_rcvbuf, space);
		tcp_sk(sk)->window_clamp = val;
		WRITE_ONCE(tcp_sk(sk)->window_clamp, val);
	}
	return 0;
}
@@ -3386,7 +3386,7 @@ int tcp_set_window_clamp(struct sock *sk, int val)
	if (!val) {
		if (sk->sk_state != TCP_CLOSE)
			return -EINVAL;
		tp->window_clamp = 0;
		WRITE_ONCE(tp->window_clamp, 0);
	} else {
		u32 new_rcv_ssthresh, old_window_clamp = tp->window_clamp;
		u32 new_window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
@@ -3395,7 +3395,7 @@ int tcp_set_window_clamp(struct sock *sk, int val)
		if (new_window_clamp == old_window_clamp)
			return 0;

		tp->window_clamp = new_window_clamp;
		WRITE_ONCE(tp->window_clamp, new_window_clamp);
		if (new_window_clamp < old_window_clamp) {
			/* need to apply the reserved mem provisioning only
			 * when shrinking the window clamp
@@ -4020,7 +4020,7 @@ int do_tcp_getsockopt(struct sock *sk, int level,
			  TCP_RTO_MAX / HZ);
		break;
	case TCP_WINDOW_CLAMP:
		val = tp->window_clamp;
		val = READ_ONCE(tp->window_clamp);
		break;
	case TCP_INFO: {
		struct tcp_info info;
@@ -570,19 +570,20 @@ static void tcp_init_buffer_space(struct sock *sk)
	maxwin = tcp_full_space(sk);

	if (tp->window_clamp >= maxwin) {
		tp->window_clamp = maxwin;
		WRITE_ONCE(tp->window_clamp, maxwin);

		if (tcp_app_win && maxwin > 4 * tp->advmss)
			tp->window_clamp = max(maxwin -
					       (maxwin >> tcp_app_win),
					       4 * tp->advmss);
			WRITE_ONCE(tp->window_clamp,
				   max(maxwin - (maxwin >> tcp_app_win),
				       4 * tp->advmss));
	}

	/* Force reservation of one segment. */
	if (tcp_app_win &&
	    tp->window_clamp > 2 * tp->advmss &&
	    tp->window_clamp + tp->advmss > maxwin)
		tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);
		WRITE_ONCE(tp->window_clamp,
			   max(2 * tp->advmss, maxwin - tp->advmss));

	tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
	tp->snd_cwnd_stamp = tcp_jiffies32;
@@ -747,8 +748,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
	 * <prev RTT . ><current RTT .. ><next RTT .... >
	 */

	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf) &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf)) {
		u64 rcvwin, grow;
		int rcvbuf;

@@ -764,11 +764,22 @@ void tcp_rcv_space_adjust(struct sock *sk)

		rcvbuf = min_t(u64, tcp_space_from_win(sk, rcvwin),
			       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
		if (rcvbuf > sk->sk_rcvbuf) {
			WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);
		if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
			if (rcvbuf > sk->sk_rcvbuf) {
				WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);

			/* Make the window clamp follow along. */
			tp->window_clamp = tcp_win_from_space(sk, rcvbuf);
				/* Make the window clamp follow along. */
				WRITE_ONCE(tp->window_clamp,
					   tcp_win_from_space(sk, rcvbuf));
			}
		} else {
			/* Make the window clamp follow along while being bounded
			 * by SO_RCVBUF.
			 */
			int clamp = tcp_win_from_space(sk, min(rcvbuf, sk->sk_rcvbuf));

			if (clamp > tp->window_clamp)
				WRITE_ONCE(tp->window_clamp, clamp);
		}
	}
	tp->rcvq_space.space = copied;
@@ -6347,7 +6358,8 @@ consume:

	if (!tp->rx_opt.wscale_ok) {
		tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
		tp->window_clamp = min(tp->window_clamp, 65535U);
		WRITE_ONCE(tp->window_clamp,
			   min(tp->window_clamp, 65535U));
	}

	if (tp->rx_opt.saw_tstamp) {
@@ -203,16 +203,17 @@ static inline void tcp_event_ack_sent(struct sock *sk, u32 rcv_nxt)
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       __u32 *rcv_wnd, __u32 *__window_clamp,
			       int wscale_ok, __u8 *rcv_wscale,
			       __u32 init_rcv_wnd)
{
	unsigned int space = (__space < 0 ? 0 : __space);
	u32 window_clamp = READ_ONCE(*__window_clamp);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (U16_MAX << TCP_MAX_WSCALE);
	space = min(*window_clamp, space);
	if (window_clamp == 0)
		window_clamp = (U16_MAX << TCP_MAX_WSCALE);
	space = min(window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
@@ -239,12 +240,13 @@ void tcp_select_initial_window(const struct sock *sk, int __space, __u32 mss,
		/* Set window scaling on max possible window */
		space = max_t(u32, space, READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2]));
		space = max_t(u32, space, READ_ONCE(sysctl_rmem_max));
		space = min_t(u32, space, *window_clamp);
		space = min_t(u32, space, window_clamp);
		*rcv_wscale = clamp_t(int, ilog2(space) - 15,
				      0, TCP_MAX_WSCALE);
	}
	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min_t(__u32, U16_MAX << (*rcv_wscale), *window_clamp);
	WRITE_ONCE(*__window_clamp,
		   min_t(__u32, U16_MAX << (*rcv_wscale), window_clamp));
}
EXPORT_SYMBOL(tcp_select_initial_window);

@@ -3787,7 +3789,7 @@ static void tcp_connect_init(struct sock *sk)
	tcp_ca_dst_init(sk, dst);

	if (!tp->window_clamp)
		tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
		WRITE_ONCE(tp->window_clamp, dst_metric(dst, RTAX_WINDOW));
	tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(sk);
@@ -3795,7 +3797,7 @@ static void tcp_connect_init(struct sock *sk)
	/* limit the window selection if the user enforce a smaller rx buffer */
	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0))
		tp->window_clamp = tcp_full_space(sk);
		WRITE_ONCE(tp->window_clamp, tcp_full_space(sk));

	rcv_wnd = tcp_rwnd_init_bpf(sk);
	if (rcv_wnd == 0)
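As a rough illustration, the final clamp in tcp_select_initial_window() above limits window_clamp to the largest value representable under the negotiated window scale (U16_MAX shifted by rcv_wscale); the numbers in this userspace sketch are arbitrary examples, not values taken from the patch.

/* Userspace arithmetic sketch of the representable-window clamp. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t window_clamp = 4u * 1024 * 1024;	/* example requested clamp: 4 MiB */
	uint8_t rcv_wscale = 5;				/* example negotiated scale */
	uint32_t max_repr = (uint32_t)UINT16_MAX << rcv_wscale;	/* 65535 << 5 = 2097120 */
	uint32_t final = window_clamp < max_repr ? window_clamp : max_repr;

	printf("clamp %u -> %u (max representable %u)\n", window_clamp, final, max_repr);
	return 0;
}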
@@ -227,6 +227,7 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
		return NULL;
	memset(ndopts, 0, sizeof(*ndopts));
	while (opt_len) {
		bool unknown = false;
		int l;
		if (opt_len < sizeof(struct nd_opt_hdr))
			return NULL;
@@ -262,22 +263,23 @@ struct ndisc_options *ndisc_parse_options(const struct net_device *dev,
			break;
#endif
		default:
			if (ndisc_is_useropt(dev, nd_opt)) {
				ndopts->nd_useropts_end = nd_opt;
				if (!ndopts->nd_useropts)
					ndopts->nd_useropts = nd_opt;
			} else {
				/*
				 * Unknown options must be silently ignored,
				 * to accommodate future extension to the
				 * protocol.
				 */
				ND_PRINTK(2, notice,
					  "%s: ignored unsupported option; type=%d, len=%d\n",
					  __func__,
					  nd_opt->nd_opt_type,
					  nd_opt->nd_opt_len);
			}
			unknown = true;
		}
		if (ndisc_is_useropt(dev, nd_opt)) {
			ndopts->nd_useropts_end = nd_opt;
			if (!ndopts->nd_useropts)
				ndopts->nd_useropts = nd_opt;
		} else if (unknown) {
			/*
			 * Unknown options must be silently ignored,
			 * to accommodate future extension to the
			 * protocol.
			 */
			ND_PRINTK(2, notice,
				  "%s: ignored unsupported option; type=%d, len=%d\n",
				  __func__,
				  nd_opt->nd_opt_type,
				  nd_opt->nd_opt_len);
		}
next_opt:
		opt_len -= l;
@@ -147,23 +147,27 @@ static struct pernet_operations ip6table_nat_net_ops = {

static int __init ip6table_nat_init(void)
{
	int ret = xt_register_template(&nf_nat_ipv6_table,
				       ip6table_nat_table_init);
	int ret;

	/* net->gen->ptr[ip6table_nat_net_id] must be allocated
	 * before calling ip6t_nat_register_lookups().
	 */
	ret = register_pernet_subsys(&ip6table_nat_net_ops);
	if (ret < 0)
		return ret;

	ret = register_pernet_subsys(&ip6table_nat_net_ops);
	ret = xt_register_template(&nf_nat_ipv6_table,
				   ip6table_nat_table_init);
	if (ret)
		xt_unregister_template(&nf_nat_ipv6_table);
		unregister_pernet_subsys(&ip6table_nat_net_ops);

	return ret;
}

static void __exit ip6table_nat_exit(void)
{
	unregister_pernet_subsys(&ip6table_nat_net_ops);
	xt_unregister_template(&nf_nat_ipv6_table);
	unregister_pernet_subsys(&ip6table_nat_net_ops);
}

module_init(ip6table_nat_init);

@@ -243,7 +243,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
		goto out_free;
	}

	req->rsk_window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW);
	req->rsk_window_clamp = READ_ONCE(tp->window_clamp) ? :dst_metric(dst, RTAX_WINDOW);
	/* limit the window selection if the user enforce a smaller rx buffer */
	full_space = tcp_full_space(sk);
	if (sk->sk_userlocks & SOCK_RCVBUF_LOCK &&