SCSI misc on 20251002

Usual driver updates (ufs, mpi3mr, lpfc, pm80xx, mpt3sas) plus
 assorted cleanups and fixes.  The only core update is to sd.c and is
 mostly cosmetic.
 
 Signed-off-by: James E.J. Bottomley <James.Bottomley@HansenPartnership.com>
 -----BEGIN PGP SIGNATURE-----
 
 iLgEABMIAGAWIQTnYEDbdso9F2cI+arnQslM7pishQUCaN7vMhsUgAAAAAAEAA5t
 YW51MiwyLjUrMS4xMSwyLDImHGphbWVzLmJvdHRvbWxleUBoYW5zZW5wYXJ0bmVy
 c2hpcC5jb20ACgkQ50LJTO6YrIXdXgD9HsCcnqb5KzHs0EMzy752KjHoHeS5Tg59
 CkTZcpTFAfgBAJJIHo5AripQ0cqPqTzVEkhWbd9q277A3HLgu3NFcsWG
 =seOt
 -----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "Usual driver updates (ufs, mpi3mr, lpfc, pm80xx, mpt3sas) plus
  assorted cleanups and fixes.

  The only core update is to sd.c and is mostly cosmetic"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (105 commits)
  scsi: MAINTAINERS: Update FC element owners
  scsi: mpt3sas: Update driver version to 54.100.00.00
  scsi: mpt3sas: Add support for 22.5 Gbps SAS link rate
  scsi: mpt3sas: Suppress unnecessary IOCLogInfo on CONFIG_INVALID_PAGE
  scsi: mpt3sas: Fix crash in transport port remove by using ioc_info()
  scsi: ufs: ufs-qcom: Add support for limiting HS gear and rate
  scsi: ufs: pltfrm: Add DT support to limit HS gear and gear rate
  scsi: ufs: ufs-qcom: Remove redundant re-assignment to hs_rate
  scsi: ufs: dt-bindings: Document gear and rate limit properties
  scsi: ufs: core: Fix data race in CPU latency PM QoS request handling
  scsi: libfc: Fix potential buffer overflow in fc_ct_ms_fill()
  scsi: storvsc: Remove redundant ternary operators
  scsi: ufs: core: Change MCQ interrupt enable flow
  scsi: smartpqi: Replace kmalloc() + copy_from_user() with memdup_user()
  scsi: hpsa: Replace kmalloc() + copy_from_user() with memdup_user()
  scsi: hpsa: Fix potential memory leak in hpsa_big_passthru_ioctl()
  scsi: lpfc: Copyright updates for 14.4.0.11 patches
  scsi: lpfc: Update lpfc version to 14.4.0.11
  scsi: lpfc: Convert debugfs directory counts from atomic to unsigned int
  scsi: lpfc: Clean up extraneous phba dentries
  ...
Linus Torvalds 2025-10-03 19:17:48 -07:00
commit 674b0ddb75
80 changed files with 1624 additions and 1133 deletions


@ -0,0 +1,167 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/ufs/qcom,sc7180-ufshc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Qualcomm SC7180 and Other SoCs UFS Controllers

maintainers:
  - Bjorn Andersson <bjorn.andersson@linaro.org>

# Select only our matches, not all jedec,ufs-2.0
select:
  properties:
    compatible:
      contains:
        enum:
          - qcom,msm8998-ufshc
          - qcom,qcs8300-ufshc
          - qcom,sa8775p-ufshc
          - qcom,sc7180-ufshc
          - qcom,sc7280-ufshc
          - qcom,sc8180x-ufshc
          - qcom,sc8280xp-ufshc
          - qcom,sm8250-ufshc
          - qcom,sm8350-ufshc
          - qcom,sm8450-ufshc
          - qcom,sm8550-ufshc
  required:
    - compatible

properties:
  compatible:
    items:
      - enum:
          - qcom,msm8998-ufshc
          - qcom,qcs8300-ufshc
          - qcom,sa8775p-ufshc
          - qcom,sc7180-ufshc
          - qcom,sc7280-ufshc
          - qcom,sc8180x-ufshc
          - qcom,sc8280xp-ufshc
          - qcom,sm8250-ufshc
          - qcom,sm8350-ufshc
          - qcom,sm8450-ufshc
          - qcom,sm8550-ufshc
      - const: qcom,ufshc
      - const: jedec,ufs-2.0

  reg:
    maxItems: 1

  reg-names:
    items:
      - const: std

  clocks:
    minItems: 7
    maxItems: 8

  clock-names:
    minItems: 7
    items:
      - const: core_clk
      - const: bus_aggr_clk
      - const: iface_clk
      - const: core_clk_unipro
      - const: ref_clk
      - const: tx_lane0_sync_clk
      - const: rx_lane0_sync_clk
      - const: rx_lane1_sync_clk

  qcom,ice:
    $ref: /schemas/types.yaml#/definitions/phandle
    description: phandle to the Inline Crypto Engine node

required:
  - compatible
  - reg

allOf:
  - $ref: qcom,ufs-common.yaml

  - if:
      properties:
        compatible:
          contains:
            enum:
              - qcom,sc7180-ufshc
    then:
      properties:
        clocks:
          maxItems: 7
        clock-names:
          maxItems: 7
    else:
      properties:
        clocks:
          minItems: 8
        clock-names:
          minItems: 8

unevaluatedProperties: false

examples:
  - |
    #include <dt-bindings/clock/qcom,gcc-sm8450.h>
    #include <dt-bindings/clock/qcom,rpmh.h>
    #include <dt-bindings/gpio/gpio.h>
    #include <dt-bindings/interconnect/qcom,sm8450.h>
    #include <dt-bindings/interrupt-controller/arm-gic.h>

    soc {
        #address-cells = <2>;
        #size-cells = <2>;

        ufs@1d84000 {
            compatible = "qcom,sm8450-ufshc", "qcom,ufshc",
                         "jedec,ufs-2.0";
            reg = <0x0 0x01d84000 0x0 0x3000>;
            interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
            phys = <&ufs_mem_phy_lanes>;
            phy-names = "ufsphy";
            lanes-per-direction = <2>;
            #reset-cells = <1>;
            resets = <&gcc GCC_UFS_PHY_BCR>;
            reset-names = "rst";
            reset-gpios = <&tlmm 210 GPIO_ACTIVE_LOW>;

            vcc-supply = <&vreg_l7b_2p5>;
            vcc-max-microamp = <1100000>;
            vccq-supply = <&vreg_l9b_1p2>;
            vccq-max-microamp = <1200000>;

            power-domains = <&gcc UFS_PHY_GDSC>;

            iommus = <&apps_smmu 0xe0 0x0>;

            interconnects = <&aggre1_noc MASTER_UFS_MEM &mc_virt SLAVE_EBI1>,
                            <&gem_noc MASTER_APPSS_PROC &config_noc SLAVE_UFS_MEM_CFG>;
            interconnect-names = "ufs-ddr", "cpu-ufs";

            clocks = <&gcc GCC_UFS_PHY_AXI_CLK>,
                     <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
                     <&gcc GCC_UFS_PHY_AHB_CLK>,
                     <&gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
                     <&rpmhcc RPMH_CXO_CLK>,
                     <&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
                     <&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
                     <&gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>;
            clock-names = "core_clk",
                          "bus_aggr_clk",
                          "iface_clk",
                          "core_clk_unipro",
                          "ref_clk",
                          "tx_lane0_sync_clk",
                          "rx_lane0_sync_clk",
                          "rx_lane1_sync_clk";
            freq-table-hz = <75000000 300000000>,
                            <0 0>,
                            <0 0>,
                            <75000000 300000000>,
                            <75000000 300000000>,
                            <0 0>,
                            <0 0>,
                            <0 0>;
            qcom,ice = <&ice>;
        };
    };


@ -0,0 +1,178 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/ufs/qcom,sm8650-ufshc.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Qualcomm SM8650 and Other SoCs UFS Controllers

maintainers:
  - Bjorn Andersson <bjorn.andersson@linaro.org>

# Select only our matches, not all jedec,ufs-2.0
select:
  properties:
    compatible:
      contains:
        enum:
          - qcom,sm8650-ufshc
          - qcom,sm8750-ufshc
  required:
    - compatible

properties:
  compatible:
    items:
      - enum:
          - qcom,sm8650-ufshc
          - qcom,sm8750-ufshc
      - const: qcom,ufshc
      - const: jedec,ufs-2.0

  reg:
    minItems: 1
    maxItems: 2

  reg-names:
    minItems: 1
    items:
      - const: std
      - const: mcq

  clocks:
    minItems: 8
    maxItems: 8

  clock-names:
    items:
      - const: core_clk
      - const: bus_aggr_clk
      - const: iface_clk
      - const: core_clk_unipro
      - const: ref_clk
      - const: tx_lane0_sync_clk
      - const: rx_lane0_sync_clk
      - const: rx_lane1_sync_clk

  qcom,ice:
    $ref: /schemas/types.yaml#/definitions/phandle
    description: phandle to the Inline Crypto Engine node

required:
  - compatible
  - reg

allOf:
  - $ref: qcom,ufs-common.yaml

unevaluatedProperties: false

examples:
  - |
    #include <dt-bindings/clock/qcom,sm8650-gcc.h>
    #include <dt-bindings/clock/qcom,sm8650-tcsr.h>
    #include <dt-bindings/clock/qcom,rpmh.h>
    #include <dt-bindings/gpio/gpio.h>
    #include <dt-bindings/interconnect/qcom,icc.h>
    #include <dt-bindings/interconnect/qcom,sm8650-rpmh.h>
    #include <dt-bindings/interrupt-controller/arm-gic.h>

    soc {
        #address-cells = <2>;
        #size-cells = <2>;

        ufshc@1d84000 {
            compatible = "qcom,sm8650-ufshc", "qcom,ufshc", "jedec,ufs-2.0";
            reg = <0x0 0x01d84000 0x0 0x3000>;
            interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH 0>;

            clocks = <&gcc GCC_UFS_PHY_AXI_CLK>,
                     <&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
                     <&gcc GCC_UFS_PHY_AHB_CLK>,
                     <&gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
                     <&tcsr TCSR_UFS_PAD_CLKREF_EN>,
                     <&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
                     <&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
                     <&gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>;
            clock-names = "core_clk",
                          "bus_aggr_clk",
                          "iface_clk",
                          "core_clk_unipro",
                          "ref_clk",
                          "tx_lane0_sync_clk",
                          "rx_lane0_sync_clk",
                          "rx_lane1_sync_clk";

            resets = <&gcc GCC_UFS_PHY_BCR>;
            reset-names = "rst";
            reset-gpios = <&tlmm 210 GPIO_ACTIVE_LOW>;

            interconnects = <&aggre1_noc MASTER_UFS_MEM QCOM_ICC_TAG_ALWAYS
                             &mc_virt SLAVE_EBI1 QCOM_ICC_TAG_ALWAYS>,
                            <&gem_noc MASTER_APPSS_PROC QCOM_ICC_TAG_ACTIVE_ONLY
                             &config_noc SLAVE_UFS_MEM_CFG QCOM_ICC_TAG_ACTIVE_ONLY>;
            interconnect-names = "ufs-ddr",
                                 "cpu-ufs";

            power-domains = <&gcc UFS_PHY_GDSC>;
            required-opps = <&rpmhpd_opp_nom>;
            operating-points-v2 = <&ufs_opp_table>;

            iommus = <&apps_smmu 0x60 0>;
            lanes-per-direction = <2>;
            qcom,ice = <&ice>;

            phys = <&ufs_mem_phy>;
            phy-names = "ufsphy";

            #reset-cells = <1>;

            vcc-supply = <&vreg_l7b_2p5>;
            vcc-max-microamp = <1100000>;
            vccq-supply = <&vreg_l9b_1p2>;
            vccq-max-microamp = <1200000>;

            ufs_opp_table: opp-table {
                compatible = "operating-points-v2";

                opp-100000000 {
                    opp-hz = /bits/ 64 <100000000>,
                             /bits/ 64 <0>,
                             /bits/ 64 <0>,
                             /bits/ 64 <100000000>,
                             /bits/ 64 <0>,
                             /bits/ 64 <0>,
                             /bits/ 64 <0>,
                             /bits/ 64 <0>;
                    required-opps = <&rpmhpd_opp_low_svs>;
                };

                opp-201500000 {
                    opp-hz = /bits/ 64 <201500000>,
                             /bits/ 64 <0>,
                             /bits/ 64 <0>,
                             /bits/ 64 <201500000>,
                             /bits/ 64 <0>,
                             /bits/ 64 <0>,
                             /bits/ 64 <0>,
                             /bits/ 64 <0>;
                    required-opps = <&rpmhpd_opp_svs>;
                };

                opp-403000000 {
                    opp-hz = /bits/ 64 <403000000>,
                             /bits/ 64 <0>,
                             /bits/ 64 <0>,
                             /bits/ 64 <403000000>,
                             /bits/ 64 <0>,
                             /bits/ 64 <0>,
                             /bits/ 64 <0>,
                             /bits/ 64 <0>;
                    required-opps = <&rpmhpd_opp_nom>;
                };
            };
        };
    };


@ -0,0 +1,67 @@
# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/ufs/qcom,ufs-common.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Qualcomm Universal Flash Storage (UFS) Controller Common Properties

maintainers:
  - Bjorn Andersson <bjorn.andersson@linaro.org>

properties:
  clocks:
    minItems: 7
    maxItems: 9

  clock-names:
    minItems: 7
    maxItems: 9

  dma-coherent: true

  interconnects:
    minItems: 2
    maxItems: 2

  interconnect-names:
    items:
      - const: ufs-ddr
      - const: cpu-ufs

  iommus:
    minItems: 1
    maxItems: 2

  phys:
    maxItems: 1

  phy-names:
    items:
      - const: ufsphy

  power-domains:
    maxItems: 1

  required-opps:
    maxItems: 1

  resets:
    maxItems: 1

  '#reset-cells':
    const: 1

  reset-names:
    items:
      - const: rst

  reset-gpios:
    maxItems: 1
    description:
      GPIO connected to the RESET pin of the UFS memory device.

allOf:
  - $ref: ufs-common.yaml

additionalProperties: true


@ -15,7 +15,15 @@ select:
properties:
compatible:
contains:
const: qcom,ufshc
enum:
- qcom,msm8994-ufshc
- qcom,msm8996-ufshc
- qcom,qcs615-ufshc
- qcom,sdm845-ufshc
- qcom,sm6115-ufshc
- qcom,sm6125-ufshc
- qcom,sm6350-ufshc
- qcom,sm8150-ufshc
required:
- compatible
@ -25,61 +33,15 @@ properties:
- enum:
- qcom,msm8994-ufshc
- qcom,msm8996-ufshc
- qcom,msm8998-ufshc
- qcom,qcs615-ufshc
- qcom,qcs8300-ufshc
- qcom,sa8775p-ufshc
- qcom,sc7180-ufshc
- qcom,sc7280-ufshc
- qcom,sc8180x-ufshc
- qcom,sc8280xp-ufshc
- qcom,sdm845-ufshc
- qcom,sm6115-ufshc
- qcom,sm6125-ufshc
- qcom,sm6350-ufshc
- qcom,sm8150-ufshc
- qcom,sm8250-ufshc
- qcom,sm8350-ufshc
- qcom,sm8450-ufshc
- qcom,sm8550-ufshc
- qcom,sm8650-ufshc
- qcom,sm8750-ufshc
- const: qcom,ufshc
- const: jedec,ufs-2.0
clocks:
minItems: 7
maxItems: 9
clock-names:
minItems: 7
maxItems: 9
dma-coherent: true
interconnects:
minItems: 2
maxItems: 2
interconnect-names:
items:
- const: ufs-ddr
- const: cpu-ufs
iommus:
minItems: 1
maxItems: 2
phys:
maxItems: 1
phy-names:
items:
- const: ufsphy
power-domains:
maxItems: 1
qcom,ice:
$ref: /schemas/types.yaml#/definitions/phandle
description: phandle to the Inline Crypto Engine node
@ -93,93 +55,12 @@ properties:
- const: std
- const: ice
required-opps:
maxItems: 1
resets:
maxItems: 1
'#reset-cells':
const: 1
reset-names:
items:
- const: rst
reset-gpios:
maxItems: 1
description:
GPIO connected to the RESET pin of the UFS memory device.
required:
- compatible
- reg
allOf:
- $ref: ufs-common.yaml
- if:
properties:
compatible:
contains:
enum:
- qcom,sc7180-ufshc
then:
properties:
clocks:
minItems: 7
maxItems: 7
clock-names:
items:
- const: core_clk
- const: bus_aggr_clk
- const: iface_clk
- const: core_clk_unipro
- const: ref_clk
- const: tx_lane0_sync_clk
- const: rx_lane0_sync_clk
reg:
maxItems: 1
reg-names:
maxItems: 1
- if:
properties:
compatible:
contains:
enum:
- qcom,msm8998-ufshc
- qcom,qcs8300-ufshc
- qcom,sa8775p-ufshc
- qcom,sc7280-ufshc
- qcom,sc8180x-ufshc
- qcom,sc8280xp-ufshc
- qcom,sm8250-ufshc
- qcom,sm8350-ufshc
- qcom,sm8450-ufshc
- qcom,sm8550-ufshc
- qcom,sm8650-ufshc
- qcom,sm8750-ufshc
then:
properties:
clocks:
minItems: 8
maxItems: 8
clock-names:
items:
- const: core_clk
- const: bus_aggr_clk
- const: iface_clk
- const: core_clk_unipro
- const: ref_clk
- const: tx_lane0_sync_clk
- const: rx_lane0_sync_clk
- const: rx_lane1_sync_clk
reg:
minItems: 1
maxItems: 1
reg-names:
maxItems: 1
- $ref: qcom,ufs-common.yaml
- if:
properties:
@ -297,10 +178,10 @@ unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/qcom,gcc-sm8450.h>
#include <dt-bindings/clock/qcom,gcc-sm8150.h>
#include <dt-bindings/clock/qcom,rpmh.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interconnect/qcom,sm8450.h>
#include <dt-bindings/interconnect/qcom,sm8150.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
soc {
@ -308,9 +189,12 @@ examples:
#size-cells = <2>;
ufs@1d84000 {
compatible = "qcom,sm8450-ufshc", "qcom,ufshc",
compatible = "qcom,sm8150-ufshc", "qcom,ufshc",
"jedec,ufs-2.0";
reg = <0 0x01d84000 0 0x3000>;
reg = <0x0 0x01d84000 0x0 0x2500>,
<0x0 0x01d90000 0x0 0x8000>;
reg-names = "std", "ice";
interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>;
phys = <&ufs_mem_phy_lanes>;
phy-names = "ufsphy";
@ -326,19 +210,8 @@ examples:
vccq-max-microamp = <1200000>;
power-domains = <&gcc UFS_PHY_GDSC>;
iommus = <&apps_smmu 0xe0 0x0>;
interconnects = <&aggre1_noc MASTER_UFS_MEM &mc_virt SLAVE_EBI1>,
<&gem_noc MASTER_APPSS_PROC &config_noc SLAVE_UFS_MEM_CFG>;
interconnect-names = "ufs-ddr", "cpu-ufs";
iommus = <&apps_smmu 0x300 0>;
clock-names = "core_clk",
"bus_aggr_clk",
"iface_clk",
"core_clk_unipro",
"ref_clk",
"tx_lane0_sync_clk",
"rx_lane0_sync_clk",
"rx_lane1_sync_clk";
clocks = <&gcc GCC_UFS_PHY_AXI_CLK>,
<&gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
<&gcc GCC_UFS_PHY_AHB_CLK>,
@ -346,15 +219,25 @@ examples:
<&rpmhcc RPMH_CXO_CLK>,
<&gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
<&gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
<&gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>;
freq-table-hz = <75000000 300000000>,
<&gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>,
<&gcc GCC_UFS_PHY_ICE_CORE_CLK>;
clock-names = "core_clk",
"bus_aggr_clk",
"iface_clk",
"core_clk_unipro",
"ref_clk",
"tx_lane0_sync_clk",
"rx_lane0_sync_clk",
"rx_lane1_sync_clk",
"ice_core_clk";
freq-table-hz = <37500000 300000000>,
<0 0>,
<0 0>,
<75000000 300000000>,
<75000000 300000000>,
<37500000 300000000>,
<0 0>,
<0 0>,
<0 0>;
qcom,ice = <&ice>;
<0 0>,
<0 0>,
<0 300000000>;
};
};


@ -89,6 +89,22 @@ properties:
msi-parent: true
limit-hs-gear:
$ref: /schemas/types.yaml#/definitions/uint32
minimum: 1
maximum: 6
default: 6
description:
Restricts the maximum HS gear used in both TX and RX directions.
limit-gear-rate:
$ref: /schemas/types.yaml#/definitions/string
enum: [rate-a, rate-b]
default: rate-b
description:
Restricts the UFS controller to rate-a or rate-b for both TX and
RX directions.
dependencies:
freq-table-hz: [ clocks ]
operating-points-v2: [ clocks, clock-names ]
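
The two properties added above let a board cap the UFS link speed below the controller's maximum. A minimal sketch of how a platform driver could consume them through the generic device-property API (the struct and helper names here are hypothetical placeholders, not the actual ufshcd-pltfrm code):

#include <linux/property.h>
#include <linux/string.h>
#include <linux/types.h>

struct ufs_gear_limits {
	u32 max_hs_gear;	/* hypothetical holder for limit-hs-gear */
	bool rate_b;		/* hypothetical holder for limit-gear-rate */
};

static void ufs_read_gear_limits(struct device *dev,
				 struct ufs_gear_limits *lim)
{
	const char *rate;

	/* Start from the schema defaults: gear 6, rate-b */
	lim->max_hs_gear = 6;
	lim->rate_b = true;

	/* Both properties are optional, so lookup failures are ignored */
	device_property_read_u32(dev, "limit-hs-gear", &lim->max_hs_gear);
	if (!device_property_read_string(dev, "limit-gear-rate", &rate))
		lim->rate_b = !strcmp(rate, "rate-b");
}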


@ -9135,7 +9135,6 @@ F: drivers/infiniband/hw/ocrdma/
F: include/uapi/rdma/ocrdma-abi.h
EMULEX/BROADCOM EFCT FC/FCOE SCSI TARGET DRIVER
M: James Smart <james.smart@broadcom.com>
M: Ram Vegesna <ram.vegesna@broadcom.com>
L: linux-scsi@vger.kernel.org
L: target-devel@vger.kernel.org
@ -9144,8 +9143,8 @@ W: http://www.broadcom.com
F: drivers/scsi/elx/
EMULEX/BROADCOM LPFC FC/FCOE SCSI DRIVER
M: James Smart <james.smart@broadcom.com>
M: Dick Kennedy <dick.kennedy@broadcom.com>
M: Justin Tee <justin.tee@broadcom.com>
M: Paul Ely <paul.ely@broadcom.com>
L: linux-scsi@vger.kernel.org
S: Supported
W: http://www.broadcom.com
@ -18405,7 +18404,9 @@ F: drivers/nvme/target/fabrics-cmd-auth.c
F: include/linux/nvme-auth.h
NVM EXPRESS FC TRANSPORT DRIVERS
M: James Smart <james.smart@broadcom.com>
M: Justin Tee <justin.tee@broadcom.com>
M: Naresh Gottumukkala <nareshgottumukkala83@gmail.com>
M: Paul Ely <paul.ely@broadcom.com>
L: linux-nvme@lists.infradead.org
S: Supported
F: drivers/nvme/host/fc.c


@ -488,7 +488,6 @@ static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task,
scb->ssp_task.conn_handle = cpu_to_le16(
(u16)(unsigned long)dev->lldd_dev);
scb->ssp_task.data_dir = data_dir_flags[task->data_dir];
scb->ssp_task.retry_count = scb->ssp_task.retry_count;
ascb->tasklet_complete = asd_task_tasklet_complete;


@ -1282,7 +1282,6 @@ bfa_iocfc_cfgrsp(struct bfa_s *bfa)
struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
fwcfg->num_cqs = fwcfg->num_cqs;
fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);


@ -960,7 +960,7 @@ csio_wr_copy_to_wrp(void *data_buf, struct csio_wr_pair *wrp,
memcpy((uint8_t *) wrp->addr1 + wr_off, data_buf, nbytes);
data_len -= nbytes;
/* Write the remaining data from the begining of circular buffer */
/* Write the remaining data from the beginning of circular buffer */
if (data_len) {
CSIO_DB_ASSERT(data_len <= wrp->size2);
CSIO_DB_ASSERT(wrp->addr2 != NULL);
@ -1224,7 +1224,7 @@ csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
/*
* We need to re-arm SGE interrupts in case we got a stray interrupt,
* especially in msix mode. With INTx, this may be a common occurence.
* especially in msix mode. With INTx, this may be a common occurrence.
*/
if (unlikely(!q->inc_idx)) {
CSIO_INC_STATS(q, n_stray_comp);


@ -876,7 +876,7 @@ static int hisi_sas_dev_found(struct domain_device *device)
device->lldd_dev = sas_dev;
hisi_hba->hw->setup_itct(hisi_hba, sas_dev);
if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
if (dev_parent_is_expander(device)) {
int phy_no;
phy_no = sas_find_attached_phy_id(&parent_dev->ex_dev, device);
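
The hisi_sas hunks here, and the isci and libsas hunks further down, all replace the open-coded "parent_dev && dev_is_expander(parent_dev->dev_type)" test with a new dev_parent_is_expander() helper. Judging purely from the converted call sites, the helper plausibly reduces to a sketch like this (not the verbatim libsas definition):

static inline bool dev_parent_is_expander(struct domain_device *dev)
{
	return dev->parent && dev_is_expander(dev->parent->dev_type);
}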


@ -925,7 +925,6 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
struct device *dev = hisi_hba->dev;
u64 qw0, device_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
struct domain_device *parent_dev = device->parent;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
u64 sas_addr;
@ -942,7 +941,7 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba,
break;
case SAS_SATA_DEV:
case SAS_SATA_PENDING:
if (parent_dev && dev_is_expander(parent_dev->dev_type))
if (dev_parent_is_expander(device))
qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
else
qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
@ -2494,7 +2493,6 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
{
struct sas_task *task = slot->task;
struct domain_device *device = task->dev;
struct domain_device *parent_dev = device->parent;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct asd_sas_port *sas_port = device->port;
@ -2509,7 +2507,7 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
/* create header */
/* dw0 */
dw0 = port->id << CMD_HDR_PORT_OFF;
if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
if (dev_parent_is_expander(device)) {
dw0 |= 3 << CMD_HDR_CMD_OFF;
} else {
phy_id = device->phy->identify.phy_identifier;


@ -874,7 +874,6 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
struct device *dev = hisi_hba->dev;
u64 qw0, device_id = sas_dev->device_id;
struct hisi_sas_itct *itct = &hisi_hba->itct[device_id];
struct domain_device *parent_dev = device->parent;
struct asd_sas_port *sas_port = device->port;
struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
u64 sas_addr;
@ -891,7 +890,7 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba,
break;
case SAS_SATA_DEV:
case SAS_SATA_PENDING:
if (parent_dev && dev_is_expander(parent_dev->dev_type))
if (dev_parent_is_expander(device))
qw0 = HISI_SAS_DEV_TYPE_STP << ITCT_HDR_DEV_TYPE_OFF;
else
qw0 = HISI_SAS_DEV_TYPE_SATA << ITCT_HDR_DEV_TYPE_OFF;
@ -1476,7 +1475,6 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
{
struct sas_task *task = slot->task;
struct domain_device *device = task->dev;
struct domain_device *parent_dev = device->parent;
struct hisi_sas_device *sas_dev = device->lldd_dev;
struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
struct asd_sas_port *sas_port = device->port;
@ -1487,7 +1485,7 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
u32 dw1 = 0, dw2 = 0;
hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
if (dev_parent_is_expander(device)) {
hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
} else {
phy_id = device->phy->identify.phy_identifier;


@ -2662,10 +2662,8 @@ static void complete_scsi_command(struct CommandList *cp)
case CMD_TARGET_STATUS:
cmd->result |= ei->ScsiStatus;
/* copy the sense data */
if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
sense_data_size = SCSI_SENSE_BUFFERSIZE;
else
sense_data_size = sizeof(ei->SenseInfo);
sense_data_size = min_t(unsigned long, SCSI_SENSE_BUFFERSIZE,
sizeof(ei->SenseInfo));
if (ei->SenseLen < sense_data_size)
sense_data_size = ei->SenseLen;
memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
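
min_t(type, a, b) evaluates both operands as the named type, which is what lets it replace the if/else here even though SCSI_SENSE_BUFFERSIZE is a plain int while sizeof() yields a size_t; the kernel's min() rejects mixed-type comparisons at build time, which is also why the next hunk can use plain min(), since there both operands are int. A small standalone illustration:

#include <linux/minmax.h>

static unsigned long pick_sense_size(int hard_limit, size_t available)
{
	/*
	 * min(hard_limit, available) would fail the kernel's type check;
	 * min_t() casts both sides to unsigned long before comparing.
	 */
	return min_t(unsigned long, hard_limit, available);
}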
@ -3628,10 +3626,7 @@ static bool hpsa_vpd_page_supported(struct ctlr_info *h,
if (rc != 0)
goto exit_unsupported;
pages = buf[3];
if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
bufsize = pages + HPSA_VPD_HEADER_SZ;
else
bufsize = 255;
bufsize = min(pages + HPSA_VPD_HEADER_SZ, 255);
/* Get the whole VPD page list */
rc = hpsa_scsi_do_inquiry(h, scsi3addr,
@ -6407,18 +6402,14 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h,
return -EINVAL;
}
if (iocommand->buf_size > 0) {
buff = kmalloc(iocommand->buf_size, GFP_KERNEL);
if (buff == NULL)
return -ENOMEM;
if (iocommand->Request.Type.Direction & XFER_WRITE) {
/* Copy the data into the buffer we created */
if (copy_from_user(buff, iocommand->buf,
iocommand->buf_size)) {
rc = -EFAULT;
goto out_kfree;
}
buff = memdup_user(iocommand->buf, iocommand->buf_size);
if (IS_ERR(buff))
return PTR_ERR(buff);
} else {
memset(buff, 0, iocommand->buf_size);
buff = kzalloc(iocommand->buf_size, GFP_KERNEL);
if (!buff)
return -ENOMEM;
}
}
c = cmd_alloc(h);
@ -6478,7 +6469,6 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h,
}
out:
cmd_free(h, c);
out_kfree:
kfree(buff);
return rc;
}
@ -6522,18 +6512,21 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h,
while (left) {
sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
buff_size[sg_used] = sz;
buff[sg_used] = kmalloc(sz, GFP_KERNEL);
if (buff[sg_used] == NULL) {
status = -ENOMEM;
goto cleanup1;
}
if (ioc->Request.Type.Direction & XFER_WRITE) {
if (copy_from_user(buff[sg_used], data_ptr, sz)) {
status = -EFAULT;
buff[sg_used] = memdup_user(data_ptr, sz);
if (IS_ERR(buff[sg_used])) {
status = PTR_ERR(buff[sg_used]);
goto cleanup1;
}
} else
memset(buff[sg_used], 0, sz);
} else {
buff[sg_used] = kzalloc(sz, GFP_KERNEL);
if (!buff[sg_used]) {
status = -ENOMEM;
goto cleanup1;
}
}
left -= sz;
data_ptr += sz;
sg_used++;
@ -7632,8 +7625,8 @@ static void hpsa_free_cfgtables(struct ctlr_info *h)
}
/* Find and map CISS config table and transfer table
+ * several items must be unmapped (freed) later
+ * */
* several items must be unmapped (freed) later
*/
static int hpsa_find_cfgtables(struct ctlr_info *h)
{
u64 cfg_offset;
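
Both hpsa conversions above, like the matching smartpqi commit in the shortlog, apply the same idiom: memdup_user() collapses the kmalloc() + copy_from_user() pair into a single call that returns an ERR_PTR on failure, while the device-to-host direction still just needs a zeroed buffer. A condensed sketch of the pattern, with hypothetical names:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

static void *passthru_get_buffer(const void __user *ubuf, size_t len,
				 bool host_to_device)
{
	void *buf;

	if (host_to_device)
		/* Allocates and copies in one step; ERR_PTR on failure */
		return memdup_user(ubuf, len);

	/* Read direction: the device fills the buffer, so just zero it */
	buf = kzalloc(len, GFP_KERNEL);
	return buf ? buf : ERR_PTR(-ENOMEM);
}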


@ -4281,11 +4281,11 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
}
if (ioa_cfg->sis64)
ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
sizeof(__be32 *)));
ioa_data = vmalloc_array(IPR_FMT3_MAX_NUM_DUMP_PAGES,
sizeof(__be32 *));
else
ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
sizeof(__be32 *)));
ioa_data = vmalloc_array(IPR_FMT2_MAX_NUM_DUMP_PAGES,
sizeof(__be32 *));
if (!ioa_data) {
ipr_err("Dump memory allocation failed\n");
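
vmalloc_array(n, size) is the vmalloc() counterpart of kmalloc_array(): it performs the same overflow-checked multiplication that the open-coded array_size() form did and returns NULL if n * size would wrap, so the two forms in this sketch are equivalent:

#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

/* Before: explicit overflow-safe size computation */
static void *dump_pages_old(size_t n)
{
	return vmalloc(array_size(n, sizeof(__be32 *)));
}

/* After: the helper does the checked multiply internally */
static void *dump_pages_new(size_t n)
{
	return vmalloc_array(n, sizeof(__be32 *));
}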


@ -1434,7 +1434,7 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport,
struct domain_device *dev = idev->domain_dev;
enum sci_status status;
if (dev->parent && dev_is_expander(dev->parent->dev_type))
if (dev_parent_is_expander(dev))
status = sci_remote_device_ea_construct(iport, idev);
else
status = sci_remote_device_da_construct(iport, idev);


@ -356,7 +356,7 @@ static inline int fc_ct_ms_fill(struct fc_lport *lport,
put_unaligned_be16(len, &entry->len);
snprintf((char *)&entry->value,
FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN,
"%s v%s",
"%.62s v%.62s",
init_utsname()->sysname,
init_utsname()->release);
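
The fix works because a precision on %s caps how many bytes an argument may contribute: with "%.62s v%.62s", the two utsname strings can no longer combine to overrun the FC_FDMI_HBA_ATTR_OSNAMEVERSION_LEN-sized attribute. A tiny userspace-style demonstration of the modifier (values invented):

#include <stdio.h>

int main(void)
{
	char out[16];

	/* Each string argument is clipped to 5 bytes before formatting */
	snprintf(out, sizeof(out), "%.5s v%.5s",
		 "LinuxLinuxLinux", "6.18.0-rc1");
	printf("%s\n", out);	/* prints "Linux v6.18." */
	return 0;
}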


@ -1313,10 +1313,7 @@ static int sas_check_parent_topology(struct domain_device *child)
int i;
int res = 0;
if (!child->parent)
return 0;
if (!dev_is_expander(child->parent->dev_type))
if (!dev_parent_is_expander(child))
return 0;
parent_ex = &child->parent->ex_dev;


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@ -661,15 +661,12 @@ struct lpfc_vport {
uint32_t num_disc_nodes; /* in addition to hba_state */
uint32_t gidft_inp; /* cnt of outstanding GID_FTs */
uint32_t fc_nlp_cnt; /* outstanding NODELIST requests */
uint32_t fc_rscn_id_cnt; /* count of RSCNs payloads in list */
uint32_t fc_rscn_flush; /* flag use of fc_rscn_id_list */
struct lpfc_dmabuf *fc_rscn_id_list[FC_MAX_HOLD_RSCN];
struct lpfc_name fc_nodename; /* fc nodename */
struct lpfc_name fc_portname; /* fc portname */
struct lpfc_work_evt disc_timeout_evt;
struct timer_list fc_disctmo; /* Discovery rescue timer */
uint8_t fc_ns_retry; /* retries for fabric nameserver */
uint32_t fc_prli_sent; /* cntr for outstanding PRLIs */
@ -744,12 +741,6 @@ struct lpfc_vport {
struct lpfc_vmid_priority_info vmid_priority;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct dentry *debug_disc_trc;
struct dentry *debug_nodelist;
struct dentry *debug_nvmestat;
struct dentry *debug_scsistat;
struct dentry *debug_ioktime;
struct dentry *debug_hdwqstat;
struct dentry *vport_debugfs_root;
struct lpfc_debugfs_trc *disc_trc;
atomic_t disc_trc_cnt;
@ -767,7 +758,6 @@ struct lpfc_vport {
/* There is a single nvme instance per vport. */
struct nvme_fc_local_port *localport;
uint8_t nvmei_support; /* driver supports NVME Initiator */
uint32_t last_fcp_wqidx;
uint32_t rcv_flogi_cnt; /* How many unsol FLOGIs ACK'd. */
};
@ -1060,8 +1050,6 @@ struct lpfc_hba {
struct lpfc_dmabuf hbqslimp;
uint16_t pci_cfg_value;
uint8_t fc_linkspeed; /* Link speed after last READ_LA */
uint32_t fc_eventTag; /* event tag for link attention */
@ -1088,7 +1076,6 @@ struct lpfc_hba {
struct lpfc_stats fc_stat;
struct lpfc_nodelist fc_fcpnodev; /* nodelist entry for no device */
uint32_t nport_event_cnt; /* timestamp for nlplist entry */
uint8_t wwnn[8];
@ -1229,9 +1216,6 @@ struct lpfc_hba {
uint32_t hbq_count; /* Count of configured HBQs */
struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies */
atomic_t fcp_qidx; /* next FCP WQ (RR Policy) */
atomic_t nvme_qidx; /* next NVME WQ (RR Policy) */
phys_addr_t pci_bar0_map; /* Physical address for PCI BAR0 */
phys_addr_t pci_bar1_map; /* Physical address for PCI BAR1 */
phys_addr_t pci_bar2_map; /* Physical address for PCI BAR2 */
@ -1348,30 +1332,9 @@ struct lpfc_hba {
unsigned long last_ramp_down_time;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
struct dentry *hba_debugfs_root;
atomic_t debugfs_vport_count;
struct dentry *debug_multixri_pools;
struct dentry *debug_hbqinfo;
struct dentry *debug_dumpHostSlim;
struct dentry *debug_dumpHBASlim;
struct dentry *debug_InjErrLBA; /* LBA to inject errors at */
struct dentry *debug_InjErrNPortID; /* NPortID to inject errors at */
struct dentry *debug_InjErrWWPN; /* WWPN to inject errors at */
struct dentry *debug_writeGuard; /* inject write guard_tag errors */
struct dentry *debug_writeApp; /* inject write app_tag errors */
struct dentry *debug_writeRef; /* inject write ref_tag errors */
struct dentry *debug_readGuard; /* inject read guard_tag errors */
struct dentry *debug_readApp; /* inject read app_tag errors */
struct dentry *debug_readRef; /* inject read ref_tag errors */
unsigned int debugfs_vport_count;
struct dentry *debug_nvmeio_trc;
struct lpfc_debugfs_nvmeio_trc *nvmeio_trc;
struct dentry *debug_hdwqinfo;
#ifdef LPFC_HDWQ_LOCK_STAT
struct dentry *debug_lockstat;
#endif
struct dentry *debug_cgn_buffer;
struct dentry *debug_rx_monitor;
struct dentry *debug_ras_log;
atomic_t nvmeio_trc_cnt;
uint32_t nvmeio_trc_size;
uint32_t nvmeio_trc_output_idx;
@ -1388,19 +1351,10 @@ struct lpfc_hba {
sector_t lpfc_injerr_lba;
#define LPFC_INJERR_LBA_OFF (sector_t)(-1)
struct dentry *debug_slow_ring_trc;
struct lpfc_debugfs_trc *slow_ring_trc;
atomic_t slow_ring_trc_cnt;
/* iDiag debugfs sub-directory */
struct dentry *idiag_root;
struct dentry *idiag_pci_cfg;
struct dentry *idiag_bar_acc;
struct dentry *idiag_que_info;
struct dentry *idiag_que_acc;
struct dentry *idiag_drb_acc;
struct dentry *idiag_ctl_acc;
struct dentry *idiag_mbx_acc;
struct dentry *idiag_ext_acc;
uint8_t lpfc_idiag_last_eq;
#endif
uint16_t nvmeio_trc_on;


@ -2373,93 +2373,117 @@ out:
static ssize_t
lpfc_debugfs_dif_err_read(struct file *file, char __user *buf,
size_t nbytes, loff_t *ppos)
size_t nbytes, loff_t *ppos)
{
struct lpfc_hba *phba = file->private_data;
int kind = debugfs_get_aux_num(file);
char cbuf[32];
uint64_t tmp = 0;
char cbuf[32] = {0};
int cnt = 0;
if (kind == writeGuard)
cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wgrd_cnt);
else if (kind == writeApp)
cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wapp_cnt);
else if (kind == writeRef)
cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_wref_cnt);
else if (kind == readGuard)
cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rgrd_cnt);
else if (kind == readApp)
cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rapp_cnt);
else if (kind == readRef)
cnt = scnprintf(cbuf, 32, "%u\n", phba->lpfc_injerr_rref_cnt);
else if (kind == InjErrNPortID)
cnt = scnprintf(cbuf, 32, "0x%06x\n",
switch (kind) {
case writeGuard:
cnt = scnprintf(cbuf, sizeof(cbuf), "%u\n",
phba->lpfc_injerr_wgrd_cnt);
break;
case writeApp:
cnt = scnprintf(cbuf, sizeof(cbuf), "%u\n",
phba->lpfc_injerr_wapp_cnt);
break;
case writeRef:
cnt = scnprintf(cbuf, sizeof(cbuf), "%u\n",
phba->lpfc_injerr_wref_cnt);
break;
case readGuard:
cnt = scnprintf(cbuf, sizeof(cbuf), "%u\n",
phba->lpfc_injerr_rgrd_cnt);
break;
case readApp:
cnt = scnprintf(cbuf, sizeof(cbuf), "%u\n",
phba->lpfc_injerr_rapp_cnt);
break;
case readRef:
cnt = scnprintf(cbuf, sizeof(cbuf), "%u\n",
phba->lpfc_injerr_rref_cnt);
break;
case InjErrNPortID:
cnt = scnprintf(cbuf, sizeof(cbuf), "0x%06x\n",
phba->lpfc_injerr_nportid);
else if (kind == InjErrWWPN) {
memcpy(&tmp, &phba->lpfc_injerr_wwpn, sizeof(struct lpfc_name));
tmp = cpu_to_be64(tmp);
cnt = scnprintf(cbuf, 32, "0x%016llx\n", tmp);
} else if (kind == InjErrLBA) {
if (phba->lpfc_injerr_lba == (sector_t)(-1))
cnt = scnprintf(cbuf, 32, "off\n");
break;
case InjErrWWPN:
cnt = scnprintf(cbuf, sizeof(cbuf), "0x%016llx\n",
be64_to_cpu(phba->lpfc_injerr_wwpn.u.wwn_be));
break;
case InjErrLBA:
if (phba->lpfc_injerr_lba == LPFC_INJERR_LBA_OFF)
cnt = scnprintf(cbuf, sizeof(cbuf), "off\n");
else
cnt = scnprintf(cbuf, 32, "0x%llx\n",
(uint64_t) phba->lpfc_injerr_lba);
} else
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0547 Unknown debugfs error injection entry\n");
cnt = scnprintf(cbuf, sizeof(cbuf), "0x%llx\n",
(uint64_t)phba->lpfc_injerr_lba);
break;
default:
lpfc_log_msg(phba, KERN_WARNING, LOG_INIT,
"0547 Unknown debugfs error injection entry\n");
break;
}
return simple_read_from_buffer(buf, nbytes, ppos, &cbuf, cnt);
}
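
The reworked read side builds the whole reply in a small stack buffer and hands it to simple_read_from_buffer(), which does the *ppos bookkeeping and short-read handling. The same pattern in minimal form, assuming a hypothetical counter attribute:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/types.h>

static ssize_t counter_read(struct file *file, char __user *buf,
			    size_t nbytes, loff_t *ppos)
{
	u32 *counter = file->private_data;
	char cbuf[32];
	int cnt;

	cnt = scnprintf(cbuf, sizeof(cbuf), "%u\n", *counter);
	/* Takes care of offset tracking and partial reads */
	return simple_read_from_buffer(buf, nbytes, ppos, cbuf, cnt);
}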
static ssize_t
lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf,
size_t nbytes, loff_t *ppos)
size_t nbytes, loff_t *ppos)
{
struct lpfc_hba *phba = file->private_data;
int kind = debugfs_get_aux_num(file);
char dstbuf[33];
uint64_t tmp = 0;
int size;
char dstbuf[33] = {0};
unsigned long long tmp;
unsigned long size;
memset(dstbuf, 0, 33);
size = (nbytes < 32) ? nbytes : 32;
size = (nbytes < (sizeof(dstbuf) - 1)) ? nbytes : (sizeof(dstbuf) - 1);
if (copy_from_user(dstbuf, buf, size))
return -EFAULT;
if (kind == InjErrLBA) {
if ((dstbuf[0] == 'o') && (dstbuf[1] == 'f') &&
(dstbuf[2] == 'f'))
tmp = (uint64_t)(-1);
if (kstrtoull(dstbuf, 0, &tmp)) {
if (kind != InjErrLBA || !strstr(dstbuf, "off"))
return -EINVAL;
}
if ((tmp == 0) && (kstrtoull(dstbuf, 0, &tmp)))
return -EINVAL;
if (kind == writeGuard)
switch (kind) {
case writeGuard:
phba->lpfc_injerr_wgrd_cnt = (uint32_t)tmp;
else if (kind == writeApp)
break;
case writeApp:
phba->lpfc_injerr_wapp_cnt = (uint32_t)tmp;
else if (kind == writeRef)
break;
case writeRef:
phba->lpfc_injerr_wref_cnt = (uint32_t)tmp;
else if (kind == readGuard)
break;
case readGuard:
phba->lpfc_injerr_rgrd_cnt = (uint32_t)tmp;
else if (kind == readApp)
break;
case readApp:
phba->lpfc_injerr_rapp_cnt = (uint32_t)tmp;
else if (kind == readRef)
break;
case readRef:
phba->lpfc_injerr_rref_cnt = (uint32_t)tmp;
else if (kind == InjErrLBA)
phba->lpfc_injerr_lba = (sector_t)tmp;
else if (kind == InjErrNPortID)
break;
case InjErrLBA:
if (strstr(dstbuf, "off"))
phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
else
phba->lpfc_injerr_lba = (sector_t)tmp;
break;
case InjErrNPortID:
phba->lpfc_injerr_nportid = (uint32_t)(tmp & Mask_DID);
else if (kind == InjErrWWPN) {
tmp = cpu_to_be64(tmp);
memcpy(&phba->lpfc_injerr_wwpn, &tmp, sizeof(struct lpfc_name));
} else
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0548 Unknown debugfs error injection entry\n");
break;
case InjErrWWPN:
phba->lpfc_injerr_wwpn.u.wwn_be = cpu_to_be64(tmp);
break;
default:
lpfc_log_msg(phba, KERN_WARNING, LOG_INIT,
"0548 Unknown debugfs error injection entry\n");
break;
}
return nbytes;
}
@ -5728,7 +5752,7 @@ static const struct file_operations lpfc_debugfs_op_slow_ring_trc = {
};
static struct dentry *lpfc_debugfs_root = NULL;
static atomic_t lpfc_debugfs_hba_count;
static unsigned int lpfc_debugfs_hba_count;
/*
* File operations for the iDiag debugfs
@ -6050,7 +6074,12 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
/* Setup lpfc root directory */
if (!lpfc_debugfs_root) {
lpfc_debugfs_root = debugfs_create_dir("lpfc", NULL);
atomic_set(&lpfc_debugfs_hba_count, 0);
lpfc_debugfs_hba_count = 0;
if (IS_ERR(lpfc_debugfs_root)) {
lpfc_vlog_msg(vport, KERN_WARNING, LOG_INIT,
"0527 Cannot create debugfs lpfc\n");
return;
}
}
if (!lpfc_debugfs_start_time)
lpfc_debugfs_start_time = jiffies;
@ -6061,150 +6090,96 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
pport_setup = true;
phba->hba_debugfs_root =
debugfs_create_dir(name, lpfc_debugfs_root);
atomic_inc(&lpfc_debugfs_hba_count);
atomic_set(&phba->debugfs_vport_count, 0);
phba->debugfs_vport_count = 0;
if (IS_ERR(phba->hba_debugfs_root)) {
lpfc_vlog_msg(vport, KERN_WARNING, LOG_INIT,
"0528 Cannot create debugfs %s\n", name);
return;
}
lpfc_debugfs_hba_count++;
/* Multi-XRI pools */
snprintf(name, sizeof(name), "multixripools");
phba->debug_multixri_pools =
debugfs_create_file(name, S_IFREG | 0644,
phba->hba_debugfs_root,
phba,
&lpfc_debugfs_op_multixripools);
if (IS_ERR(phba->debug_multixri_pools)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0527 Cannot create debugfs multixripools\n");
goto debug_failed;
}
debugfs_create_file("multixripools", 0644,
phba->hba_debugfs_root, phba,
&lpfc_debugfs_op_multixripools);
/* Congestion Info Buffer */
scnprintf(name, sizeof(name), "cgn_buffer");
phba->debug_cgn_buffer =
debugfs_create_file(name, S_IFREG | 0644,
phba->hba_debugfs_root,
phba, &lpfc_cgn_buffer_op);
if (IS_ERR(phba->debug_cgn_buffer)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"6527 Cannot create debugfs "
"cgn_buffer\n");
goto debug_failed;
}
debugfs_create_file("cgn_buffer", 0644, phba->hba_debugfs_root,
phba, &lpfc_cgn_buffer_op);
/* RX Monitor */
scnprintf(name, sizeof(name), "rx_monitor");
phba->debug_rx_monitor =
debugfs_create_file(name, S_IFREG | 0644,
phba->hba_debugfs_root,
phba, &lpfc_rx_monitor_op);
if (IS_ERR(phba->debug_rx_monitor)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"6528 Cannot create debugfs "
"rx_monitor\n");
goto debug_failed;
}
debugfs_create_file("rx_monitor", 0644, phba->hba_debugfs_root,
phba, &lpfc_rx_monitor_op);
/* RAS log */
snprintf(name, sizeof(name), "ras_log");
phba->debug_ras_log =
debugfs_create_file(name, 0644,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_ras_log);
if (IS_ERR(phba->debug_ras_log)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"6148 Cannot create debugfs"
" ras_log\n");
goto debug_failed;
}
debugfs_create_file("ras_log", 0644, phba->hba_debugfs_root,
phba, &lpfc_debugfs_ras_log);
/* Setup hbqinfo */
snprintf(name, sizeof(name), "hbqinfo");
phba->debug_hbqinfo =
debugfs_create_file(name, S_IFREG | 0644,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_hbqinfo);
debugfs_create_file("hbqinfo", 0644, phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_hbqinfo);
#ifdef LPFC_HDWQ_LOCK_STAT
/* Setup lockstat */
snprintf(name, sizeof(name), "lockstat");
phba->debug_lockstat =
debugfs_create_file(name, S_IFREG | 0644,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_lockstat);
if (IS_ERR(phba->debug_lockstat)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"4610 Can't create debugfs lockstat\n");
goto debug_failed;
}
debugfs_create_file("lockstat", 0644, phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_lockstat);
#endif
/* Setup dumpHBASlim */
if (phba->sli_rev < LPFC_SLI_REV4) {
snprintf(name, sizeof(name), "dumpHBASlim");
phba->debug_dumpHBASlim =
debugfs_create_file(name,
S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_dumpHBASlim);
} else
phba->debug_dumpHBASlim = NULL;
/* Setup dumpHBASlim */
debugfs_create_file("dumpHBASlim", 0644,
phba->hba_debugfs_root, phba,
&lpfc_debugfs_op_dumpHBASlim);
}
/* Setup dumpHostSlim */
if (phba->sli_rev < LPFC_SLI_REV4) {
snprintf(name, sizeof(name), "dumpHostSlim");
phba->debug_dumpHostSlim =
debugfs_create_file(name,
S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_dumpHostSlim);
} else
phba->debug_dumpHostSlim = NULL;
/* Setup dumpHostSlim */
debugfs_create_file("dumpHostSlim", 0644,
phba->hba_debugfs_root, phba,
&lpfc_debugfs_op_dumpHostSlim);
}
/* Setup DIF Error Injections */
phba->debug_InjErrLBA =
debugfs_create_file_aux_num("InjErrLBA", 0644,
phba->hba_debugfs_root,
phba, InjErrLBA, &lpfc_debugfs_op_dif_err);
debugfs_create_file_aux_num("InjErrLBA", 0644,
phba->hba_debugfs_root, phba,
InjErrLBA,
&lpfc_debugfs_op_dif_err);
phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
phba->debug_InjErrNPortID =
debugfs_create_file_aux_num("InjErrNPortID", 0644,
phba->hba_debugfs_root,
phba, InjErrNPortID, &lpfc_debugfs_op_dif_err);
debugfs_create_file_aux_num("InjErrNPortID", 0644,
phba->hba_debugfs_root, phba,
InjErrNPortID,
&lpfc_debugfs_op_dif_err);
phba->debug_InjErrWWPN =
debugfs_create_file_aux_num("InjErrWWPN", 0644,
phba->hba_debugfs_root,
phba, InjErrWWPN, &lpfc_debugfs_op_dif_err);
debugfs_create_file_aux_num("InjErrWWPN", 0644,
phba->hba_debugfs_root, phba,
InjErrWWPN,
&lpfc_debugfs_op_dif_err);
phba->debug_writeGuard =
debugfs_create_file_aux_num("writeGuardInjErr", 0644,
phba->hba_debugfs_root,
phba, writeGuard, &lpfc_debugfs_op_dif_err);
debugfs_create_file_aux_num("writeGuardInjErr", 0644,
phba->hba_debugfs_root, phba,
writeGuard,
&lpfc_debugfs_op_dif_err);
phba->debug_writeApp =
debugfs_create_file_aux_num("writeAppInjErr", 0644,
phba->hba_debugfs_root,
phba, writeApp, &lpfc_debugfs_op_dif_err);
debugfs_create_file_aux_num("writeAppInjErr", 0644,
phba->hba_debugfs_root, phba,
writeApp, &lpfc_debugfs_op_dif_err);
phba->debug_writeRef =
debugfs_create_file_aux_num("writeRefInjErr", 0644,
phba->hba_debugfs_root,
phba, writeRef, &lpfc_debugfs_op_dif_err);
debugfs_create_file_aux_num("writeRefInjErr", 0644,
phba->hba_debugfs_root, phba,
writeRef, &lpfc_debugfs_op_dif_err);
phba->debug_readGuard =
debugfs_create_file_aux_num("readGuardInjErr", 0644,
phba->hba_debugfs_root,
phba, readGuard, &lpfc_debugfs_op_dif_err);
debugfs_create_file_aux_num("readGuardInjErr", 0644,
phba->hba_debugfs_root, phba,
readGuard,
&lpfc_debugfs_op_dif_err);
phba->debug_readApp =
debugfs_create_file_aux_num("readAppInjErr", 0644,
phba->hba_debugfs_root,
phba, readApp, &lpfc_debugfs_op_dif_err);
debugfs_create_file_aux_num("readAppInjErr", 0644,
phba->hba_debugfs_root, phba,
readApp, &lpfc_debugfs_op_dif_err);
phba->debug_readRef =
debugfs_create_file_aux_num("readRefInjErr", 0644,
phba->hba_debugfs_root,
phba, readRef, &lpfc_debugfs_op_dif_err);
debugfs_create_file_aux_num("readRefInjErr", 0644,
phba->hba_debugfs_root, phba,
readRef, &lpfc_debugfs_op_dif_err);
/* Setup slow ring trace */
if (lpfc_debugfs_max_slow_ring_trc) {
@ -6224,11 +6199,9 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
}
}
snprintf(name, sizeof(name), "slow_ring_trace");
phba->debug_slow_ring_trc =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_slow_ring_trc);
debugfs_create_file("slow_ring_trace", 0644,
phba->hba_debugfs_root, phba,
&lpfc_debugfs_op_slow_ring_trc);
if (!phba->slow_ring_trc) {
phba->slow_ring_trc = kcalloc(
lpfc_debugfs_max_slow_ring_trc,
@ -6238,16 +6211,13 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0416 Cannot create debugfs "
"slow_ring buffer\n");
goto debug_failed;
goto out;
}
atomic_set(&phba->slow_ring_trc_cnt, 0);
}
snprintf(name, sizeof(name), "nvmeio_trc");
phba->debug_nvmeio_trc =
debugfs_create_file(name, 0644,
phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_nvmeio_trc);
debugfs_create_file("nvmeio_trc", 0644, phba->hba_debugfs_root,
phba, &lpfc_debugfs_op_nvmeio_trc);
atomic_set(&phba->nvmeio_trc_cnt, 0);
if (lpfc_debugfs_max_nvmeio_trc) {
@ -6293,7 +6263,12 @@ nvmeio_off:
if (!vport->vport_debugfs_root) {
vport->vport_debugfs_root =
debugfs_create_dir(name, phba->hba_debugfs_root);
atomic_inc(&phba->debugfs_vport_count);
if (IS_ERR(vport->vport_debugfs_root)) {
lpfc_vlog_msg(vport, KERN_WARNING, LOG_INIT,
"0529 Cannot create debugfs %s\n", name);
return;
}
phba->debugfs_vport_count++;
}
if (lpfc_debugfs_max_disc_trc) {
@ -6320,54 +6295,27 @@ nvmeio_off:
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0418 Cannot create debugfs disc trace "
"buffer\n");
goto debug_failed;
goto out;
}
atomic_set(&vport->disc_trc_cnt, 0);
snprintf(name, sizeof(name), "discovery_trace");
vport->debug_disc_trc =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
vport->vport_debugfs_root,
vport, &lpfc_debugfs_op_disc_trc);
snprintf(name, sizeof(name), "nodelist");
vport->debug_nodelist =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
vport->vport_debugfs_root,
vport, &lpfc_debugfs_op_nodelist);
debugfs_create_file("discovery_trace", 0644, vport->vport_debugfs_root,
vport, &lpfc_debugfs_op_disc_trc);
snprintf(name, sizeof(name), "nvmestat");
vport->debug_nvmestat =
debugfs_create_file(name, 0644,
vport->vport_debugfs_root,
vport, &lpfc_debugfs_op_nvmestat);
debugfs_create_file("nodelist", 0644, vport->vport_debugfs_root, vport,
&lpfc_debugfs_op_nodelist);
snprintf(name, sizeof(name), "scsistat");
vport->debug_scsistat =
debugfs_create_file(name, 0644,
vport->vport_debugfs_root,
vport, &lpfc_debugfs_op_scsistat);
if (IS_ERR(vport->debug_scsistat)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"4611 Cannot create debugfs scsistat\n");
goto debug_failed;
}
debugfs_create_file("nvmestat", 0644, vport->vport_debugfs_root, vport,
&lpfc_debugfs_op_nvmestat);
snprintf(name, sizeof(name), "ioktime");
vport->debug_ioktime =
debugfs_create_file(name, 0644,
vport->vport_debugfs_root,
vport, &lpfc_debugfs_op_ioktime);
if (IS_ERR(vport->debug_ioktime)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
"0815 Cannot create debugfs ioktime\n");
goto debug_failed;
}
debugfs_create_file("scsistat", 0644, vport->vport_debugfs_root, vport,
&lpfc_debugfs_op_scsistat);
snprintf(name, sizeof(name), "hdwqstat");
vport->debug_hdwqstat =
debugfs_create_file(name, 0644,
vport->vport_debugfs_root,
vport, &lpfc_debugfs_op_hdwqstat);
debugfs_create_file("ioktime", 0644, vport->vport_debugfs_root, vport,
&lpfc_debugfs_op_ioktime);
debugfs_create_file("hdwqstat", 0644, vport->vport_debugfs_root, vport,
&lpfc_debugfs_op_hdwqstat);
/*
* The following section is for additional directories/files for the
@ -6375,93 +6323,58 @@ nvmeio_off:
*/
if (!pport_setup)
goto debug_failed;
return;
/*
* iDiag debugfs root entry points for SLI4 device only
*/
if (phba->sli_rev < LPFC_SLI_REV4)
goto debug_failed;
return;
snprintf(name, sizeof(name), "iDiag");
if (!phba->idiag_root) {
phba->idiag_root =
debugfs_create_dir(name, phba->hba_debugfs_root);
debugfs_create_dir("iDiag", phba->hba_debugfs_root);
/* Initialize iDiag data structure */
memset(&idiag, 0, sizeof(idiag));
}
/* iDiag read PCI config space */
snprintf(name, sizeof(name), "pciCfg");
if (!phba->idiag_pci_cfg) {
phba->idiag_pci_cfg =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->idiag_root, phba, &lpfc_idiag_op_pciCfg);
idiag.offset.last_rd = 0;
}
debugfs_create_file("pciCfg", 0644, phba->idiag_root, phba,
&lpfc_idiag_op_pciCfg);
idiag.offset.last_rd = 0;
/* iDiag PCI BAR access */
snprintf(name, sizeof(name), "barAcc");
if (!phba->idiag_bar_acc) {
phba->idiag_bar_acc =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->idiag_root, phba, &lpfc_idiag_op_barAcc);
idiag.offset.last_rd = 0;
}
debugfs_create_file("barAcc", 0644, phba->idiag_root, phba,
&lpfc_idiag_op_barAcc);
idiag.offset.last_rd = 0;
/* iDiag get PCI function queue information */
snprintf(name, sizeof(name), "queInfo");
if (!phba->idiag_que_info) {
phba->idiag_que_info =
debugfs_create_file(name, S_IFREG|S_IRUGO,
phba->idiag_root, phba, &lpfc_idiag_op_queInfo);
}
debugfs_create_file("queInfo", 0444, phba->idiag_root, phba,
&lpfc_idiag_op_queInfo);
/* iDiag access PCI function queue */
snprintf(name, sizeof(name), "queAcc");
if (!phba->idiag_que_acc) {
phba->idiag_que_acc =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->idiag_root, phba, &lpfc_idiag_op_queAcc);
}
debugfs_create_file("queAcc", 0644, phba->idiag_root, phba,
&lpfc_idiag_op_queAcc);
/* iDiag access PCI function doorbell registers */
snprintf(name, sizeof(name), "drbAcc");
if (!phba->idiag_drb_acc) {
phba->idiag_drb_acc =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->idiag_root, phba, &lpfc_idiag_op_drbAcc);
}
debugfs_create_file("drbAcc", 0644, phba->idiag_root, phba,
&lpfc_idiag_op_drbAcc);
/* iDiag access PCI function control registers */
snprintf(name, sizeof(name), "ctlAcc");
if (!phba->idiag_ctl_acc) {
phba->idiag_ctl_acc =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->idiag_root, phba, &lpfc_idiag_op_ctlAcc);
}
debugfs_create_file("ctlAcc", 0644, phba->idiag_root, phba,
&lpfc_idiag_op_ctlAcc);
/* iDiag access mbox commands */
snprintf(name, sizeof(name), "mbxAcc");
if (!phba->idiag_mbx_acc) {
phba->idiag_mbx_acc =
debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR,
phba->idiag_root, phba, &lpfc_idiag_op_mbxAcc);
}
debugfs_create_file("mbxAcc", 0644, phba->idiag_root, phba,
&lpfc_idiag_op_mbxAcc);
/* iDiag extents access commands */
if (phba->sli4_hba.extents_in_use) {
snprintf(name, sizeof(name), "extAcc");
if (!phba->idiag_ext_acc) {
phba->idiag_ext_acc =
debugfs_create_file(name,
S_IFREG|S_IRUGO|S_IWUSR,
phba->idiag_root, phba,
&lpfc_idiag_op_extAcc);
}
debugfs_create_file("extAcc", 0644, phba->idiag_root, phba,
&lpfc_idiag_op_extAcc);
}
debug_failed:
out:
/* alloc'ed items are kfree'd in lpfc_debugfs_terminate */
return;
#endif
}
@ -6486,145 +6399,26 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport)
kfree(vport->disc_trc);
vport->disc_trc = NULL;
debugfs_remove(vport->debug_disc_trc); /* discovery_trace */
vport->debug_disc_trc = NULL;
debugfs_remove(vport->debug_nodelist); /* nodelist */
vport->debug_nodelist = NULL;
debugfs_remove(vport->debug_nvmestat); /* nvmestat */
vport->debug_nvmestat = NULL;
debugfs_remove(vport->debug_scsistat); /* scsistat */
vport->debug_scsistat = NULL;
debugfs_remove(vport->debug_ioktime); /* ioktime */
vport->debug_ioktime = NULL;
debugfs_remove(vport->debug_hdwqstat); /* hdwqstat */
vport->debug_hdwqstat = NULL;
if (vport->vport_debugfs_root) {
debugfs_remove(vport->vport_debugfs_root); /* vportX */
vport->vport_debugfs_root = NULL;
atomic_dec(&phba->debugfs_vport_count);
phba->debugfs_vport_count--;
}
if (atomic_read(&phba->debugfs_vport_count) == 0) {
debugfs_remove(phba->debug_multixri_pools); /* multixripools*/
phba->debug_multixri_pools = NULL;
debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */
phba->debug_hbqinfo = NULL;
debugfs_remove(phba->debug_cgn_buffer);
phba->debug_cgn_buffer = NULL;
debugfs_remove(phba->debug_rx_monitor);
phba->debug_rx_monitor = NULL;
debugfs_remove(phba->debug_ras_log);
phba->debug_ras_log = NULL;
#ifdef LPFC_HDWQ_LOCK_STAT
debugfs_remove(phba->debug_lockstat); /* lockstat */
phba->debug_lockstat = NULL;
#endif
debugfs_remove(phba->debug_dumpHBASlim); /* HBASlim */
phba->debug_dumpHBASlim = NULL;
debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */
phba->debug_dumpHostSlim = NULL;
debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */
phba->debug_InjErrLBA = NULL;
debugfs_remove(phba->debug_InjErrNPortID);
phba->debug_InjErrNPortID = NULL;
debugfs_remove(phba->debug_InjErrWWPN); /* InjErrWWPN */
phba->debug_InjErrWWPN = NULL;
debugfs_remove(phba->debug_writeGuard); /* writeGuard */
phba->debug_writeGuard = NULL;
debugfs_remove(phba->debug_writeApp); /* writeApp */
phba->debug_writeApp = NULL;
debugfs_remove(phba->debug_writeRef); /* writeRef */
phba->debug_writeRef = NULL;
debugfs_remove(phba->debug_readGuard); /* readGuard */
phba->debug_readGuard = NULL;
debugfs_remove(phba->debug_readApp); /* readApp */
phba->debug_readApp = NULL;
debugfs_remove(phba->debug_readRef); /* readRef */
phba->debug_readRef = NULL;
if (!phba->debugfs_vport_count) {
kfree(phba->slow_ring_trc);
phba->slow_ring_trc = NULL;
/* slow_ring_trace */
debugfs_remove(phba->debug_slow_ring_trc);
phba->debug_slow_ring_trc = NULL;
debugfs_remove(phba->debug_nvmeio_trc);
phba->debug_nvmeio_trc = NULL;
kfree(phba->nvmeio_trc);
phba->nvmeio_trc = NULL;
/*
* iDiag release
*/
if (phba->sli_rev == LPFC_SLI_REV4) {
/* iDiag extAcc */
debugfs_remove(phba->idiag_ext_acc);
phba->idiag_ext_acc = NULL;
/* iDiag mbxAcc */
debugfs_remove(phba->idiag_mbx_acc);
phba->idiag_mbx_acc = NULL;
/* iDiag ctlAcc */
debugfs_remove(phba->idiag_ctl_acc);
phba->idiag_ctl_acc = NULL;
/* iDiag drbAcc */
debugfs_remove(phba->idiag_drb_acc);
phba->idiag_drb_acc = NULL;
/* iDiag queAcc */
debugfs_remove(phba->idiag_que_acc);
phba->idiag_que_acc = NULL;
/* iDiag queInfo */
debugfs_remove(phba->idiag_que_info);
phba->idiag_que_info = NULL;
/* iDiag barAcc */
debugfs_remove(phba->idiag_bar_acc);
phba->idiag_bar_acc = NULL;
/* iDiag pciCfg */
debugfs_remove(phba->idiag_pci_cfg);
phba->idiag_pci_cfg = NULL;
/* Finally remove the iDiag debugfs root */
debugfs_remove(phba->idiag_root);
phba->idiag_root = NULL;
}
if (phba->hba_debugfs_root) {
debugfs_remove(phba->hba_debugfs_root); /* fnX */
phba->hba_debugfs_root = NULL;
atomic_dec(&lpfc_debugfs_hba_count);
lpfc_debugfs_hba_count--;
}
if (atomic_read(&lpfc_debugfs_hba_count) == 0) {
if (!lpfc_debugfs_hba_count) {
debugfs_remove(lpfc_debugfs_root); /* lpfc */
lpfc_debugfs_root = NULL;
}
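
The debugfs rework in this file follows the current kernel convention: per-file dentries are no longer stored or error-checked, since debugfs_create_file() tolerates an ERR_PTR parent and a recursive debugfs_remove() on the directory tears the whole tree down; only directory dentries that are reused later (the lpfc root and the per-HBA and per-vport directories) still get checked. The idiom in miniature, with invented names:

#include <linux/debugfs.h>
#include <linux/err.h>

static struct dentry *example_root;

static void example_debugfs_init(void *priv,
				 const struct file_operations *fops)
{
	example_root = debugfs_create_dir("example", NULL);
	if (IS_ERR(example_root))
		return;		/* this dentry is reused, so check it */

	/* Return value intentionally ignored per debugfs convention */
	debugfs_create_file("stats", 0644, example_root, priv, fops);
}

static void example_debugfs_exit(void)
{
	/* Recursively removes everything created under the directory */
	debugfs_remove(example_root);
}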


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2022 Broadcom. All Rights Reserved. The term *
* Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2007-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@ -44,6 +44,9 @@
/* hbqinfo output buffer size */
#define LPFC_HBQINFO_SIZE 8192
/* hdwqinfo output buffer size */
#define LPFC_HDWQINFO_SIZE 8192
/* nvmestat output buffer size */
#define LPFC_NVMESTAT_SIZE 8192
#define LPFC_IOKTIME_SIZE 8192


@ -3762,7 +3762,7 @@ lpfc_issue_els_rdf(struct lpfc_vport *vport, uint8_t retry)
memset(prdf, 0, cmdsize);
prdf->rdf.fpin_cmd = ELS_RDF;
prdf->rdf.desc_len = cpu_to_be32(sizeof(struct lpfc_els_rdf_req) -
sizeof(struct fc_els_rdf));
sizeof(struct fc_els_rdf_hdr));
prdf->reg_d1.reg_desc.desc_tag = cpu_to_be32(ELS_DTAG_FPIN_REGISTER);
prdf->reg_d1.reg_desc.desc_len = cpu_to_be32(
FC_TLV_DESC_LENGTH_FROM_SZ(prdf->reg_d1));
@ -5339,12 +5339,12 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
ulp_status, ulp_word4, did);
/* ELS response tag <ulpIoTag> completes */
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
"0110 ELS response tag x%x completes "
"0110 ELS response tag x%x completes fc_flag x%lx"
"Data: x%x x%x x%x x%x x%lx x%x x%x x%x %p %p\n",
iotag, ulp_status, ulp_word4, tmo,
iotag, vport->fc_flag, ulp_status, ulp_word4, tmo,
ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp);
if (mbox) {
if (mbox && !test_bit(FC_PT2PT, &vport->fc_flag)) {
if (ulp_status == 0 &&
test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag)) {
if (!lpfc_unreg_rpi(vport, ndlp) &&
@ -5403,6 +5403,10 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
}
out_free_mbox:
lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
} else if (mbox && test_bit(FC_PT2PT, &vport->fc_flag) &&
test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag)) {
lpfc_mbx_cmpl_reg_login(phba, mbox);
clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag);
}
out:
if (ndlp && shost) {
@ -11259,6 +11263,11 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
"0126 FDISC cmpl status: x%x/x%x)\n",
ulp_status, ulp_word4);
/* drop initial reference */
if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag))
lpfc_nlp_put(ndlp);
goto fdisc_failed;
}
@ -12008,7 +12017,11 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
sglq_entry->state = SGL_FREED;
spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
iflag);
lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI |
LOG_DISCOVERY | LOG_NODE,
"0732 ELS XRI ABORT on Node: ndlp=x%px "
"xri=x%x\n",
ndlp, xri);
if (ndlp) {
lpfc_set_rrq_active(phba, ndlp,
sglq_entry->sli4_lxritag,


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@ -366,6 +366,7 @@ struct lpfc_name {
} s;
uint8_t wwn[8];
uint64_t name __packed __aligned(4);
__be64 wwn_be __packed __aligned(4);
} u;
};
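
Adding a __be64 view to the lpfc_name union is what lets the debugfs DIF-error handlers earlier in this series swap their memcpy() plus cpu_to_be64() dance for single byte-order conversions. A reduced sketch of the benefit:

#include <linux/types.h>
#include <asm/byteorder.h>

union wwn_view {
	u8 wwn[8];
	__be64 wwn_be;		/* big-endian view of the same 8 bytes */
};

/* One-line round trips instead of memcpy + manual byte-order fixups */
static u64 wwn_to_host(const union wwn_view *n)
{
	return be64_to_cpu(n->wwn_be);
}

static void host_to_wwn(union wwn_view *n, u64 host)
{
	n->wwn_be = cpu_to_be64(host);
}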


@ -4909,18 +4909,18 @@ struct send_frame_wqe {
#define ELS_RDF_REG_TAG_CNT 4
struct lpfc_els_rdf_reg_desc {
struct fc_df_desc_fpin_reg reg_desc; /* descriptor header */
struct fc_df_desc_fpin_reg_hdr reg_desc; /* descriptor header */
__be32 desc_tags[ELS_RDF_REG_TAG_CNT];
/* tags in reg_desc */
};
struct lpfc_els_rdf_req {
struct fc_els_rdf rdf; /* hdr up to descriptors */
struct fc_els_rdf_hdr rdf; /* hdr up to descriptors */
struct lpfc_els_rdf_reg_desc reg_d1; /* 1st descriptor */
};
struct lpfc_els_rdf_rsp {
struct fc_els_rdf_resp rdf_resp; /* hdr up to descriptors */
struct fc_els_rdf_resp_hdr rdf_resp; /* hdr up to descriptors */
struct lpfc_els_rdf_reg_desc reg_d1; /* 1st descriptor */
};


@ -3057,13 +3057,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
lpfc_vmid_vport_cleanup(vport);
list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
if (vport->port_type != LPFC_PHYSICAL_PORT &&
ndlp->nlp_DID == Fabric_DID) {
/* Just free up ndlp with Fabric_DID for vports */
lpfc_nlp_put(ndlp);
continue;
}
if (ndlp->nlp_DID == Fabric_Cntl_DID &&
ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
lpfc_nlp_put(ndlp);
@ -8300,10 +8293,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt,
phba->cfg_nvme_seg_cnt);
if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE)
i = phba->cfg_sg_dma_buf_size;
else
i = SLI4_PAGE_SIZE;
i = min(phba->cfg_sg_dma_buf_size, SLI4_PAGE_SIZE);
phba->lpfc_sg_dma_buf_pool =
dma_pool_create("lpfc_sg_dma_buf_pool",


@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
* Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
* Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
* Broadcom refers to Broadcom Inc. and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
@ -326,8 +326,14 @@ lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
/* Now that REG_RPI completed successfully,
* we can now proceed with sending the PLOGI ACC.
*/
rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
save_iocb, ndlp, NULL);
if (test_bit(FC_PT2PT, &ndlp->vport->fc_flag)) {
rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
save_iocb, ndlp, login_mbox);
} else {
rc = lpfc_els_rsp_acc(login_mbox->vport, ELS_CMD_PLOGI,
save_iocb, ndlp, NULL);
}
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"4576 PLOGI ACC fails pt2pt discovery: "
@ -335,9 +341,16 @@ lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox)
}
}
/* Now process the REG_RPI cmpl */
lpfc_mbx_cmpl_reg_login(phba, login_mbox);
clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag);
/* If this is a fabric topology, complete the reg_rpi and prli now.
* For Pt2Pt, the reg_rpi and PRLI are deferred until after the LS_ACC
* completes. This ensures, in Pt2Pt, that the PLOGI LS_ACC is sent
* before the PRLI.
*/
if (!test_bit(FC_PT2PT, &ndlp->vport->fc_flag)) {
/* Now process the REG_RPI cmpl */
lpfc_mbx_cmpl_reg_login(phba, login_mbox);
clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag);
}
kfree(save_iocb);
}


@ -1234,12 +1234,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
if ((phba->cfg_nvme_enable_fb) &&
test_bit(NLP_FIRSTBURST, &pnode->nlp_flag)) {
req_len = lpfc_ncmd->nvmeCmd->payload_length;
if (req_len < pnode->nvme_fb_size)
wqe->fcp_iwrite.initial_xfer_len =
req_len;
else
wqe->fcp_iwrite.initial_xfer_len =
pnode->nvme_fb_size;
wqe->fcp_iwrite.initial_xfer_len = min(req_len,
pnode->nvme_fb_size);
} else {
wqe->fcp_iwrite.initial_xfer_len = 0;
}
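Both lpfc hunks above replace an open-coded compare-and-assign with the kernel's min() helper from <linux/minmax.h>, which rejects mismatched argument types at compile time. A minimal sketch of the transformation, with illustrative names rather than lpfc's:

#include <linux/minmax.h>

/* Before: the four-line form the hunks above delete. */
static u32 xfer_len_open_coded(u32 req_len, u32 fb_size)
{
	u32 len;

	if (req_len < fb_size)
		len = req_len;
	else
		len = fb_size;
	return len;
}

/* After: identical behavior; min() fails the build if the two types
 * differ, in which case min_t(u32, a, b) supplies an explicit cast.
 */
static u32 xfer_len_min(u32 req_len, u32 fb_size)
{
	return min(req_len, fb_size);
}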


@ -5935,7 +5935,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct fc_rport *rport)
/**
* lpfc_reset_flush_io_context -
* @vport: The virtual port (scsi_host) for the flush context
* @tgt_id: If aborting by Target contect - specifies the target id
* @tgt_id: If aborting by Target context - specifies the target id
* @lun_id: If aborting by Lun context - specifies the lun id
* @context: specifies the context level to flush at.
*
@ -6109,8 +6109,14 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
spin_unlock_irqrestore(&pnode->lock, flags);
}
lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_TGT);
status = lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_TGT);
if (status != SUCCESS) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"0726 Target Reset flush status x%x\n",
status);
return status;
}
return FAST_IO_FAIL;
}
@ -6202,7 +6208,7 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
int rc, ret = SUCCESS;
lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"3172 SCSI layer issued Host Reset Data:\n");
"3172 SCSI layer issued Host Reset\n");
lpfc_offline_prep(phba, LPFC_MBX_WAIT);
lpfc_offline(phba);


@ -8820,7 +8820,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0381 Error %d during queue setup.\n", rc);
goto out_stop_timers;
goto out_destroy_queue;
}
/* Initialize the driver internal SLI layer lists. */
lpfc_sli4_setup(phba);
@ -9103,7 +9103,6 @@ out_free_iocblist:
lpfc_free_iocb_list(phba);
out_destroy_queue:
lpfc_sli4_queue_destroy(phba);
out_stop_timers:
lpfc_stop_hba_timers(phba);
out_free_mbox:
mempool_free(mboxq, phba->mbox_mem_pool);
@ -12439,19 +12438,11 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
/*
* If we're unloading, don't abort iocb on the ELS ring, but change
* the callback so that nothing happens when it finishes.
* Always abort the outstanding WQE and set the IA bit correctly
* for the context. This is necessary for correctly removing
* outstanding ndlp reference counts when the CQE completes with
* the XB bit set.
*/
if (test_bit(FC_UNLOADING, &vport->load_flag) &&
pring->ringno == LPFC_ELS_RING) {
if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
else
cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
return retval;
}
/* issue ABTS for this IOCB based on iotag */
abtsiocbp = __lpfc_sli_get_iocbq(phba);
if (abtsiocbp == NULL)
return IOCB_NORESOURCE;
@ -21373,7 +21364,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
struct lpfc_sglq *sglq;
struct lpfc_sli_ring *pring;
unsigned long iflags;
uint32_t ret = 0;
int ret = 0;
/* NVME_LS and NVME_LS ABTS requests. */
if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {


@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
#define LPFC_DRIVER_VERSION "14.4.0.10"
#define LPFC_DRIVER_VERSION "14.4.0.11"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */


@ -322,6 +322,9 @@ struct mpi3_man6_gpio_entry {
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_MASK (0x01)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_EDGE (0x00)
#define MPI3_MAN6_GPIO_EXTINT_PARAM1_FLAGS_TRIGGER_LEVEL (0x01)
#define MPI3_MAN6_GPIO_OVER_TEMP_PARAM1_LEVEL_WARNING (0x00)
#define MPI3_MAN6_GPIO_OVER_TEMP_PARAM1_LEVEL_CRITICAL (0x01)
#define MPI3_MAN6_GPIO_OVER_TEMP_PARAM1_LEVEL_FATAL (0x02)
#define MPI3_MAN6_GPIO_PORT_GREEN_PARAM1_PHY_STATUS_ALL_UP (0x00)
#define MPI3_MAN6_GPIO_PORT_GREEN_PARAM1_PHY_STATUS_ONE_OR_MORE_UP (0x01)
#define MPI3_MAN6_GPIO_CABLE_MGMT_PARAM1_INTERFACE_MODULE_PRESENT (0x00)
@ -1250,6 +1253,37 @@ struct mpi3_io_unit_page17 {
__le32 current_key[];
};
#define MPI3_IOUNIT17_PAGEVERSION (0x00)
struct mpi3_io_unit_page18 {
struct mpi3_config_page_header header;
u8 flags;
u8 poll_interval;
__le16 reserved0a;
__le32 reserved0c;
};
#define MPI3_IOUNIT18_PAGEVERSION (0x00)
#define MPI3_IOUNIT18_FLAGS_DIRECTATTACHED_ENABLE (0x01)
#define MPI3_IOUNIT18_POLLINTERVAL_DISABLE (0x00)
#ifndef MPI3_IOUNIT19_DEVICE_MAX
#define MPI3_IOUNIT19_DEVICE_MAX (1)
#endif
struct mpi3_iounit19_device {
__le16 temperature;
__le16 dev_handle;
__le16 persistent_id;
__le16 reserved06;
};
#define MPI3_IOUNIT19_DEVICE_TEMPERATURE_UNAVAILABLE (0x8000)
struct mpi3_io_unit_page19 {
struct mpi3_config_page_header header;
__le16 num_devices;
__le16 reserved0a;
__le32 reserved0c;
struct mpi3_iounit19_device device[MPI3_IOUNIT19_DEVICE_MAX];
};
#define MPI3_IOUNIT19_PAGEVERSION (0x00)
struct mpi3_ioc_page0 {
struct mpi3_config_page_header header;
__le32 reserved08;
@ -2356,7 +2390,9 @@ struct mpi3_device0_vd_format {
__le16 io_throttle_group;
__le16 io_throttle_group_low;
__le16 io_throttle_group_high;
__le32 reserved0c;
u8 vd_abort_to;
u8 vd_reset_to;
__le16 reserved0e;
};
#define MPI3_DEVICE0_VD_STATE_OFFLINE (0x00)
#define MPI3_DEVICE0_VD_STATE_PARTIALLY_DEGRADED (0x01)


@ -9,9 +9,11 @@
#define MPI3_NVME_ENCAP_CMD_MAX (1)
#endif
#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_MASK (0x0002)
#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_SHIFT (1)
#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_FAIL_ONLY (0x0000)
#define MPI3_NVME_FLAGS_FORCE_ADMIN_ERR_REPLY_ALL (0x0002)
#define MPI3_NVME_FLAGS_SUBMISSIONQ_MASK (0x0001)
#define MPI3_NVME_FLAGS_SUBMISSIONQ_SHIFT (0)
#define MPI3_NVME_FLAGS_SUBMISSIONQ_IO (0x0000)
#define MPI3_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0001)


@ -11,6 +11,7 @@
#define MPI3_SAS_DEVICE_INFO_STP_INITIATOR (0x00000010)
#define MPI3_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000008)
#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_MASK (0x00000007)
#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_SHIFT (0)
#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_NO_DEVICE (0x00000000)
#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_END_DEVICE (0x00000001)
#define MPI3_SAS_DEVICE_INFO_DEVICE_TYPE_EXPANDER (0x00000002)


@ -18,7 +18,7 @@ union mpi3_version_union {
#define MPI3_VERSION_MAJOR (3)
#define MPI3_VERSION_MINOR (0)
#define MPI3_VERSION_UNIT (35)
#define MPI3_VERSION_UNIT (37)
#define MPI3_VERSION_DEV (0)
#define MPI3_DEVHANDLE_INVALID (0xffff)
struct mpi3_sysif_oper_queue_indexes {


@ -56,8 +56,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
#define MPI3MR_DRIVER_VERSION "8.14.0.5.50"
#define MPI3MR_DRIVER_RELDATE "27-June-2025"
#define MPI3MR_DRIVER_VERSION "8.15.0.5.50"
#define MPI3MR_DRIVER_RELDATE "12-August-2025"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
@ -697,6 +697,8 @@ struct tgt_dev_vd {
u16 tg_id;
u32 tg_high;
u32 tg_low;
u8 abort_to;
u8 reset_to;
struct mpi3mr_throttle_group_info *tg;
};
@ -738,6 +740,8 @@ enum mpi3mr_dev_state {
* @wwid: World wide ID
* @enclosure_logical_id: Enclosure logical identifier
* @dev_spec: Device type specific information
* @abort_to: Timeout for abort TM
* @reset_to: Timeout for Target/LUN reset TM
* @ref_count: Reference count
* @state: device state
*/


@ -2353,6 +2353,8 @@ static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
{
int retval = 0;
u16 num_queues = 0, i = 0, msix_count_op_q = 1;
u32 ioc_status;
enum mpi3mr_iocstate ioc_state;
num_queues = min_t(int, mrioc->facts.max_op_reply_q,
mrioc->facts.max_op_req_q);
@ -2408,6 +2410,14 @@ static int mpi3mr_create_op_queues(struct mpi3mr_ioc *mrioc)
retval = -1;
goto out_failed;
}
ioc_status = readl(&mrioc->sysif_regs->ioc_status);
ioc_state = mpi3mr_get_iocstate(mrioc);
if ((ioc_status & MPI3_SYSIF_IOC_STATUS_RESET_HISTORY) ||
ioc_state != MRIOC_STATE_READY) {
mpi3mr_print_fault_info(mrioc);
retval = -1;
goto out_failed;
}
mrioc->num_op_reply_q = mrioc->num_op_req_q = i;
ioc_info(mrioc,
"successfully created %d operational queue pairs(default/polled) queue = (%d/%d)\n",
@ -5420,6 +5430,7 @@ int mpi3mr_soft_reset_handler(struct mpi3mr_ioc *mrioc,
mpi3mr_reset_rc_name(reset_reason));
mrioc->device_refresh_on = 0;
scsi_block_requests(mrioc->shost);
mrioc->reset_in_progress = 1;
mrioc->stop_bsgs = 1;
mrioc->prev_reset_result = -1;
@ -5528,6 +5539,7 @@ out:
if (!retval) {
mrioc->diagsave_timeout = 0;
mrioc->reset_in_progress = 0;
scsi_unblock_requests(mrioc->shost);
mrioc->pel_abort_requested = 0;
if (mrioc->pel_enabled) {
mrioc->pel_cmds.retry_count = 0;
@ -5552,6 +5564,7 @@ out:
mrioc->device_refresh_on = 0;
mrioc->unrecoverable = 1;
mrioc->reset_in_progress = 0;
scsi_unblock_requests(mrioc->shost);
mrioc->stop_bsgs = 0;
retval = -1;
mpi3mr_flush_cmds_for_unrecovered_controller(mrioc);


@ -1308,6 +1308,12 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
if (vdinf->vd_state == MPI3_DEVICE0_VD_STATE_OFFLINE)
tgtdev->is_hidden = 1;
tgtdev->non_stl = 1;
tgtdev->dev_spec.vd_inf.reset_to =
max_t(u8, vdinf->vd_reset_to,
MPI3MR_INTADMCMD_TIMEOUT);
tgtdev->dev_spec.vd_inf.abort_to =
max_t(u8, vdinf->vd_abort_to,
MPI3MR_INTADMCMD_TIMEOUT);
tgtdev->dev_spec.vd_inf.tg_id = vdinf_io_throttle_group;
tgtdev->dev_spec.vd_inf.tg_high =
le16_to_cpu(vdinf->io_throttle_group_high) * 2048;
@ -2049,8 +2055,8 @@ static void mpi3mr_fwevt_bh(struct mpi3mr_ioc *mrioc,
if (!fwevt->process_evt)
goto evt_ack;
dprint_event_bh(mrioc, "processing event(0x%02x) in the bottom half handler\n",
fwevt->event_id);
dprint_event_bh(mrioc, "processing event(0x%02x) -(0x%08x) in the bottom half handler\n",
fwevt->event_id, fwevt->evt_ctx);
switch (fwevt->event_id) {
case MPI3_EVENT_DEVICE_ADDED:
@ -2866,12 +2872,14 @@ static void mpi3mr_preparereset_evt_th(struct mpi3mr_ioc *mrioc,
"prepare for reset event top half with rc=start\n");
if (mrioc->prepare_for_reset)
return;
scsi_block_requests(mrioc->shost);
mrioc->prepare_for_reset = 1;
mrioc->prepare_for_reset_timeout_counter = 0;
} else if (evtdata->reason_code == MPI3_EVENT_PREPARE_RESET_RC_ABORT) {
dprint_event_th(mrioc,
"prepare for reset top half with rc=abort\n");
mrioc->prepare_for_reset = 0;
scsi_unblock_requests(mrioc->shost);
mrioc->prepare_for_reset_timeout_counter = 0;
}
if ((event_reply->msg_flags & MPI3_EVENT_NOTIFY_MSGFLAGS_ACK_MASK)
@ -3076,8 +3084,8 @@ void mpi3mr_os_handle_events(struct mpi3mr_ioc *mrioc,
}
if (process_evt_bh || ack_req) {
dprint_event_th(mrioc,
"scheduling bottom half handler for event(0x%02x),ack_required=%d\n",
evt_type, ack_req);
"scheduling bottom half handler for event(0x%02x) - (0x%08x), ack_required=%d\n",
evt_type, le32_to_cpu(event_reply->event_context), ack_req);
sz = event_reply->event_data_length * 4;
fwevt = mpi3mr_alloc_fwevt(sz);
if (!fwevt) {
@ -3915,11 +3923,13 @@ int mpi3mr_issue_tm(struct mpi3mr_ioc *mrioc, u8 tm_type,
if (scsi_tgt_priv_data)
atomic_inc(&scsi_tgt_priv_data->block_io);
if (tgtdev && (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)) {
if (cmd_priv && tgtdev->dev_spec.pcie_inf.abort_to)
timeout = tgtdev->dev_spec.pcie_inf.abort_to;
else if (!cmd_priv && tgtdev->dev_spec.pcie_inf.reset_to)
timeout = tgtdev->dev_spec.pcie_inf.reset_to;
if (tgtdev) {
if (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_PCIE)
timeout = cmd_priv ? tgtdev->dev_spec.pcie_inf.abort_to
: tgtdev->dev_spec.pcie_inf.reset_to;
else if (tgtdev->dev_type == MPI3_DEVICE_DEVFORM_VD)
timeout = cmd_priv ? tgtdev->dev_spec.vd_inf.abort_to
: tgtdev->dev_spec.vd_inf.reset_to;
}
init_completion(&drv_cmd->done);


@ -413,9 +413,11 @@ static void mpi3mr_remove_device_by_sas_address(struct mpi3mr_ioc *mrioc,
sas_address, hba_port);
if (tgtdev) {
if (!list_empty(&tgtdev->list)) {
list_del_init(&tgtdev->list);
was_on_tgtdev_list = 1;
mpi3mr_tgtdev_put(tgtdev);
if (tgtdev->state == MPI3MR_DEV_REMOVE_HS_STARTED) {
list_del_init(&tgtdev->list);
mpi3mr_tgtdev_put(tgtdev);
}
}
}
spin_unlock_irqrestore(&mrioc->tgtdev_lock, flags);
@ -2079,6 +2081,8 @@ int mpi3mr_expander_add(struct mpi3mr_ioc *mrioc, u16 handle)
link_rate = (expander_pg1.negotiated_link_rate &
MPI3_SAS_NEG_LINK_RATE_LOGICAL_MASK) >>
MPI3_SAS_NEG_LINK_RATE_LOGICAL_SHIFT;
if (link_rate < MPI3_SAS_NEG_LINK_RATE_1_5)
link_rate = MPI3_SAS_NEG_LINK_RATE_1_5;
mpi3mr_update_links(mrioc, sas_address_parent,
handle, i, link_rate, hba_port);
}
@ -2388,6 +2392,9 @@ int mpi3mr_report_tgtdev_to_sas_transport(struct mpi3mr_ioc *mrioc,
link_rate = mpi3mr_get_sas_negotiated_logical_linkrate(mrioc, tgtdev);
if (link_rate < MPI3_SAS_NEG_LINK_RATE_1_5)
link_rate = MPI3_SAS_NEG_LINK_RATE_1_5;
mpi3mr_update_links(mrioc, sas_address_parent, tgtdev->dev_handle,
parent_phy_number, link_rate, hba_port);


@ -1420,7 +1420,13 @@ _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
_base_sas_log_info(ioc, loginfo);
if (ioc->logging_level & MPT_DEBUG_REPLY)
_base_sas_log_info(ioc, loginfo);
else {
if (!((ioc_status & MPI2_IOCSTATUS_MASK) &
MPI2_IOCSTATUS_CONFIG_INVALID_PAGE))
_base_sas_log_info(ioc, loginfo);
}
}
if (ioc_status || loginfo) {


@ -77,8 +77,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
#define MPT3SAS_DRIVER_VERSION "52.100.00.00"
#define MPT3SAS_MAJOR_VERSION 52
#define MPT3SAS_DRIVER_VERSION "54.100.00.00"
#define MPT3SAS_MAJOR_VERSION 54
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 00
#define MPT3SAS_RELEASE_VERSION 00


@ -166,6 +166,9 @@ _transport_convert_phy_link_rate(u8 link_rate)
case MPI25_SAS_NEG_LINK_RATE_12_0:
rc = SAS_LINK_RATE_12_0_GBPS;
break;
case MPI26_SAS_NEG_LINK_RATE_22_5:
rc = SAS_LINK_RATE_22_5_GBPS;
break;
case MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED:
rc = SAS_PHY_DISABLED;
break;
@ -987,11 +990,9 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
list_for_each_entry_safe(mpt3sas_phy, next_phy,
&mpt3sas_port->phy_list, port_siblings) {
if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
dev_printk(KERN_INFO, &mpt3sas_port->port->dev,
"remove: sas_addr(0x%016llx), phy(%d)\n",
(unsigned long long)
mpt3sas_port->remote_identify.sas_address,
mpt3sas_phy->phy_id);
ioc_info(ioc, "remove: sas_addr(0x%016llx), phy(%d)\n",
(unsigned long long) mpt3sas_port->remote_identify.sas_address,
mpt3sas_phy->phy_id);
mpt3sas_phy->phy_belongs_to_port = 0;
if (!ioc->remove_host)
sas_port_delete_phy(mpt3sas_port->port,


@ -1175,7 +1175,7 @@ static int mvs_dev_found_notify(struct domain_device *dev, int lock)
mvi_device->dev_type = dev->dev_type;
mvi_device->mvi_info = mvi;
mvi_device->sas_device = dev;
if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
if (dev_parent_is_expander(dev)) {
int phy_id;
phy_id = sas_find_attached_phy_id(&parent_dev->ex_dev, dev);


@ -498,14 +498,14 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
/* Temporary dma mapping, used only in the scope of this function */
mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
&mbox_addr, GFP_KERNEL);
if (dma_mapping_error(&pdev->dev, mbox_addr))
if (!mbox)
return false;
/* These are the base addresses for the command memory mailbox array */
cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox);
cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size,
&cs->cmd_mbox_addr, GFP_KERNEL);
if (dma_mapping_error(&pdev->dev, cs->cmd_mbox_addr)) {
if (!cmd_mbox) {
dev_err(&pdev->dev, "Failed to map command mailbox\n");
goto out_free;
}
@ -520,7 +520,7 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox);
stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size,
&cs->stat_mbox_addr, GFP_KERNEL);
if (dma_mapping_error(&pdev->dev, cs->stat_mbox_addr)) {
if (!stat_mbox) {
dev_err(&pdev->dev, "Failed to map status mailbox\n");
goto out_free;
}
@ -533,7 +533,7 @@ static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
cs->fwstat_buf = dma_alloc_coherent(&pdev->dev,
sizeof(struct myrs_fwstat),
&cs->fwstat_addr, GFP_KERNEL);
if (dma_mapping_error(&pdev->dev, cs->fwstat_addr)) {
if (!cs->fwstat_buf) {
dev_err(&pdev->dev, "Failed to map firmware health buffer\n");
cs->fwstat_buf = NULL;
goto out_free;
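The three myrs fixes above correct the error test, not the allocation: dma_alloc_coherent() reports failure with a NULL virtual address, while dma_mapping_error() is defined only for handles returned by the streaming dma_map_*() API. A short sketch of the two patterns, with illustrative names:

#include <linux/dma-mapping.h>

static void *coherent_alloc(struct device *dev, size_t sz, dma_addr_t *dma)
{
	/* Coherent API: check the returned CPU pointer. */
	void *cpu = dma_alloc_coherent(dev, sz, dma, GFP_KERNEL);

	return cpu;	/* NULL on failure; *dma is meaningless then */
}

static int streaming_map(struct device *dev, void *buf, size_t sz,
			 dma_addr_t *out)
{
	/* Streaming API: here dma_mapping_error() is the correct check. */
	dma_addr_t h = dma_map_single(dev, buf, sz, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, h))
		return -ENOMEM;
	*out = h;
	return 0;
}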


@ -534,23 +534,25 @@ static ssize_t pm8001_ctl_iop_log_show(struct device *cdev,
char *str = buf;
u32 read_size =
pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size / 1024;
static u32 start, end, count;
u32 max_read_times = 32;
u32 max_count = (read_size * 1024) / (max_read_times * 4);
u32 *temp = (u32 *)pm8001_ha->memoryMap.region[IOP].virt_ptr;
if ((count % max_count) == 0) {
start = 0;
end = max_read_times;
count = 0;
mutex_lock(&pm8001_ha->iop_log_lock);
if ((pm8001_ha->iop_log_count % max_count) == 0) {
pm8001_ha->iop_log_start = 0;
pm8001_ha->iop_log_end = max_read_times;
pm8001_ha->iop_log_count = 0;
} else {
start = end;
end = end + max_read_times;
pm8001_ha->iop_log_start = pm8001_ha->iop_log_end;
pm8001_ha->iop_log_end = pm8001_ha->iop_log_end + max_read_times;
}
for (; start < end; start++)
str += sprintf(str, "%08x ", *(temp+start));
count++;
for (; pm8001_ha->iop_log_start < pm8001_ha->iop_log_end; pm8001_ha->iop_log_start++)
str += sprintf(str, "%08x ", *(temp+pm8001_ha->iop_log_start));
pm8001_ha->iop_log_count++;
mutex_unlock(&pm8001_ha->iop_log_lock);
return str - buf;
}
static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL);
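The rewrite above also removes a structural bug: start, end, and count were function-local statics, i.e. one shared read cursor for every pm80xx adapter and every concurrent reader of the attribute. Moving them into struct pm8001_hba_info and serializing with iop_log_lock makes the cursor per-controller. A generic sketch of that pattern, with hypothetical names:

#include <linux/mutex.h>
#include <linux/sysfs.h>

struct my_hba {
	struct mutex log_lock;
	u32 log_cursor;		/* was: static u32 cursor; in the show fn */
};

static ssize_t my_log_show(struct my_hba *hba, char *buf)
{
	ssize_t len;

	mutex_lock(&hba->log_lock);	/* serialize concurrent readers */
	len = sysfs_emit(buf, "%08x\n", hba->log_cursor++);
	mutex_unlock(&hba->log_lock);
	return len;
}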
@ -680,7 +682,7 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha)
struct pm8001_ioctl_payload *payload;
DECLARE_COMPLETION_ONSTACK(completion);
u8 *ioctlbuffer;
u32 ret;
int ret;
u32 length = 1024 * 5 + sizeof(*payload) - 1;
if (pm8001_ha->fw_image->size > 4096) {


@ -2163,8 +2163,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
if (!((t->dev->parent) &&
(dev_is_expander(t->dev->parent->dev_type)))) {
if (!dev_parent_is_expander(t->dev)) {
for (i = 0, j = 4; j <= 7 && i <= 3; i++, j++)
sata_addr_low[i] = pm8001_ha->sas_addr[j];
for (i = 0, j = 0; j <= 3 && i <= 3; i++, j++)
@ -4168,7 +4167,6 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
u16 firstBurstSize = 0;
u16 ITNT = 2000;
struct domain_device *dev = pm8001_dev->sas_device;
struct domain_device *parent_dev = dev->parent;
struct pm8001_port *port = dev->port->lldd_port;
memset(&payload, 0, sizeof(payload));
@ -4186,10 +4184,9 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
dev_is_expander(pm8001_dev->dev_type))
stp_sspsmp_sata = 0x01; /*ssp or smp*/
}
if (parent_dev && dev_is_expander(parent_dev->dev_type))
phy_id = parent_dev->ex_dev.ex_phy->phy_id;
else
phy_id = pm8001_dev->attached_phy;
phy_id = pm80xx_get_local_phy_id(dev);
opc = OPC_INB_REG_DEV;
linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
pm8001_dev->sas_device->linkrate : dev->port->linkrate;


@ -339,8 +339,10 @@ struct ssp_completion_resp {
__le32 status;
__le32 param;
__le32 ssptag_rescv_rescpad;
/* Must be last --ends in a flexible-array member. */
struct ssp_response_iu ssp_resp_iu;
__le32 residual_count;
/* __le32 residual_count; */
} __attribute__((packed, aligned(4)));


@ -552,6 +552,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
pm8001_ha->id = pm8001_id++;
pm8001_ha->logging_level = logging_level;
pm8001_ha->non_fatal_count = 0;
mutex_init(&pm8001_ha->iop_log_lock);
if (link_rate >= 1 && link_rate <= 15)
pm8001_ha->link_rate = (link_rate << 8);
else {


@ -130,6 +130,16 @@ static void pm80xx_get_tag_opcodes(struct sas_task *task, int *ata_op,
}
}
u32 pm80xx_get_local_phy_id(struct domain_device *dev)
{
struct pm8001_device *pm8001_dev = dev->lldd_dev;
if (dev_parent_is_expander(dev))
return dev->parent->ex_dev.ex_phy->phy_id;
return pm8001_dev->attached_phy;
}
void pm80xx_show_pending_commands(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *target_pm8001_dev)
{
@ -477,7 +487,7 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
struct pm8001_device *pm8001_dev = dev->lldd_dev;
bool internal_abort = sas_is_internal_abort(task);
struct pm8001_hba_info *pm8001_ha;
struct pm8001_port *port = NULL;
struct pm8001_port *port;
struct pm8001_ccb_info *ccb;
unsigned long flags;
u32 n_elem = 0;
@ -502,8 +512,7 @@ int pm8001_queue_command(struct sas_task *task, gfp_t gfp_flags)
spin_lock_irqsave(&pm8001_ha->lock, flags);
pm8001_dev = dev->lldd_dev;
port = pm8001_ha->phy[pm8001_dev->attached_phy].port;
port = dev->port->lldd_port;
if (!internal_abort &&
(DEV_IS_GONE(pm8001_dev) || !port || !port->port_attached)) {
@ -701,7 +710,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev)
dev->lldd_dev = pm8001_device;
pm8001_device->dev_type = dev->dev_type;
pm8001_device->dcompletion = &completion;
if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
if (dev_parent_is_expander(dev)) {
int phy_id;
phy_id = sas_find_attached_phy_id(&parent_dev->ex_dev, dev);
@ -766,7 +775,16 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
spin_lock_irqsave(&pm8001_ha->lock, flags);
}
PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0;
/*
* The phy array only contains local phys. Thus, we cannot clear
* phy_attached for a device behind an expander.
*/
if (!dev_parent_is_expander(dev)) {
u32 phy_id = pm80xx_get_local_phy_id(dev);
pm8001_ha->phy[phy_id].phy_attached = 0;
}
pm8001_free_dev(pm8001_dev);
} else {
pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
@ -1048,7 +1066,7 @@ int pm8001_abort_task(struct sas_task *task)
struct pm8001_hba_info *pm8001_ha;
struct pm8001_device *pm8001_dev;
int rc = TMF_RESP_FUNC_FAILED, ret;
u32 phy_id, port_id;
u32 port_id;
struct sas_task_slow slow_task;
if (!task->lldd_task || !task->dev)
@ -1057,7 +1075,6 @@ int pm8001_abort_task(struct sas_task *task)
dev = task->dev;
pm8001_dev = dev->lldd_dev;
pm8001_ha = pm8001_find_ha_by_dev(dev);
phy_id = pm8001_dev->attached_phy;
if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
// If the controller is seeing fatal errors
@ -1089,7 +1106,8 @@ int pm8001_abort_task(struct sas_task *task)
if (pm8001_ha->chip_id == chip_8006) {
DECLARE_COMPLETION_ONSTACK(completion_reset);
DECLARE_COMPLETION_ONSTACK(completion);
struct pm8001_phy *phy = pm8001_ha->phy + phy_id;
u32 phy_id = pm80xx_get_local_phy_id(dev);
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
port_id = phy->port->port_id;
/* 1. Set Device state as Recovery */


@ -547,6 +547,10 @@ struct pm8001_hba_info {
u32 ci_offset;
u32 pi_offset;
u32 max_memcnt;
u32 iop_log_start;
u32 iop_log_end;
u32 iop_log_count;
struct mutex iop_log_lock;
};
struct pm8001_work {
@ -798,6 +802,7 @@ void pm8001_setds_completion(struct domain_device *dev);
void pm8001_tmf_aborted(struct sas_task *task);
void pm80xx_show_pending_commands(struct pm8001_hba_info *pm8001_ha,
struct pm8001_device *dev);
u32 pm80xx_get_local_phy_id(struct domain_device *dev);
#endif


@ -2340,8 +2340,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha,
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
(status != IO_UNDERFLOW)) {
if (!((t->dev->parent) &&
(dev_is_expander(t->dev->parent->dev_type)))) {
if (!dev_parent_is_expander(t->dev)) {
for (i = 0, j = 4; i <= 3 && j <= 7; i++, j++)
sata_addr_low[i] = pm8001_ha->sas_addr[j];
for (i = 0, j = 0; i <= 3 && j <= 3; i++, j++)
@ -4780,7 +4779,6 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
u16 firstBurstSize = 0;
u16 ITNT = 2000;
struct domain_device *dev = pm8001_dev->sas_device;
struct domain_device *parent_dev = dev->parent;
struct pm8001_port *port = dev->port->lldd_port;
memset(&payload, 0, sizeof(payload));
@ -4799,10 +4797,8 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha,
dev_is_expander(pm8001_dev->dev_type))
stp_sspsmp_sata = 0x01; /*ssp or smp*/
}
if (parent_dev && dev_is_expander(parent_dev->dev_type))
phy_id = parent_dev->ex_dev.ex_phy->phy_id;
else
phy_id = pm8001_dev->attached_phy;
phy_id = pm80xx_get_local_phy_id(dev);
opc = OPC_INB_REG_DEV;


@ -558,8 +558,10 @@ struct ssp_completion_resp {
__le32 status;
__le32 param;
__le32 ssptag_rescv_rescpad;
/* Must be last --ends in a flexible-array member. */
struct ssp_response_iu ssp_resp_iu;
__le32 residual_count;
/* __le32 residual_count; */
} __attribute__((packed, aligned(4)));
#define SSP_RESCV_BIT 0x00010000


@ -3106,8 +3106,8 @@ static bool qla_bsg_found(struct qla_qpair *qpair, struct bsg_job *bsg_job)
switch (rval) {
case QLA_SUCCESS:
/* Wait for the command completion. */
ratov_j = ha->r_a_tov / 10 * 4 * 1000;
ratov_j = msecs_to_jiffies(ratov_j);
ratov_j = ha->r_a_tov / 10 * 4;
ratov_j = secs_to_jiffies(ratov_j);
if (!wait_for_completion_timeout(&comp, ratov_j)) {
ql_log(ql_log_info, vha, 0x7089,
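qla2xxx keeps r_a_tov in 100 ms units, so r_a_tov / 10 * 4 is four R_A_TOV intervals in whole seconds; the patch lets secs_to_jiffies() do the seconds-to-jiffies conversion instead of hand-multiplying by 1000 for msecs_to_jiffies(). A spot check of the arithmetic under that assumption:

#include <linux/jiffies.h>

static void ratov_check(u16 r_a_tov)	/* e.g. 100, i.e. 10 seconds */
{
	unsigned long old_way = msecs_to_jiffies(r_a_tov / 10 * 4 * 1000);
	unsigned long new_way = secs_to_jiffies(r_a_tov / 10 * 4);

	/* Same timeout either way; only the unit handling moved. */
	WARN_ON(old_way != new_way);
}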


@ -4890,9 +4890,7 @@ struct purex_item {
struct purex_item *pkt);
atomic_t in_use;
uint16_t size;
struct {
uint8_t iocb[64];
} iocb;
uint8_t iocb[] __counted_by(size);
};
#include "qla_edif.h"
@ -5101,7 +5099,6 @@ typedef struct scsi_qla_host {
struct list_head head;
spinlock_t lock;
} purex_list;
struct purex_item default_item;
struct name_list_extended gnl;
/* Count of active session/fcport */
@ -5130,6 +5127,11 @@ typedef struct scsi_qla_host {
#define DPORT_DIAG_IN_PROGRESS BIT_0
#define DPORT_DIAG_CHIP_RESET_IN_PROGRESS BIT_1
uint16_t dport_status;
/* Must be last --ends in a flexible-array member. */
TRAILING_OVERLAP(struct purex_item, default_item, iocb,
uint8_t __default_item_iocb[QLA_DEFAULT_PAYLOAD_SIZE];
);
} scsi_qla_host_t;
struct qla27xx_image_status {


@ -1798,7 +1798,7 @@ retry:
switch (rval) {
case QLA_SUCCESS:
break;
case EAGAIN:
case -EAGAIN:
msleep(EDIF_MSLEEP_INTERVAL);
cnt++;
if (cnt < EDIF_RETRY_COUNT)
@ -3649,7 +3649,7 @@ retry:
p->e.extra_rx_xchg_address, p->e.extra_control_flags,
sp->handle, sp->remap.req.len, bsg_job);
break;
case EAGAIN:
case -EAGAIN:
msleep(EDIF_MSLEEP_INTERVAL);
cnt++;
if (cnt < EDIF_RETRY_COUNT)


@ -2059,11 +2059,11 @@ static void qla_marker_sp_done(srb_t *sp, int res)
int cnt = 5; \
do { \
if (_chip_gen != sp->vha->hw->chip_reset || _login_gen != sp->fcport->login_gen) {\
_rval = EINVAL; \
_rval = -EINVAL; \
break; \
} \
_rval = qla2x00_start_sp(_sp); \
if (_rval == EAGAIN) \
if (_rval == -EAGAIN) \
msleep(1); \
else \
break; \
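The EAGAIN to -EAGAIN edits here and in qla_edif.c above fix a sign bug: in-kernel callees such as qla2x00_start_sp() return negative errno values, so a case EAGAIN: label (positive 11) never matched and the msleep-and-retry arm was dead code. Schematically, with a hypothetical caller:

#include <linux/errno.h>

static void retry_demo(int rval)	/* rval from a start function */
{
	switch (rval) {
	case -EAGAIN:	/* what kernel code actually returns; matches */
		break;
	case EAGAIN:	/* == 11; never produced, silently unreachable */
		break;
	}
}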


@ -1077,17 +1077,17 @@ static struct purex_item *
qla24xx_alloc_purex_item(scsi_qla_host_t *vha, uint16_t size)
{
struct purex_item *item = NULL;
uint8_t item_hdr_size = sizeof(*item);
if (size > QLA_DEFAULT_PAYLOAD_SIZE) {
item = kzalloc(item_hdr_size +
(size - QLA_DEFAULT_PAYLOAD_SIZE), GFP_ATOMIC);
item = kzalloc(struct_size(item, iocb, size), GFP_ATOMIC);
} else {
if (atomic_inc_return(&vha->default_item.in_use) == 1) {
item = &vha->default_item;
goto initialize_purex_header;
} else {
item = kzalloc(item_hdr_size, GFP_ATOMIC);
item = kzalloc(
struct_size(item, iocb, QLA_DEFAULT_PAYLOAD_SIZE),
GFP_ATOMIC);
}
}
if (!item) {
@ -1127,17 +1127,16 @@ qla24xx_queue_purex_item(scsi_qla_host_t *vha, struct purex_item *pkt,
* @vha: SCSI driver HA context
* @pkt: ELS packet
*/
static struct purex_item
*qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt)
static struct purex_item *
qla24xx_copy_std_pkt(struct scsi_qla_host *vha, void *pkt)
{
struct purex_item *item;
item = qla24xx_alloc_purex_item(vha,
QLA_DEFAULT_PAYLOAD_SIZE);
item = qla24xx_alloc_purex_item(vha, QLA_DEFAULT_PAYLOAD_SIZE);
if (!item)
return item;
memcpy(&item->iocb, pkt, sizeof(item->iocb));
memcpy(&item->iocb, pkt, QLA_DEFAULT_PAYLOAD_SIZE);
return item;
}
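These qla_isr.c hunks pair with the qla_def.h change above that turned purex_item's fixed 64-byte iocb into a flexible array annotated __counted_by(size): struct_size() from <linux/overflow.h> computes header plus payload with overflow saturation, and the annotation lets FORTIFY and UBSAN bounds-check iocb accesses against the size member. A condensed sketch of the idiom:

#include <linux/overflow.h>
#include <linux/slab.h>

struct item {
	u16 size;
	u8 iocb[] __counted_by(size);	/* bound tied to ->size */
};

static struct item *item_alloc(u16 payload)
{
	/* sizeof(struct item) + payload * sizeof(u8), saturating to
	 * SIZE_MAX on overflow so the allocation fails cleanly.
	 */
	struct item *it = kzalloc(struct_size(it, iocb, payload), GFP_ATOMIC);

	if (!it)
		return NULL;
	it->size = payload;	/* set the counter before touching iocb[] */
	return it;
}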


@ -419,7 +419,7 @@ retry:
switch (rval) {
case QLA_SUCCESS:
break;
case EAGAIN:
case -EAGAIN:
msleep(PURLS_MSLEEP_INTERVAL);
cnt++;
if (cnt < PURLS_RETRY_COUNT)
@ -1308,7 +1308,7 @@ void qla2xxx_process_purls_iocb(void **pkt, struct rsp_que **rsp)
ql_dbg(ql_dbg_unsol, vha, 0x2121,
"PURLS OP[%01x] size %d xchg addr 0x%x portid %06x\n",
item->iocb.iocb[3], item->size, uctx->exchange_address,
item->iocb[3], item->size, uctx->exchange_address,
fcport->d_id.b24);
/* +48 0 1 2 3 4 5 6 7 8 9 A B C D E F
* ----- -----------------------------------------------


@ -1291,8 +1291,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
"Abort command mbx cmd=%p, rval=%x.\n", cmd, rval);
/* Wait for the command completion. */
ratov_j = ha->r_a_tov/10 * 4 * 1000;
ratov_j = msecs_to_jiffies(ratov_j);
ratov_j = ha->r_a_tov / 10 * 4;
ratov_j = secs_to_jiffies(ratov_j);
switch (rval) {
case QLA_SUCCESS:
if (!wait_for_completion_timeout(&comp, ratov_j)) {
@ -1806,8 +1806,8 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res,
rval = ha->isp_ops->abort_command(sp);
/* Wait for command completion. */
ret_cmd = false;
ratov_j = ha->r_a_tov/10 * 4 * 1000;
ratov_j = msecs_to_jiffies(ratov_j);
ratov_j = ha->r_a_tov / 10 * 4;
ratov_j = secs_to_jiffies(ratov_j);
switch (rval) {
case QLA_SUCCESS:
if (wait_for_completion_timeout(&comp, ratov_j)) {
@ -6459,9 +6459,10 @@ dealloc:
void
qla24xx_free_purex_item(struct purex_item *item)
{
if (item == &item->vha->default_item)
if (item == &item->vha->default_item) {
memset(&item->vha->default_item, 0, sizeof(struct purex_item));
else
memset(&item->vha->__default_item_iocb, 0, QLA_DEFAULT_PAYLOAD_SIZE);
} else
kfree(item);
}


@ -1155,14 +1155,9 @@ static ssize_t sdebug_error_write(struct file *file, const char __user *ubuf,
struct sdebug_err_inject *inject;
struct scsi_device *sdev = (struct scsi_device *)file->f_inode->i_private;
buf = kzalloc(count + 1, GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (copy_from_user(buf, ubuf, count)) {
kfree(buf);
return -EFAULT;
}
buf = memdup_user_nul(ubuf, count);
if (IS_ERR(buf))
return PTR_ERR(buf);
if (buf[0] == '-')
return sdebug_err_remove(sdev, buf, count);
@ -8805,8 +8800,8 @@ static int sdebug_add_store(void)
/* Logical Block Provisioning */
if (scsi_debug_lbp()) {
map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1;
sip->map_storep = vmalloc(array_size(sizeof(long),
BITS_TO_LONGS(map_size)));
sip->map_storep = vcalloc(BITS_TO_LONGS(map_size),
sizeof(long));
pr_info("%lu provisioning blocks\n", map_size);
@ -8815,8 +8810,6 @@ static int sdebug_add_store(void)
goto err;
}
bitmap_zero(sip->map_storep, map_size);
/* Map first 1KB for partition table */
if (sdebug_num_parts)
map_region(sip, 0, 2);
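memdup_user_nul() folds the removed kzalloc() + copy_from_user() + unwind sequence into a single call that returns either a NUL-terminated kernel copy of the user buffer or an ERR_PTR; memdup_user(), used by the smartpqi hunk below, is the same helper without the added terminator. The resulting shape, sketched with an illustrative handler:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

static ssize_t demo_write(const char __user *ubuf, size_t count)
{
	char *buf = memdup_user_nul(ubuf, count);

	if (IS_ERR(buf))
		return PTR_ERR(buf);	/* -ENOMEM or -EFAULT, preselected */
	/* ... parse buf, which is guaranteed NUL-terminated ... */
	kfree(buf);
	return count;
}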


@ -106,7 +106,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, struct queue_limits *lim,
unsigned int mode);
static void sd_config_write_same(struct scsi_disk *sdkp,
struct queue_limits *lim);
static int sd_revalidate_disk(struct gendisk *);
static void sd_revalidate_disk(struct gendisk *);
static void sd_unlock_native_capacity(struct gendisk *disk);
static void sd_shutdown(struct device *);
static void scsi_disk_release(struct device *cdev);
@ -3691,13 +3691,13 @@ static void sd_read_block_zero(struct scsi_disk *sdkp)
* performs disk spin up, read_capacity, etc.
* @disk: struct gendisk we care about
**/
static int sd_revalidate_disk(struct gendisk *disk)
static void sd_revalidate_disk(struct gendisk *disk)
{
struct scsi_disk *sdkp = scsi_disk(disk);
struct scsi_device *sdp = sdkp->device;
sector_t old_capacity = sdkp->capacity;
struct queue_limits lim;
unsigned char *buffer;
struct queue_limits *lim = NULL;
unsigned char *buffer = NULL;
unsigned int dev_max;
int err;
@ -3709,25 +3709,26 @@ static int sd_revalidate_disk(struct gendisk *disk)
* of the other niceties.
*/
if (!scsi_device_online(sdp))
goto out;
return;
lim = kmalloc(sizeof(*lim), GFP_KERNEL);
if (!lim)
return;
buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
if (!buffer) {
sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
"allocation failure.\n");
if (!buffer)
goto out;
}
sd_spinup_disk(sdkp);
lim = queue_limits_start_update(sdkp->disk->queue);
*lim = queue_limits_start_update(sdkp->disk->queue);
/*
* Without media there is no reason to ask; moreover, some devices
* react badly if we do.
*/
if (sdkp->media_present) {
sd_read_capacity(sdkp, &lim, buffer);
sd_read_capacity(sdkp, lim, buffer);
/*
* Some USB/UAS devices return generic values for mode pages
* until the media has been accessed. Trigger a READ operation
@ -3741,17 +3742,17 @@ static int sd_revalidate_disk(struct gendisk *disk)
* cause this to be updated correctly and any device which
* doesn't support it should be treated as rotational.
*/
lim.features |= (BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM);
lim->features |= (BLK_FEAT_ROTATIONAL | BLK_FEAT_ADD_RANDOM);
if (scsi_device_supports_vpd(sdp)) {
sd_read_block_provisioning(sdkp);
sd_read_block_limits(sdkp, &lim);
sd_read_block_limits(sdkp, lim);
sd_read_block_limits_ext(sdkp);
sd_read_block_characteristics(sdkp, &lim);
sd_zbc_read_zones(sdkp, &lim, buffer);
sd_read_block_characteristics(sdkp, lim);
sd_zbc_read_zones(sdkp, lim, buffer);
}
sd_config_discard(sdkp, &lim, sd_discard_mode(sdkp));
sd_config_discard(sdkp, lim, sd_discard_mode(sdkp));
sd_print_capacity(sdkp, old_capacity);
@ -3761,47 +3762,46 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_app_tag_own(sdkp, buffer);
sd_read_write_same(sdkp, buffer);
sd_read_security(sdkp, buffer);
sd_config_protection(sdkp, &lim);
sd_config_protection(sdkp, lim);
}
/*
* We now have all cache related info, determine how we deal
* with flush requests.
*/
sd_set_flush_flag(sdkp, &lim);
sd_set_flush_flag(sdkp, lim);
/* Initial block count limit based on CDB TRANSFER LENGTH field size. */
dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;
/* Some devices report a maximum block count for READ/WRITE requests. */
dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
lim.max_dev_sectors = logical_to_sectors(sdp, dev_max);
lim->max_dev_sectors = logical_to_sectors(sdp, dev_max);
if (sd_validate_min_xfer_size(sdkp))
lim.io_min = logical_to_bytes(sdp, sdkp->min_xfer_blocks);
lim->io_min = logical_to_bytes(sdp, sdkp->min_xfer_blocks);
else
lim.io_min = 0;
lim->io_min = 0;
/*
* Limit default to SCSI host optimal sector limit if set. There may be
* an impact on performance for when the size of a request exceeds this
* host limit.
*/
lim.io_opt = sdp->host->opt_sectors << SECTOR_SHIFT;
lim->io_opt = sdp->host->opt_sectors << SECTOR_SHIFT;
if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
lim.io_opt = min_not_zero(lim.io_opt,
lim->io_opt = min_not_zero(lim->io_opt,
logical_to_bytes(sdp, sdkp->opt_xfer_blocks));
}
sdkp->first_scan = 0;
set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity));
sd_config_write_same(sdkp, &lim);
kfree(buffer);
sd_config_write_same(sdkp, lim);
err = queue_limits_commit_update_frozen(sdkp->disk->queue, &lim);
err = queue_limits_commit_update_frozen(sdkp->disk->queue, lim);
if (err)
return err;
goto out;
/*
* Query concurrent positioning ranges after
@ -3820,7 +3820,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
set_capacity_and_notify(disk, 0);
out:
return 0;
kfree(buffer);
kfree(lim);
}
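Two things change in sd_revalidate_disk() above: it becomes void because its only caller ignored the return value, and the queue_limits snapshot moves from the stack to kmalloc() since struct queue_limits has grown large enough to be unfriendly to kernel stacks. The transactional update pattern itself is unchanged; condensed, with error handling trimmed:

#include <linux/blkdev.h>
#include <linux/slab.h>

static void revalidate_sketch(struct gendisk *disk)
{
	struct queue_limits *lim = kmalloc(sizeof(*lim), GFP_KERNEL);

	if (!lim)
		return;
	/* Open the update transaction and snapshot current limits. */
	*lim = queue_limits_start_update(disk->queue);
	lim->io_opt = 0;	/* ... adjust fields as probing dictates ... */
	/* Freeze the queue and atomically apply the new limits. */
	queue_limits_commit_update_frozen(disk->queue, lim);
	kfree(lim);
}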
/**


@ -20,6 +20,7 @@
#include <linux/reboot.h>
#include <linux/cciss_ioctl.h>
#include <linux/crash_dump.h>
#include <linux/string.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
@ -6774,17 +6775,15 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
}
if (iocommand.buf_size > 0) {
kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
if (!kernel_buffer)
return -ENOMEM;
if (iocommand.Request.Type.Direction & XFER_WRITE) {
if (copy_from_user(kernel_buffer, iocommand.buf,
iocommand.buf_size)) {
rc = -EFAULT;
goto out;
}
kernel_buffer = memdup_user(iocommand.buf,
iocommand.buf_size);
if (IS_ERR(kernel_buffer))
return PTR_ERR(kernel_buffer);
} else {
memset(kernel_buffer, 0, iocommand.buf_size);
kernel_buffer = kzalloc(iocommand.buf_size, GFP_KERNEL);
if (!kernel_buffer)
return -ENOMEM;
}
}


@ -1941,8 +1941,8 @@ static int storvsc_probe(struct hv_device *device,
int num_present_cpus = num_present_cpus();
struct Scsi_Host *host;
struct hv_host_device *host_dev;
bool dev_is_ide = ((dev_id->driver_data == IDE_GUID) ? true : false);
bool is_fc = ((dev_id->driver_data == SFC_GUID) ? true : false);
bool dev_is_ide = dev_id->driver_data == IDE_GUID;
bool is_fc = dev_id->driver_data == SFC_GUID;
int target = 0;
struct storvsc_device *stor_device;
int max_sub_channels = 0;


@ -665,7 +665,7 @@ static ssize_t lio_target_nacl_cmdsn_depth_store(struct config_item *item,
}
acl_ci = &se_nacl->acl_group.cg_item;
if (!acl_ci) {
pr_err("Unable to locatel acl_ci\n");
pr_err("Unable to locate acl_ci\n");
return -EINVAL;
}
tpg_ci = &acl_ci->ci_parent->ci_group->cg_item;
@ -684,7 +684,7 @@ static ssize_t lio_target_nacl_cmdsn_depth_store(struct config_item *item,
ret = core_tpg_set_initiator_node_queue_depth(se_nacl, cmdsn_depth);
pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for"
pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for "
"InitiatorName: %s\n", config_item_name(wwn_ci),
config_item_name(tpg_ci), cmdsn_depth,
config_item_name(acl_ci));
@ -1131,7 +1131,7 @@ static void lio_target_tiqn_deltpg(struct se_portal_group *se_tpg)
/* End items for lio_target_tiqn_cit */
/* Start LIO-Target TIQN struct contig_item lio_target_cit */
/* Start LIO-Target TIQN struct config_item lio_target_cit */
static ssize_t lio_target_wwn_lio_version_show(struct config_item *item,
char *page)

View File

@ -112,7 +112,8 @@ u8 iscsit_tmr_task_reassign(
struct iscsi_tmr_req *tmr_req = cmd->tmr_req;
struct se_tmr_req *se_tmr = cmd->se_cmd.se_tmr_req;
struct iscsi_tm *hdr = (struct iscsi_tm *) buf;
u64 ret, ref_lun;
u64 ref_lun;
int ret;
pr_debug("Got TASK_REASSIGN TMR ITT: 0x%08x,"
" RefTaskTag: 0x%08x, ExpDataSN: 0x%08x, CID: %hu\n",


@ -29,6 +29,10 @@
#define MCQ_ENTRY_SIZE_IN_DWORD 8
#define CQE_UCD_BA GENMASK_ULL(63, 7)
#define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\
UFSHCD_ERROR_MASK |\
MCQ_CQ_EVENT_STATUS)
/* Max mcq register polling time in microseconds */
#define MCQ_POLL_US 500000
@ -355,9 +359,16 @@ EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_lock);
void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
{
struct ufs_hw_queue *hwq;
u32 intrs;
u16 qsize;
int i;
/* Enable required interrupts */
intrs = UFSHCD_ENABLE_MCQ_INTRS;
if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
intrs &= ~MCQ_CQ_EVENT_STATUS;
ufshcd_enable_intr(hba, intrs);
for (i = 0; i < hba->nr_hw_queues; i++) {
hwq = &hba->uhq[i];
hwq->id = i;


@ -512,6 +512,8 @@ static ssize_t pm_qos_enable_show(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
guard(mutex)(&hba->pm_qos_mutex);
return sysfs_emit(buf, "%d\n", hba->pm_qos_enabled);
}
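guard(mutex)(...) is <linux/cleanup.h> scope-based locking: the mutex is taken at the declaration and dropped automatically when the enclosing scope exits, so show routines like the one above can return directly without an unlock label. The same shape in isolation, with illustrative names:

#include <linux/cleanup.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>

static DEFINE_MUTEX(demo_lock);
static int demo_value;

static ssize_t demo_show(char *buf)
{
	guard(mutex)(&demo_lock);	/* released on every return path */

	return sysfs_emit(buf, "%d\n", demo_value);
}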


@ -11,6 +11,7 @@
#include <ufs/ufs.h>
#include <linux/tracepoint.h>
#include "ufs_trace_types.h"
#define str_opcode(opcode) \
__print_symbolic(opcode, \


@ -0,0 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _UFS_TRACE_TYPES_H_
#define _UFS_TRACE_TYPES_H_
enum ufs_trace_str_t {
UFS_CMD_SEND,
UFS_CMD_COMP,
UFS_DEV_COMP,
UFS_QUERY_SEND,
UFS_QUERY_COMP,
UFS_QUERY_ERR,
UFS_TM_SEND,
UFS_TM_COMP,
UFS_TM_ERR
};
enum ufs_trace_tsf_t {
UFS_TSF_CDB,
UFS_TSF_OSF,
UFS_TSF_TM_INPUT,
UFS_TSF_TM_OUTPUT
};
#endif /* _UFS_TRACE_TYPES_H_ */


@ -45,11 +45,6 @@
UTP_TASK_REQ_COMPL |\
UFSHCD_ERROR_MASK)
#define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\
UFSHCD_ERROR_MASK |\
MCQ_CQ_EVENT_STATUS)
/* UIC command timeout, unit: ms */
enum {
UIC_CMD_TIMEOUT_DEFAULT = 500,
@ -316,6 +311,9 @@ static const struct ufs_dev_quirk ufs_fixups[] = {
{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
.model = "THGLF2G9D8KBADG",
.quirk = UFS_DEVICE_QUIRK_PA_TACTIVATE },
{ .wmanufacturerid = UFS_VENDOR_TOSHIBA,
.model = "THGJFJT1E45BATP",
.quirk = UFS_DEVICE_QUIRK_NO_TIMESTAMP_SUPPORT },
{}
};
@ -369,7 +367,7 @@ EXPORT_SYMBOL_GPL(ufshcd_disable_irq);
* @hba: per adapter instance
* @intrs: interrupt bits
*/
static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
{
u32 old_val = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
u32 new_val = old_val | intrs;
@ -606,10 +604,12 @@ void ufshcd_print_tr(struct ufs_hba *hba, int tag, bool pr_prdt)
lrbp = &hba->lrb[tag];
dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n",
tag, div_u64(lrbp->issue_time_stamp_local_clock, 1000));
dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n",
tag, div_u64(lrbp->compl_time_stamp_local_clock, 1000));
if (hba->monitor.enabled) {
dev_err(hba->dev, "UPIU[%d] - issue time %lld us\n", tag,
div_u64(lrbp->issue_time_stamp_local_clock, 1000));
dev_err(hba->dev, "UPIU[%d] - complete time %lld us\n", tag,
div_u64(lrbp->compl_time_stamp_local_clock, 1000));
}
dev_err(hba->dev,
"UPIU[%d] - Transfer Request Descriptor phys@0x%llx\n",
tag, (u64)lrbp->utrd_dma_addr);
@ -1045,6 +1045,7 @@ EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
*/
void ufshcd_pm_qos_init(struct ufs_hba *hba)
{
guard(mutex)(&hba->pm_qos_mutex);
if (hba->pm_qos_enabled)
return;
@ -1061,6 +1062,8 @@ void ufshcd_pm_qos_init(struct ufs_hba *hba)
*/
void ufshcd_pm_qos_exit(struct ufs_hba *hba)
{
guard(mutex)(&hba->pm_qos_mutex);
if (!hba->pm_qos_enabled)
return;
@ -1075,6 +1078,8 @@ void ufshcd_pm_qos_exit(struct ufs_hba *hba)
*/
static void ufshcd_pm_qos_update(struct ufs_hba *hba, bool on)
{
guard(mutex)(&hba->pm_qos_mutex);
if (!hba->pm_qos_enabled)
return;
@ -2230,11 +2235,13 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
{
bool queue_resume_work = false;
ktime_t curr_t = ktime_get();
ktime_t curr_t;
if (!ufshcd_is_clkscaling_supported(hba))
return;
curr_t = ktime_get();
guard(spinlock_irqsave)(&hba->clk_scaling.lock);
if (!hba->clk_scaling.active_reqs++)
@ -2354,10 +2361,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag,
struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
unsigned long flags;
lrbp->issue_time_stamp = ktime_get();
lrbp->issue_time_stamp_local_clock = local_clock();
lrbp->compl_time_stamp = ktime_set(0, 0);
lrbp->compl_time_stamp_local_clock = 0;
if (hba->monitor.enabled) {
lrbp->issue_time_stamp = ktime_get();
lrbp->issue_time_stamp_local_clock = local_clock();
lrbp->compl_time_stamp = ktime_set(0, 0);
lrbp->compl_time_stamp_local_clock = 0;
}
ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
if (lrbp->cmd)
ufshcd_clk_scaling_start_busy(hba);
@ -5622,8 +5631,10 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
enum utp_ocs ocs;
lrbp = &hba->lrb[task_tag];
lrbp->compl_time_stamp = ktime_get();
lrbp->compl_time_stamp_local_clock = local_clock();
if (hba->monitor.enabled) {
lrbp->compl_time_stamp = ktime_get();
lrbp->compl_time_stamp_local_clock = local_clock();
}
cmd = lrbp->cmd;
if (cmd) {
if (unlikely(ufshcd_should_inform_monitor(hba, lrbp)))
@ -6457,13 +6468,14 @@ void ufshcd_schedule_eh_work(struct ufs_hba *hba)
}
}
static void ufshcd_force_error_recovery(struct ufs_hba *hba)
void ufshcd_force_error_recovery(struct ufs_hba *hba)
{
spin_lock_irq(hba->host->host_lock);
hba->force_reset = true;
ufshcd_schedule_eh_work(hba);
spin_unlock_irq(hba->host->host_lock);
}
EXPORT_SYMBOL_GPL(ufshcd_force_error_recovery);
static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
{
@ -8786,7 +8798,8 @@ static void ufshcd_set_timestamp_attr(struct ufs_hba *hba)
struct ufs_dev_info *dev_info = &hba->dev_info;
struct utp_upiu_query_v4_0 *upiu_data;
if (dev_info->wspecversion < 0x400)
if (dev_info->wspecversion < 0x400 ||
hba->dev_quirks & UFS_DEVICE_QUIRK_NO_TIMESTAMP_SUPPORT)
return;
ufshcd_dev_man_lock(hba);
@ -8913,16 +8926,11 @@ err:
static void ufshcd_config_mcq(struct ufs_hba *hba)
{
int ret;
u32 intrs;
ret = ufshcd_mcq_vops_config_esi(hba);
hba->mcq_esi_enabled = !ret;
dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : "");
intrs = UFSHCD_ENABLE_MCQ_INTRS;
if (hba->quirks & UFSHCD_QUIRK_MCQ_BROKEN_INTR)
intrs &= ~MCQ_CQ_EVENT_STATUS;
ufshcd_enable_intr(hba, intrs);
ufshcd_mcq_make_queues_operational(hba);
ufshcd_mcq_config_mac(hba, hba->nutrs);
@ -10756,6 +10764,10 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
mutex_init(&hba->ee_ctrl_mutex);
mutex_init(&hba->wb_mutex);
/* Initialize mutex for PM QoS request synchronization */
mutex_init(&hba->pm_qos_mutex);
init_rwsem(&hba->clk_scaling_lock);
ufshcd_init_clk_gating(hba);


@ -776,7 +776,7 @@ static void exynos_ufs_config_sync_pattern_mask(struct exynos_ufs *ufs,
u32 mask, sync_len;
enum {
SYNC_LEN_G1 = 80 * 1000, /* 80us */
SYNC_LEN_G2 = 40 * 1000, /* 44us */
SYNC_LEN_G2 = 40 * 1000, /* 40us */
SYNC_LEN_G3 = 20 * 1000, /* 20us */
};
int i;
@ -1896,6 +1896,13 @@ static int fsd_ufs_pre_pwr_change(struct exynos_ufs *ufs,
return 0;
}
static int fsd_ufs_suspend(struct exynos_ufs *ufs)
{
exynos_ufs_gate_clks(ufs);
hci_writel(ufs, 0, HCI_GPIO_OUT);
return 0;
}
static inline u32 get_mclk_period_unipro_18(struct exynos_ufs *ufs)
{
return (16 * 1000 * 1000000UL / ufs->mclk_rate);
@ -2162,6 +2169,7 @@ static const struct exynos_ufs_drv_data fsd_ufs_drvs = {
.pre_link = fsd_ufs_pre_link,
.post_link = fsd_ufs_post_link,
.pre_pwr_change = fsd_ufs_pre_pwr_change,
.suspend = fsd_ufs_suspend,
};
static const struct exynos_ufs_drv_data gs101_ufs_drvs = {


@ -29,6 +29,7 @@
#include "ufs-mediatek-sip.h"
static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);
static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up);
#define CREATE_TRACE_POINTS
#include "ufs-mediatek-trace.h"
@ -415,7 +416,7 @@ static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
}
}
static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
static int ufs_mtk_wait_idle_state(struct ufs_hba *hba,
unsigned long retry_ms)
{
u64 timeout, time_checked;
@ -451,8 +452,12 @@ static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
break;
} while (time_checked < timeout);
if (wait_idle && sm != VS_HCE_BASE)
if (wait_idle && sm != VS_HCE_BASE) {
dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
return -ETIMEDOUT;
}
return 0;
}
static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
@ -798,8 +803,14 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
clk_pwr_off = true;
}
if (clk_pwr_off)
if (clk_pwr_off) {
ufs_mtk_pwr_ctrl(hba, false);
} else {
dev_warn(hba->dev, "Clock is not turned off, hba->ahit = 0x%x, AHIT = 0x%x\n",
hba->ahit,
ufshcd_readl(hba,
REG_AUTO_HIBERNATE_IDLE_TIMER));
}
ufs_mtk_mcq_disable_irq(hba);
} else if (on && status == POST_CHANGE) {
ufs_mtk_pwr_ctrl(hba, true);
@ -1018,7 +1029,7 @@ static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
struct arm_smccc_res res;
int err, ver;
if (hba->vreg_info.vcc)
if (info->vcc)
return 0;
if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
@ -1075,6 +1086,80 @@ static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
}
}
static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
unsigned long flags;
u32 ah_ms = 10;
u32 ah_scale, ah_timer;
u32 scale_us[] = {1, 10, 100, 1000, 10000, 100000};
if (ufshcd_is_clkgating_allowed(hba)) {
if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit) {
ah_scale = FIELD_GET(UFSHCI_AHIBERN8_SCALE_MASK,
hba->ahit);
ah_timer = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
hba->ahit);
if (ah_scale <= 5)
ah_ms = ah_timer * scale_us[ah_scale] / 1000;
}
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_gating.delay_ms = max(ah_ms, 10U);
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
}
/* Convert microseconds to Auto-Hibernate Idle Timer register value */
static u32 ufs_mtk_us_to_ahit(unsigned int timer)
{
unsigned int scale;
for (scale = 0; timer > UFSHCI_AHIBERN8_TIMER_MASK; ++scale)
timer /= UFSHCI_AHIBERN8_SCALE_FACTOR;
return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
}
static void ufs_mtk_fix_ahit(struct ufs_hba *hba)
{
unsigned int us;
if (ufshcd_is_auto_hibern8_supported(hba)) {
switch (hba->dev_info.wmanufacturerid) {
case UFS_VENDOR_SAMSUNG:
/* configure auto-hibern8 timer to 3.5 ms */
us = 3500;
break;
case UFS_VENDOR_MICRON:
/* configure auto-hibern8 timer to 2 ms */
us = 2000;
break;
default:
/* configure auto-hibern8 timer to 1 ms */
us = 1000;
break;
}
hba->ahit = ufs_mtk_us_to_ahit(us);
}
ufs_mtk_setup_clk_gating(hba);
}
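Assuming the usual UFSHCI AHIT encoding (10-bit timer value, 3-bit scale, factor of 10 per scale step: 1 us, 10 us, ... 100 ms), the Samsung case above works out as follows; the helper is from this patch but the walk-through is ours:

/* ufs_mtk_us_to_ahit(3500):
 *   3500 > 1023 (timer mask)  -> timer = 350, scale = 1 (10 us units)
 *   350 fits                  -> encode timer=350, scale=1, i.e. 3.5 ms
 * which ufs_mtk_fix_ahit() then installs in hba->ahit.
 */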
static void ufs_mtk_fix_clock_scaling(struct ufs_hba *hba)
{
/* UFS version is below 4.0, clock scaling is not necessary */
if ((hba->dev_info.wspecversion < 0x0400) &&
ufs_mtk_is_clk_scale_ready(hba)) {
hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
_ufs_mtk_clk_scale(hba, false);
}
}
static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
@ -1240,6 +1325,10 @@ static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
dev_req_params->gear_rx < UFS_HS_G4)
return false;
if (dev_req_params->pwr_tx == SLOW_MODE ||
dev_req_params->pwr_rx == SLOW_MODE)
return false;
return true;
}
@ -1255,6 +1344,10 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
host_params.hs_rx_gear = UFS_HS_G5;
host_params.hs_tx_gear = UFS_HS_G5;
if (dev_max_params->pwr_rx == SLOW_MODE ||
dev_max_params->pwr_tx == SLOW_MODE)
host_params.desired_working_mode = UFS_PWM_MODE;
ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
if (ret) {
pr_info("%s: failed to determine capabilities\n",
@ -1278,6 +1371,28 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
PA_NO_ADAPT);
if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
DL_FC0ProtectionTimeOutVal_Default);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
DL_TC0ReplayTimeOutVal_Default);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
DL_AFC0ReqTimeOutVal_Default);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
DL_FC1ProtectionTimeOutVal_Default);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
DL_TC1ReplayTimeOutVal_Default);
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
DL_AFC1ReqTimeOutVal_Default);
ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
DL_FC0ProtectionTimeOutVal_Default);
ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
DL_TC0ReplayTimeOutVal_Default);
ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
DL_AFC0ReqTimeOutVal_Default);
}
ret = ufshcd_uic_change_pwr_mode(hba,
FASTAUTO_MODE << 4 | FASTAUTO_MODE);
@ -1287,10 +1402,59 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
}
}
if (host->hw_ver.major >= 3) {
/* if already configured to the requested pwr_mode, skip adapt */
if (dev_req_params->gear_rx == hba->pwr_info.gear_rx &&
dev_req_params->gear_tx == hba->pwr_info.gear_tx &&
dev_req_params->lane_rx == hba->pwr_info.lane_rx &&
dev_req_params->lane_tx == hba->pwr_info.lane_tx &&
dev_req_params->pwr_rx == hba->pwr_info.pwr_rx &&
dev_req_params->pwr_tx == hba->pwr_info.pwr_tx &&
dev_req_params->hs_rate == hba->pwr_info.hs_rate) {
return ret;
}
if (dev_req_params->pwr_rx == FAST_MODE ||
dev_req_params->pwr_rx == FASTAUTO_MODE) {
if (host->hw_ver.major >= 3) {
ret = ufshcd_dme_configure_adapt(hba,
dev_req_params->gear_tx,
PA_INITIAL_ADAPT);
} else {
ret = ufshcd_dme_configure_adapt(hba,
dev_req_params->gear_tx,
PA_NO_ADAPT);
}
} else {
ret = ufshcd_dme_configure_adapt(hba,
dev_req_params->gear_tx,
PA_INITIAL_ADAPT);
dev_req_params->gear_tx,
PA_NO_ADAPT);
}
return ret;
}
static int ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
{
int ret;
/* disable auto-hibern8 */
ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
/* wait host return to idle state when auto-hibern8 off */
ret = ufs_mtk_wait_idle_state(hba, 5);
if (ret)
goto out;
ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
out:
if (ret) {
dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
ufshcd_force_error_recovery(hba);
/* trigger error handler and break suspend */
ret = -EBUSY;
}
return ret;
@ -1302,13 +1466,20 @@ static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
struct ufs_pa_layer_attr *dev_req_params)
{
int ret = 0;
static u32 reg;
switch (stage) {
case PRE_CHANGE:
if (ufshcd_is_auto_hibern8_supported(hba)) {
reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
ufs_mtk_auto_hibern8_disable(hba);
}
ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
dev_req_params);
break;
case POST_CHANGE:
if (ufshcd_is_auto_hibern8_supported(hba))
ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);
break;
default:
ret = -EINVAL;
@ -1342,6 +1513,7 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
{
int ret;
u32 tmp;
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
ufs_mtk_get_controller_version(hba);
@ -1367,34 +1539,33 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
return ret;
}
/* Enable the 1144 functions setting */
if (host->ip_ver == IP_VER_MT6989) {
ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_DEBUGOMC), &tmp);
if (ret)
return ret;
static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
{
u32 ah_ms;
if (ufshcd_is_clkgating_allowed(hba)) {
if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
hba->ahit);
else
ah_ms = 10;
ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
tmp |= 0x10;
ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), tmp);
}
return ret;
}
static void ufs_mtk_post_link(struct ufs_hba *hba)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
u32 tmp;
/* fix device PA_INIT no adapt */
if (host->ip_ver >= IP_VER_MT6899) {
ufshcd_dme_get(hba, UIC_ARG_MIB(VS_DEBUGOMC), &tmp);
tmp |= 0x100;
ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), tmp);
}
/* enable unipro clock gating feature */
ufs_mtk_cfg_unipro_cg(hba, true);
/* will be configured during probe hba */
if (ufshcd_is_auto_hibern8_supported(hba))
hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
ufs_mtk_setup_clk_gating(hba);
}
static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
@ -1421,11 +1592,11 @@ static int ufs_mtk_device_reset(struct ufs_hba *hba)
{
struct arm_smccc_res res;
/* disable hba before device reset */
ufshcd_hba_stop(hba);
ufs_mtk_device_reset_ctrl(0, res);
/* disable hba in middle of device reset */
ufshcd_hba_stop(hba);
/*
* The reset signal is active low. UFS devices shall detect
* more than or equal to 1us of positive or negative RST_n
@ -1462,7 +1633,11 @@ static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
return err;
/* Check link state to make sure exit h8 success */
ufs_mtk_wait_idle_state(hba, 5);
err = ufs_mtk_wait_idle_state(hba, 5);
if (err) {
dev_warn(hba->dev, "wait idle fail, err=%d\n", err);
return err;
}
err = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
if (err) {
dev_warn(hba->dev, "exit h8 state fail, err=%d\n", err);
@ -1507,6 +1682,9 @@ static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
{
struct ufs_vreg *vccqx = NULL;
if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
return;
if (hba->vreg_info.vccq)
vccqx = hba->vreg_info.vccq;
else
@@ -1561,21 +1739,6 @@ static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
}
}
static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
{
int ret;
/* disable auto-hibern8 */
ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
/* wait host return to idle state when auto-hibern8 off */
ufs_mtk_wait_idle_state(hba, 5);
ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
if (ret)
dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
}
static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
enum ufs_notify_change_status status)
{
@@ -1584,7 +1747,7 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
if (status == PRE_CHANGE) {
if (ufshcd_is_auto_hibern8_supported(hba))
ufs_mtk_auto_hibern8_disable(hba);
return ufs_mtk_auto_hibern8_disable(hba);
return 0;
}
@@ -1642,8 +1805,21 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
}
return 0;
fail:
return ufshcd_link_recovery(hba);
/*
* Check if the platform (parent) device has resumed, and ensure that
* power, clock, and MTCMOS are all turned on.
*/
err = ufshcd_link_recovery(hba);
if (err) {
dev_err(hba->dev, "Device PM: req=%d, status:%d, err:%d\n",
hba->dev->power.request,
hba->dev->power.runtime_status,
hba->dev->power.runtime_error);
}
return 0; /* Cannot return a failure; otherwise the I/O will hang. */
}
static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
@@ -1726,6 +1902,8 @@ static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
ufs_mtk_vreg_fix_vcc(hba);
ufs_mtk_vreg_fix_vccqx(hba);
ufs_mtk_fix_ahit(hba);
ufs_mtk_fix_clock_scaling(hba);
}
static void ufs_mtk_event_notify(struct ufs_hba *hba,
@@ -2012,6 +2190,7 @@ static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
return ret;
}
}
host->is_mcq_intr_enabled = true;
return 0;
}
@@ -2095,10 +2274,12 @@ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
static int ufs_mtk_probe(struct platform_device *pdev)
{
int err;
struct device *dev = &pdev->dev;
struct device_node *reset_node;
struct platform_device *reset_pdev;
struct device *dev = &pdev->dev, *phy_dev = NULL;
struct device_node *reset_node, *phy_node = NULL;
struct platform_device *reset_pdev, *phy_pdev = NULL;
struct device_link *link;
struct ufs_hba *hba;
struct ufs_mtk_host *host;
reset_node = of_find_compatible_node(NULL, NULL,
"ti,syscon-reset");
@@ -2125,13 +2306,51 @@ static int ufs_mtk_probe(struct platform_device *pdev)
}
skip_reset:
/* find phy node */
phy_node = of_parse_phandle(dev->of_node, "phys", 0);
if (phy_node) {
phy_pdev = of_find_device_by_node(phy_node);
if (!phy_pdev)
goto skip_phy;
phy_dev = &phy_pdev->dev;
pm_runtime_set_active(phy_dev);
pm_runtime_enable(phy_dev);
pm_runtime_get_sync(phy_dev);
put_device(phy_dev);
dev_info(dev, "phys node found\n");
} else {
dev_notice(dev, "phys node not found\n");
}
skip_phy:
/* perform generic probe */
err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
if (err) {
dev_err(dev, "probe failed %d\n", err);
goto out;
}
hba = platform_get_drvdata(pdev);
if (!hba)
goto out;
if (phy_node && phy_dev) {
host = ufshcd_get_variant(hba);
host->phy_dev = phy_dev;
}
/*
* Because the default power setting of VSx (the upper layer of
* VCCQ/VCCQ2) is HWLP, we need to prevent VCCQ/VCCQ2 from
* entering LPM.
*/
ufs_mtk_dev_vreg_set_lpm(hba, false);
out:
if (err)
dev_err(dev, "probe failed %d\n", err);
of_node_put(phy_node);
of_node_put(reset_node);
return err;
}
@@ -2156,27 +2375,38 @@ static int ufs_mtk_system_suspend(struct device *dev)
ret = ufshcd_system_suspend(dev);
if (ret)
return ret;
goto out;
if (pm_runtime_suspended(hba->dev))
goto out;
ufs_mtk_dev_vreg_set_lpm(hba, true);
if (ufs_mtk_is_rtff_mtcmos(hba))
ufs_mtk_mtcmos_ctrl(false, res);
return 0;
out:
return ret;
}
static int ufs_mtk_system_resume(struct device *dev)
{
int ret = 0;
struct ufs_hba *hba = dev_get_drvdata(dev);
struct arm_smccc_res res;
ufs_mtk_dev_vreg_set_lpm(hba, false);
if (pm_runtime_suspended(hba->dev))
goto out;
if (ufs_mtk_is_rtff_mtcmos(hba))
ufs_mtk_mtcmos_ctrl(true, res);
return ufshcd_system_resume(dev);
ufs_mtk_dev_vreg_set_lpm(hba, false);
out:
ret = ufshcd_system_resume(dev);
return ret;
}
#endif
@@ -2184,6 +2414,7 @@ static int ufs_mtk_system_resume(struct device *dev)
static int ufs_mtk_runtime_suspend(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct arm_smccc_res res;
int ret = 0;
@@ -2196,17 +2427,24 @@ static int ufs_mtk_runtime_suspend(struct device *dev)
if (ufs_mtk_is_rtff_mtcmos(hba))
ufs_mtk_mtcmos_ctrl(false, res);
if (host->phy_dev)
pm_runtime_put_sync(host->phy_dev);
return 0;
}
static int ufs_mtk_runtime_resume(struct device *dev)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
struct arm_smccc_res res;
if (ufs_mtk_is_rtff_mtcmos(hba))
ufs_mtk_mtcmos_ctrl(true, res);
if (host->phy_dev)
pm_runtime_get_sync(host->phy_dev);
ufs_mtk_dev_vreg_set_lpm(hba, false);
return ufshcd_runtime_resume(dev);


@@ -193,6 +193,7 @@ struct ufs_mtk_host {
bool is_mcq_intr_enabled;
int mcq_nr_intr;
struct ufs_mtk_mcq_intr_info mcq_intr_info[UFSHCD_MAX_Q_NR];
struct device *phy_dev;
};
/* MTK delay of autosuspend: 500 ms */


@@ -38,6 +38,9 @@
#define DEEMPHASIS_3_5_dB 0x04
#define NO_DEEMPHASIS 0x0
#define UFS_ICE_SYNC_RST_SEL BIT(3)
#define UFS_ICE_SYNC_RST_SW BIT(4)
enum {
TSTBUS_UAWM,
TSTBUS_UARM,
@@ -494,12 +497,8 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
* If the HS-G5 PHY gear is used, update host_params->hs_rate to Rate-A,
* so that the subsequent power mode change shall stick to Rate-A.
*/
if (host->hw_ver.major == 0x5) {
if (host->phy_gear == UFS_HS_G5)
host_params->hs_rate = PA_HS_MODE_A;
else
host_params->hs_rate = PA_HS_MODE_B;
}
if (host->hw_ver.major == 0x5 && host->phy_gear == UFS_HS_G5)
host_params->hs_rate = PA_HS_MODE_A;
mode = host_params->hs_rate == PA_HS_MODE_B ? PHY_MODE_UFS_HS_B : PHY_MODE_UFS_HS_A;
@@ -751,11 +750,29 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err;
u32 reg_val;
err = ufs_qcom_enable_lane_clks(host);
if (err)
return err;
if ((!ufs_qcom_is_link_active(hba)) &&
host->hw_ver.major == 5 &&
host->hw_ver.minor == 0 &&
host->hw_ver.step == 0) {
ufshcd_writel(hba, UFS_ICE_SYNC_RST_SEL | UFS_ICE_SYNC_RST_SW, UFS_MEM_ICE_CFG);
reg_val = ufshcd_readl(hba, UFS_MEM_ICE_CFG);
reg_val &= ~(UFS_ICE_SYNC_RST_SEL | UFS_ICE_SYNC_RST_SW);
/*
* HW documentation doesn't recommend any delay between the
* reset set and clear. But we are enforcing an arbitrary delay
* to give flops enough time to settle in.
*/
usleep_range(50, 100);
ufshcd_writel(hba, reg_val, UFS_MEM_ICE_CFG);
ufshcd_readl(hba, UFS_MEM_ICE_CFG);
}
return ufs_qcom_ice_resume(host);
}
@@ -1096,6 +1113,18 @@ static void ufs_qcom_set_phy_gear(struct ufs_qcom_host *host)
}
}
static void ufs_qcom_parse_gear_limits(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_host_params *host_params = &host->host_params;
u32 hs_gear_old = host_params->hs_tx_gear;
ufshcd_parse_gear_limits(hba, host_params);
if (host_params->hs_tx_gear != hs_gear_old) {
host->phy_gear = host_params->hs_tx_gear;
}
}
static void ufs_qcom_set_host_params(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -1162,6 +1191,13 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
case PRE_CHANGE:
if (on) {
ufs_qcom_icc_update_bw(host);
if (ufs_qcom_is_link_hibern8(hba)) {
err = ufs_qcom_enable_lane_clks(host);
if (err) {
dev_err(hba->dev, "enable lane clks failed, ret=%d\n", err);
return err;
}
}
} else {
if (!ufs_qcom_is_link_active(hba)) {
/* disable device ref_clk */
@@ -1187,6 +1223,9 @@
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
} else {
if (ufs_qcom_is_link_hibern8(hba))
ufs_qcom_disable_lane_clks(host);
ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MIN][0][0].mem_bw,
ufs_qcom_bw_table[MODE_MIN][0][0].cfg_bw);
}
@@ -1337,6 +1376,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_advertise_quirks(hba);
ufs_qcom_set_host_params(hba);
ufs_qcom_set_phy_gear(host);
ufs_qcom_parse_gear_limits(hba);
err = ufs_qcom_ice_init(host);
if (err)
@@ -1742,7 +1782,7 @@ static void ufs_qcom_dump_testbus(struct ufs_hba *hba)
}
static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
const char *prefix, enum ufshcd_res id)
const char *prefix, void __iomem *base)
{
u32 *regs __free(kfree) = NULL;
size_t pos;
@@ -1755,7 +1795,7 @@ static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
return -ENOMEM;
for (pos = 0; pos < len; pos += 4)
regs[pos / 4] = readl(hba->res[id].base + offset + pos);
regs[pos / 4] = readl(base + offset + pos);
print_hex_dump(KERN_ERR, prefix,
len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
@@ -1766,30 +1806,34 @@ static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
static void ufs_qcom_dump_mcq_hci_regs(struct ufs_hba *hba)
{
struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[0];
void __iomem *mcq_vs_base = hba->mcq_base + UFS_MEM_VS_BASE;
struct dump_info {
void __iomem *base;
size_t offset;
size_t len;
const char *prefix;
enum ufshcd_res id;
};
struct dump_info mcq_dumps[] = {
{0x0, 256 * 4, "MCQ HCI-0 ", RES_MCQ},
{0x400, 256 * 4, "MCQ HCI-1 ", RES_MCQ},
{0x0, 5 * 4, "MCQ VS-0 ", RES_MCQ_VS},
{0x0, 256 * 4, "MCQ SQD-0 ", RES_MCQ_SQD},
{0x400, 256 * 4, "MCQ SQD-1 ", RES_MCQ_SQD},
{0x800, 256 * 4, "MCQ SQD-2 ", RES_MCQ_SQD},
{0xc00, 256 * 4, "MCQ SQD-3 ", RES_MCQ_SQD},
{0x1000, 256 * 4, "MCQ SQD-4 ", RES_MCQ_SQD},
{0x1400, 256 * 4, "MCQ SQD-5 ", RES_MCQ_SQD},
{0x1800, 256 * 4, "MCQ SQD-6 ", RES_MCQ_SQD},
{0x1c00, 256 * 4, "MCQ SQD-7 ", RES_MCQ_SQD},
{hba->mcq_base, 0x0, 256 * 4, "MCQ HCI-0 "},
{hba->mcq_base, 0x400, 256 * 4, "MCQ HCI-1 "},
{mcq_vs_base, 0x0, 5 * 4, "MCQ VS-0 "},
{opr->base, 0x0, 256 * 4, "MCQ SQD-0 "},
{opr->base, 0x400, 256 * 4, "MCQ SQD-1 "},
{opr->base, 0x800, 256 * 4, "MCQ SQD-2 "},
{opr->base, 0xc00, 256 * 4, "MCQ SQD-3 "},
{opr->base, 0x1000, 256 * 4, "MCQ SQD-4 "},
{opr->base, 0x1400, 256 * 4, "MCQ SQD-5 "},
{opr->base, 0x1800, 256 * 4, "MCQ SQD-6 "},
{opr->base, 0x1c00, 256 * 4, "MCQ SQD-7 "},
};
for (int i = 0; i < ARRAY_SIZE(mcq_dumps); i++) {
ufs_qcom_dump_regs(hba, mcq_dumps[i].offset, mcq_dumps[i].len,
mcq_dumps[i].prefix, mcq_dumps[i].id);
mcq_dumps[i].prefix, mcq_dumps[i].base);
cond_resched();
}
}
@@ -1910,116 +1954,68 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
hba->clk_scaling.suspend_on_no_request = true;
}
/* Resources */
static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
{.name = "ufs_mem",},
{.name = "mcq",},
/* Submission Queue DAO */
{.name = "mcq_sqd",},
/* Submission Queue Interrupt Status */
{.name = "mcq_sqis",},
/* Completion Queue DAO */
{.name = "mcq_cqd",},
/* Completion Queue Interrupt Status */
{.name = "mcq_cqis",},
/* MCQ vendor specific */
{.name = "mcq_vs",},
};
static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
{
struct platform_device *pdev = to_platform_device(hba->dev);
struct ufshcd_res_info *res;
struct resource *res_mem, *res_mcq;
int i, ret;
struct resource *res;
memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info));
for (i = 0; i < RES_MAX; i++) {
res = &hba->res[i];
res->resource = platform_get_resource_byname(pdev,
IORESOURCE_MEM,
res->name);
if (!res->resource) {
dev_info(hba->dev, "Resource %s not provided\n", res->name);
if (i == RES_UFS)
return -ENODEV;
continue;
} else if (i == RES_UFS) {
res_mem = res->resource;
res->base = hba->mmio_base;
continue;
}
res->base = devm_ioremap_resource(hba->dev, res->resource);
if (IS_ERR(res->base)) {
dev_err(hba->dev, "Failed to map res %s, err=%d\n",
res->name, (int)PTR_ERR(res->base));
ret = PTR_ERR(res->base);
res->base = NULL;
return ret;
}
/* Map the MCQ configuration region */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mcq");
if (!res) {
dev_err(hba->dev, "MCQ resource not found in device tree\n");
return -ENODEV;
}
/* MCQ resource provided in DT */
res = &hba->res[RES_MCQ];
/* Bail if MCQ resource is provided */
if (res->base)
goto out;
/* Explicitly allocate MCQ resource from ufs_mem */
res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL);
if (!res_mcq)
return -ENOMEM;
res_mcq->start = res_mem->start +
MCQ_SQATTR_OFFSET(hba->mcq_capabilities);
res_mcq->end = res_mcq->start + hba->nr_hw_queues * MCQ_QCFG_SIZE - 1;
res_mcq->flags = res_mem->flags;
res_mcq->name = "mcq";
ret = insert_resource(&iomem_resource, res_mcq);
if (ret) {
dev_err(hba->dev, "Failed to insert MCQ resource, err=%d\n",
ret);
return ret;
hba->mcq_base = devm_ioremap_resource(hba->dev, res);
if (IS_ERR(hba->mcq_base)) {
dev_err(hba->dev, "Failed to map MCQ region: %ld\n",
PTR_ERR(hba->mcq_base));
return PTR_ERR(hba->mcq_base);
}
res->base = devm_ioremap_resource(hba->dev, res_mcq);
if (IS_ERR(res->base)) {
dev_err(hba->dev, "MCQ registers mapping failed, err=%d\n",
(int)PTR_ERR(res->base));
ret = PTR_ERR(res->base);
goto ioremap_err;
}
out:
hba->mcq_base = res->base;
return 0;
ioremap_err:
res->base = NULL;
remove_resource(res_mcq);
return ret;
}
static int ufs_qcom_op_runtime_config(struct ufs_hba *hba)
{
struct ufshcd_res_info *mem_res, *sqdao_res;
struct ufshcd_mcq_opr_info_t *opr;
int i;
u32 doorbell_offsets[OPR_MAX];
mem_res = &hba->res[RES_UFS];
sqdao_res = &hba->res[RES_MCQ_SQD];
/*
* Configure doorbell address offsets in MCQ configuration registers.
* These values are offsets relative to mmio_base (UFS_HCI_BASE).
*
* Memory Layout:
* - mmio_base = UFS_HCI_BASE
* - mcq_base = MCQ_CONFIG_BASE = mmio_base + (UFS_QCOM_MCQCAP_QCFGPTR * 0x200)
* - Doorbell registers are at:
*   mmio_base + (UFS_QCOM_MCQCAP_QCFGPTR * 0x200) + UFS_QCOM_MCQ_SQD_OFFSET
* - Which is also: mcq_base + UFS_QCOM_MCQ_SQD_OFFSET
*/
if (!mem_res->base || !sqdao_res->base)
return -EINVAL;
doorbell_offsets[OPR_SQD] = UFS_QCOM_SQD_ADDR_OFFSET;
doorbell_offsets[OPR_SQIS] = UFS_QCOM_SQIS_ADDR_OFFSET;
doorbell_offsets[OPR_CQD] = UFS_QCOM_CQD_ADDR_OFFSET;
doorbell_offsets[OPR_CQIS] = UFS_QCOM_CQIS_ADDR_OFFSET;
/*
* Configure MCQ operation registers.
*
* The doorbell registers are physically located within the MCQ region:
* - doorbell_physical_addr = mmio_base + doorbell_offset
* - doorbell_physical_addr = mcq_base + (doorbell_offset - MCQ_CONFIG_OFFSET)
*/
for (i = 0; i < OPR_MAX; i++) {
opr = &hba->mcq_opr[i];
opr->offset = sqdao_res->resource->start -
mem_res->resource->start + 0x40 * i;
opr->stride = 0x100;
opr->base = sqdao_res->base + 0x40 * i;
opr->offset = doorbell_offsets[i]; /* Offset relative to mmio_base */
opr->stride = UFS_QCOM_MCQ_STRIDE; /* 256 bytes between queues */
/*
* Calculate the actual doorbell base address within MCQ region:
* base = mcq_base + (doorbell_offset - MCQ_CONFIG_OFFSET)
*/
opr->base = hba->mcq_base + (opr->offset - UFS_QCOM_MCQ_CONFIG_OFFSET);
}
return 0;
@@ -2034,12 +2030,8 @@ static int ufs_qcom_get_hba_mac(struct ufs_hba *hba)
static int ufs_qcom_get_outstanding_cqs(struct ufs_hba *hba,
unsigned long *ocqs)
{
struct ufshcd_res_info *mcq_vs_res = &hba->res[RES_MCQ_VS];
if (!mcq_vs_res->base)
return -EINVAL;
*ocqs = readl(mcq_vs_res->base + UFS_MEM_CQIS_VS);
/* Read from MCQ vendor-specific register in MCQ region */
*ocqs = readl(hba->mcq_base + UFS_MEM_CQIS_VS);
return 0;
}


@@ -33,6 +33,28 @@
#define DL_VS_CLK_CFG_MASK GENMASK(9, 0)
#define DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN BIT(9)
/* Qualcomm MCQ Configuration */
#define UFS_QCOM_MCQCAP_QCFGPTR 224 /* 0xE0 */
#define UFS_QCOM_MCQ_CONFIG_OFFSET (UFS_QCOM_MCQCAP_QCFGPTR * 0x200) /* 0x1C000 */
/* Doorbell offsets within MCQ region (relative to MCQ_CONFIG_BASE) */
#define UFS_QCOM_MCQ_SQD_OFFSET 0x5000
#define UFS_QCOM_MCQ_CQD_OFFSET 0x5080
#define UFS_QCOM_MCQ_SQIS_OFFSET 0x5040
#define UFS_QCOM_MCQ_CQIS_OFFSET 0x50C0
#define UFS_QCOM_MCQ_STRIDE 0x100
/* Calculated doorbell address offsets (relative to mmio_base) */
#define UFS_QCOM_SQD_ADDR_OFFSET (UFS_QCOM_MCQ_CONFIG_OFFSET + UFS_QCOM_MCQ_SQD_OFFSET)
#define UFS_QCOM_CQD_ADDR_OFFSET (UFS_QCOM_MCQ_CONFIG_OFFSET + UFS_QCOM_MCQ_CQD_OFFSET)
#define UFS_QCOM_SQIS_ADDR_OFFSET (UFS_QCOM_MCQ_CONFIG_OFFSET + UFS_QCOM_MCQ_SQIS_OFFSET)
#define UFS_QCOM_CQIS_ADDR_OFFSET (UFS_QCOM_MCQ_CONFIG_OFFSET + UFS_QCOM_MCQ_CQIS_OFFSET)
#define REG_UFS_MCQ_STRIDE UFS_QCOM_MCQ_STRIDE
/* MCQ Vendor specific address offsets (relative to MCQ_CONFIG_BASE) */
#define UFS_MEM_VS_BASE 0x4000
#define UFS_MEM_CQIS_VS 0x4008
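The layout encoded above is easy to sanity-check: QCFGPTR is 224, so the MCQ configuration region starts at 224 * 0x200 = 0x1C000 from mmio_base, and each doorbell address offset is that base plus its region-relative offset. Hypothetical compile-time checks mirroring the macro arithmetic (illustration only, not part of the patch):

/* Hypothetical sanity checks for the offset math above. */
_Static_assert(UFS_QCOM_MCQ_CONFIG_OFFSET == 0x1C000,
	       "MCQ config region offset");
_Static_assert(UFS_QCOM_SQD_ADDR_OFFSET == 0x21000,
	       "SQ doorbells at 0x1C000 + 0x5000");
_Static_assert(UFS_QCOM_CQIS_ADDR_OFFSET == 0x210C0,
	       "CQ interrupt status at 0x1C000 + 0x50C0");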
/* QCOM UFS host controller vendor specific registers */
enum {
REG_UFS_SYS1CLK_1US = 0xC0,
@@ -60,7 +82,7 @@ enum {
UFS_AH8_CFG = 0xFC,
UFS_RD_REG_MCQ = 0xD00,
UFS_MEM_ICE_CFG = 0x2600,
REG_UFS_MEM_ICE_CONFIG = 0x260C,
REG_UFS_MEM_ICE_NUM_CORE = 0x2664,
@@ -95,10 +117,6 @@ enum {
REG_UFS_SW_H8_EXIT_CNT = 0x2710,
};
enum {
UFS_MEM_CQIS_VS = 0x8,
};
#define UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(x) (0x000 + x)
#define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x) (0x400 + x)


@@ -430,6 +430,39 @@ int ufshcd_negotiate_pwr_params(const struct ufs_host_params *host_params,
}
EXPORT_SYMBOL_GPL(ufshcd_negotiate_pwr_params);
/**
* ufshcd_parse_gear_limits - Parse DT-based gear and rate limits for UFS
* @hba: Pointer to UFS host bus adapter instance
* @host_params: Pointer to UFS host parameters structure to be updated
*
* This function reads optional device tree properties to apply
* platform-specific constraints.
*
* "limit-hs-gear": Specifies the maximum supported HS gear.
* "limit-gear-rate": Selects the HS rate series, "rate-a" or "rate-b".
*/
void ufshcd_parse_gear_limits(struct ufs_hba *hba, struct ufs_host_params *host_params)
{
struct device_node *np = hba->dev->of_node;
u32 hs_gear;
const char *hs_rate;
if (!of_property_read_u32(np, "limit-hs-gear", &hs_gear)) {
host_params->hs_tx_gear = hs_gear;
host_params->hs_rx_gear = hs_gear;
}
if (!of_property_read_string(np, "limit-gear-rate", &hs_rate)) {
if (!strcmp(hs_rate, "rate-a"))
host_params->hs_rate = PA_HS_MODE_A;
else if (!strcmp(hs_rate, "rate-b"))
host_params->hs_rate = PA_HS_MODE_B;
else
dev_warn(hba->dev, "Invalid rate: %s\n", hs_rate);
}
}
EXPORT_SYMBOL_GPL(ufshcd_parse_gear_limits);
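For context, a glue driver is expected to seed defaults first and then let the DT limits tighten them, which is the order the qcom glue above uses. A minimal sketch with a hypothetical wrapper name:

/* Hypothetical glue-driver helper: defaults first, then optional DT caps. */
static void example_init_host_params(struct ufs_hba *hba,
				     struct ufs_host_params *host_params)
{
	ufshcd_init_host_params(host_params);	    /* max gear/rate defaults */
	ufshcd_parse_gear_limits(hba, host_params); /* apply limit-* properties */
}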
void ufshcd_init_host_params(struct ufs_host_params *host_params)
{
*host_params = (struct ufs_host_params){


@@ -29,6 +29,7 @@ int ufshcd_negotiate_pwr_params(const struct ufs_host_params *host_params,
const struct ufs_pa_layer_attr *dev_max,
struct ufs_pa_layer_attr *agreed_pwr);
void ufshcd_init_host_params(struct ufs_host_params *host_params);
void ufshcd_parse_gear_limits(struct ufs_hba *hba, struct ufs_host_params *host_params);
int ufshcd_pltfrm_init(struct platform_device *pdev,
const struct ufs_hba_variant_ops *vops);
void ufshcd_pltfrm_remove(struct platform_device *pdev);


@@ -203,6 +203,14 @@ static inline bool dev_is_expander(enum sas_device_type type)
type == SAS_FANOUT_EXPANDER_DEVICE;
}
static inline bool dev_parent_is_expander(struct domain_device *dev)
{
if (!dev->parent)
return false;
return dev_is_expander(dev->parent->dev_type);
}
static inline void INIT_SAS_WORK(struct sas_work *sw, void (*fn)(struct work_struct *))
{
INIT_WORK(&sw->work, fn);


@@ -11,6 +11,12 @@
#include <linux/types.h>
#include <asm/byteorder.h>
#ifdef __KERNEL__
#include <linux/stddef.h> /* for offsetof */
#else
#include <stddef.h> /* for offsetof */
#endif
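The hunks below wrap the fixed members of several FPIN/RDF structures in __struct_group() (from linux/stddef.h), which emits the wrapped members twice: once inline and once as a standalone tagged struct, so an _Static_assert can pin each flexible array to start exactly where the fixed header ends; offsetof() is why the stddef include is added above. A minimal sketch of the pattern with hypothetical names:

/* Hypothetical example: 'struct example_msg_hdr' mirrors the fixed
 * members and gives safe sizeof()/memcpy() bounds that exclude the
 * flexible array. */
struct example_msg {
	/* New members MUST be added within the __struct_group() macro below. */
	__struct_group(example_msg_hdr, __hdr, /* no attrs */,
		__be32 tag;
		__be32 len;
	);
	__be32 payload[];
};
_Static_assert(offsetof(struct example_msg, payload) == sizeof(struct example_msg_hdr),
	       "struct member likely outside of __struct_group()");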
/*
* Fibre Channel Switch - Enhanced Link Services definitions.
* From T11 FC-LS Rev 1.2 June 7, 2005.
@@ -1109,12 +1115,15 @@ struct fc_els_fpin {
/* Diagnostic Function Descriptor - FPIN Registration */
struct fc_df_desc_fpin_reg {
__be32 desc_tag; /* FPIN Registration (0x00030001) */
__be32 desc_len; /* Length of Descriptor (in bytes).
* Size of descriptor excluding
* desc_tag and desc_len fields.
*/
__be32 count; /* Number of desc_tags elements */
/* New members MUST be added within the __struct_group() macro below. */
__struct_group(fc_df_desc_fpin_reg_hdr, __hdr, /* no attrs */,
__be32 desc_tag; /* FPIN Registration (0x00030001) */
__be32 desc_len; /* Length of Descriptor (in bytes).
* Size of descriptor excluding
* desc_tag and desc_len fields.
*/
__be32 count; /* Number of desc_tags elements */
);
__be32 desc_tags[]; /* Array of Descriptor Tags.
* Each tag indicates a function
* supported by the N_Port (request)
@@ -1124,33 +1133,44 @@ struct fc_df_desc_fpin_reg {
* See ELS_FN_DTAG_xxx for tag values.
*/
};
_Static_assert(offsetof(struct fc_df_desc_fpin_reg, desc_tags) == sizeof(struct fc_df_desc_fpin_reg_hdr),
"struct member likely outside of __struct_group()");
/*
* ELS_RDF - Register Diagnostic Functions
*/
struct fc_els_rdf {
__u8 fpin_cmd; /* command (0x19) */
__u8 fpin_zero[3]; /* specified as zero - part of cmd */
__be32 desc_len; /* Length of Descriptor List (in bytes).
* Size of ELS excluding fpin_cmd,
* fpin_zero and desc_len fields.
*/
/* New members MUST be added within the __struct_group() macro below. */
__struct_group(fc_els_rdf_hdr, __hdr, /* no attrs */,
__u8 fpin_cmd; /* command (0x19) */
__u8 fpin_zero[3]; /* specified as zero - part of cmd */
__be32 desc_len; /* Length of Descriptor List (in bytes).
* Size of ELS excluding fpin_cmd,
* fpin_zero and desc_len fields.
*/
);
struct fc_tlv_desc desc[]; /* Descriptor list */
};
_Static_assert(offsetof(struct fc_els_rdf, desc) == sizeof(struct fc_els_rdf_hdr),
"struct member likely outside of __struct_group()");
/*
* ELS RDF LS_ACC Response.
*/
struct fc_els_rdf_resp {
struct fc_els_ls_acc acc_hdr;
__be32 desc_list_len; /* Length of response (in
* bytes). Excludes acc_hdr
* and desc_list_len fields.
*/
struct fc_els_lsri_desc lsri;
/* New members MUST be added within the __struct_group() macro below. */
__struct_group(fc_els_rdf_resp_hdr, __hdr, /* no attrs */,
struct fc_els_ls_acc acc_hdr;
__be32 desc_list_len; /* Length of response (in
* bytes). Excludes acc_hdr
* and desc_list_len fields.
*/
struct fc_els_lsri_desc lsri;
);
struct fc_tlv_desc desc[]; /* Supported Descriptor list */
};
_Static_assert(offsetof(struct fc_els_rdf_resp, desc) == sizeof(struct fc_els_rdf_resp_hdr),
"struct member likely outside of __struct_group()");
/*
* Diagnostic Capability Descriptors for EDC ELS


@@ -653,21 +653,4 @@ struct ufs_dev_info {
bool hid_sup;
};
/*
* This enum is used in string mapping in ufs_trace.h.
*/
enum ufs_trace_str_t {
UFS_CMD_SEND, UFS_CMD_COMP, UFS_DEV_COMP,
UFS_QUERY_SEND, UFS_QUERY_COMP, UFS_QUERY_ERR,
UFS_TM_SEND, UFS_TM_COMP, UFS_TM_ERR
};
/*
* Transaction Specific Fields (TSF) type in the UPIU package, this enum is
* used in ufs_trace.h for UFS command trace.
*/
enum ufs_trace_tsf_t {
UFS_TSF_CDB, UFS_TSF_OSF, UFS_TSF_TM_INPUT, UFS_TSF_TM_OUTPUT
};
#endif /* End of Header */


@@ -113,4 +113,7 @@ struct ufs_dev_quirk {
*/
#define UFS_DEVICE_QUIRK_PA_HIBER8TIME (1 << 12)
/* Some UFS 4 devices do not support the qTimestamp attribute */
#define UFS_DEVICE_QUIRK_NO_TIMESTAMP_SUPPORT (1 << 13)
#endif /* UFS_QUIRKS_H_ */


@@ -167,13 +167,13 @@ struct ufs_pm_lvl_states {
* @task_tag: Task tag of the command
* @lun: LUN of the command
* @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
* @req_abort_skip: skip request abort task flag
* @issue_time_stamp: time stamp for debug purposes (CLOCK_MONOTONIC)
* @issue_time_stamp_local_clock: time stamp for debug purposes (local_clock)
* @compl_time_stamp: time stamp for statistics (CLOCK_MONOTONIC)
* @compl_time_stamp_local_clock: time stamp for debug purposes (local_clock)
* @crypto_key_slot: the key slot to use for inline crypto (-1 if none)
* @data_unit_num: the data unit number for the first block for inline crypto
* @req_abort_skip: skip request abort task flag
*/
struct ufshcd_lrb {
struct utp_transfer_req_desc *utr_descriptor_ptr;
@@ -193,6 +193,7 @@ struct ufshcd_lrb {
int task_tag;
u8 lun; /* UPIU LUN id field is only 8-bit wide */
bool intr_cmd;
bool req_abort_skip;
ktime_t issue_time_stamp;
u64 issue_time_stamp_local_clock;
ktime_t compl_time_stamp;
@@ -201,8 +202,6 @@ struct ufshcd_lrb {
int crypto_key_slot;
u64 data_unit_num;
#endif
bool req_abort_skip;
};
/**
@@ -794,30 +793,6 @@ struct ufs_hba_monitor {
bool enabled;
};
/**
* struct ufshcd_res_info_t - MCQ related resource regions
*
* @name: resource name
* @resource: pointer to resource region
* @base: register base address
*/
struct ufshcd_res_info {
const char *name;
struct resource *resource;
void __iomem *base;
};
enum ufshcd_res {
RES_UFS,
RES_MCQ,
RES_MCQ_SQD,
RES_MCQ_SQIS,
RES_MCQ_CQD,
RES_MCQ_CQIS,
RES_MCQ_VS,
RES_MAX,
};
/**
* struct ufshcd_mcq_opr_info_t - Operation and Runtime registers
*
@@ -963,6 +938,7 @@ enum ufshcd_mcq_opr {
* @ufs_rtc_update_work: A work for UFS RTC periodic update
* @pm_qos_req: PM QoS request handle
* @pm_qos_enabled: flag to check if pm qos is enabled
* @pm_qos_mutex: synchronizes PM QoS request and status updates
* @critical_health_count: count of critical health exceptions
* @dev_lvl_exception_count: count of device level exceptions since last reset
* @dev_lvl_exception_id: vendor specific information about the
@@ -1127,7 +1103,6 @@ struct ufs_hba {
bool lsdb_sup;
bool mcq_enabled;
bool mcq_esi_enabled;
struct ufshcd_res_info res[RES_MAX];
void __iomem *mcq_base;
struct ufs_hw_queue *uhq;
struct ufs_hw_queue *dev_cmd_queue;
@@ -1136,6 +1111,8 @@ struct ufs_hba {
struct delayed_work ufs_rtc_update_work;
struct pm_qos_request pm_qos_req;
bool pm_qos_enabled;
/* synchronizes PM QoS request and status updates */
struct mutex pm_qos_mutex;
int critical_health_count;
atomic_t dev_lvl_exception_count;
@@ -1318,6 +1295,7 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
void ufshcd_enable_irq(struct ufs_hba *hba);
void ufshcd_disable_irq(struct ufs_hba *hba);
void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs);
int ufshcd_alloc_host(struct device *, struct ufs_hba **);
int ufshcd_hba_enable(struct ufs_hba *hba);
int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
@@ -1508,5 +1486,6 @@ int __ufshcd_write_ee_control(struct ufs_hba *hba, u32 ee_ctrl_mask);
int ufshcd_write_ee_control(struct ufs_hba *hba);
int ufshcd_update_ee_control(struct ufs_hba *hba, u16 *mask,
const u16 *other_mask, u16 set, u16 clr);
void ufshcd_force_error_recovery(struct ufs_hba *hba);
#endif /* End of Header */