mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/netfilter/nf.git
synced 2026-04-05 00:08:32 -04:00
- Extend PCI_FIND_NEXT_CAP() and PCI_FIND_NEXT_EXT_CAP() to return a pointer to the preceding Capability (Qiang Yu) - Add dw_pcie_remove_capability() and dw_pcie_remove_ext_capability() to remove Capabilities that are advertised but not fully implemented (Qiang Yu) - Remove MSI and MSI-X Capabilities for DWC controllers in platforms that can't support them, so we automatically fall back to INTx (Qiang Yu) - Remove MSI-X and DPC Capabilities for Qualcomm platforms that advertise but don't support them (Qiang Yu) - Remove duplicate dw_pcie_ep_hide_ext_capability() function and replace with dw_pcie_remove_ext_capability() (Qiang Yu) - Add ASPM L1.1 and L1.2 Substates context to debugfs ltssm_status for drivers that support this (Shawn Lin) - Skip PME_Turn_Off broadcast and L2/L3 transition during suspend if link is not up to avoid an unnecessary timeout (Manivannan Sadhasivam) - Revert dw-rockchip, qcom, and DWC core changes that used link-up IRQs to trigger enumeration instead of waiting for link to be up because the PCI core doesn't allocate bus number space for hierarchies that might be attached (Niklas Cassel) - Make endpoint iATU entry for MSI permanent instead of programming it dynamically, which is slow and racy with respect to other concurrent traffic, e.g., eDMA (Koichiro Den) - Use iMSI-RX MSI target address when possible to fix endpoints using 32-bit MSI (Shawn Lin) - Make dw_pcie_ltssm_status_string() available and use it for logging errors in dw_pcie_wait_for_link() (Manivannan Sadhasivam) - Return -ENODEV when dw_pcie_wait_for_link() finds no devices, -EIO for device present but inactive, -ETIMEDOUT for other failures, so callers can handle these cases differently (Manivannan Sadhasivam) - Allow DWC host controller driver probe to continue if device is not found or found but inactive; only fail when there's an error with the link (Manivannan Sadhasivam) - For controllers like NXP i.MX6QP and i.MX7D, where LTSSM registers are not accessible after 
PME_Turn_Off, simply wait 10ms instead of polling for L2/L3 Ready (Richard Zhu) - Use multiple iATU entries to map large bridge windows and DMA ranges when necessary instead of failing (Samuel Holland) - Rename struct dw_pcie_rp.has_msi_ctrl to .use_imsi_rx for clarity (Qiang Yu) - Add EPC dynamic_inbound_mapping feature bit for Endpoint Controllers that can update BAR inbound address translation without requiring EPF driver to clear/reset the BAR first, and advertise it for DWC-based Endpoints (Koichiro Den) - Add EPC subrange_mapping feature bit for Endpoint Controllers that can map multiple independent inbound regions in a single BAR, implement subrange mapping, advertise it for DWC-based Endpoints, and add Endpoint selftests for it (Koichiro Den) - Allow overriding default BAR sizes for pci-epf-test (Niklas Cassel) - Make resizable BARs work for Endpoint multi-PF configurations; previously it only worked for PF 0 (Aksh Garg) - Fix Endpoint non-PF 0 support for BAR configuration, ATU mappings, and Address Match Mode (Aksh Garg) - Fix issues with outbound iATU index assignment that caused iATU index to be out of bounds (Niklas Cassel) - Clean up iATU index tracking to be consistent (Niklas Cassel) - Set up iATU when ECAM is enabled; previously IO and MEM outbound windows weren't programmed, and ECAM-related iATU entries weren't restored after suspend/resume, so config accesses failed (Krishna Chaitanya Chundru) * pci/controller/dwc: PCI: dwc: Fix missing iATU setup when ECAM is enabled PCI: dwc: Clean up iATU index usage in dw_pcie_iatu_setup() PCI: dwc: Fix msg_atu_index assignment PCI: dwc: ep: Add comment explaining controller level PTM access in multi PF setup PCI: dwc: ep: Add per-PF BAR and inbound ATU mapping support PCI: dwc: ep: Fix resizable BAR support for multi-PF configurations PCI: endpoint: pci-epf-test: Allow overriding default BAR sizes selftests: pci_endpoint: Add BAR subrange mapping test case misc: pci_endpoint_test: Add BAR subrange mapping 
test case PCI: endpoint: pci-epf-test: Add BAR subrange mapping test support Documentation: PCI: endpoint: Clarify pci_epc_set_bar() usage PCI: dwc: ep: Support BAR subrange inbound mapping via Address Match Mode iATU PCI: dwc: Advertise dynamic inbound mapping support PCI: endpoint: Add BAR subrange mapping support PCI: endpoint: Add dynamic_inbound_mapping EPC feature PCI: dwc: Rename dw_pcie_rp::has_msi_ctrl to dw_pcie_rp::use_imsi_rx for clarity PCI: dwc: Fix grammar and formatting for comment in dw_pcie_remove_ext_capability() PCI: dwc: Use multiple iATU windows for mapping large bridge windows and DMA ranges PCI: dwc: Remove duplicate dw_pcie_ep_hide_ext_capability() function PCI: dwc: Skip waiting for L2/L3 Ready if dw_pcie_rp::skip_l23_wait is true PCI: dwc: Fail dw_pcie_host_init() if dw_pcie_wait_for_link() returns -ETIMEDOUT PCI: dwc: Rework the error print of dw_pcie_wait_for_link() PCI: dwc: Rename and move ltssm_status_string() to pcie-designware.c PCI: dwc: Return -EIO from dw_pcie_wait_for_link() if device is not active PCI: dwc: Return -ENODEV from dw_pcie_wait_for_link() if device is not found PCI: dwc: Use cfg0_base as iMSI-RX target address to support 32-bit MSI devices PCI: dwc: ep: Cache MSI outbound iATU mapping Revert "PCI: dwc: Don't wait for link up if driver can detect Link Up event" Revert "PCI: qcom: Enumerate endpoints based on Link up event in 'global_irq' interrupt" Revert "PCI: qcom: Enable MSI interrupts together with Link up if 'Global IRQ' is supported" Revert "PCI: qcom: Don't wait for link if we can detect Link Up" Revert "PCI: dw-rockchip: Enumerate endpoints based on dll_link_up IRQ" Revert "PCI: dw-rockchip: Don't wait for link since we can detect Link Up" PCI: dwc: Skip PME_Turn_Off broadcast and L2/L3 transition during suspend if link is not up PCI: dw-rockchip: Change get_ltssm() to provide L1 Substates info PCI: dwc: Add L1 Substates context to ltssm_status of debugfs PCI: qcom: Remove DPC Extended Capability PCI: qcom: 
Remove MSI-X Capability for Root Ports PCI: dwc: Remove MSI/MSIX capability for Root Port if iMSI-RX is used as MSI controller PCI: dwc: Add new APIs to remove standard and extended Capability PCI: Add preceding capability position support in PCI_FIND_NEXT_*_CAP macros
992 lines
28 KiB
C
992 lines
28 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
/*
|
|
* Synopsys DesignWare PCIe host controller driver
|
|
*
|
|
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
|
|
* https://www.samsung.com
|
|
*
|
|
* Author: Jingoo Han <jg1.han@samsung.com>
|
|
*/
|
|
|
|
#ifndef _PCIE_DESIGNWARE_H
|
|
#define _PCIE_DESIGNWARE_H
|
|
|
|
#include <linux/bitfield.h>
|
|
#include <linux/bitops.h>
|
|
#include <linux/clk.h>
|
|
#include <linux/dma-mapping.h>
|
|
#include <linux/dma/edma.h>
|
|
#include <linux/gpio/consumer.h>
|
|
#include <linux/irq.h>
|
|
#include <linux/msi.h>
|
|
#include <linux/pci.h>
|
|
#include <linux/pci-ecam.h>
|
|
#include <linux/reset.h>
|
|
|
|
#include <linux/pci-epc.h>
|
|
#include <linux/pci-epf.h>
|
|
|
|
#include "../../pci.h"
|
|
|
|
/* DWC PCIe IP-core versions (native support since v4.70a) */
|
|
#define DW_PCIE_VER_365A 0x3336352a
|
|
#define DW_PCIE_VER_460A 0x3436302a
|
|
#define DW_PCIE_VER_470A 0x3437302a
|
|
#define DW_PCIE_VER_480A 0x3438302a
|
|
#define DW_PCIE_VER_490A 0x3439302a
|
|
#define DW_PCIE_VER_520A 0x3532302a
|
|
#define DW_PCIE_VER_540A 0x3534302a
|
|
|
|
#define __dw_pcie_ver_cmp(_pci, _ver, _op) \
|
|
((_pci)->version _op DW_PCIE_VER_ ## _ver)
|
|
|
|
#define dw_pcie_ver_is(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, ==)
|
|
|
|
#define dw_pcie_ver_is_ge(_pci, _ver) __dw_pcie_ver_cmp(_pci, _ver, >=)
|
|
|
|
#define dw_pcie_ver_type_is(_pci, _ver, _type) \
|
|
(__dw_pcie_ver_cmp(_pci, _ver, ==) && \
|
|
__dw_pcie_ver_cmp(_pci, TYPE_ ## _type, ==))
|
|
|
|
#define dw_pcie_ver_type_is_ge(_pci, _ver, _type) \
|
|
(__dw_pcie_ver_cmp(_pci, _ver, ==) && \
|
|
__dw_pcie_ver_cmp(_pci, TYPE_ ## _type, >=))
|
|
|
|
/* DWC PCIe controller capabilities */
|
|
#define DW_PCIE_CAP_REQ_RES 0
|
|
#define DW_PCIE_CAP_IATU_UNROLL 1
|
|
#define DW_PCIE_CAP_CDM_CHECK 2
|
|
|
|
#define dw_pcie_cap_is(_pci, _cap) \
|
|
test_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps)
|
|
|
|
#define dw_pcie_cap_set(_pci, _cap) \
|
|
set_bit(DW_PCIE_CAP_ ## _cap, &(_pci)->caps)
|
|
|
|
/* Parameters for the waiting for iATU enabled routine */
|
|
#define LINK_WAIT_MAX_IATU_RETRIES 5
|
|
#define LINK_WAIT_IATU 9
|
|
|
|
/* Synopsys-specific PCIe configuration registers */
|
|
#define PCIE_PORT_FORCE 0x708
|
|
#define PORT_FORCE_DO_DESKEW_FOR_SRIS BIT(23)
|
|
|
|
#define PCIE_PORT_AFR 0x70C
|
|
#define PORT_AFR_N_FTS_MASK GENMASK(15, 8)
|
|
#define PORT_AFR_N_FTS(n) FIELD_PREP(PORT_AFR_N_FTS_MASK, n)
|
|
#define PORT_AFR_CC_N_FTS_MASK GENMASK(23, 16)
|
|
#define PORT_AFR_CC_N_FTS(n) FIELD_PREP(PORT_AFR_CC_N_FTS_MASK, n)
|
|
#define PORT_AFR_ENTER_ASPM BIT(30)
|
|
#define PORT_AFR_L0S_ENTRANCE_LAT_SHIFT 24
|
|
#define PORT_AFR_L0S_ENTRANCE_LAT_MASK GENMASK(26, 24)
|
|
#define PORT_AFR_L1_ENTRANCE_LAT_SHIFT 27
|
|
#define PORT_AFR_L1_ENTRANCE_LAT_MASK GENMASK(29, 27)
|
|
|
|
#define PCIE_PORT_LINK_CONTROL 0x710
|
|
#define PORT_LINK_DLL_LINK_EN BIT(5)
|
|
#define PORT_LINK_FAST_LINK_MODE BIT(7)
|
|
#define PORT_LINK_MODE_MASK GENMASK(21, 16)
|
|
#define PORT_LINK_MODE(n) FIELD_PREP(PORT_LINK_MODE_MASK, n)
|
|
#define PORT_LINK_MODE_1_LANES PORT_LINK_MODE(0x1)
|
|
#define PORT_LINK_MODE_2_LANES PORT_LINK_MODE(0x3)
|
|
#define PORT_LINK_MODE_4_LANES PORT_LINK_MODE(0x7)
|
|
#define PORT_LINK_MODE_8_LANES PORT_LINK_MODE(0xf)
|
|
#define PORT_LINK_MODE_16_LANES PORT_LINK_MODE(0x1f)
|
|
|
|
#define PCIE_PORT_LANE_SKEW 0x714
|
|
#define PORT_LANE_SKEW_INSERT_MASK GENMASK(23, 0)
|
|
|
|
#define PCIE_PORT_DEBUG0 0x728
|
|
#define PORT_LOGIC_LTSSM_STATE_MASK 0x3f
|
|
#define PORT_LOGIC_LTSSM_STATE_L0 0x11
|
|
#define PCIE_PORT_DEBUG1 0x72C
|
|
#define PCIE_PORT_DEBUG1_LINK_UP BIT(4)
|
|
#define PCIE_PORT_DEBUG1_LINK_IN_TRAINING BIT(29)
|
|
|
|
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
|
|
#define PORT_LOGIC_N_FTS_MASK GENMASK(7, 0)
|
|
#define PORT_LOGIC_SPEED_CHANGE BIT(17)
|
|
#define PORT_LOGIC_LINK_WIDTH_MASK GENMASK(12, 8)
|
|
#define PORT_LOGIC_LINK_WIDTH(n) FIELD_PREP(PORT_LOGIC_LINK_WIDTH_MASK, n)
|
|
#define PORT_LOGIC_LINK_WIDTH_1_LANES PORT_LOGIC_LINK_WIDTH(0x1)
|
|
#define PORT_LOGIC_LINK_WIDTH_2_LANES PORT_LOGIC_LINK_WIDTH(0x2)
|
|
#define PORT_LOGIC_LINK_WIDTH_4_LANES PORT_LOGIC_LINK_WIDTH(0x4)
|
|
#define PORT_LOGIC_LINK_WIDTH_8_LANES PORT_LOGIC_LINK_WIDTH(0x8)
|
|
|
|
#define PCIE_MSI_ADDR_LO 0x820
|
|
#define PCIE_MSI_ADDR_HI 0x824
|
|
#define PCIE_MSI_INTR0_ENABLE 0x828
|
|
#define PCIE_MSI_INTR0_MASK 0x82C
|
|
#define PCIE_MSI_INTR0_STATUS 0x830
|
|
|
|
#define GEN3_RELATED_OFF 0x890
|
|
#define GEN3_RELATED_OFF_GEN3_ZRXDC_NONCOMPL BIT(0)
|
|
#define GEN3_RELATED_OFF_EQ_PHASE_2_3 BIT(9)
|
|
#define GEN3_RELATED_OFF_RXEQ_RGRDLESS_RXTS BIT(13)
|
|
#define GEN3_RELATED_OFF_GEN3_EQ_DISABLE BIT(16)
|
|
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_SHIFT 24
|
|
#define GEN3_RELATED_OFF_RATE_SHADOW_SEL_MASK GENMASK(25, 24)
|
|
|
|
#define GEN3_EQ_CONTROL_OFF 0x8A8
|
|
#define GEN3_EQ_CONTROL_OFF_FB_MODE GENMASK(3, 0)
|
|
#define GEN3_EQ_CONTROL_OFF_PHASE23_EXIT_MODE BIT(4)
|
|
#define GEN3_EQ_CONTROL_OFF_PSET_REQ_VEC GENMASK(23, 8)
|
|
#define GEN3_EQ_CONTROL_OFF_FOM_INC_INITIAL_EVAL BIT(24)
|
|
|
|
#define GEN3_EQ_FB_MODE_DIR_CHANGE_OFF 0x8AC
|
|
#define GEN3_EQ_FMDC_T_MIN_PHASE23 GENMASK(4, 0)
|
|
#define GEN3_EQ_FMDC_N_EVALS GENMASK(9, 5)
|
|
#define GEN3_EQ_FMDC_MAX_PRE_CURSOR_DELTA GENMASK(13, 10)
|
|
#define GEN3_EQ_FMDC_MAX_POST_CURSOR_DELTA GENMASK(17, 14)
|
|
|
|
#define COHERENCY_CONTROL_1_OFF 0x8E0
|
|
#define CFG_MEMTYPE_BOUNDARY_LOW_ADDR_MASK GENMASK(31, 2)
|
|
#define CFG_MEMTYPE_VALUE BIT(0)
|
|
|
|
#define COHERENCY_CONTROL_2_OFF 0x8E4
|
|
#define COHERENCY_CONTROL_3_OFF 0x8E8
|
|
|
|
#define PCIE_PORT_MULTI_LANE_CTRL 0x8C0
|
|
#define PORT_MLTI_UPCFG_SUPPORT BIT(7)
|
|
|
|
#define PCIE_VERSION_NUMBER 0x8F8
|
|
#define PCIE_VERSION_TYPE 0x8FC
|
|
|
|
/*
|
|
* iATU inbound and outbound windows CSRs. Before the IP-core v4.80a each
|
|
* iATU region CSRs had been indirectly accessible by means of the dedicated
|
|
* viewport selector. The iATU/eDMA CSRs space was re-designed in DWC PCIe
|
|
* v4.80a in a way so the viewport was unrolled into the directly accessible
|
|
* iATU/eDMA CSRs space.
|
|
*/
|
|
#define PCIE_ATU_VIEWPORT 0x900
|
|
#define PCIE_ATU_REGION_DIR_IB BIT(31)
|
|
#define PCIE_ATU_REGION_DIR_OB 0
|
|
#define PCIE_ATU_VIEWPORT_BASE 0x904
|
|
#define PCIE_ATU_UNROLL_BASE(dir, index) \
|
|
(((index) << 9) | ((dir == PCIE_ATU_REGION_DIR_IB) ? BIT(8) : 0))
|
|
#define PCIE_ATU_VIEWPORT_SIZE 0x2C
|
|
#define PCIE_ATU_REGION_CTRL1 0x000
|
|
#define PCIE_ATU_INCREASE_REGION_SIZE BIT(13)
|
|
#define PCIE_ATU_TYPE_MEM 0x0
|
|
#define PCIE_ATU_TYPE_IO 0x2
|
|
#define PCIE_ATU_TYPE_CFG0 0x4
|
|
#define PCIE_ATU_TYPE_CFG1 0x5
|
|
#define PCIE_ATU_TYPE_MSG 0x10
|
|
#define PCIE_ATU_TD BIT(8)
|
|
#define PCIE_ATU_FUNC_NUM(pf) ((pf) << 20)
|
|
#define PCIE_ATU_REGION_CTRL2 0x004
|
|
#define PCIE_ATU_ENABLE BIT(31)
|
|
#define PCIE_ATU_BAR_MODE_ENABLE BIT(30)
|
|
#define PCIE_ATU_CFG_SHIFT_MODE_ENABLE BIT(28)
|
|
#define PCIE_ATU_INHIBIT_PAYLOAD BIT(22)
|
|
#define PCIE_ATU_FUNC_NUM_MATCH_EN BIT(19)
|
|
#define PCIE_ATU_LOWER_BASE 0x008
|
|
#define PCIE_ATU_UPPER_BASE 0x00C
|
|
#define PCIE_ATU_LIMIT 0x010
|
|
#define PCIE_ATU_LOWER_TARGET 0x014
|
|
#define PCIE_ATU_BUS(x) FIELD_PREP(GENMASK(31, 24), x)
|
|
#define PCIE_ATU_DEV(x) FIELD_PREP(GENMASK(23, 19), x)
|
|
#define PCIE_ATU_FUNC(x) FIELD_PREP(GENMASK(18, 16), x)
|
|
#define PCIE_ATU_UPPER_TARGET 0x018
|
|
#define PCIE_ATU_UPPER_LIMIT 0x020
|
|
|
|
#define PCIE_MISC_CONTROL_1_OFF 0x8BC
|
|
#define PCIE_DBI_RO_WR_EN BIT(0)
|
|
|
|
#define PCIE_MSIX_DOORBELL 0x948
|
|
#define PCIE_MSIX_DOORBELL_PF_SHIFT 24
|
|
|
|
/*
|
|
* eDMA CSRs. DW PCIe IP-core v4.70a and older had the eDMA registers accessible
|
|
* over the Port Logic registers space. Afterwards the unrolled mapping was
|
|
* introduced so eDMA and iATU could be accessed via a dedicated registers
|
|
* space.
|
|
*/
|
|
#define PCIE_DMA_VIEWPORT_BASE 0x970
|
|
#define PCIE_DMA_UNROLL_BASE 0x80000
|
|
#define PCIE_DMA_CTRL 0x008
|
|
#define PCIE_DMA_NUM_WR_CHAN GENMASK(3, 0)
|
|
#define PCIE_DMA_NUM_RD_CHAN GENMASK(19, 16)
|
|
|
|
#define PCIE_PL_CHK_REG_CONTROL_STATUS 0xB20
|
|
#define PCIE_PL_CHK_REG_CHK_REG_START BIT(0)
|
|
#define PCIE_PL_CHK_REG_CHK_REG_CONTINUOUS BIT(1)
|
|
#define PCIE_PL_CHK_REG_CHK_REG_COMPARISON_ERROR BIT(16)
|
|
#define PCIE_PL_CHK_REG_CHK_REG_LOGIC_ERROR BIT(17)
|
|
#define PCIE_PL_CHK_REG_CHK_REG_COMPLETE BIT(18)
|
|
|
|
#define PCIE_PL_CHK_REG_ERR_ADDR 0xB28
|
|
|
|
/*
|
|
* 16.0 GT/s (Gen 4) lane margining register definitions
|
|
*/
|
|
#define GEN4_LANE_MARGINING_1_OFF 0xB80
|
|
#define MARGINING_MAX_VOLTAGE_OFFSET GENMASK(29, 24)
|
|
#define MARGINING_NUM_VOLTAGE_STEPS GENMASK(22, 16)
|
|
#define MARGINING_MAX_TIMING_OFFSET GENMASK(13, 8)
|
|
#define MARGINING_NUM_TIMING_STEPS GENMASK(5, 0)
|
|
|
|
#define GEN4_LANE_MARGINING_2_OFF 0xB84
|
|
#define MARGINING_IND_ERROR_SAMPLER BIT(28)
|
|
#define MARGINING_SAMPLE_REPORTING_METHOD BIT(27)
|
|
#define MARGINING_IND_LEFT_RIGHT_TIMING BIT(26)
|
|
#define MARGINING_IND_UP_DOWN_VOLTAGE BIT(25)
|
|
#define MARGINING_VOLTAGE_SUPPORTED BIT(24)
|
|
#define MARGINING_MAXLANES GENMASK(20, 16)
|
|
#define MARGINING_SAMPLE_RATE_TIMING GENMASK(13, 8)
|
|
#define MARGINING_SAMPLE_RATE_VOLTAGE GENMASK(5, 0)
|
|
/*
|
|
* iATU Unroll-specific register definitions
|
|
* From 4.80 core version the address translation will be made by unroll
|
|
*/
|
|
#define PCIE_ATU_UNR_REGION_CTRL1 0x00
|
|
#define PCIE_ATU_UNR_REGION_CTRL2 0x04
|
|
#define PCIE_ATU_UNR_LOWER_BASE 0x08
|
|
#define PCIE_ATU_UNR_UPPER_BASE 0x0C
|
|
#define PCIE_ATU_UNR_LOWER_LIMIT 0x10
|
|
#define PCIE_ATU_UNR_LOWER_TARGET 0x14
|
|
#define PCIE_ATU_UNR_UPPER_TARGET 0x18
|
|
#define PCIE_ATU_UNR_UPPER_LIMIT 0x20
|
|
|
|
/*
|
|
* RAS-DES register definitions
|
|
*/
|
|
#define PCIE_RAS_DES_EVENT_COUNTER_CONTROL 0x8
|
|
#define EVENT_COUNTER_ALL_CLEAR 0x3
|
|
#define EVENT_COUNTER_ENABLE_ALL 0x7
|
|
#define EVENT_COUNTER_ENABLE_SHIFT 2
|
|
#define EVENT_COUNTER_EVENT_SEL_MASK GENMASK(7, 0)
|
|
#define EVENT_COUNTER_EVENT_SEL_SHIFT 16
|
|
#define EVENT_COUNTER_EVENT_Tx_L0S 0x2
|
|
#define EVENT_COUNTER_EVENT_Rx_L0S 0x3
|
|
#define EVENT_COUNTER_EVENT_L1 0x5
|
|
#define EVENT_COUNTER_EVENT_L1_1 0x7
|
|
#define EVENT_COUNTER_EVENT_L1_2 0x8
|
|
#define EVENT_COUNTER_GROUP_SEL_SHIFT 24
|
|
#define EVENT_COUNTER_GROUP_5 0x5
|
|
|
|
#define PCIE_RAS_DES_EVENT_COUNTER_DATA 0xc
|
|
|
|
/* PTM register definitions */
|
|
#define PTM_RES_REQ_CTRL 0x8
|
|
#define PTM_RES_CCONTEXT_VALID BIT(0)
|
|
#define PTM_REQ_AUTO_UPDATE_ENABLED BIT(0)
|
|
#define PTM_REQ_START_UPDATE BIT(1)
|
|
|
|
#define PTM_LOCAL_LSB 0x10
|
|
#define PTM_LOCAL_MSB 0x14
|
|
#define PTM_T1_T2_LSB 0x18
|
|
#define PTM_T1_T2_MSB 0x1c
|
|
#define PTM_T3_T4_LSB 0x28
|
|
#define PTM_T3_T4_MSB 0x2c
|
|
#define PTM_MASTER_LSB 0x38
|
|
#define PTM_MASTER_MSB 0x3c
|
|
|
|
/*
|
|
* The default address offset between dbi_base and atu_base. Root controller
|
|
* drivers are not required to initialize atu_base if the offset matches this
|
|
* default; the driver core automatically derives atu_base from dbi_base using
|
|
* this offset, if atu_base not set.
|
|
*/
|
|
#define DEFAULT_DBI_ATU_OFFSET (0x3 << 20)
|
|
#define DEFAULT_DBI_DMA_OFFSET PCIE_DMA_UNROLL_BASE
|
|
|
|
#define MAX_MSI_IRQS 256
|
|
#define MAX_MSI_IRQS_PER_CTRL 32
|
|
#define MAX_MSI_CTRLS (MAX_MSI_IRQS / MAX_MSI_IRQS_PER_CTRL)
|
|
#define MSI_REG_CTRL_BLOCK_SIZE 12
|
|
#define MSI_DEF_NUM_VECTORS 32
|
|
|
|
/* Maximum number of inbound/outbound iATUs */
|
|
#define MAX_IATU_IN 256
|
|
#define MAX_IATU_OUT 256
|
|
|
|
/* Default eDMA LLP memory size */
|
|
#define DMA_LLP_MEM_SIZE PAGE_SIZE
|
|
|
|
/* Common struct pci_epc_feature bits among DWC EP glue drivers */
|
|
#define DWC_EPC_COMMON_FEATURES .dynamic_inbound_mapping = true, \
|
|
.subrange_mapping = true
|
|
|
|
struct dw_pcie;
|
|
struct dw_pcie_rp;
|
|
struct dw_pcie_ep;
|
|
|
|
/* Operating mode of a DWC PCIe controller instance */
enum dw_pcie_device_mode {
	DW_PCIE_UNKNOWN_TYPE,
	DW_PCIE_EP_TYPE,	/* Endpoint */
	DW_PCIE_LEG_EP_TYPE,	/* Legacy Endpoint */
	DW_PCIE_RC_TYPE,	/* Root Complex */
};
|
|
|
|
/* Application-side clocks bulk-managed via dw_pcie.app_clks */
enum dw_pcie_app_clk {
	DW_PCIE_DBI_CLK,
	DW_PCIE_MSTR_CLK,
	DW_PCIE_SLV_CLK,
	DW_PCIE_NUM_APP_CLKS	/* keep last: array size for app_clks[] */
};
|
|
|
|
/* Core-side clocks bulk-managed via dw_pcie.core_clks */
enum dw_pcie_core_clk {
	DW_PCIE_PIPE_CLK,
	DW_PCIE_CORE_CLK,
	DW_PCIE_AUX_CLK,
	DW_PCIE_REF_CLK,
	DW_PCIE_NUM_CORE_CLKS	/* keep last: array size for core_clks[] */
};
|
|
|
|
/* Application-side resets bulk-managed via dw_pcie.app_rsts */
enum dw_pcie_app_rst {
	DW_PCIE_DBI_RST,
	DW_PCIE_MSTR_RST,
	DW_PCIE_SLV_RST,
	DW_PCIE_NUM_APP_RSTS	/* keep last: array size for app_rsts[] */
};
|
|
|
|
/* Core-side resets bulk-managed via dw_pcie.core_rsts */
enum dw_pcie_core_rst {
	DW_PCIE_NON_STICKY_RST,
	DW_PCIE_STICKY_RST,
	DW_PCIE_CORE_RST,
	DW_PCIE_PIPE_RST,
	DW_PCIE_PHY_RST,
	DW_PCIE_HOT_RST,
	DW_PCIE_PWR_RST,
	DW_PCIE_NUM_CORE_RSTS	/* keep last: array size for core_rsts[] */
};
|
|
|
|
/*
 * LTSSM state as reported by the controller. Values up to 0x23 mirror the
 * hardware encoding read from PCIE_PORT_DEBUG0; the remaining values are
 * software-defined extensions (see the comments below).
 */
enum dw_pcie_ltssm {
	/* Need to align with PCIE_PORT_DEBUG0 bits 0:5 */
	DW_PCIE_LTSSM_DETECT_QUIET = 0x0,
	DW_PCIE_LTSSM_DETECT_ACT = 0x1,
	DW_PCIE_LTSSM_POLL_ACTIVE = 0x2,
	DW_PCIE_LTSSM_POLL_COMPLIANCE = 0x3,
	DW_PCIE_LTSSM_POLL_CONFIG = 0x4,
	DW_PCIE_LTSSM_PRE_DETECT_QUIET = 0x5,
	DW_PCIE_LTSSM_DETECT_WAIT = 0x6,
	DW_PCIE_LTSSM_CFG_LINKWD_START = 0x7,
	DW_PCIE_LTSSM_CFG_LINKWD_ACEPT = 0x8,
	DW_PCIE_LTSSM_CFG_LANENUM_WAI = 0x9,
	DW_PCIE_LTSSM_CFG_LANENUM_ACEPT = 0xa,
	DW_PCIE_LTSSM_CFG_COMPLETE = 0xb,
	DW_PCIE_LTSSM_CFG_IDLE = 0xc,
	DW_PCIE_LTSSM_RCVRY_LOCK = 0xd,
	DW_PCIE_LTSSM_RCVRY_SPEED = 0xe,
	DW_PCIE_LTSSM_RCVRY_RCVRCFG = 0xf,
	DW_PCIE_LTSSM_RCVRY_IDLE = 0x10,
	DW_PCIE_LTSSM_L0 = 0x11,	/* link up, normal operation */
	DW_PCIE_LTSSM_L0S = 0x12,
	DW_PCIE_LTSSM_L123_SEND_EIDLE = 0x13,
	DW_PCIE_LTSSM_L1_IDLE = 0x14,
	DW_PCIE_LTSSM_L2_IDLE = 0x15,
	DW_PCIE_LTSSM_L2_WAKE = 0x16,
	DW_PCIE_LTSSM_DISABLED_ENTRY = 0x17,
	DW_PCIE_LTSSM_DISABLED_IDLE = 0x18,
	DW_PCIE_LTSSM_DISABLED = 0x19,
	DW_PCIE_LTSSM_LPBK_ENTRY = 0x1a,
	DW_PCIE_LTSSM_LPBK_ACTIVE = 0x1b,
	DW_PCIE_LTSSM_LPBK_EXIT = 0x1c,
	DW_PCIE_LTSSM_LPBK_EXIT_TIMEOUT = 0x1d,
	DW_PCIE_LTSSM_HOT_RESET_ENTRY = 0x1e,
	DW_PCIE_LTSSM_HOT_RESET = 0x1f,
	DW_PCIE_LTSSM_RCVRY_EQ0 = 0x20,
	DW_PCIE_LTSSM_RCVRY_EQ1 = 0x21,
	DW_PCIE_LTSSM_RCVRY_EQ2 = 0x22,
	DW_PCIE_LTSSM_RCVRY_EQ3 = 0x23,

	/* Vendor glue drivers provide pseudo L1 substates from get_ltssm() */
	DW_PCIE_LTSSM_L1_1 = 0x141,
	DW_PCIE_LTSSM_L1_2 = 0x142,

	DW_PCIE_LTSSM_UNKNOWN = 0xFFFFFFFF,
};
|
|
|
|
/* Outbound iATU window description consumed by dw_pcie_prog_outbound_atu() */
struct dw_pcie_ob_atu_cfg {
	int index;		/* outbound iATU region index */
	int type;		/* TLP type, one of PCIE_ATU_TYPE_* */
	u8 func_no;		/* physical function number */
	u8 code;		/* presumably Msg code for PCIE_ATU_TYPE_MSG windows — TODO confirm */
	u8 routing;		/* presumably Msg routing for PCIE_ATU_TYPE_MSG windows — TODO confirm */
	u32 ctrl2;		/* extra bits OR'ed into PCIE_ATU_REGION_CTRL2 */
	u64 parent_bus_addr;	/* window input address on the parent bus */
	u64 pci_addr;		/* translated output address on the PCI bus */
	u64 size;		/* window size in bytes */
};
|
|
|
|
/* Hooks a glue (platform) driver may provide to the DWC host core */
struct dw_pcie_host_ops {
	int (*init)(struct dw_pcie_rp *pp);		/* platform setup before host init */
	void (*deinit)(struct dw_pcie_rp *pp);		/* platform teardown */
	void (*post_init)(struct dw_pcie_rp *pp);	/* setup after host init */
	int (*msi_init)(struct dw_pcie_rp *pp);		/* platform-specific MSI setup */
	void (*pme_turn_off)(struct dw_pcie_rp *pp);	/* broadcast PME_Turn_Off on suspend */
};
|
|
|
|
/* Root Port (host mode) state of a DWC PCIe controller */
struct dw_pcie_rp {
	bool use_imsi_rx:1;		/* use the integrated iMSI-RX block as MSI controller */
	bool cfg0_io_shared:1;		/* CFG0 window is time-shared with the IO window */
	u64 cfg0_base;			/* CPU address of the CFG0 space window */
	void __iomem *va_cfg0_base;	/* ioremapped CFG0 window */
	u32 cfg0_size;
	resource_size_t io_base;	/* CPU address of the IO window */
	phys_addr_t io_bus_addr;	/* PCI bus address of the IO window */
	u32 io_size;
	int irq;
	const struct dw_pcie_host_ops *ops;
	int msi_irq[MAX_MSI_CTRLS];	/* one IRQ per MSI controller block */
	struct irq_domain *irq_domain;
	dma_addr_t msi_data;		/* MSI target address programmed into PCIE_MSI_ADDR_* */
	struct irq_chip *msi_irq_chip;
	u32 num_vectors;		/* number of MSI vectors supported */
	u32 irq_mask[MAX_MSI_CTRLS];	/* cached PCIE_MSI_INTR0_MASK per controller */
	struct pci_host_bridge *bridge;
	raw_spinlock_t lock;		/* protects MSI controller register state */
	DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
	bool use_atu_msg;		/* use an iATU MSG window for PME_Turn_Off */
	int msg_atu_index;		/* outbound iATU index of the MSG window */
	struct resource *msg_res;	/* address range backing the MSG window */
	struct pci_eq_presets presets;
	struct pci_config_window *cfg;	/* ECAM config window, when ECAM is used */
	bool ecam_enabled;
	bool native_ecam;
	/*
	 * Skip polling for L2/L3 Ready after PME_Turn_Off (NOTE(review):
	 * commit log calls this "skip_l23_wait" — verify the final name).
	 */
	bool skip_l23_ready;
};
|
|
|
|
/* Hooks a glue (platform) driver may provide to the DWC endpoint core */
struct dw_pcie_ep_ops {
	void (*pre_init)(struct dw_pcie_ep *ep);	/* setup before EP core init */
	void (*init)(struct dw_pcie_ep *ep);		/* setup after EP core init */
	/* Raise an IRQ (INTx/MSI/MSI-X per 'type') toward the host */
	int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
			 unsigned int type, u16 interrupt_num);
	const struct pci_epc_features* (*get_features)(struct dw_pcie_ep *ep);
	/*
	 * Provide a method to implement the different func config space
	 * access for different platform, if different func have different
	 * offset, return the offset of func. if use write a register way
	 * return a 0, and implement code in callback function of platform
	 * driver.
	 */
	unsigned int (*get_dbi_offset)(struct dw_pcie_ep *ep, u8 func_no);
	unsigned int (*get_dbi2_offset)(struct dw_pcie_ep *ep, u8 func_no);
};
|
|
|
|
/* Per physical-function state of a DWC endpoint, linked on dw_pcie_ep.func_list */
struct dw_pcie_ep_func {
	struct list_head list;
	u8 func_no;			/* physical function number */
	u8 msi_cap;	/* MSI capability offset */
	u8 msix_cap;	/* MSI-X capability offset */
	u8 bar_to_atu[PCI_STD_NUM_BARS];	/* inbound iATU index per BAR */
	struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS];

	/* Only for Address Match Mode inbound iATU */
	u32 *ib_atu_indexes[PCI_STD_NUM_BARS];		/* subrange iATU indexes per BAR */
	unsigned int num_ib_atu_indexes[PCI_STD_NUM_BARS];
};
|
|
|
|
/* Endpoint mode state of a DWC PCIe controller */
struct dw_pcie_ep {
	struct pci_epc *epc;
	struct list_head func_list;	/* list of struct dw_pcie_ep_func */
	const struct dw_pcie_ep_ops *ops;
	phys_addr_t phys_base;		/* base of the outbound address space */
	size_t addr_size;		/* size of the outbound address space */
	size_t page_size;		/* alignment of outbound windows */
	phys_addr_t *outbound_addr;	/* CPU address mapped by each outbound window */
	unsigned long *ib_window_map;	/* bitmap of in-use inbound iATU windows */
	unsigned long *ob_window_map;	/* bitmap of in-use outbound iATU windows */
	void __iomem *msi_mem;		/* scratch window used to compose MSI writes */
	phys_addr_t msi_mem_phys;

	/* MSI outbound iATU state */
	bool msi_iatu_mapped;		/* permanent MSI iATU entry is programmed */
	u64 msi_msg_addr;		/* host MSI target address currently mapped */
	size_t msi_map_size;		/* size of the MSI iATU mapping */
};
|
|
|
|
/* Low-level hooks a glue driver may provide to the common DWC core */
struct dw_pcie_ops {
	/* Legacy CPU->iATU address conversion; prefer devicetree ranges */
	u64 (*cpu_addr_fixup)(struct dw_pcie *pcie, u64 cpu_addr);
	/* Custom DBI accessors for platforms with non-standard DBI access */
	u32 (*read_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
			size_t size);
	void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
			  size_t size, u32 val);
	void (*write_dbi2)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
			   size_t size, u32 val);
	bool (*link_up)(struct dw_pcie *pcie);		/* platform link-up query */
	enum dw_pcie_ltssm (*get_ltssm)(struct dw_pcie *pcie);	/* may report pseudo L1 substates */
	int (*start_link)(struct dw_pcie *pcie);	/* begin link training */
	void (*stop_link)(struct dw_pcie *pcie);
};
|
|
|
|
/* Per-controller debugfs state */
struct debugfs_info {
	struct dentry *debug_dir;	/* controller's debugfs directory */
	void *rasdes_info;		/* RAS-DES debugfs private data */
};
|
|
|
|
/* Common state of a Synopsys DesignWare PCIe controller (RC and/or EP mode) */
struct dw_pcie {
	struct device *dev;
	void __iomem *dbi_base;		/* DBI register space */
	resource_size_t dbi_phys_addr;
	void __iomem *dbi_base2;	/* shadow DBI space (BAR masks etc.) */
	void __iomem *atu_base;		/* unrolled iATU register space */
	void __iomem *elbi_base;
	resource_size_t atu_phys_addr;
	size_t atu_size;
	resource_size_t parent_bus_offset;	/* CPU-to-parent-bus address offset */
	u32 num_ib_windows;		/* detected number of inbound iATU windows */
	u32 num_ob_windows;		/* detected number of outbound iATU windows */
	u32 region_align;		/* minimum iATU window alignment */
	u64 region_limit;		/* highest address an iATU window can reach */
	struct dw_pcie_rp pp;		/* Root Port state (host mode) */
	struct dw_pcie_ep ep;		/* Endpoint state (EP mode) */
	const struct dw_pcie_ops *ops;
	u32 version;			/* IP-core version, DW_PCIE_VER_* */
	u32 type;			/* IP-core type from PCIE_VERSION_TYPE */
	unsigned long caps;		/* DW_PCIE_CAP_* bitmap */
	int num_lanes;
	int max_link_speed;
	u8 n_fts[2];			/* N_FTS for Gen1/Gen2 and Gen3+ respectively */
	struct dw_edma_chip edma;
	bool l1ss_support; /* L1 PM Substates support */
	struct clk_bulk_data app_clks[DW_PCIE_NUM_APP_CLKS];
	struct clk_bulk_data core_clks[DW_PCIE_NUM_CORE_CLKS];
	struct reset_control_bulk_data app_rsts[DW_PCIE_NUM_APP_RSTS];
	struct reset_control_bulk_data core_rsts[DW_PCIE_NUM_CORE_RSTS];
	struct gpio_desc *pe_rst;	/* PERST# GPIO */
	bool suspended;
	struct debugfs_info *debugfs;
	enum dw_pcie_device_mode mode;
	u16 ptm_vsec_offset;		/* PTM vendor-specific capability offset */
	struct pci_ptm_debugfs *ptm_debugfs;

	/*
	 * If iATU input addresses are offset from CPU physical addresses,
	 * we previously required .cpu_addr_fixup() to convert them. We
	 * now rely on the devicetree instead. If .cpu_addr_fixup()
	 * exists, we compare its results with devicetree.
	 *
	 * If .cpu_addr_fixup() does not exist, we assume the offset is
	 * zero and warn if devicetree claims otherwise. If we know all
	 * devicetrees correctly describe the offset, set
	 * use_parent_dt_ranges to true to avoid this warning.
	 */
	bool use_parent_dt_ranges;
};
|
|
|
|
#define to_dw_pcie_from_pp(port) container_of((port), struct dw_pcie, pp)
|
|
|
|
#define to_dw_pcie_from_ep(endpoint) \
|
|
container_of((endpoint), struct dw_pcie, ep)
|
|
|
|
int dw_pcie_get_resources(struct dw_pcie *pci);
|
|
|
|
void dw_pcie_version_detect(struct dw_pcie *pci);
|
|
|
|
u8 dw_pcie_find_capability(struct dw_pcie *pci, u8 cap);
|
|
u16 dw_pcie_find_ext_capability(struct dw_pcie *pci, u8 cap);
|
|
void dw_pcie_remove_capability(struct dw_pcie *pci, u8 cap);
|
|
void dw_pcie_remove_ext_capability(struct dw_pcie *pci, u8 cap);
|
|
u16 dw_pcie_find_rasdes_capability(struct dw_pcie *pci);
|
|
u16 dw_pcie_find_ptm_capability(struct dw_pcie *pci);
|
|
|
|
int dw_pcie_read(void __iomem *addr, int size, u32 *val);
|
|
int dw_pcie_write(void __iomem *addr, int size, u32 val);
|
|
|
|
u32 dw_pcie_read_dbi(struct dw_pcie *pci, u32 reg, size_t size);
|
|
void dw_pcie_write_dbi(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
|
|
void dw_pcie_write_dbi2(struct dw_pcie *pci, u32 reg, size_t size, u32 val);
|
|
bool dw_pcie_link_up(struct dw_pcie *pci);
|
|
void dw_pcie_upconfig_setup(struct dw_pcie *pci);
|
|
int dw_pcie_wait_for_link(struct dw_pcie *pci);
|
|
int dw_pcie_link_get_max_link_width(struct dw_pcie *pci);
|
|
int dw_pcie_prog_outbound_atu(struct dw_pcie *pci,
|
|
const struct dw_pcie_ob_atu_cfg *atu);
|
|
int dw_pcie_prog_inbound_atu(struct dw_pcie *pci, int index, int type,
|
|
u64 parent_bus_addr, u64 pci_addr, u64 size);
|
|
int dw_pcie_prog_ep_inbound_atu(struct dw_pcie *pci, u8 func_no, int index,
|
|
int type, u64 parent_bus_addr,
|
|
u8 bar, size_t size);
|
|
void dw_pcie_disable_atu(struct dw_pcie *pci, u32 dir, int index);
|
|
void dw_pcie_hide_unsupported_l1ss(struct dw_pcie *pci);
|
|
void dw_pcie_setup(struct dw_pcie *pci);
|
|
void dw_pcie_iatu_detect(struct dw_pcie *pci);
|
|
int dw_pcie_edma_detect(struct dw_pcie *pci);
|
|
void dw_pcie_edma_remove(struct dw_pcie *pci);
|
|
resource_size_t dw_pcie_parent_bus_offset(struct dw_pcie *pci,
|
|
const char *reg_name,
|
|
resource_size_t cpu_phy_addr);
|
|
|
|
/* 32-bit DBI register write */
static inline void dw_pcie_writel_dbi(struct dw_pcie *pci, u32 reg, u32 val)
{
	dw_pcie_write_dbi(pci, reg, sizeof(u32), val);
}
|
|
|
|
/* 32-bit DBI register read */
static inline u32 dw_pcie_readl_dbi(struct dw_pcie *pci, u32 reg)
{
	return dw_pcie_read_dbi(pci, reg, sizeof(u32));
}
|
|
|
|
/* 16-bit DBI register write */
static inline void dw_pcie_writew_dbi(struct dw_pcie *pci, u32 reg, u16 val)
{
	dw_pcie_write_dbi(pci, reg, sizeof(u16), val);
}
|
|
|
|
/* 16-bit DBI register read */
static inline u16 dw_pcie_readw_dbi(struct dw_pcie *pci, u32 reg)
{
	return dw_pcie_read_dbi(pci, reg, sizeof(u16));
}
|
|
|
|
/* 8-bit DBI register write */
static inline void dw_pcie_writeb_dbi(struct dw_pcie *pci, u32 reg, u8 val)
{
	dw_pcie_write_dbi(pci, reg, sizeof(u8), val);
}
|
|
|
|
/* 8-bit DBI register read */
static inline u8 dw_pcie_readb_dbi(struct dw_pcie *pci, u32 reg)
{
	return dw_pcie_read_dbi(pci, reg, sizeof(u8));
}
|
|
|
|
/* 32-bit write to the shadow (dbi2) register space */
static inline void dw_pcie_writel_dbi2(struct dw_pcie *pci, u32 reg, u32 val)
{
	dw_pcie_write_dbi2(pci, reg, sizeof(u32), val);
}
|
|
|
|
static inline int dw_pcie_read_cfg_byte(struct dw_pcie *pci, int where,
|
|
u8 *val)
|
|
{
|
|
*val = dw_pcie_readb_dbi(pci, where);
|
|
return PCIBIOS_SUCCESSFUL;
|
|
}
|
|
|
|
static inline int dw_pcie_read_cfg_word(struct dw_pcie *pci, int where,
|
|
u16 *val)
|
|
{
|
|
*val = dw_pcie_readw_dbi(pci, where);
|
|
return PCIBIOS_SUCCESSFUL;
|
|
}
|
|
|
|
static inline int dw_pcie_read_cfg_dword(struct dw_pcie *pci, int where,
|
|
u32 *val)
|
|
{
|
|
*val = dw_pcie_readl_dbi(pci, where);
|
|
return PCIBIOS_SUCCESSFUL;
|
|
}
|
|
|
|
static inline unsigned int dw_pcie_ep_get_dbi_offset(struct dw_pcie_ep *ep,
|
|
u8 func_no)
|
|
{
|
|
unsigned int dbi_offset = 0;
|
|
|
|
if (ep->ops->get_dbi_offset)
|
|
dbi_offset = ep->ops->get_dbi_offset(ep, func_no);
|
|
|
|
return dbi_offset;
|
|
}
|
|
|
|
/* DBI read for a given physical function, applying its DBI offset */
static inline u32 dw_pcie_ep_read_dbi(struct dw_pcie_ep *ep, u8 func_no,
				      u32 reg, size_t size)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	return dw_pcie_read_dbi(pci, dw_pcie_ep_get_dbi_offset(ep, func_no) + reg,
				size);
}
|
|
|
|
/* DBI write for a given physical function, applying its DBI offset */
static inline void dw_pcie_ep_write_dbi(struct dw_pcie_ep *ep, u8 func_no,
					u32 reg, size_t size, u32 val)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	dw_pcie_write_dbi(pci, dw_pcie_ep_get_dbi_offset(ep, func_no) + reg,
			  size, val);
}
|
|
|
|
/* 32-bit per-function DBI write */
static inline void dw_pcie_ep_writel_dbi(struct dw_pcie_ep *ep, u8 func_no,
					 u32 reg, u32 val)
{
	dw_pcie_ep_write_dbi(ep, func_no, reg, sizeof(u32), val);
}
|
|
|
|
/* 32-bit per-function DBI read */
static inline u32 dw_pcie_ep_readl_dbi(struct dw_pcie_ep *ep, u8 func_no,
				       u32 reg)
{
	return dw_pcie_ep_read_dbi(ep, func_no, reg, sizeof(u32));
}
|
|
|
|
static inline void dw_pcie_ep_writew_dbi(struct dw_pcie_ep *ep, u8 func_no,
|
|
u32 reg, u16 val)
|
|
{
|
|
dw_pcie_ep_write_dbi(ep, func_no, reg, 0x2, val);
|
|
}
|
|
|
|
static inline u16 dw_pcie_ep_readw_dbi(struct dw_pcie_ep *ep, u8 func_no,
|
|
u32 reg)
|
|
{
|
|
return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x2);
|
|
}
|
|
|
|
static inline void dw_pcie_ep_writeb_dbi(struct dw_pcie_ep *ep, u8 func_no,
|
|
u32 reg, u8 val)
|
|
{
|
|
dw_pcie_ep_write_dbi(ep, func_no, reg, 0x1, val);
|
|
}
|
|
|
|
static inline u8 dw_pcie_ep_readb_dbi(struct dw_pcie_ep *ep, u8 func_no,
|
|
u32 reg)
|
|
{
|
|
return dw_pcie_ep_read_dbi(ep, func_no, reg, 0x1);
|
|
}
|
|
|
|
/* Per-function config space byte read via DBI; always succeeds */
static inline int dw_pcie_ep_read_cfg_byte(struct dw_pcie_ep *ep, u8 func_no,
					   int where, u8 *val)
{
	*val = dw_pcie_ep_read_dbi(ep, func_no, where, sizeof(u8));
	return PCIBIOS_SUCCESSFUL;
}
|
|
|
|
/* Per-function config space word read via DBI; always succeeds */
static inline int dw_pcie_ep_read_cfg_word(struct dw_pcie_ep *ep, u8 func_no,
					   int where, u16 *val)
{
	*val = dw_pcie_ep_read_dbi(ep, func_no, where, sizeof(u16));
	return PCIBIOS_SUCCESSFUL;
}
|
|
|
|
/* Per-function config space dword read via DBI; always succeeds */
static inline int dw_pcie_ep_read_cfg_dword(struct dw_pcie_ep *ep, u8 func_no,
					    int where, u32 *val)
{
	*val = dw_pcie_ep_read_dbi(ep, func_no, where, sizeof(u32));
	return PCIBIOS_SUCCESSFUL;
}
|
|
|
|
static inline unsigned int dw_pcie_ep_get_dbi2_offset(struct dw_pcie_ep *ep,
|
|
u8 func_no)
|
|
{
|
|
unsigned int dbi2_offset = 0;
|
|
|
|
if (ep->ops->get_dbi2_offset)
|
|
dbi2_offset = ep->ops->get_dbi2_offset(ep, func_no);
|
|
else if (ep->ops->get_dbi_offset) /* for backward compatibility */
|
|
dbi2_offset = ep->ops->get_dbi_offset(ep, func_no);
|
|
|
|
return dbi2_offset;
|
|
}
|
|
|
|
/*
 * Write @val (@size bytes wide) to register @reg in the DBI2 (shadow)
 * space of endpoint function @func_no, applying that function's DBI2
 * offset.
 */
static inline void dw_pcie_ep_write_dbi2(struct dw_pcie_ep *ep, u8 func_no,
					 u32 reg, size_t size, u32 val)
{
	struct dw_pcie *pci = to_dw_pcie_from_ep(ep);

	dw_pcie_write_dbi2(pci, dw_pcie_ep_get_dbi2_offset(ep, func_no) + reg,
			   size, val);
}
|
|
|
|
/* 32-bit DBI2 (shadow) write for endpoint function @func_no. */
static inline void dw_pcie_ep_writel_dbi2(struct dw_pcie_ep *ep, u8 func_no,
					  u32 reg, u32 val)
{
	dw_pcie_ep_write_dbi2(ep, func_no, reg, sizeof(u32), val);
}
|
|
|
|
static inline void dw_pcie_dbi_ro_wr_en(struct dw_pcie *pci)
|
|
{
|
|
u32 reg;
|
|
u32 val;
|
|
|
|
reg = PCIE_MISC_CONTROL_1_OFF;
|
|
val = dw_pcie_readl_dbi(pci, reg);
|
|
val |= PCIE_DBI_RO_WR_EN;
|
|
dw_pcie_writel_dbi(pci, reg, val);
|
|
}
|
|
|
|
static inline void dw_pcie_dbi_ro_wr_dis(struct dw_pcie *pci)
|
|
{
|
|
u32 reg;
|
|
u32 val;
|
|
|
|
reg = PCIE_MISC_CONTROL_1_OFF;
|
|
val = dw_pcie_readl_dbi(pci, reg);
|
|
val &= ~PCIE_DBI_RO_WR_EN;
|
|
dw_pcie_writel_dbi(pci, reg, val);
|
|
}
|
|
|
|
static inline int dw_pcie_start_link(struct dw_pcie *pci)
|
|
{
|
|
if (pci->ops && pci->ops->start_link)
|
|
return pci->ops->start_link(pci);
|
|
|
|
return 0;
|
|
}
|
|
|
|
static inline void dw_pcie_stop_link(struct dw_pcie *pci)
|
|
{
|
|
if (pci->ops && pci->ops->stop_link)
|
|
pci->ops->stop_link(pci);
|
|
}
|
|
|
|
static inline enum dw_pcie_ltssm dw_pcie_get_ltssm(struct dw_pcie *pci)
|
|
{
|
|
u32 val;
|
|
|
|
if (pci->ops && pci->ops->get_ltssm)
|
|
return pci->ops->get_ltssm(pci);
|
|
|
|
val = dw_pcie_readl_dbi(pci, PCIE_PORT_DEBUG0);
|
|
|
|
return (enum dw_pcie_ltssm)FIELD_GET(PORT_LOGIC_LTSSM_STATE_MASK, val);
|
|
}
|
|
|
|
const char *dw_pcie_ltssm_status_string(enum dw_pcie_ltssm ltssm);
|
|
|
|
#ifdef CONFIG_PCIE_DW_HOST
|
|
int dw_pcie_suspend_noirq(struct dw_pcie *pci);
|
|
int dw_pcie_resume_noirq(struct dw_pcie *pci);
|
|
irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp);
|
|
void dw_pcie_msi_init(struct dw_pcie_rp *pp);
|
|
int dw_pcie_msi_host_init(struct dw_pcie_rp *pp);
|
|
void dw_pcie_free_msi(struct dw_pcie_rp *pp);
|
|
int dw_pcie_setup_rc(struct dw_pcie_rp *pp);
|
|
int dw_pcie_host_init(struct dw_pcie_rp *pp);
|
|
void dw_pcie_host_deinit(struct dw_pcie_rp *pp);
|
|
int dw_pcie_allocate_domains(struct dw_pcie_rp *pp);
|
|
void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus, unsigned int devfn,
|
|
int where);
|
|
#else
|
|
/*
 * Stubs used when CONFIG_PCIE_DW_HOST is disabled: host (Root Port)
 * support is compiled out, so these fall back to harmless defaults.
 */

/* No host suspend handling needed; report success. */
static inline int dw_pcie_suspend_noirq(struct dw_pcie *pci)
{
	return 0;
}

/* No host resume handling needed; report success. */
static inline int dw_pcie_resume_noirq(struct dw_pcie *pci)
{
	return 0;
}

/* No MSI controller present; the interrupt was not handled here. */
static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
{
	return IRQ_NONE;
}

/* MSI initialization is a no-op without host support. */
static inline void dw_pcie_msi_init(struct dw_pcie_rp *pp)
{ }

/* MSI host setup cannot succeed without host support. */
static inline int dw_pcie_msi_host_init(struct dw_pcie_rp *pp)
{
	return -ENODEV;
}

/* Nothing to free; MSI resources were never allocated. */
static inline void dw_pcie_free_msi(struct dw_pcie_rp *pp)
{ }

/* Root Complex setup is a no-op; report success. */
static inline int dw_pcie_setup_rc(struct dw_pcie_rp *pp)
{
	return 0;
}

/* Host initialization is a no-op; report success. */
static inline int dw_pcie_host_init(struct dw_pcie_rp *pp)
{
	return 0;
}

/* Host teardown is a no-op. */
static inline void dw_pcie_host_deinit(struct dw_pcie_rp *pp)
{
}

/* No IRQ domains to allocate; report success. */
static inline int dw_pcie_allocate_domains(struct dw_pcie_rp *pp)
{
	return 0;
}

/* No own config space to map without host support. */
static inline void __iomem *dw_pcie_own_conf_map_bus(struct pci_bus *bus,
						     unsigned int devfn,
						     int where)
{
	return NULL;
}
|
|
#endif
|
|
|
|
#ifdef CONFIG_PCIE_DW_EP
|
|
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
|
|
void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep);
|
|
int dw_pcie_ep_init(struct dw_pcie_ep *ep);
|
|
int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep);
|
|
void dw_pcie_ep_deinit(struct dw_pcie_ep *ep);
|
|
void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep);
|
|
int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no);
|
|
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
|
|
u8 interrupt_num);
|
|
int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
|
|
u16 interrupt_num);
|
|
int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep, u8 func_no,
|
|
u16 interrupt_num);
|
|
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
|
|
struct dw_pcie_ep_func *
|
|
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no);
|
|
#else
|
|
/*
 * Stubs used when CONFIG_PCIE_DW_EP is disabled: endpoint-mode support is
 * compiled out, so these fall back to harmless defaults.
 */

/* Link-up notification is a no-op without endpoint support. */
static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
}

/* Link-down notification is a no-op without endpoint support. */
static inline void dw_pcie_ep_linkdown(struct dw_pcie_ep *ep)
{
}

/* Endpoint initialization is a no-op; report success. */
static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
{
	return 0;
}

/* Endpoint register setup is a no-op; report success. */
static inline int dw_pcie_ep_init_registers(struct dw_pcie_ep *ep)
{
	return 0;
}

/* Endpoint teardown is a no-op. */
static inline void dw_pcie_ep_deinit(struct dw_pcie_ep *ep)
{
}

/* Endpoint cleanup is a no-op. */
static inline void dw_pcie_ep_cleanup(struct dw_pcie_ep *ep)
{
}

/* No INTx IRQ can be raised; report success as a no-op. */
static inline int dw_pcie_ep_raise_intx_irq(struct dw_pcie_ep *ep, u8 func_no)
{
	return 0;
}

/* No MSI IRQ can be raised; report success as a no-op. */
static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
					   u8 interrupt_num)
{
	return 0;
}

/* No MSI-X IRQ can be raised; report success as a no-op. */
static inline int dw_pcie_ep_raise_msix_irq(struct dw_pcie_ep *ep, u8 func_no,
					    u16 interrupt_num)
{
	return 0;
}

/* No MSI-X doorbell IRQ can be raised; report success as a no-op. */
static inline int dw_pcie_ep_raise_msix_irq_doorbell(struct dw_pcie_ep *ep,
						     u8 func_no,
						     u16 interrupt_num)
{
	return 0;
}

/* BAR reset is a no-op without endpoint support. */
static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
}

/* No per-function endpoint state exists; nothing to look up. */
static inline struct dw_pcie_ep_func *
dw_pcie_ep_get_func_from_ep(struct dw_pcie_ep *ep, u8 func_no)
{
	return NULL;
}
|
|
#endif
|
|
|
|
#ifdef CONFIG_PCIE_DW_DEBUGFS
|
|
void dwc_pcie_debugfs_init(struct dw_pcie *pci, enum dw_pcie_device_mode mode);
|
|
void dwc_pcie_debugfs_deinit(struct dw_pcie *pci);
|
|
#else
|
|
/*
 * Stubs used when CONFIG_PCIE_DW_DEBUGFS is disabled: no debugfs entries
 * are created or removed.
 */
static inline void dwc_pcie_debugfs_init(struct dw_pcie *pci,
					 enum dw_pcie_device_mode mode)
{
}

static inline void dwc_pcie_debugfs_deinit(struct dw_pcie *pci)
{
}
|
|
#endif
|
|
|
|
#endif /* _PCIE_DESIGNWARE_H */
|