Merge branch '6.15/scsi-fixes' into 6.16/scsi-staging
Pull in fixes from 6.15 and resolve a few conflicts so we can have a
clean base for UFS patches.

Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
commit 3c400df513
@@ -1605,6 +1605,38 @@ Description:
 
 		The attribute is read/write.
 
+What:		/sys/bus/platform/drivers/ufshcd/*/device_lvl_exception_count
+What:		/sys/bus/platform/devices/*.ufs/device_lvl_exception_count
+Date:		March 2025
+Contact:	Bao D. Nguyen <quic_nguyenb@quicinc.com>
+Description:
+		This attribute is applicable to UFS devices compliant
+		with the JEDEC specification version 4.1 or later. The
+		device_lvl_exception_count attribute is a counter of the
+		number of times a device level exception has occurred
+		since the counter was last reset. Writing 0 to this
+		attribute resets the device_lvl_exception_count. If the
+		device_lvl_exception_count reads a positive value, the
+		user application should read the device_lvl_exception_id
+		attribute to learn more about the exception.
+
+		The attribute is read/write.
+
+What:		/sys/bus/platform/drivers/ufshcd/*/device_lvl_exception_id
+What:		/sys/bus/platform/devices/*.ufs/device_lvl_exception_id
+Date:		March 2025
+Contact:	Bao D. Nguyen <quic_nguyenb@quicinc.com>
+Description:
+		Reading device_lvl_exception_id returns the
+		qDeviceLevelExceptionID attribute defined by the UFS
+		device JEDEC specification version 4.1. The content of
+		qDeviceLevelExceptionID is vendor specific. Refer to the
+		device manufacturer's datasheet for more information on
+		the meaning of the qDeviceLevelExceptionID attribute
+		value.
+
+		The attribute is read only.
+
 What:		/sys/bus/platform/drivers/ufshcd/*/wb_resize_enable
 What:		/sys/bus/platform/devices/*.ufs/wb_resize_enable
 Date:		April 2025
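Note: a minimal user-space sketch of the flow the two new ABI entries describe — read the counter, fetch the ID while the counter is non-zero, then write 0 to reset. The device path is illustrative (any *.ufs platform device would do) and error handling is trimmed.

#include <stdio.h>

/* illustrative device path; substitute the real *.ufs platform device */
#define UFS_SYSFS "/sys/bus/platform/devices/1d84000.ufs"

static int read_attr(const char *name, unsigned long long *val)
{
	char path[256];
	FILE *f;
	int ok;

	snprintf(path, sizeof(path), UFS_SYSFS "/%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	ok = (fscanf(f, "%llu", val) == 1);
	fclose(f);
	return ok ? 0 : -1;
}

static int reset_count(void)
{
	FILE *f = fopen(UFS_SYSFS "/device_lvl_exception_count", "w");

	if (!f)
		return -1;
	fputs("0\n", f);	/* writing 0 resets the counter, per the ABI text */
	fclose(f);
	return 0;
}

int main(void)
{
	unsigned long long count, id;

	if (read_attr("device_lvl_exception_count", &count))
		return 1;
	if (count) {
		if (read_attr("device_lvl_exception_id", &id) == 0)
			printf("%llu exception(s), last qDeviceLevelExceptionID %llu\n",
			       count, id);
		reset_count();
	}
	return 0;
}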
@@ -943,8 +943,28 @@ static void hisi_sas_phyup_work_common(struct work_struct *work,
 		container_of(work, typeof(*phy), works[event]);
 	struct hisi_hba *hisi_hba = phy->hisi_hba;
 	struct asd_sas_phy *sas_phy = &phy->sas_phy;
+	struct asd_sas_port *sas_port = sas_phy->port;
+	struct hisi_sas_port *port = phy->port;
 	struct device *dev = hisi_hba->dev;
+	struct domain_device *port_dev;
 	int phy_no = sas_phy->id;
 
+	if (!test_bit(HISI_SAS_RESETTING_BIT, &hisi_hba->flags) &&
+	    sas_port && port && (port->id != phy->port_id)) {
+		dev_info(dev, "phy%d's hw port id changed from %d to %llu\n",
+				phy_no, port->id, phy->port_id);
+		port_dev = sas_port->port_dev;
+		if (port_dev && !dev_is_expander(port_dev->dev_type)) {
+			/*
+			 * Set the device state to gone to block
+			 * sending IO to the device.
+			 */
+			set_bit(SAS_DEV_GONE, &port_dev->state);
+			hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
+			return;
+		}
+	}
+
 	phy->wait_phyup_cnt = 0;
 	if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
 		hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
@@ -2501,6 +2501,7 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
 	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
 	struct sas_ata_task *ata_task = &task->ata_task;
 	struct sas_tmf_task *tmf = slot->tmf;
+	int phy_id;
 	u8 *buf_cmd;
 	int has_data = 0, hdr_tag = 0;
 	u32 dw0, dw1 = 0, dw2 = 0;
@@ -2508,10 +2509,14 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba,
 	/* create header */
 	/* dw0 */
 	dw0 = port->id << CMD_HDR_PORT_OFF;
-	if (parent_dev && dev_is_expander(parent_dev->dev_type))
+	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
 		dw0 |= 3 << CMD_HDR_CMD_OFF;
-	else
+	} else {
+		phy_id = device->phy->identify.phy_identifier;
+		dw0 |= (1U << phy_id) << CMD_HDR_PHY_ID_OFF;
+		dw0 |= CMD_HDR_FORCE_PHY_MSK;
 		dw0 |= 4 << CMD_HDR_CMD_OFF;
+	}
 
 	if (tmf && ata_task->force_phy) {
 		dw0 |= CMD_HDR_FORCE_PHY_MSK;
@@ -359,6 +359,10 @@
 #define CMD_HDR_RESP_REPORT_MSK	(0x1 << CMD_HDR_RESP_REPORT_OFF)
 #define CMD_HDR_TLR_CTRL_OFF	6
 #define CMD_HDR_TLR_CTRL_MSK	(0x3 << CMD_HDR_TLR_CTRL_OFF)
+#define CMD_HDR_PHY_ID_OFF	8
+#define CMD_HDR_PHY_ID_MSK	(0x1ff << CMD_HDR_PHY_ID_OFF)
+#define CMD_HDR_FORCE_PHY_OFF	17
+#define CMD_HDR_FORCE_PHY_MSK	(0x1U << CMD_HDR_FORCE_PHY_OFF)
 #define CMD_HDR_PORT_OFF	18
 #define CMD_HDR_PORT_MSK	(0xf << CMD_HDR_PORT_OFF)
 #define CMD_HDR_PRIORITY_OFF	27
@@ -1477,15 +1481,21 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
 	struct hisi_sas_cmd_hdr *hdr = slot->cmd_hdr;
 	struct asd_sas_port *sas_port = device->port;
 	struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
+	int phy_id;
 	u8 *buf_cmd;
 	int has_data = 0, hdr_tag = 0;
 	u32 dw1 = 0, dw2 = 0;
 
 	hdr->dw0 = cpu_to_le32(port->id << CMD_HDR_PORT_OFF);
-	if (parent_dev && dev_is_expander(parent_dev->dev_type))
+	if (parent_dev && dev_is_expander(parent_dev->dev_type)) {
 		hdr->dw0 |= cpu_to_le32(3 << CMD_HDR_CMD_OFF);
-	else
+	} else {
+		phy_id = device->phy->identify.phy_identifier;
+		hdr->dw0 |= cpu_to_le32((1U << phy_id)
+				<< CMD_HDR_PHY_ID_OFF);
+		hdr->dw0 |= CMD_HDR_FORCE_PHY_MSK;
+		hdr->dw0 |= cpu_to_le32(4U << CMD_HDR_CMD_OFF);
+	}
 
 	switch (task->data_dir) {
 	case DMA_TO_DEVICE:
@@ -23,8 +23,8 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION				"07.727.03.00-rc1"
-#define MEGASAS_RELDATE				"Oct 03, 2023"
+#define MEGASAS_VERSION				"07.734.00.00-rc1"
+#define MEGASAS_RELDATE				"Apr 03, 2025"
 
 #define MEGASAS_MSIX_NAME_LEN			32
@@ -2103,6 +2103,9 @@ static int megasas_sdev_configure(struct scsi_device *sdev,
 	/* This sdev property may change post OCR */
 	megasas_set_dynamic_target_properties(sdev, lim, is_target_prop);
 
+	if (!MEGASAS_IS_LOGICAL(sdev))
+		sdev->no_vpd_size = 1;
+
 	mutex_unlock(&instance->reset_mutex);
 
 	return 0;
@@ -3662,8 +3665,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd,
 
 	case MFI_STAT_SCSI_IO_FAILED:
 	case MFI_STAT_LD_INIT_IN_PROGRESS:
-		cmd->scmd->result =
-			(DID_ERROR << 16) | hdr->scsi_status;
+		if (hdr->scsi_status == 0xf0)
+			cmd->scmd->result = (DID_ERROR << 16) | SAM_STAT_CHECK_CONDITION;
+		else
+			cmd->scmd->result = (DID_ERROR << 16) | hdr->scsi_status;
 		break;
 
 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
@@ -2043,7 +2043,10 @@ map_cmd_status(struct fusion_context *fusion,
 
 	case MFI_STAT_SCSI_IO_FAILED:
 	case MFI_STAT_LD_INIT_IN_PROGRESS:
-		scmd->result = (DID_ERROR << 16) | ext_status;
+		if (ext_status == 0xf0)
+			scmd->result = (DID_ERROR << 16) | SAM_STAT_CHECK_CONDITION;
+		else
+			scmd->result = (DID_ERROR << 16) | ext_status;
 		break;
 
 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
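Both megaraid hunks apply the same rule, so a standalone illustration (not driver code) may help: the firmware reports 0xf0 as a private "check condition" marker, which is not a valid SAM status byte, so it is rewritten to SAM_STAT_CHECK_CONDITION (0x02); any other status is passed through with the DID_ERROR host byte. The constants below mirror the kernel's values.

#include <stdio.h>

#define DID_ERROR			0x07	/* host byte: internal error */
#define SAM_STAT_CHECK_CONDITION	0x02	/* SAM-defined status byte */

/* mirror of the fixed mapping in megasas_complete_cmd()/map_cmd_status() */
static unsigned int map_result(unsigned char fw_scsi_status)
{
	if (fw_scsi_status == 0xf0)	/* firmware-private value, not valid SAM */
		return (DID_ERROR << 16) | SAM_STAT_CHECK_CONDITION;
	return (DID_ERROR << 16) | fw_scsi_status;
}

int main(void)
{
	printf("0xf0 -> 0x%06x\n", map_result(0xf0));	/* 0x070002 */
	printf("0x08 -> 0x%06x\n", map_result(0x08));	/* 0x070008 (BUSY) */
	return 0;
}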
@@ -174,6 +174,9 @@ static void mpi3mr_print_event_data(struct mpi3mr_ioc *mrioc,
 	char *desc = NULL;
 	u16 event;
 
+	if (!(mrioc->logging_level & MPI3_DEBUG_EVENT))
+		return;
+
 	event = event_reply->event;
 
 	switch (event) {
@@ -451,6 +454,7 @@ int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc)
 		return 0;
 	}
 
+	atomic_set(&mrioc->admin_pend_isr, 0);
 	reply_desc = (struct mpi3_default_reply_descriptor *)mrioc->admin_reply_base +
 	    admin_reply_ci;
@@ -565,7 +569,7 @@ int mpi3mr_process_op_reply_q(struct mpi3mr_ioc *mrioc,
 		WRITE_ONCE(op_req_q->ci, le16_to_cpu(reply_desc->request_queue_ci));
 		mpi3mr_process_op_reply_desc(mrioc, reply_desc, &reply_dma,
 		    reply_qidx);
-		atomic_dec(&op_reply_q->pend_ios);
+
 		if (reply_dma)
			mpi3mr_repost_reply_buf(mrioc, reply_dma);
 		num_op_reply++;
@@ -2925,6 +2929,7 @@ static int mpi3mr_setup_admin_qpair(struct mpi3mr_ioc *mrioc)
 	mrioc->admin_reply_ci = 0;
 	mrioc->admin_reply_ephase = 1;
 	atomic_set(&mrioc->admin_reply_q_in_use, 0);
+	atomic_set(&mrioc->admin_pend_isr, 0);
 
 	if (!mrioc->admin_req_base) {
 		mrioc->admin_req_base = dma_alloc_coherent(&mrioc->pdev->dev,
@@ -4653,6 +4658,7 @@ void mpi3mr_memset_buffers(struct mpi3mr_ioc *mrioc)
 	if (mrioc->admin_reply_base)
 		memset(mrioc->admin_reply_base, 0, mrioc->admin_reply_q_sz);
 	atomic_set(&mrioc->admin_reply_q_in_use, 0);
+	atomic_set(&mrioc->admin_pend_isr, 0);
 
 	if (mrioc->init_cmds.reply) {
 		memset(mrioc->init_cmds.reply, 0, sizeof(*mrioc->init_cmds.reply));
@@ -766,6 +766,7 @@ static void pm8001_dev_gone_notify(struct domain_device *dev)
 			spin_lock_irqsave(&pm8001_ha->lock, flags);
 		}
 		PM8001_CHIP_DISP->dereg_dev_req(pm8001_ha, device_id);
+		pm8001_ha->phy[pm8001_dev->attached_phy].phy_attached = 0;
 		pm8001_free_dev(pm8001_dev);
 	} else {
 		pm8001_dbg(pm8001_ha, DISC, "Found dev has gone.\n");
@@ -1253,8 +1253,12 @@ EXPORT_SYMBOL_GPL(scsi_alloc_request);
  */
 static void scsi_cleanup_rq(struct request *rq)
 {
+	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+
+	cmd->flags = 0;
+
 	if (rq->rq_flags & RQF_DONTPREP) {
-		scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
+		scsi_mq_uninit_cmd(cmd);
 		rq->rq_flags &= ~RQF_DONTPREP;
 	}
 }
@@ -3182,11 +3182,14 @@ iscsi_set_host_param(struct iscsi_transport *transport,
 	}
 
 	/* see similar check in iscsi_if_set_param() */
-	if (strlen(data) > ev->u.set_host_param.len)
-		return -EINVAL;
+	if (strlen(data) > ev->u.set_host_param.len) {
+		err = -EINVAL;
+		goto out;
+	}
 
 	err = transport->set_host_param(shost, ev->u.set_host_param.param,
 					data, ev->u.set_host_param.len);
 out:
 	scsi_host_put(shost);
 	return err;
 }
@@ -388,7 +388,7 @@ static void srp_reconnect_work(struct work_struct *work)
 			"reconnect attempt %d failed (%d)\n",
 			++rport->failed_reconnects, res);
 		delay = rport->reconnect_delay *
-			min(100, max(1, rport->failed_reconnects - 10));
+			clamp(rport->failed_reconnects - 10, 1, 100);
 		if (delay > 0)
 			queue_delayed_work(system_long_wq,
 					   &rport->reconnect_work, delay * HZ);
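The srp change is behavior-preserving: clamp(x, lo, hi) is min(hi, max(lo, x)) for lo <= hi, so the computed delay is unchanged and merely easier to read. A standalone sketch (local macros standing in for the kernel's min()/max()/clamp()):

#include <stdio.h>

#define MAX(a, b)		((a) > (b) ? (a) : (b))
#define MIN(a, b)		((a) < (b) ? (a) : (b))
#define CLAMP(x, lo, hi)	MIN((hi), MAX((lo), (x)))

int main(void)
{
	int reconnect_delay = 10;	/* seconds, illustrative */
	int failed_reconnects;

	for (failed_reconnects = 1; failed_reconnects <= 121; failed_reconnects += 30) {
		int old = reconnect_delay * MIN(100, MAX(1, failed_reconnects - 10));
		int new = reconnect_delay * CLAMP(failed_reconnects - 10, 1, 100);

		/* both columns always agree */
		printf("attempt %3d: old=%5d new=%5d\n", failed_reconnects, old, new);
	}
	return 0;
}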
@@ -19,6 +19,7 @@
 #include <linux/bcd.h>
 #include <linux/reboot.h>
 #include <linux/cciss_ioctl.h>
+#include <linux/crash_dump.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -5246,7 +5247,7 @@ static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
 	ctrl_info->error_buffer_length =
 		ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
 
-	if (reset_devices)
+	if (is_kdump_kernel())
 		max_transfer_size = min(ctrl_info->max_transfer_size,
 			PQI_MAX_TRANSFER_SIZE_KDUMP);
 	else
@@ -5275,7 +5276,7 @@ static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
 	u16 num_elements_per_iq;
 	u16 num_elements_per_oq;
 
-	if (reset_devices) {
+	if (is_kdump_kernel()) {
 		num_queue_groups = 1;
 	} else {
 		int num_cpus;
@@ -8288,12 +8289,12 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
 	u32 product_id;
 
 	if (reset_devices) {
-		if (pqi_is_fw_triage_supported(ctrl_info)) {
+		if (is_kdump_kernel() && pqi_is_fw_triage_supported(ctrl_info)) {
 			rc = sis_wait_for_fw_triage_completion(ctrl_info);
 			if (rc)
 				return rc;
 		}
-		if (sis_is_ctrl_logging_supported(ctrl_info)) {
+		if (is_kdump_kernel() && sis_is_ctrl_logging_supported(ctrl_info)) {
 			sis_notify_kdump(ctrl_info);
 			rc = sis_wait_for_ctrl_logging_completion(ctrl_info);
 			if (rc)
@@ -8344,7 +8345,7 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
 	ctrl_info->product_id = (u8)product_id;
 	ctrl_info->product_revision = (u8)(product_id >> 8);
 
-	if (reset_devices) {
+	if (is_kdump_kernel()) {
 		if (ctrl_info->max_outstanding_requests >
 			PQI_MAX_OUTSTANDING_REQUESTS_KDUMP)
 			ctrl_info->max_outstanding_requests =
@@ -8480,7 +8481,7 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
 	if (rc)
 		return rc;
 
-	if (ctrl_info->ctrl_logging_supported && !reset_devices) {
+	if (ctrl_info->ctrl_logging_supported && !is_kdump_kernel()) {
 		pqi_host_setup_buffer(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_CTRL_LOG_TOTAL_SIZE, PQI_CTRL_LOG_MIN_SIZE);
 		pqi_host_memory_update(ctrl_info, &ctrl_info->ctrl_log_memory, PQI_VENDOR_GENERAL_CTRL_LOG_MEMORY_UPDATE);
 	}
@@ -4263,8 +4263,8 @@ int iscsit_close_connection(
 	spin_unlock(&iscsit_global->ts_bitmap_lock);
 
 	iscsit_stop_timers_for_cmds(conn);
-	iscsit_stop_nopin_response_timer(conn);
 	iscsit_stop_nopin_timer(conn);
+	iscsit_stop_nopin_response_timer(conn);
 
 	if (conn->conn_transport->iscsit_wait_conn)
 		conn->conn_transport->iscsit_wait_conn(conn);
@@ -677,13 +677,6 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
 	unsigned long flags;
 	int err;
 
-	if (!ufshcd_cmd_inflight(lrbp->cmd)) {
-		dev_err(hba->dev,
-			"%s: skip abort. cmd at tag %d already completed.\n",
-			__func__, tag);
-		return FAILED;
-	}
-
 	/* Skip task abort in case previous aborts failed and report failure */
 	if (lrbp->req_abort_skip) {
 		dev_err(hba->dev, "%s: skip abort. tag %d failed earlier\n",
@@ -692,6 +685,11 @@ int ufshcd_mcq_abort(struct scsi_cmnd *cmd)
 	}
 
 	hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+	if (!hwq) {
+		dev_err(hba->dev, "%s: skip abort. cmd at tag %d already completed.\n",
+			__func__, tag);
+		return FAILED;
+	}
 
 	if (ufshcd_mcq_sqe_search(hba, hwq, tag)) {
 		/*
@@ -534,6 +534,56 @@ static ssize_t critical_health_show(struct device *dev,
 	return sysfs_emit(buf, "%d\n", hba->critical_health_count);
 }
 
+static ssize_t device_lvl_exception_count_show(struct device *dev,
+					       struct device_attribute *attr,
+					       char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	if (hba->dev_info.wspecversion < 0x410)
+		return -EOPNOTSUPP;
+
+	return sysfs_emit(buf, "%u\n", atomic_read(&hba->dev_lvl_exception_count));
+}
+
+static ssize_t device_lvl_exception_count_store(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned int value;
+
+	if (kstrtouint(buf, 0, &value))
+		return -EINVAL;
+
+	/* the only supported usecase is to reset the dev_lvl_exception_count */
+	if (value)
+		return -EINVAL;
+
+	atomic_set(&hba->dev_lvl_exception_count, 0);
+
+	return count;
+}
+
+static ssize_t device_lvl_exception_id_show(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	u64 exception_id;
+	int err;
+
+	ufshcd_rpm_get_sync(hba);
+	err = ufshcd_read_device_lvl_exception_id(hba, &exception_id);
+	ufshcd_rpm_put_sync(hba);
+
+	if (err)
+		return err;
+
+	hba->dev_lvl_exception_id = exception_id;
+	return sysfs_emit(buf, "%llu\n", exception_id);
+}
+
 static DEVICE_ATTR_RW(rpm_lvl);
 static DEVICE_ATTR_RO(rpm_target_dev_state);
 static DEVICE_ATTR_RO(rpm_target_link_state);
@@ -548,6 +598,8 @@ static DEVICE_ATTR_WO(wb_resize_enable);
 static DEVICE_ATTR_RW(rtc_update_ms);
 static DEVICE_ATTR_RW(pm_qos_enable);
 static DEVICE_ATTR_RO(critical_health);
+static DEVICE_ATTR_RW(device_lvl_exception_count);
+static DEVICE_ATTR_RO(device_lvl_exception_id);
 
 static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
 	&dev_attr_rpm_lvl.attr,
@@ -564,6 +616,8 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
 	&dev_attr_rtc_update_ms.attr,
 	&dev_attr_pm_qos_enable.attr,
 	&dev_attr_critical_health.attr,
+	&dev_attr_device_lvl_exception_count.attr,
+	&dev_attr_device_lvl_exception_id.attr,
 	NULL
 };
@@ -94,6 +94,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
 			     enum query_opcode desc_op);
 
 int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
+int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id);
 
 /* Wrapper functions for safely calling variant operations */
 static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
@@ -278,6 +278,7 @@ static const struct ufs_dev_quirk ufs_fixups[] = {
 	  .model = UFS_ANY_MODEL,
 	  .quirk = UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
 		   UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE |
+		   UFS_DEVICE_QUIRK_PA_HIBER8TIME |
 		   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS },
 	{ .wmanufacturerid = UFS_VENDOR_SKHYNIX,
 	  .model = UFS_ANY_MODEL,
@@ -3173,16 +3174,10 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
 	int err;
 
 retry:
-	time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
+	time_left = wait_for_completion_timeout(&hba->dev_cmd.complete,
 						time_left);
 
 	if (likely(time_left)) {
-		/*
-		 * The completion handler called complete() and the caller of
-		 * this function still owns the @lrbp tag so the code below does
-		 * not trigger any race conditions.
-		 */
-		hba->dev_cmd.complete = NULL;
 		err = ufshcd_get_tr_ocs(lrbp, NULL);
 		if (!err)
 			err = ufshcd_dev_cmd_completion(hba, lrbp);
@@ -3196,7 +3191,6 @@ retry:
 		/* successfully cleared the command, retry if needed */
 		if (ufshcd_clear_cmd(hba, lrbp->task_tag) == 0)
 			err = -EAGAIN;
-		hba->dev_cmd.complete = NULL;
 		return err;
 	}
@@ -3212,11 +3206,9 @@ retry:
 		spin_lock_irqsave(&hba->outstanding_lock, flags);
 		pending = test_bit(lrbp->task_tag,
 				&hba->outstanding_reqs);
-		if (pending) {
-			hba->dev_cmd.complete = NULL;
+		if (pending)
 			__clear_bit(lrbp->task_tag,
 					&hba->outstanding_reqs);
-		}
 		spin_unlock_irqrestore(&hba->outstanding_lock, flags);
 
 		if (!pending) {
@@ -3234,8 +3226,6 @@ retry:
 		spin_lock_irqsave(&hba->outstanding_lock, flags);
 		pending = test_bit(lrbp->task_tag,
 				&hba->outstanding_reqs);
-		if (pending)
-			hba->dev_cmd.complete = NULL;
 		spin_unlock_irqrestore(&hba->outstanding_lock, flags);
 
 		if (!pending) {
@@ -3269,13 +3259,9 @@ static void ufshcd_dev_man_unlock(struct ufs_hba *hba)
 static int ufshcd_issue_dev_cmd(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
 			  const u32 tag, int timeout)
 {
-	DECLARE_COMPLETION_ONSTACK(wait);
 	int err;
 
-	hba->dev_cmd.complete = &wait;
-
 	ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
 
 	ufshcd_send_command(hba, tag, hba->dev_cmd_queue);
 	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
@@ -5582,12 +5568,12 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
 		ufshcd_release_scsi_cmd(hba, lrbp);
 		/* Do not touch lrbp after scsi done */
 		scsi_done(cmd);
-	} else if (hba->dev_cmd.complete) {
+	} else {
 		if (cqe) {
 			ocs = le32_to_cpu(cqe->status) & MASK_OCS;
 			lrbp->utr_descriptor_ptr->header.ocs = ocs;
 		}
-		complete(hba->dev_cmd.complete);
+		complete(&hba->dev_cmd.complete);
 	}
 }
@@ -5689,6 +5675,8 @@ static void ufshcd_mcq_compl_pending_transfer(struct ufs_hba *hba,
 			continue;
 
 		hwq = ufshcd_mcq_req_to_hwq(hba, scsi_cmd_to_rq(cmd));
+		if (!hwq)
+			continue;
 
 		if (force_compl) {
 			ufshcd_mcq_compl_all_cqes_lock(hba, hwq);
@@ -6010,6 +5998,42 @@ out:
 				__func__, err);
 }
 
+int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id)
+{
+	struct utp_upiu_query_v4_0 *upiu_resp;
+	struct ufs_query_req *request = NULL;
+	struct ufs_query_res *response = NULL;
+	int err;
+
+	if (hba->dev_info.wspecversion < 0x410)
+		return -EOPNOTSUPP;
+
+	ufshcd_hold(hba);
+	mutex_lock(&hba->dev_cmd.lock);
+
+	ufshcd_init_query(hba, &request, &response,
+			  UPIU_QUERY_OPCODE_READ_ATTR,
+			  QUERY_ATTR_IDN_DEV_LVL_EXCEPTION_ID, 0, 0);
+
+	request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+
+	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
+
+	if (err) {
+		dev_err(hba->dev, "%s: failed to read device level exception %d\n",
+			__func__, err);
+		goto out;
+	}
+
+	upiu_resp = (struct utp_upiu_query_v4_0 *)response;
+	*exception_id = get_unaligned_be64(&upiu_resp->osf3);
+out:
+	mutex_unlock(&hba->dev_cmd.lock);
+	ufshcd_release(hba);
+
+	return err;
+}
+
 static int __ufshcd_wb_toggle(struct ufs_hba *hba, bool set, enum flag_idn idn)
 {
 	u8 index;
@@ -6095,7 +6119,7 @@ int ufshcd_wb_set_resize_en(struct ufs_hba *hba, enum wb_resize_en en_mode)
 	return ret;
 }
 
-static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
+static bool ufshcd_wb_curr_buff_threshold_check(struct ufs_hba *hba,
 						u32 avail_buf)
 {
 	u32 cur_buf;
@@ -6177,15 +6201,13 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
 	}
 
 	/*
-	 * The ufs device needs the vcc to be ON to flush.
 	 * With user-space reduction enabled, it's enough to enable flush
 	 * by checking only the available buffer. The threshold
 	 * defined here is > 90% full.
 	 * With user-space preserved enabled, the current-buffer
 	 * should be checked too because the wb buffer size can reduce
 	 * when disk tends to be full. This info is provided by current
-	 * buffer (dCurrentWriteBoosterBufferSize). There's no point in
-	 * keeping vcc on when current buffer is empty.
+	 * buffer (dCurrentWriteBoosterBufferSize).
	 */
 	index = ufshcd_wb_get_query_index(hba);
 	ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR,
@@ -6200,7 +6222,7 @@ static bool ufshcd_wb_need_flush(struct ufs_hba *hba)
 	if (!hba->dev_info.b_presrv_uspc_en)
 		return avail_buf <= UFS_WB_BUF_REMAIN_PERCENT(10);
 
-	return ufshcd_wb_presrv_usrspc_keep_vcc_on(hba, avail_buf);
+	return ufshcd_wb_curr_buff_threshold_check(hba, avail_buf);
 }
 
 static void ufshcd_rpm_dev_flush_recheck_work(struct work_struct *work)
@@ -6252,6 +6274,11 @@ static void ufshcd_exception_event_handler(struct work_struct *work)
 		sysfs_notify(&hba->dev->kobj, NULL, "critical_health");
 	}
 
+	if (status & hba->ee_drv_mask & MASK_EE_DEV_LVL_EXCEPTION) {
+		atomic_inc(&hba->dev_lvl_exception_count);
+		sysfs_notify(&hba->dev->kobj, NULL, "device_lvl_exception_count");
+	}
+
 	ufs_debugfs_exception_event(hba, status);
 }
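sysfs_notify() here wakes any poller on the attribute, so user space can block in poll() instead of re-reading the counter. A minimal sketch, reusing the illustrative device path from the earlier example; note the sysfs poll idiom of a dummy read to arm the notification and a re-read (after lseek) on each wakeup.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[32];
	/* illustrative path; matches the ABI entries added above */
	int fd = open("/sys/bus/platform/devices/1d84000.ufs/device_lvl_exception_count",
		      O_RDONLY);
	struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };

	if (fd < 0)
		return 1;

	(void)read(fd, buf, sizeof(buf));	/* arm the notification */
	while (poll(&pfd, 1, -1) > 0) {
		ssize_t n;

		lseek(fd, 0, SEEK_SET);
		n = read(fd, buf, sizeof(buf) - 1);	/* re-read after wakeup */
		if (n > 0) {
			buf[n] = '\0';
			printf("device_lvl_exception_count: %s", buf);
		}
	}
	close(fd);
	return 0;
}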
@@ -8175,6 +8202,22 @@ static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf)
 	}
 }
 
+static void ufshcd_device_lvl_exception_probe(struct ufs_hba *hba, u8 *desc_buf)
+{
+	u32 ext_ufs_feature;
+
+	if (hba->dev_info.wspecversion < 0x410)
+		return;
+
+	ext_ufs_feature = get_unaligned_be32(desc_buf +
+				DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+	if (!(ext_ufs_feature & UFS_DEV_LVL_EXCEPTION_SUP))
+		return;
+
+	atomic_set(&hba->dev_lvl_exception_count, 0);
+	ufshcd_enable_ee(hba, MASK_EE_DEV_LVL_EXCEPTION);
+}
+
 static void ufshcd_set_rtt(struct ufs_hba *hba)
 {
 	struct ufs_dev_info *dev_info = &hba->dev_info;
@@ -8375,6 +8418,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba)
 
 	ufs_init_rtc(hba, desc_buf);
 
+	ufshcd_device_lvl_exception_probe(hba, desc_buf);
+
 	/*
 	 * ufshcd_read_string_desc returns size of the string
 	 * reset the error value
@@ -8464,6 +8509,31 @@ out:
 	return ret;
 }
 
+/**
+ * ufshcd_quirk_override_pa_h8time - Ensures proper adjustment of PA_HIBERN8TIME.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require specific adjustments to the PA_HIBERN8TIME parameter
+ * to ensure proper hibernation timing. This function retrieves the current
+ * PA_HIBERN8TIME value and increments it by 100us.
+ */
+static void ufshcd_quirk_override_pa_h8time(struct ufs_hba *hba)
+{
+	u32 pa_h8time;
+	int ret;
+
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_HIBERN8TIME), &pa_h8time);
+	if (ret) {
+		dev_err(hba->dev, "Failed to get PA_HIBERN8TIME: %d\n", ret);
+		return;
+	}
+
+	/* Increment by 1 to increase hibernation time by 100 µs */
+	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), pa_h8time + 1);
+	if (ret)
+		dev_err(hba->dev, "Failed updating PA_HIBERN8TIME: %d\n", ret);
+}
+
 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
 {
 	ufshcd_vops_apply_dev_quirks(hba);
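PA_HIBERN8TIME is counted in 100 µs granularity — that is what the in-code comment above relies on — so incrementing the attribute by 1 extends the hibern8 time by 100 µs. A hypothetical standalone helper just to make the unit conversion explicit:

#include <stdio.h>

#define PA_HIBERN8TIME_UNIT_US	100	/* granularity stated in the hunk above */

/* hypothetical helper: attribute units -> microseconds */
static unsigned int h8time_to_us(unsigned int pa_h8time)
{
	return pa_h8time * PA_HIBERN8TIME_UNIT_US;
}

int main(void)
{
	unsigned int val = 3;	/* example value as read via ufshcd_dme_get() */

	printf("PA_HIBERN8TIME=%u -> %u us; after quirk: %u us\n",
	       val, h8time_to_us(val), h8time_to_us(val + 1));
	return 0;
}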
@@ -8474,6 +8544,9 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
 
 	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
 		ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_HIBER8TIME)
+		ufshcd_quirk_override_pa_h8time(hba);
 }
 
 static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
@@ -10527,6 +10600,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
 						UFS_SLEEP_PWR_MODE,
 						UIC_LINK_HIBERN8_STATE);
 
+	init_completion(&hba->dev_cmd.complete);
+
 	err = ufshcd_hba_init(hba);
 	if (err)
 		goto out_error;
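The dev_cmd changes in this merge all serve one conversion: struct ufs_dev_cmd's completion pointer (previously aimed at a DECLARE_COMPLETION_ONSTACK in the issuer) becomes an embedded struct completion, initialized once here and reused for every device-management command, which removes the publish/NULL-out dance under outstanding_lock. A condensed kernel-style sketch of that lifecycle with hypothetical ctx_* names (submission step elided) — not the actual ufshcd functions:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/mutex.h>

struct dev_cmd_ctx {
	struct mutex lock;		/* serializes users of @complete */
	struct completion complete;	/* embedded: no on-stack lifetime issues */
};

/* once, at controller init (mirrors init_completion() in ufshcd_init()) */
static void ctx_init(struct dev_cmd_ctx *ctx)
{
	mutex_init(&ctx->lock);
	init_completion(&ctx->complete);
}

/* issuer side: submit, then wait; nothing to publish or NULL out */
static int ctx_issue_and_wait(struct dev_cmd_ctx *ctx, unsigned long timeout)
{
	int err = 0;

	mutex_lock(&ctx->lock);
	/* submit_hw_command(...); -- hypothetical submission step */
	if (!wait_for_completion_timeout(&ctx->complete, timeout))
		err = -ETIMEDOUT;
	mutex_unlock(&ctx->lock);
	return err;
}

/* completion side (mirrors complete(&hba->dev_cmd.complete) in the CQE path) */
static void ctx_complete(struct dev_cmd_ctx *ctx)
{
	complete(&ctx->complete);
}

One caveat the real driver still has to handle: a completion that fires after the timeout would leave a stale done count for the next waiter, which is why ufshcd clears the command's tag from outstanding_reqs under outstanding_lock on the timeout path.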
@@ -34,7 +34,7 @@
 * Exynos's Vendor specific registers for UFSHCI
 */
 #define HCI_TXPRDT_ENTRY_SIZE	0x00
-#define PRDT_PREFECT_EN		BIT(31)
+#define PRDT_PREFETCH_EN	BIT(31)
 #define HCI_RXPRDT_ENTRY_SIZE	0x04
 #define HCI_1US_TO_CNT_VAL	0x0C
 #define CNT_VAL_1US_MASK	0x3FF
@@ -92,11 +92,16 @@
 						 UIC_TRANSPORT_NO_CONNECTION_RX |\
 						 UIC_TRANSPORT_BAD_TC)
 
-/* FSYS UFS Shareability */
-#define UFS_WR_SHARABLE		BIT(2)
-#define UFS_RD_SHARABLE		BIT(1)
-#define UFS_SHARABLE		(UFS_WR_SHARABLE | UFS_RD_SHARABLE)
-#define UFS_SHAREABILITY_OFFSET	0x710
+/* UFS Shareability */
+#define UFS_EXYNOSAUTO_WR_SHARABLE	BIT(2)
+#define UFS_EXYNOSAUTO_RD_SHARABLE	BIT(1)
+#define UFS_EXYNOSAUTO_SHARABLE		(UFS_EXYNOSAUTO_WR_SHARABLE | \
+					 UFS_EXYNOSAUTO_RD_SHARABLE)
+#define UFS_GS101_WR_SHARABLE		BIT(1)
+#define UFS_GS101_RD_SHARABLE		BIT(0)
+#define UFS_GS101_SHARABLE		(UFS_GS101_WR_SHARABLE | \
+					 UFS_GS101_RD_SHARABLE)
+#define UFS_SHAREABILITY_OFFSET		0x710
 
 /* Multi-host registers */
 #define MHCTRL			0xC4
@@ -209,8 +214,8 @@ static int exynos_ufs_shareability(struct exynos_ufs *ufs)
 	/* IO Coherency setting */
 	if (ufs->sysreg) {
 		return regmap_update_bits(ufs->sysreg,
-					  ufs->shareability_reg_offset,
-					  UFS_SHARABLE, UFS_SHARABLE);
+					  ufs->iocc_offset,
+					  ufs->iocc_mask, ufs->iocc_val);
 	}
 
 	return 0;
@@ -957,6 +962,12 @@ static int exynos_ufs_phy_init(struct exynos_ufs *ufs)
 	}
 
 	phy_set_bus_width(generic_phy, ufs->avail_ln_rx);
 
+	if (generic_phy->power_count) {
+		phy_power_off(generic_phy);
+		phy_exit(generic_phy);
+	}
+
 	ret = phy_init(generic_phy);
 	if (ret) {
 		dev_err(hba->dev, "%s: phy init failed, ret = %d\n",
@@ -1049,9 +1060,14 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba)
 	exynos_ufs_config_intr(ufs, DFES_DEF_L4_ERRS, UNIPRO_L4);
 	exynos_ufs_set_unipro_pclk_div(ufs);
 
+	exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
+
 	/* unipro */
 	exynos_ufs_config_unipro(ufs);
 
+	if (ufs->drv_data->pre_link)
+		ufs->drv_data->pre_link(ufs);
+
 	/* m-phy */
 	exynos_ufs_phy_init(ufs);
 	if (!(ufs->opts & EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR)) {
@@ -1059,11 +1075,6 @@ static int exynos_ufs_pre_link(struct ufs_hba *hba)
 		exynos_ufs_config_phy_cap_attr(ufs);
 	}
 
-	exynos_ufs_setup_clocks(hba, true, PRE_CHANGE);
-
-	if (ufs->drv_data->pre_link)
-		ufs->drv_data->pre_link(ufs);
-
 	return 0;
 }
@@ -1087,12 +1098,17 @@ static int exynos_ufs_post_link(struct ufs_hba *hba)
 	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
 	struct phy *generic_phy = ufs->phy;
 	struct exynos_ufs_uic_attr *attr = ufs->drv_data->uic_attr;
+	u32 val = ilog2(DATA_UNIT_SIZE);
 
 	exynos_ufs_establish_connt(ufs);
 	exynos_ufs_fit_aggr_timeout(ufs);
 
 	hci_writel(ufs, 0xa, HCI_DATA_REORDER);
-	hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_TXPRDT_ENTRY_SIZE);
+
+	if (hba->caps & UFSHCD_CAP_CRYPTO)
+		val |= PRDT_PREFETCH_EN;
+	hci_writel(ufs, val, HCI_TXPRDT_ENTRY_SIZE);
+
 	hci_writel(ufs, ilog2(DATA_UNIT_SIZE), HCI_RXPRDT_ENTRY_SIZE);
 	hci_writel(ufs, (1 << hba->nutrs) - 1, HCI_UTRL_NEXUS_TYPE);
 	hci_writel(ufs, (1 << hba->nutmrs) - 1, HCI_UTMRL_NEXUS_TYPE);
@@ -1168,12 +1184,22 @@ static int exynos_ufs_parse_dt(struct device *dev, struct exynos_ufs *ufs)
 		ufs->sysreg = NULL;
 	else {
 		if (of_property_read_u32_index(np, "samsung,sysreg", 1,
-					       &ufs->shareability_reg_offset)) {
+					       &ufs->iocc_offset)) {
 			dev_warn(dev, "can't get an offset from sysreg. Set to default value\n");
-			ufs->shareability_reg_offset = UFS_SHAREABILITY_OFFSET;
+			ufs->iocc_offset = UFS_SHAREABILITY_OFFSET;
 		}
 	}
 
+	ufs->iocc_mask = ufs->drv_data->iocc_mask;
+	/*
+	 * no 'dma-coherent' property means the descriptors are
+	 * non-cacheable so iocc shareability should be disabled.
+	 */
+	if (of_dma_is_coherent(dev->of_node))
+		ufs->iocc_val = ufs->iocc_mask;
+	else
+		ufs->iocc_val = 0;
+
 	ufs->pclk_avail_min = PCLK_AVAIL_MIN;
 	ufs->pclk_avail_max = PCLK_AVAIL_MAX;
@@ -1497,6 +1523,14 @@ out:
 	return ret;
 }
 
+static void exynos_ufs_exit(struct ufs_hba *hba)
+{
+	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
+
+	phy_power_off(ufs->phy);
+	phy_exit(ufs->phy);
+}
+
 static int exynos_ufs_host_reset(struct ufs_hba *hba)
 {
 	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
@@ -1667,6 +1701,12 @@ static void exynos_ufs_hibern8_notify(struct ufs_hba *hba,
 	}
 }
 
+static int gs101_ufs_suspend(struct exynos_ufs *ufs)
+{
+	hci_writel(ufs, 0 << 0, HCI_GPIO_OUT);
+	return 0;
+}
+
 static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
 	enum ufs_notify_change_status status)
 {
|
@ -1675,6 +1715,9 @@ static int exynos_ufs_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
|
|||
if (status == PRE_CHANGE)
|
||||
return 0;
|
||||
|
||||
if (ufs->drv_data->suspend)
|
||||
ufs->drv_data->suspend(ufs);
|
||||
|
||||
if (!ufshcd_is_link_active(hba))
|
||||
phy_power_off(ufs->phy);
|
||||
|
||||
|
@ -1952,6 +1995,7 @@ static int gs101_ufs_pre_pwr_change(struct exynos_ufs *ufs,
|
|||
static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
|
||||
.name = "exynos_ufs",
|
||||
.init = exynos_ufs_init,
|
||||
.exit = exynos_ufs_exit,
|
||||
.hce_enable_notify = exynos_ufs_hce_enable_notify,
|
||||
.link_startup_notify = exynos_ufs_link_startup_notify,
|
||||
.pwr_change_notify = exynos_ufs_pwr_change_notify,
|
||||
|
@@ -1990,13 +2034,7 @@ static int exynos_ufs_probe(struct platform_device *pdev)
 
 static void exynos_ufs_remove(struct platform_device *pdev)
 {
-	struct ufs_hba *hba = platform_get_drvdata(pdev);
-	struct exynos_ufs *ufs = ufshcd_get_variant(hba);
-
 	ufshcd_pltfrm_remove(pdev);
-
-	phy_power_off(ufs->phy);
-	phy_exit(ufs->phy);
 }
@@ -2035,6 +2073,7 @@ static const struct exynos_ufs_drv_data exynosauto_ufs_drvs = {
 	.opts			= EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
 				  EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
 				  EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX,
+	.iocc_mask		= UFS_EXYNOSAUTO_SHARABLE,
 	.drv_init		= exynosauto_ufs_drv_init,
 	.post_hce_enable	= exynosauto_ufs_post_hce_enable,
 	.pre_link		= exynosauto_ufs_pre_link,
@@ -2136,10 +2175,12 @@ static const struct exynos_ufs_drv_data gs101_ufs_drvs = {
 	.opts			= EXYNOS_UFS_OPT_SKIP_CONFIG_PHY_ATTR |
 				  EXYNOS_UFS_OPT_UFSPR_SECURE |
 				  EXYNOS_UFS_OPT_TIMER_TICK_SELECT,
+	.iocc_mask		= UFS_GS101_SHARABLE,
 	.drv_init		= gs101_ufs_drv_init,
 	.pre_link		= gs101_ufs_pre_link,
 	.post_link		= gs101_ufs_post_link,
 	.pre_pwr_change		= gs101_ufs_pre_pwr_change,
+	.suspend		= gs101_ufs_suspend,
 };
 
 static const struct of_device_id exynos_ufs_of_match[] = {
@@ -181,6 +181,7 @@ struct exynos_ufs_drv_data {
 	struct exynos_ufs_uic_attr *uic_attr;
 	unsigned int quirks;
 	unsigned int opts;
+	u32 iocc_mask;
 	/* SoC's specific operations */
 	int (*drv_init)(struct exynos_ufs *ufs);
 	int (*pre_link)(struct exynos_ufs *ufs);
@@ -191,6 +192,7 @@ struct exynos_ufs_drv_data {
 			 const struct ufs_pa_layer_attr *pwr);
 	int (*pre_hce_enable)(struct exynos_ufs *ufs);
 	int (*post_hce_enable)(struct exynos_ufs *ufs);
+	int (*suspend)(struct exynos_ufs *ufs);
 };
 
 struct ufs_phy_time_cfg {
@@ -230,7 +232,9 @@ struct exynos_ufs {
 	ktime_t entry_hibern8_t;
 	const struct exynos_ufs_drv_data *drv_data;
 	struct regmap *sysreg;
-	u32 shareability_reg_offset;
+	u32 iocc_offset;
+	u32 iocc_mask;
+	u32 iocc_val;
 
 	u32 opts;
 #define EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL		BIT(0)
@@ -33,6 +33,10 @@
 	((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT)
 #define MCQ_QCFG_SIZE	0x40
 
+/* De-emphasis for gear-5 */
+#define DEEMPHASIS_3_5_dB	0x04
+#define NO_DEEMPHASIS		0x0
+
 enum {
 	TSTBUS_UAWM,
 	TSTBUS_UARM,
@@ -830,6 +834,23 @@ static int ufs_qcom_icc_update_bw(struct ufs_qcom_host *host)
 	return ufs_qcom_icc_set_bw(host, bw_table.mem_bw, bw_table.cfg_bw);
 }
 
+static void ufs_qcom_set_tx_hs_equalizer(struct ufs_hba *hba, u32 gear, u32 tx_lanes)
+{
+	u32 equalizer_val;
+	int ret, i;
+
+	/* Determine the equalizer value based on the gear */
+	equalizer_val = (gear == 5) ? DEEMPHASIS_3_5_dB : NO_DEEMPHASIS;
+
+	for (i = 0; i < tx_lanes; i++) {
+		ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(TX_HS_EQUALIZER, i),
+				     equalizer_val);
+		if (ret)
+			dev_err(hba->dev, "%s: failed equalizer lane %d\n",
+				__func__, i);
+	}
+}
+
 static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
 				enum ufs_notify_change_status status,
 				const struct ufs_pa_layer_attr *dev_max_params,
@@ -881,6 +902,11 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
 					dev_req_params->gear_tx,
 					PA_INITIAL_ADAPT);
 		}
+
+		if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING)
+			ufs_qcom_set_tx_hs_equalizer(hba,
+					dev_req_params->gear_tx, dev_req_params->lane_tx);
 
 		break;
 	case POST_CHANGE:
 		if (ufs_qcom_cfg_timers(hba, false)) {
@@ -928,6 +954,16 @@ static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
 				(pa_vs_config_reg1 | (1 << 12)));
 }
 
+static void ufs_qcom_override_pa_tx_hsg1_sync_len(struct ufs_hba *hba)
+{
+	int err;
+
+	err = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TX_HSG1_SYNC_LENGTH),
+				  PA_TX_HSG1_SYNC_LENGTH_VAL);
+	if (err)
+		dev_err(hba->dev, "Failed (%d) set PA_TX_HSG1_SYNC_LENGTH\n", err);
+}
+
 static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
 {
 	int err = 0;
@@ -935,6 +971,9 @@ static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
 	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
 		err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
 
+	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH)
+		ufs_qcom_override_pa_tx_hsg1_sync_len(hba);
+
 	return err;
 }
@@ -949,6 +988,10 @@ static struct ufs_dev_quirk ufs_qcom_dev_fixups[] = {
 	{ .wmanufacturerid = UFS_VENDOR_WDC,
 	  .model = UFS_ANY_MODEL,
 	  .quirk = UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE },
+	{ .wmanufacturerid = UFS_VENDOR_SAMSUNG,
+	  .model = UFS_ANY_MODEL,
+	  .quirk = UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH |
+		   UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING },
 	{}
 };
@@ -122,8 +122,11 @@ enum {
 				 TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
 
 /* QUniPro Vendor specific attributes */
+#define PA_TX_HSG1_SYNC_LENGTH	0x1552
 #define PA_VS_CONFIG_REG1	0x9000
 #define DME_VS_CORE_CLK_CTRL	0xD002
+#define TX_HS_EQUALIZER		0x0037
+
 /* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
 #define CLK_1US_CYCLES_MASK_V4	GENMASK(27, 16)
 #define CLK_1US_CYCLES_MASK	GENMASK(7, 0)
@@ -141,6 +144,21 @@ enum {
 #define UNIPRO_CORE_CLK_FREQ_201_5_MHZ	202
 #define UNIPRO_CORE_CLK_FREQ_403_MHZ	403
 
+/* TX_HSG1_SYNC_LENGTH attr value */
+#define PA_TX_HSG1_SYNC_LENGTH_VAL	0x4A
+
+/*
+ * Some ufs device vendors need a different TSync length.
+ * Enable this quirk to give an additional TX_HS_SYNC_LENGTH.
+ */
+#define UFS_DEVICE_QUIRK_PA_TX_HSG1_SYNC_LENGTH	BIT(16)
+
+/*
+ * Some ufs device vendors need a different Deemphasis setting.
+ * Enable this quirk to tune TX Deemphasis parameters.
+ */
+#define UFS_DEVICE_QUIRK_PA_TX_DEEMPHASIS_TUNING	BIT(17)
+
 /* ICE allocator type to share AES engines among TX stream and RX stream */
 #define ICE_ALLOCATOR_TYPE	2
@@ -181,6 +181,7 @@ enum attr_idn {
 	QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST	= 0x1E,
 	QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE	= 0x1F,
 	QUERY_ATTR_IDN_TIMESTAMP		= 0x30,
+	QUERY_ATTR_IDN_DEV_LVL_EXCEPTION_ID	= 0x34,
 	QUERY_ATTR_IDN_WB_BUF_RESIZE_HINT	= 0x3C,
 	QUERY_ATTR_IDN_WB_BUF_RESIZE_EN		= 0x3D,
 	QUERY_ATTR_IDN_WB_BUF_RESIZE_STATUS	= 0x3E,
@@ -399,6 +400,7 @@ enum {
 	UFS_DEV_EXT_TEMP_NOTIF		= BIT(6),
 	UFS_DEV_HPB_SUPPORT		= BIT(7),
 	UFS_DEV_WRITE_BOOSTER_SUP	= BIT(8),
+	UFS_DEV_LVL_EXCEPTION_SUP	= BIT(12),
 };
 #define UFS_DEV_HPB_SUPPORT_VERSION		0x310
@@ -428,6 +430,7 @@ enum {
 	MASK_EE_TOO_LOW_TEMP		= BIT(4),
 	MASK_EE_WRITEBOOSTER_EVENT	= BIT(5),
 	MASK_EE_PERFORMANCE_THROTTLING	= BIT(6),
+	MASK_EE_DEV_LVL_EXCEPTION	= BIT(7),
 	MASK_EE_HEALTH_CRITICAL		= BIT(9),
 };
 #define MASK_EE_URGENT_TEMP	(MASK_EE_TOO_HIGH_TEMP | MASK_EE_TOO_LOW_TEMP)
@@ -107,4 +107,10 @@ struct ufs_dev_quirk {
 */
 #define UFS_DEVICE_QUIRK_DELAY_AFTER_LPM	(1 << 11)
 
+/*
+ * Some ufs devices may need more time to be in hibern8 before exiting.
+ * Enable this quirk to give it an additional 100us.
+ */
+#define UFS_DEVICE_QUIRK_PA_HIBER8TIME		(1 << 12)
+
 #endif /* UFS_QUIRKS_H_ */
@@ -246,7 +246,7 @@ struct ufs_query {
 struct ufs_dev_cmd {
 	enum dev_cmd_type type;
 	struct mutex lock;
-	struct completion *complete;
+	struct completion complete;
 	struct ufs_query query;
 };
@@ -964,6 +964,9 @@ enum ufshcd_mcq_opr {
 * @pm_qos_req: PM QoS request handle
 * @pm_qos_enabled: flag to check if pm qos is enabled
 * @critical_health_count: count of critical health exceptions
+ * @dev_lvl_exception_count: count of device level exceptions since last reset
+ * @dev_lvl_exception_id: vendor specific information about the
+ *	device level exception event.
 */
 struct ufs_hba {
 	void __iomem *mmio_base;
@@ -1135,6 +1138,8 @@ struct ufs_hba {
 	bool pm_qos_enabled;
 
 	int critical_health_count;
+	atomic_t dev_lvl_exception_count;
+	u64 dev_lvl_exception_id;
 };
 
 /**