mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git
synced 2026-05-02 18:17:50 -04:00
The GSC uC needs to communicate with the CSME to perform certain
operations. Since the GSC can't perform this communication directly
on platforms where it is integrated in GT, i915 needs to transfer the
messages from GSC to CSME and back.
The proxy flow is as follows:
1 - i915 submits a request to GSC asking for the message to CSME
2 - GSC replies with the proxy header + payload for CSME
3 - i915 sends the reply from GSC as-is to CSME via the mei proxy
component
4 - CSME replies with the proxy header + payload for GSC
5 - i915 submits a request to GSC with the reply from CSME
6 - GSC replies either with a new header + payload (same as step 2,
so we restart from there) or with an end message.
After GSC load, i915 is expected to start the first proxy message chain,
while all subsequent ones will be triggered by the GSC via interrupt.
To communicate with the CSME, we use a dedicated mei component, which
means that we need to wait for it to bind before we can initialize the
proxies. This usually happens quite fast, but given that there is a
chance that we'll have to wait a few seconds the GSC work has been moved
to a dedicated WQ to not stall other processes.
v2: fix code style, includes and variable naming (Alan)
v3: add extra check for proxy status, fix includes and comments
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Alan Previn <alan.previn.teres.alexis@intel.com>
Reviewed-by: Alan Previn <alan.previn.teres.alexis@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230502163854.317653-4-daniele.ceraolospurio@intel.com
197 lines
4.6 KiB
C
197 lines
4.6 KiB
C
// SPDX-License-Identifier: MIT
|
|
/*
|
|
* Copyright © 2022 Intel Corporation
|
|
*/
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include "gt/intel_gt.h"
|
|
#include "gt/intel_gt_print.h"
|
|
#include "intel_gsc_uc.h"
|
|
#include "intel_gsc_fw.h"
|
|
#include "i915_drv.h"
|
|
#include "intel_gsc_proxy.h"
|
|
|
|
static void gsc_work(struct work_struct *work)
|
|
{
|
|
struct intel_gsc_uc *gsc = container_of(work, typeof(*gsc), work);
|
|
struct intel_gt *gt = gsc_uc_to_gt(gsc);
|
|
intel_wakeref_t wakeref;
|
|
int ret;
|
|
|
|
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
|
|
|
|
ret = intel_gsc_uc_fw_upload(gsc);
|
|
if (ret)
|
|
goto out_put;
|
|
|
|
ret = intel_gsc_proxy_request_handler(gsc);
|
|
if (ret)
|
|
goto out_put;
|
|
|
|
/*
|
|
* If there is a proxy establishment error, the GSC might still
|
|
* complete the request handling cleanly, so we need to check the
|
|
* status register to check if the proxy init was actually successful
|
|
*/
|
|
if (intel_gsc_uc_fw_proxy_init_done(gsc)) {
|
|
drm_dbg(>->i915->drm, "GSC Proxy initialized\n");
|
|
intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_RUNNING);
|
|
} else {
|
|
drm_err(>->i915->drm, "GSC status reports proxy init not complete\n");
|
|
}
|
|
|
|
out_put:
|
|
intel_runtime_pm_put(gt->uncore->rpm, wakeref);
|
|
}
|
|
|
|
static bool gsc_engine_supported(struct intel_gt *gt)
|
|
{
|
|
intel_engine_mask_t mask;
|
|
|
|
/*
|
|
* We reach here from i915_driver_early_probe for the primary GT before
|
|
* its engine mask is set, so we use the device info engine mask for it.
|
|
* For other GTs we expect the GT-specific mask to be set before we
|
|
* call this function.
|
|
*/
|
|
GEM_BUG_ON(!gt_is_root(gt) && !gt->info.engine_mask);
|
|
|
|
if (gt_is_root(gt))
|
|
mask = RUNTIME_INFO(gt->i915)->platform_engine_mask;
|
|
else
|
|
mask = gt->info.engine_mask;
|
|
|
|
return __HAS_ENGINE(mask, GSC0);
|
|
}
|
|
|
|
void intel_gsc_uc_init_early(struct intel_gsc_uc *gsc)
|
|
{
|
|
struct intel_gt *gt = gsc_uc_to_gt(gsc);
|
|
|
|
intel_uc_fw_init_early(&gsc->fw, INTEL_UC_FW_TYPE_GSC);
|
|
INIT_WORK(&gsc->work, gsc_work);
|
|
|
|
/* we can arrive here from i915_driver_early_probe for primary
|
|
* GT with it being not fully setup hence check device info's
|
|
* engine mask
|
|
*/
|
|
if (!gsc_engine_supported(gt)) {
|
|
intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED);
|
|
return;
|
|
}
|
|
|
|
gsc->wq = alloc_ordered_workqueue("i915_gsc", 0);
|
|
if (!gsc->wq) {
|
|
gt_err(gt, "failed to allocate WQ for GSC, disabling FW\n");
|
|
intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_NOT_SUPPORTED);
|
|
}
|
|
}
|
|
|
|
int intel_gsc_uc_init(struct intel_gsc_uc *gsc)
|
|
{
|
|
static struct lock_class_key gsc_lock;
|
|
struct intel_gt *gt = gsc_uc_to_gt(gsc);
|
|
struct intel_engine_cs *engine = gt->engine[GSC0];
|
|
struct intel_context *ce;
|
|
struct i915_vma *vma;
|
|
int err;
|
|
|
|
err = intel_uc_fw_init(&gsc->fw);
|
|
if (err)
|
|
goto out;
|
|
|
|
vma = intel_guc_allocate_vma(>->uc.guc, SZ_8M);
|
|
if (IS_ERR(vma)) {
|
|
err = PTR_ERR(vma);
|
|
goto out_fw;
|
|
}
|
|
|
|
gsc->local = vma;
|
|
|
|
ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
|
|
I915_GEM_HWS_GSC_ADDR,
|
|
&gsc_lock, "gsc_context");
|
|
if (IS_ERR(ce)) {
|
|
gt_err(gt, "failed to create GSC CS ctx for FW communication\n");
|
|
err = PTR_ERR(ce);
|
|
goto out_vma;
|
|
}
|
|
|
|
gsc->ce = ce;
|
|
|
|
/* if we fail to init proxy we still want to load GSC for PM */
|
|
intel_gsc_proxy_init(gsc);
|
|
|
|
intel_uc_fw_change_status(&gsc->fw, INTEL_UC_FIRMWARE_LOADABLE);
|
|
|
|
return 0;
|
|
|
|
out_vma:
|
|
i915_vma_unpin_and_release(&gsc->local, 0);
|
|
out_fw:
|
|
intel_uc_fw_fini(&gsc->fw);
|
|
out:
|
|
gt_probe_error(gt, "GSC init failed %pe\n", ERR_PTR(err));
|
|
return err;
|
|
}
|
|
|
|
void intel_gsc_uc_fini(struct intel_gsc_uc *gsc)
|
|
{
|
|
if (!intel_uc_fw_is_loadable(&gsc->fw))
|
|
return;
|
|
|
|
flush_work(&gsc->work);
|
|
if (gsc->wq) {
|
|
destroy_workqueue(gsc->wq);
|
|
gsc->wq = NULL;
|
|
}
|
|
|
|
intel_gsc_proxy_fini(gsc);
|
|
|
|
if (gsc->ce)
|
|
intel_engine_destroy_pinned_context(fetch_and_zero(&gsc->ce));
|
|
|
|
i915_vma_unpin_and_release(&gsc->local, 0);
|
|
|
|
intel_uc_fw_fini(&gsc->fw);
|
|
}
|
|
|
|
void intel_gsc_uc_flush_work(struct intel_gsc_uc *gsc)
|
|
{
|
|
if (!intel_uc_fw_is_loadable(&gsc->fw))
|
|
return;
|
|
|
|
flush_work(&gsc->work);
|
|
}
|
|
|
|
void intel_gsc_uc_resume(struct intel_gsc_uc *gsc)
|
|
{
|
|
if (!intel_uc_fw_is_loadable(&gsc->fw))
|
|
return;
|
|
|
|
/*
|
|
* we only want to start the GSC worker from here in the actual resume
|
|
* flow and not during driver load. This is because GSC load is slow and
|
|
* therefore we want to make sure that the default state init completes
|
|
* first to not slow down the init thread. A separate call to
|
|
* intel_gsc_uc_load_start will ensure that the GSC is loaded during
|
|
* driver load.
|
|
*/
|
|
if (!gsc_uc_to_gt(gsc)->engine[GSC0]->default_state)
|
|
return;
|
|
|
|
intel_gsc_uc_load_start(gsc);
|
|
}
|
|
|
|
void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc)
|
|
{
|
|
if (!intel_uc_fw_is_loadable(&gsc->fw))
|
|
return;
|
|
|
|
if (intel_gsc_uc_fw_init_done(gsc))
|
|
return;
|
|
|
|
queue_work(gsc->wq, &gsc->work);
|
|
}
|