| author | Lyude Paul <lyude@redhat.com> | 2020-08-26 14:24:48 -0400 |
|---|---|---|
| committer | Lyude Paul <lyude@redhat.com> | 2020-08-31 19:10:08 -0400 |
| commit | d297ce4b5d490eab60bede36bfd5da6fbcd416cf | |
| tree | 7bef3320d1ad9505e4fa7d297283b08895307029 /drivers/gpu/drm/nouveau/nouveau_connector.c | |
| parent | 02bb7fe2d3eacb5ca1cec597ad16eff93ae8d769 | |
drm/nouveau/kms: Only use hpd_work for reprobing in HPD paths
Currently we perform both DP short IRQ handling and connector
reprobing in the HPD IRQ handler. However, since we need to grab
connection_mutex in order to reprobe a connector, in theory we could
accidentally block ourselves from handling any short IRQs until after a
modeset completes if a connector hotplug happens to occur in parallel
with that modeset.
I haven't seen this actually happen yet, but since we're cleaning up
nouveau's hotplug handling code anyway and already have an HPD worker,
we can simply fix this by relying solely on the HPD worker to reprobe
connectors when we receive an HPD IRQ. We also add a mask to
nouveau_drm to keep track of which connectors are waiting to be
reprobed in response to an HPD IRQ.
Signed-off-by: Lyude Paul <lyude@redhat.com>
Reviewed-by: Ben Skeggs <bskeggs@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200826182456.322681-13-lyude@redhat.com
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_connector.c')
| -rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_connector.c | 42 |
|---|---|---|

1 file changed, 16 insertions(+), 26 deletions(-)
```diff
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 4a29f691c08e..637e91594fbe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1138,6 +1138,20 @@ nouveau_connector_funcs_lvds = {
 	.early_unregister = nouveau_connector_early_unregister,
 };
 
+void
+nouveau_connector_hpd(struct drm_connector *connector)
+{
+	struct nouveau_drm *drm = nouveau_drm(connector->dev);
+	u32 mask = drm_connector_mask(connector);
+
+	mutex_lock(&drm->hpd_lock);
+	if (!(drm->hpd_pending & mask)) {
+		drm->hpd_pending |= mask;
+		schedule_work(&drm->hpd_work);
+	}
+	mutex_unlock(&drm->hpd_lock);
+}
+
 static int
 nouveau_connector_hotplug(struct nvif_notify *notify)
 {
@@ -1147,8 +1161,6 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
 	struct drm_device *dev = connector->dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	const struct nvif_notify_conn_rep_v0 *rep = notify->data;
-	const char *name = connector->name;
-	int ret;
 	bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
 
 	if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
@@ -1156,31 +1168,9 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
 		return NVIF_NOTIFY_KEEP;
 	}
 
-	ret = pm_runtime_get(drm->dev->dev);
-	if (ret == 0) {
-		/* We can't block here if there's a pending PM request
-		 * running, as we'll deadlock nouveau_display_fini() when it
-		 * calls nvif_put() on our nvif_notify struct. So, simply
-		 * defer the hotplug event until the device finishes resuming
-		 */
-		NV_DEBUG(drm, "Deferring HPD on %s until runtime resume\n",
-			 name);
-		schedule_work(&drm->hpd_work);
-
-		pm_runtime_put_noidle(drm->dev->dev);
-		return NVIF_NOTIFY_KEEP;
-	} else if (ret != 1 && ret != -EACCES) {
-		NV_WARN(drm, "HPD on %s dropped due to RPM failure: %d\n",
-			name, ret);
-		return NVIF_NOTIFY_DROP;
-	}
-
-	NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
-
-	drm_helper_hpd_irq_event(connector->dev);
+	NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", connector->name);
+	nouveau_connector_hpd(connector);
 
-	pm_runtime_mark_last_busy(drm->dev->dev);
-	pm_runtime_put_autosuspend(drm->dev->dev);
 	return NVIF_NOTIFY_KEEP;
 }
```
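The diff above only shows the producer side: nouveau_connector_hpd() records the connector in drm->hpd_pending and schedules drm->hpd_work. A worker-side consumer of that mask might look roughly like the sketch below. This is a hypothetical illustration, not the actual nouveau_display.c change from this series: the example_drm stand-in struct, the example_hpd_work() name, and the DRM probe-helper calls are assumptions, while the hpd_lock / hpd_pending / hpd_work field names come from the diff.

```c
/* Hypothetical sketch of the worker side that consumes hpd_pending.
 * Only the hpd_lock, hpd_pending and hpd_work fields mirror the diff
 * above; the struct layout and helper calls are illustrative.
 */
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <drm/drm_connector.h>
#include <drm/drm_probe_helper.h>

struct example_drm {			/* stand-in for struct nouveau_drm */
	struct drm_device *dev;
	struct mutex hpd_lock;		/* protects hpd_pending */
	u32 hpd_pending;		/* drm_connector_mask() bits awaiting reprobe */
	struct work_struct hpd_work;
};

static void
example_hpd_work(struct work_struct *work)
{
	struct example_drm *drm = container_of(work, typeof(*drm), hpd_work);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	u32 pending;

	/* Snapshot and clear the mask so a new HPD IRQ can re-arm the work. */
	mutex_lock(&drm->hpd_lock);
	pending = drm->hpd_pending;
	drm->hpd_pending = 0;
	mutex_unlock(&drm->hpd_lock);

	/* Reprobe only the connectors flagged by nouveau_connector_hpd();
	 * connection_mutex is taken inside the probe helper, safely outside
	 * the HPD IRQ path.
	 */
	drm_connector_list_iter_begin(drm->dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (pending & drm_connector_mask(connector))
			drm_helper_probe_detect(connector, NULL, false);
	}
	drm_connector_list_iter_end(&conn_iter);

	/* A real driver would only send this when a connector status changed. */
	drm_kms_helper_hotplug_event(drm->dev);
}
```

Clearing the mask under hpd_lock before reprobing means an HPD IRQ that fires mid-reprobe simply sets its bit again and reschedules the work, so no hotplug event is lost.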