scsi: libsas: Use new workqueue to run sas event and disco event

Currently all libsas work items are queued on the Scsi_Host workqueue,
including the SAS event work posted by LLDDs and the SAS discovery
work, and a single SAS hotplug flow may be split across several work
items. For example, when libsas receives a PORTE_BYTES_DMAED event it
is processed in the following steps:

sas_form_port  --- runs in a work item on the shost workqueue
	sas_discover_domain  --- runs in another work item on the shost workqueue
		...
		sas_probe_devices  --- runs in yet another work item on the shost workqueue

So while hot-adding a device, libsas may need to run several work items
on the same workqueue to add the device to the system. The process is
not atomic, so it may be interrupted by other SAS event works, such as
PHYE_LOSS_OF_SIGNAL.

This patch is a preparation for executing libsas SAS events
synchronously. We need separate workqueues for SAS event work and
discovery work; otherwise a work item would block while waiting for a
chained work item queued on the same workqueue.
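
To illustrate why the chained work must land on a different queue, here
is a minimal, hedged sketch (not part of the patch; all demo_* names and
the module boilerplate are hypothetical): if parent_fn() were queued on
the same single-threaded workqueue as child_w, the flush_work() call
could never finish, because child_w cannot run until parent_fn()
returns. Queueing the chained work on a second workqueue, as the new
event_q/disco_q split does, avoids that.

/* Sketch only: demonstrates the single-queue self-wait problem. */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_event_q;	/* stands in for ha->event_q */
static struct workqueue_struct *demo_disco_q;	/* stands in for ha->disco_q */

static void child_fn(struct work_struct *work)
{
	pr_info("chained (discovery-style) work ran\n");
}
static DECLARE_WORK(child_w, child_fn);

static void parent_fn(struct work_struct *work)
{
	/* chain the follow-up work onto a *different* queue ... */
	queue_work(demo_disco_q, &child_w);
	/* ... so waiting for it here cannot deadlock */
	flush_work(&child_w);
	pr_info("event-style work done\n");
}
static DECLARE_WORK(parent_w, parent_fn);

static int __init demo_init(void)
{
	demo_event_q = create_singlethread_workqueue("demo_event_q");
	if (!demo_event_q)
		return -ENOMEM;
	demo_disco_q = create_singlethread_workqueue("demo_disco_q");
	if (!demo_disco_q) {
		destroy_workqueue(demo_event_q);
		return -ENOMEM;
	}
	queue_work(demo_event_q, &parent_w);
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_event_q);
	destroy_workqueue(demo_disco_q);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");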

Signed-off-by: Yijing Wang <wangyijing@huawei.com>
CC: John Garry <john.garry@huawei.com>
CC: Johannes Thumshirn <jthumshirn@suse.de>
CC: Ewan Milne <emilne@redhat.com>
CC: Christoph Hellwig <hch@lst.de>
CC: Tomas Henzl <thenzl@redhat.com>
CC: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jason Yan <yanaijie@huawei.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

drivers/scsi/libsas/sas_discover.c

@@ -534,7 +534,7 @@ static void sas_chain_work(struct sas_ha_struct *ha, struct sas_work *sw)
 	 * workqueue, or known to be submitted from a context that is
	 * not racing against draining
	 */
-	scsi_queue_work(ha->core.shost, &sw->work);
+	queue_work(ha->disco_q, &sw->work);
 }
 
 static void sas_chain_event(int event, unsigned long *pending,

drivers/scsi/libsas/sas_event.c

@@ -40,7 +40,7 @@ int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
 		if (list_empty(&sw->drain_node))
			list_add_tail(&sw->drain_node, &ha->defer_q);
	} else
-		rc = scsi_queue_work(ha->core.shost, &sw->work);
+		rc = queue_work(ha->event_q, &sw->work);
 
 	return rc;
 }
@@ -61,7 +61,6 @@ static int sas_queue_event(int event, struct sas_work *work,
 void __sas_drain_work(struct sas_ha_struct *ha)
 {
-	struct workqueue_struct *wq = ha->core.shost->work_q;
 	struct sas_work *sw, *_sw;
 	int ret;
@@ -70,7 +69,8 @@ void __sas_drain_work(struct sas_ha_struct *ha)
 	spin_lock_irq(&ha->lock);
 	spin_unlock_irq(&ha->lock);
 
-	drain_workqueue(wq);
+	drain_workqueue(ha->event_q);
+	drain_workqueue(ha->disco_q);
 
 	spin_lock_irq(&ha->lock);
 	clear_bit(SAS_HA_DRAINING, &ha->state);

drivers/scsi/libsas/sas_init.c

@@ -110,6 +110,7 @@ void sas_hash_addr(u8 *hashed, const u8 *sas_addr)
 
 int sas_register_ha(struct sas_ha_struct *sas_ha)
 {
+	char name[64];
 	int error = 0;
 
 	mutex_init(&sas_ha->disco_mutex);
@@ -143,10 +144,24 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
 		goto Undo_ports;
 	}
 
+	error = -ENOMEM;
+	snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev));
+	sas_ha->event_q = create_singlethread_workqueue(name);
+	if (!sas_ha->event_q)
+		goto Undo_ports;
+
+	snprintf(name, sizeof(name), "%s_disco_q", dev_name(sas_ha->dev));
+	sas_ha->disco_q = create_singlethread_workqueue(name);
+	if (!sas_ha->disco_q)
+		goto Undo_event_q;
+
 	INIT_LIST_HEAD(&sas_ha->eh_done_q);
 	INIT_LIST_HEAD(&sas_ha->eh_ata_q);
 
 	return 0;
 
+Undo_event_q:
+	destroy_workqueue(sas_ha->event_q);
 Undo_ports:
 	sas_unregister_ports(sas_ha);
 Undo_phys:
@@ -177,6 +192,9 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha)
 		__sas_drain_work(sas_ha);
 	mutex_unlock(&sas_ha->drain_mutex);
 
+	destroy_workqueue(sas_ha->disco_q);
+	destroy_workqueue(sas_ha->event_q);
+
 	return 0;
 }

include/scsi/libsas.h

@@ -389,6 +389,9 @@ struct sas_ha_struct {
 	struct device *dev; /* should be set */
 	struct module *lldd_module; /* should be set */
 
+	struct workqueue_struct *event_q;
+	struct workqueue_struct *disco_q;
+
 	u8 *sas_addr; /* must be set */
 	u8 hashed_sas_addr[HASHED_SAS_ADDR_SIZE];