[SCSI] pm8001: raise host can queue

This is a follow-up to a patch provided by Jack Wang on September 21, 2011. After increasing CAN_QUEUE to 510 in pm8001 we discovered occasional performance degradation. We needed to increase the MPI queue to compensate and ensure we never hit that limit. We also needed to double the margin to cover event and administrative commands that draw from the same pool; otherwise, when the command pool is temporarily overloaded, a command can complete largely unproductively with a soft error returned to the caller.

Signed-off-by: Mark Salyzyn <mark_salyzyn@xyratex.com>
Acked-by: Jack Wang <jack_wang@usish.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
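The margin arithmetic above can be restated with the constants this patch settles on. A minimal sketch, not part of the patch, assuming only the values visible in the diff below: a CCB pool of 512 (PM8001_MAX_CCB) and the new SCSI queue depth of 508 (PM8001_CAN_QUEUE).

	/* Illustrative only -- restates the commit-message reasoning using
	 * the constants this patch settles on; not driver code. */
	#define PM8001_MAX_CCB		512	/* size of the internal CCB pool */
	#define PM8001_CAN_QUEUE	508	/* depth advertised to the SCSI layer */

	/* 512 - 508 = 4 CCBs held back for event and administrative commands.
	 * The earlier attempt at 510 left a margin of only 2; this patch
	 * doubles it to 4. */
	enum { PM8001_CCB_MARGIN = PM8001_MAX_CCB - PM8001_CAN_QUEUE };

The earlier depth of 510 left only two spare CCBs; doubling that margin to four absorbs the internal commands without starving the SCSI queue.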
parent 85bb4457ef
commit 99c72ebceb
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -66,9 +66,10 @@ enum port_type {
 
 /* driver compile-time configuration */
 #define	PM8001_MAX_CCB		 512	/* max ccbs supported */
+#define	PM8001_MPI_QUEUE	 1024	/* maximum mpi queue entries */
 #define	PM8001_MAX_INB_NUM	 1
 #define	PM8001_MAX_OUTB_NUM	 1
-#define	PM8001_CAN_QUEUE	 128	/* SCSI Queue depth */
+#define	PM8001_CAN_QUEUE	 508	/* SCSI Queue depth */
 
 /* unchangeable hardware details */
 #define	PM8001_MAX_PHYS		 8	/* max. possible phys */
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -192,7 +192,7 @@ init_default_table_values(struct pm8001_hba_info *pm8001_ha)
 	pm8001_ha->main_cfg_tbl.fatal_err_interrupt		= 0x01;
 	for (i = 0; i < qn; i++) {
 		pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt	=
-			0x00000100 | (0x00000040 << 16) | (0x00<<30);
+			PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
 		pm8001_ha->inbnd_q_tbl[i].upper_base_addr	=
 			pm8001_ha->memoryMap.region[IB].phys_addr_hi;
 		pm8001_ha->inbnd_q_tbl[i].lower_base_addr	=
@@ -218,7 +218,7 @@ init_default_table_values(struct pm8001_hba_info *pm8001_ha)
 	}
 	for (i = 0; i < qn; i++) {
 		pm8001_ha->outbnd_q_tbl[i].element_size_cnt	=
-			256 | (64 << 16) | (1<<30);
+			PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
 		pm8001_ha->outbnd_q_tbl[i].upper_base_addr	=
 			pm8001_ha->memoryMap.region[OB].phys_addr_hi;
 		pm8001_ha->outbnd_q_tbl[i].lower_base_addr	=
@@ -1245,7 +1245,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
 	/* Stores the new consumer index */
 	consumer_index = pm8001_read_32(circularQ->ci_virt);
 	circularQ->consumer_index = cpu_to_le32(consumer_index);
-	if (((circularQ->producer_idx + bcCount) % 256) ==
+	if (((circularQ->producer_idx + bcCount) % PM8001_MPI_QUEUE) ==
 		le32_to_cpu(circularQ->consumer_index)) {
 		*messagePtr = NULL;
 		return -1;
@@ -1253,7 +1253,8 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ,
 	/* get memory IOMB buffer address */
 	offset = circularQ->producer_idx * 64;
 	/* increment to next bcCount element */
-	circularQ->producer_idx = (circularQ->producer_idx + bcCount) % 256;
+	circularQ->producer_idx = (circularQ->producer_idx + bcCount)
+				% PM8001_MPI_QUEUE;
 	/* Adds that distance to the base of the region virtual address plus
 	the message header size*/
 	msgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + offset);
@@ -1326,7 +1327,8 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
 		return 0;
 	}
 	/* free the circular queue buffer elements associated with the message*/
-	circularQ->consumer_idx = (circularQ->consumer_idx + bc) % 256;
+	circularQ->consumer_idx = (circularQ->consumer_idx + bc)
+				% PM8001_MPI_QUEUE;
 	/* update the CI of outbound queue */
 	pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar, circularQ->ci_offset,
 		circularQ->consumer_idx);
@@ -1383,7 +1385,8 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
 				circularQ->consumer_idx =
 					(circularQ->consumer_idx +
 					((le32_to_cpu(msgHeader_tmp)
-					>> 24) & 0x1f)) % 256;
+					>> 24) & 0x1f))
+					% PM8001_MPI_QUEUE;
 				msgHeader_tmp = 0;
 				pm8001_write_32(msgHeader, 0, 0);
 				/* update the CI of outbound queue */
@@ -1396,7 +1399,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
 			circularQ->consumer_idx =
 				(circularQ->consumer_idx +
 				((le32_to_cpu(msgHeader_tmp) >> 24) &
-				0x1f)) % 256;
+				0x1f)) % PM8001_MPI_QUEUE;
 			msgHeader_tmp = 0;
 			pm8001_write_32(msgHeader, 0, 0);
 			/* update the CI of outbound queue */
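The modulus replaced throughout this file is the usual circular-queue wrap, so every producer and consumer update has to use the same divisor as the "queue full" test. The stand-alone sketch below restates that test from mpi_msg_free_get in plain C; it is illustrative, not driver code, and QUEUE_ENTRIES stands in for PM8001_MPI_QUEUE.

	#include <assert.h>
	#include <stdbool.h>
	#include <stdint.h>

	#define QUEUE_ENTRIES 1024	/* stands in for PM8001_MPI_QUEUE */

	/* The queue is full when advancing the producer index by bc elements
	 * would land on the consumer index; both indices must wrap with the
	 * same modulus, which is why every hard-coded 256 above becomes
	 * PM8001_MPI_QUEUE. */
	static bool queue_has_room(uint32_t producer_idx, uint32_t consumer_idx,
				   uint32_t bc)
	{
		return ((producer_idx + bc) % QUEUE_ENTRIES) != consumer_idx;
	}

	int main(void)
	{
		assert(queue_has_room(0, 512, 1));	/* plenty of space left */
		assert(!queue_has_room(1023, 0, 1));	/* wrap lands on consumer: full */
		return 0;
	}

Leaving any one of these sites at 256 while the tables are programmed for 1024 entries would make the full/empty checks wrap early, which is exactly what this hunk set prevents.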
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -235,15 +235,15 @@ static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
 	pm8001_ha->memoryMap.region[PI].alignment = 4;
 
 	/* MPI Memory region 5 inbound queues */
-	pm8001_ha->memoryMap.region[IB].num_elements = 256;
+	pm8001_ha->memoryMap.region[IB].num_elements = PM8001_MPI_QUEUE;
 	pm8001_ha->memoryMap.region[IB].element_size = 64;
-	pm8001_ha->memoryMap.region[IB].total_len = 256 * 64;
+	pm8001_ha->memoryMap.region[IB].total_len = PM8001_MPI_QUEUE * 64;
 	pm8001_ha->memoryMap.region[IB].alignment = 64;
 
-	/* MPI Memory region 6 inbound queues */
-	pm8001_ha->memoryMap.region[OB].num_elements = 256;
+	/* MPI Memory region 6 outbound queues */
+	pm8001_ha->memoryMap.region[OB].num_elements = PM8001_MPI_QUEUE;
 	pm8001_ha->memoryMap.region[OB].element_size = 64;
-	pm8001_ha->memoryMap.region[OB].total_len = 256 * 64;
+	pm8001_ha->memoryMap.region[OB].total_len = PM8001_MPI_QUEUE * 64;
 	pm8001_ha->memoryMap.region[OB].alignment = 64;
 
 	/* Memory region write DMA*/
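The sizing in this hunk follows directly from the element count and element size shown above. A quick illustrative check in plain C (not driver code):

	#include <stdio.h>

	/* Illustrative arithmetic only: each MPI queue region is
	 * num_elements * element_size bytes, per the hunk above. */
	int main(void)
	{
		unsigned int num_elements = 1024;	/* PM8001_MPI_QUEUE */
		unsigned int element_size = 64;		/* bytes per IOMB */
		unsigned int total_len = num_elements * element_size;

		printf("per-queue DMA region: %u bytes (%u KiB)\n",
		       total_len, total_len / 1024);	/* 65536 bytes = 64 KiB */
		return 0;
	}

So growing each queue from 256 to 1024 entries raises its DMA footprint from 16 KiB to 64 KiB per direction, which is the cost of never hitting the MPI queue limit at the new SCSI queue depth.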