| field | value | date |
|---|---|---|
| author | Dan Williams <dan.j.williams@intel.com> | 2011-06-27 14:57:03 -0700 |
| committer | Dan Williams <dan.j.williams@intel.com> | 2011-07-03 04:04:52 -0700 |
| commit | 5076a1a97e2fa61c847a5fdd4b1991faf7716da6 (patch) | |
| tree | 251d207e75439da25d4d3a0353e0b853c8e79f2b /drivers/scsi/isci/request.c | |
| parent | ba7cb22342a66505a831bb7e4541fef90e0193c9 (diff) | |
| download | linux-5076a1a97e2fa61c847a5fdd4b1991faf7716da6.tar.gz, linux-5076a1a97e2fa61c847a5fdd4b1991faf7716da6.tar.bz2, linux-5076a1a97e2fa61c847a5fdd4b1991faf7716da6.zip | |
isci: unify isci_request and scic_sds_request
They are one and the same object, so remove the distinction. The near-duplicate
fields (owning_controller and isci_host) will be cleaned up after the
scic_sds_controller/isci_host unification.
Reported-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
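For context, the churn in this patch is almost entirely mechanical: before the change, the core request state (struct scic_sds_request) was embedded inside the driver-level struct isci_request, and call sites converted between the two with a container_of()-style helper. A minimal sketch of that pre-patch pattern — the member names and layout here are illustrative, not the exact kernel definitions:

```c
/*
 * Illustrative sketch only; the real structures live in
 * drivers/scsi/isci/request.h and carry many more fields.
 */
#include <stddef.h>

/* Kernel's container_of(), reduced to its essence for this sketch. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct scic_sds_request {
	unsigned int io_tag;		/* core (SCIC) request state */
};

struct isci_request {
	struct scic_sds_request sci;	/* embedded core request (assumed member name) */
	struct isci_host *isci_host;	/* driver-level state */
};

/* Pre-patch pattern: recover the outer request from the embedded one. */
static inline struct isci_request *sci_req_to_ireq(struct scic_sds_request *sci_req)
{
	return container_of(sci_req, struct isci_request, sci);
}
```

After this patch there is only struct isci_request, so helpers that used to take a struct scic_sds_request * take the struct isci_request * directly, and the sci_req_to_ireq() conversion above disappears — that is the sci_req/sds_request to ireq renaming that makes up most of the hunks below.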
Diffstat (limited to 'drivers/scsi/isci/request.c')
| -rw-r--r-- | drivers/scsi/isci/request.c | 683 |
1 file changed, 332 insertions(+), 351 deletions(-)
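Reading tip for the (very long) diff that follows: as an example of the rename pattern, the first hunk changes the argument of to_sgl_element_pair(), which selects a scatter-gather element pair either from the two pairs embedded in the hardware task context or from the request's external table. A compilable sketch of the post-patch helper, with the surrounding types stubbed down to just the fields it touches (assumed shapes, not the real definitions):

```c
#include <stddef.h>

struct scu_sgl_element_pair {
	unsigned int dummy;			/* hardware-defined layout elided */
};

struct scu_task_context {
	struct scu_sgl_element_pair sgl_pair_ab;	/* embedded pair 0 */
	struct scu_sgl_element_pair sgl_pair_cd;	/* embedded pair 1 */
};

struct isci_request {
	struct scu_task_context *tc;			/* this request's task context */
	struct scu_sgl_element_pair *sg_table;		/* overflow SGL pairs */
};

static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
							int idx)
{
	if (idx == 0)
		return &ireq->tc->sgl_pair_ab;
	else if (idx == 1)
		return &ireq->tc->sgl_pair_cd;
	else if (idx < 0)
		return NULL;
	else
		return &ireq->sg_table[idx - 2];	/* pairs 2..N live outside the TC */
}
```

The body of the function is unchanged by the patch; only the parameter type and name move from struct scic_sds_request *sci_req to struct isci_request *ireq.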
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 8520626b02fa..c544bc79ce17 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -61,35 +61,35 @@ #include "scu_event_codes.h" #include "sas.h" -static struct scu_sgl_element_pair *to_sgl_element_pair(struct scic_sds_request *sci_req, +static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq, int idx) { if (idx == 0) - return &sci_req->tc->sgl_pair_ab; + return &ireq->tc->sgl_pair_ab; else if (idx == 1) - return &sci_req->tc->sgl_pair_cd; + return &ireq->tc->sgl_pair_cd; else if (idx < 0) return NULL; else - return &sci_req->sg_table[idx - 2]; + return &ireq->sg_table[idx - 2]; } static dma_addr_t to_sgl_element_pair_dma(struct scic_sds_controller *scic, - struct scic_sds_request *sci_req, u32 idx) + struct isci_request *ireq, u32 idx) { u32 offset; if (idx == 0) { - offset = (void *) &sci_req->tc->sgl_pair_ab - + offset = (void *) &ireq->tc->sgl_pair_ab - (void *) &scic->task_context_table[0]; return scic->task_context_dma + offset; } else if (idx == 1) { - offset = (void *) &sci_req->tc->sgl_pair_cd - + offset = (void *) &ireq->tc->sgl_pair_cd - (void *) &scic->task_context_table[0]; return scic->task_context_dma + offset; } - return scic_io_request_get_dma_addr(sci_req, &sci_req->sg_table[idx - 2]); + return scic_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]); } static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) @@ -100,12 +100,11 @@ static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg) e->address_modifier = 0; } -static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) +static void scic_sds_request_build_sgl(struct isci_request *ireq) { - struct isci_request *isci_request = sci_req_to_ireq(sds_request); - struct isci_host *isci_host = isci_request->isci_host; + struct isci_host *isci_host = ireq->isci_host; struct scic_sds_controller *scic = &isci_host->sci; - struct sas_task *task = isci_request_access_task(isci_request); + struct sas_task *task = isci_request_access_task(ireq); struct scatterlist *sg = NULL; dma_addr_t dma_addr; u32 sg_idx = 0; @@ -116,7 +115,7 @@ static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) sg = task->scatter; while (sg) { - scu_sg = to_sgl_element_pair(sds_request, sg_idx); + scu_sg = to_sgl_element_pair(ireq, sg_idx); init_sgl_element(&scu_sg->A, sg); sg = sg_next(sg); if (sg) { @@ -127,7 +126,7 @@ static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) if (prev_sg) { dma_addr = to_sgl_element_pair_dma(scic, - sds_request, + ireq, sg_idx); prev_sg->next_pair_upper = @@ -140,14 +139,14 @@ static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) sg_idx++; } } else { /* handle when no sg */ - scu_sg = to_sgl_element_pair(sds_request, sg_idx); + scu_sg = to_sgl_element_pair(ireq, sg_idx); dma_addr = dma_map_single(&isci_host->pdev->dev, task->scatter, task->total_xfer_len, task->data_dir); - isci_request->zero_scatter_daddr = dma_addr; + ireq->zero_scatter_daddr = dma_addr; scu_sg->A.length = task->total_xfer_len; scu_sg->A.address_upper = upper_32_bits(dma_addr); @@ -160,13 +159,12 @@ static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request) } } -static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req) +static void scic_sds_io_request_build_ssp_command_iu(struct isci_request *ireq) { struct ssp_cmd_iu *cmd_iu; - struct isci_request *ireq = 
sci_req_to_ireq(sci_req); struct sas_task *task = isci_request_access_task(ireq); - cmd_iu = &sci_req->ssp.cmd; + cmd_iu = &ireq->ssp.cmd; memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8); cmd_iu->add_cdb_len = 0; @@ -181,14 +179,13 @@ static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sc sizeof(task->ssp_task.cdb) / sizeof(u32)); } -static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req) +static void scic_sds_task_request_build_ssp_task_iu(struct isci_request *ireq) { struct ssp_task_iu *task_iu; - struct isci_request *ireq = sci_req_to_ireq(sci_req); struct sas_task *task = isci_request_access_task(ireq); struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); - task_iu = &sci_req->ssp.tmf; + task_iu = &ireq->ssp.tmf; memset(task_iu, 0, sizeof(struct ssp_task_iu)); @@ -208,15 +205,15 @@ static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci * */ static void scu_ssp_reqeust_construct_task_context( - struct scic_sds_request *sds_request, + struct isci_request *ireq, struct scu_task_context *task_context) { dma_addr_t dma_addr; struct scic_sds_remote_device *target_device; struct scic_sds_port *target_port; - target_device = scic_sds_request_get_device(sds_request); - target_port = scic_sds_request_get_port(sds_request); + target_device = scic_sds_request_get_device(ireq); + target_port = scic_sds_request_get_port(ireq); /* Fill in the TC with the its required data */ task_context->abort = 0; @@ -232,7 +229,7 @@ static void scu_ssp_reqeust_construct_task_context( task_context->context_type = SCU_TASK_CONTEXT_TYPE; task_context->remote_node_index = - scic_sds_remote_device_get_index(sds_request->target_device); + scic_sds_remote_device_get_index(ireq->target_device); task_context->command_code = 0; task_context->link_layer_control = 0; @@ -244,22 +241,21 @@ static void scu_ssp_reqeust_construct_task_context( task_context->address_modifier = 0; - /* task_context->type.ssp.tag = sci_req->io_tag; */ + /* task_context->type.ssp.tag = ireq->io_tag; */ task_context->task_phase = 0x01; - sds_request->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | (scic_sds_controller_get_protocol_engine_group(controller) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | (scic_sds_port_get_index(target_port) << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | - ISCI_TAG_TCI(sds_request->io_tag)); + ISCI_TAG_TCI(ireq->io_tag)); /* * Copy the physical address for the command buffer to the * SCU Task Context */ - dma_addr = scic_io_request_get_dma_addr(sds_request, - &sds_request->ssp.cmd); + dma_addr = scic_io_request_get_dma_addr(ireq, &ireq->ssp.cmd); task_context->command_iu_upper = upper_32_bits(dma_addr); task_context->command_iu_lower = lower_32_bits(dma_addr); @@ -268,8 +264,7 @@ static void scu_ssp_reqeust_construct_task_context( * Copy the physical address for the response buffer to the * SCU Task Context */ - dma_addr = scic_io_request_get_dma_addr(sds_request, - &sds_request->ssp.rsp); + dma_addr = scic_io_request_get_dma_addr(ireq, &ireq->ssp.rsp); task_context->response_iu_upper = upper_32_bits(dma_addr); task_context->response_iu_lower = lower_32_bits(dma_addr); @@ -280,13 +275,13 @@ static void scu_ssp_reqeust_construct_task_context( * @sci_req: * */ -static void scu_ssp_io_request_construct_task_context(struct scic_sds_request *sci_req, +static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq, enum dma_data_direction dir, u32 
len) { - struct scu_task_context *task_context = sci_req->tc; + struct scu_task_context *task_context = ireq->tc; - scu_ssp_reqeust_construct_task_context(sci_req, task_context); + scu_ssp_reqeust_construct_task_context(ireq, task_context); task_context->ssp_command_iu_length = sizeof(struct ssp_cmd_iu) / sizeof(u32); @@ -306,7 +301,7 @@ static void scu_ssp_io_request_construct_task_context(struct scic_sds_request *s task_context->transfer_length_bytes = len; if (task_context->transfer_length_bytes > 0) - scic_sds_request_build_sgl(sci_req); + scic_sds_request_build_sgl(ireq); } /** @@ -322,11 +317,11 @@ static void scu_ssp_io_request_construct_task_context(struct scic_sds_request *s * constructed. * */ -static void scu_ssp_task_request_construct_task_context(struct scic_sds_request *sci_req) +static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq) { - struct scu_task_context *task_context = sci_req->tc; + struct scu_task_context *task_context = ireq->tc; - scu_ssp_reqeust_construct_task_context(sci_req, task_context); + scu_ssp_reqeust_construct_task_context(ireq, task_context); task_context->control_frame = 1; task_context->priority = SCU_TASK_PRIORITY_HIGH; @@ -350,15 +345,15 @@ static void scu_ssp_task_request_construct_task_context(struct scic_sds_request * determine what is common for SSP/SMP/STP task context structures. */ static void scu_sata_reqeust_construct_task_context( - struct scic_sds_request *sci_req, + struct isci_request *ireq, struct scu_task_context *task_context) { dma_addr_t dma_addr; struct scic_sds_remote_device *target_device; struct scic_sds_port *target_port; - target_device = scic_sds_request_get_device(sci_req); - target_port = scic_sds_request_get_port(sci_req); + target_device = scic_sds_request_get_device(ireq); + target_port = scic_sds_request_get_port(ireq); /* Fill in the TC with the its required data */ task_context->abort = 0; @@ -374,7 +369,7 @@ static void scu_sata_reqeust_construct_task_context( task_context->context_type = SCU_TASK_CONTEXT_TYPE; task_context->remote_node_index = - scic_sds_remote_device_get_index(sci_req->target_device); + scic_sds_remote_device_get_index(ireq->target_device); task_context->command_code = 0; task_context->link_layer_control = 0; @@ -391,21 +386,21 @@ static void scu_sata_reqeust_construct_task_context( (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32); /* Set the first word of the H2D REG FIS */ - task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd; + task_context->type.words[0] = *(u32 *)&ireq->stp.cmd; - sci_req->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | + ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC | (scic_sds_controller_get_protocol_engine_group(controller) << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) | (scic_sds_port_get_index(target_port) << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) | - ISCI_TAG_TCI(sci_req->io_tag)); + ISCI_TAG_TCI(ireq->io_tag)); /* * Copy the physical address for the command buffer to the SCU Task * Context. We must offset the command buffer by 4 bytes because the * first 4 bytes are transfered in the body of the TC. 
*/ - dma_addr = scic_io_request_get_dma_addr(sci_req, - ((char *) &sci_req->stp.cmd) + + dma_addr = scic_io_request_get_dma_addr(ireq, + ((char *) &ireq->stp.cmd) + sizeof(u32)); task_context->command_iu_upper = upper_32_bits(dma_addr); @@ -416,11 +411,11 @@ static void scu_sata_reqeust_construct_task_context( task_context->response_iu_lower = 0; } -static void scu_stp_raw_request_construct_task_context(struct scic_sds_request *sci_req) +static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq) { - struct scu_task_context *task_context = sci_req->tc; + struct scu_task_context *task_context = ireq->tc; - scu_sata_reqeust_construct_task_context(sci_req, task_context); + scu_sata_reqeust_construct_task_context(ireq, task_context); task_context->control_frame = 0; task_context->priority = SCU_TASK_PRIORITY_NORMAL; @@ -429,20 +424,19 @@ static void scu_stp_raw_request_construct_task_context(struct scic_sds_request * task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32); } -static enum sci_status -scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req, - bool copy_rx_frame) +static enum sci_status scic_sds_stp_pio_request_construct(struct isci_request *ireq, + bool copy_rx_frame) { - struct isci_stp_request *stp_req = &sci_req->stp.req; + struct isci_stp_request *stp_req = &ireq->stp.req; - scu_stp_raw_request_construct_task_context(sci_req); + scu_stp_raw_request_construct_task_context(ireq); stp_req->status = 0; stp_req->sgl.offset = 0; stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A; if (copy_rx_frame) { - scic_sds_request_build_sgl(sci_req); + scic_sds_request_build_sgl(ireq); stp_req->sgl.index = 0; } else { /* The user does not want the data copied to the SGL buffer location */ @@ -464,18 +458,18 @@ scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req, * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method * returns an indication as to whether the construction was successful. 
*/ -static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req, +static void scic_sds_stp_optimized_request_construct(struct isci_request *ireq, u8 optimized_task_type, u32 len, enum dma_data_direction dir) { - struct scu_task_context *task_context = sci_req->tc; + struct scu_task_context *task_context = ireq->tc; /* Build the STP task context structure */ - scu_sata_reqeust_construct_task_context(sci_req, task_context); + scu_sata_reqeust_construct_task_context(ireq, task_context); /* Copy over the SGL elements */ - scic_sds_request_build_sgl(sci_req); + scic_sds_request_build_sgl(ireq); /* Copy over the number of bytes to be transfered */ task_context->transfer_length_bytes = len; @@ -500,13 +494,12 @@ static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sc static enum sci_status -scic_io_request_construct_sata(struct scic_sds_request *sci_req, +scic_io_request_construct_sata(struct isci_request *ireq, u32 len, enum dma_data_direction dir, bool copy) { enum sci_status status = SCI_SUCCESS; - struct isci_request *ireq = sci_req_to_ireq(sci_req); struct sas_task *task = isci_request_access_task(ireq); /* check for management protocols */ @@ -515,20 +508,20 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req, if (tmf->tmf_code == isci_tmf_sata_srst_high || tmf->tmf_code == isci_tmf_sata_srst_low) { - scu_stp_raw_request_construct_task_context(sci_req); + scu_stp_raw_request_construct_task_context(ireq); return SCI_SUCCESS; } else { - dev_err(scic_to_dev(sci_req->owning_controller), + dev_err(scic_to_dev(ireq->owning_controller), "%s: Request 0x%p received un-handled SAT " "management protocol 0x%x.\n", - __func__, sci_req, tmf->tmf_code); + __func__, ireq, tmf->tmf_code); return SCI_FAILURE; } } if (!sas_protocol_ata(task->task_proto)) { - dev_err(scic_to_dev(sci_req->owning_controller), + dev_err(scic_to_dev(ireq->owning_controller), "%s: Non-ATA protocol in SATA path: 0x%x\n", __func__, task->task_proto); @@ -538,13 +531,13 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req, /* non data */ if (task->data_dir == DMA_NONE) { - scu_stp_raw_request_construct_task_context(sci_req); + scu_stp_raw_request_construct_task_context(ireq); return SCI_SUCCESS; } /* NCQ */ if (task->ata_task.use_ncq) { - scic_sds_stp_optimized_request_construct(sci_req, + scic_sds_stp_optimized_request_construct(ireq, SCU_TASK_TYPE_FPDMAQ_READ, len, dir); return SCI_SUCCESS; @@ -552,74 +545,71 @@ scic_io_request_construct_sata(struct scic_sds_request *sci_req, /* DMA */ if (task->ata_task.dma_xfer) { - scic_sds_stp_optimized_request_construct(sci_req, + scic_sds_stp_optimized_request_construct(ireq, SCU_TASK_TYPE_DMA_IN, len, dir); return SCI_SUCCESS; } else /* PIO */ - return scic_sds_stp_pio_request_construct(sci_req, copy); + return scic_sds_stp_pio_request_construct(ireq, copy); return status; } -static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_request *sci_req) +static enum sci_status scic_io_request_construct_basic_ssp(struct isci_request *ireq) { - struct isci_request *ireq = sci_req_to_ireq(sci_req); struct sas_task *task = isci_request_access_task(ireq); - sci_req->protocol = SCIC_SSP_PROTOCOL; + ireq->protocol = SCIC_SSP_PROTOCOL; - scu_ssp_io_request_construct_task_context(sci_req, + scu_ssp_io_request_construct_task_context(ireq, task->data_dir, task->total_xfer_len); - scic_sds_io_request_build_ssp_command_iu(sci_req); + scic_sds_io_request_build_ssp_command_iu(ireq); - 
sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED); + sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); return SCI_SUCCESS; } enum sci_status scic_task_request_construct_ssp( - struct scic_sds_request *sci_req) + struct isci_request *ireq) { /* Construct the SSP Task SCU Task Context */ - scu_ssp_task_request_construct_task_context(sci_req); + scu_ssp_task_request_construct_task_context(ireq); /* Fill in the SSP Task IU */ - scic_sds_task_request_build_ssp_task_iu(sci_req); + scic_sds_task_request_build_ssp_task_iu(ireq); - sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED); + sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); return SCI_SUCCESS; } -static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req) +static enum sci_status scic_io_request_construct_basic_sata(struct isci_request *ireq) { enum sci_status status; bool copy = false; - struct isci_request *isci_request = sci_req_to_ireq(sci_req); - struct sas_task *task = isci_request_access_task(isci_request); + struct sas_task *task = isci_request_access_task(ireq); - sci_req->protocol = SCIC_STP_PROTOCOL; + ireq->protocol = SCIC_STP_PROTOCOL; copy = (task->data_dir == DMA_NONE) ? false : true; - status = scic_io_request_construct_sata(sci_req, + status = scic_io_request_construct_sata(ireq, task->total_xfer_len, task->data_dir, copy); if (status == SCI_SUCCESS) - sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED); + sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); return status; } -enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req) +enum sci_status scic_task_request_construct_sata(struct isci_request *ireq) { enum sci_status status = SCI_SUCCESS; - struct isci_request *ireq = sci_req_to_ireq(sci_req); /* check for management protocols */ if (ireq->ttype == tmf_task) { @@ -627,12 +617,12 @@ enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_re if (tmf->tmf_code == isci_tmf_sata_srst_high || tmf->tmf_code == isci_tmf_sata_srst_low) { - scu_stp_raw_request_construct_task_context(sci_req); + scu_stp_raw_request_construct_task_context(ireq); } else { - dev_err(scic_to_dev(sci_req->owning_controller), + dev_err(scic_to_dev(ireq->owning_controller), "%s: Request 0x%p received un-handled SAT " "Protocol 0x%x.\n", - __func__, sci_req, tmf->tmf_code); + __func__, ireq, tmf->tmf_code); return SCI_FAILURE; } @@ -640,7 +630,7 @@ enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_re if (status != SCI_SUCCESS) return status; - sci_change_state(&sci_req->sm, SCI_REQ_CONSTRUCTED); + sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED); return status; } @@ -650,9 +640,9 @@ enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_re * @sci_req: request that was terminated early */ #define SCU_TASK_CONTEXT_SRAM 0x200000 -static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req) +static u32 sci_req_tx_bytes(struct isci_request *ireq) { - struct scic_sds_controller *scic = sci_req->owning_controller; + struct scic_sds_controller *scic = ireq->owning_controller; u32 ret_val = 0; if (readl(&scic->smu_registers->address_modifier) == 0) { @@ -666,19 +656,19 @@ static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req) */ ret_val = readl(scu_reg_base + (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) + - ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(sci_req->io_tag))); + ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag))); } return ret_val; } -enum sci_status 
scic_sds_request_start(struct scic_sds_request *sci_req) +enum sci_status scic_sds_request_start(struct isci_request *ireq) { enum sci_base_request_states state; - struct scu_task_context *tc = sci_req->tc; - struct scic_sds_controller *scic = sci_req->owning_controller; + struct scu_task_context *tc = ireq->tc; + struct scic_sds_controller *scic = ireq->owning_controller; - state = sci_req->sm.current_state_id; + state = ireq->sm.current_state_id; if (state != SCI_REQ_CONSTRUCTED) { dev_warn(scic_to_dev(scic), "%s: SCIC IO Request requested to start while in wrong " @@ -686,19 +676,19 @@ enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req) return SCI_FAILURE_INVALID_STATE; } - tc->task_index = ISCI_TAG_TCI(sci_req->io_tag); + tc->task_index = ISCI_TAG_TCI(ireq->io_tag); switch (tc->protocol_type) { case SCU_TASK_CONTEXT_PROTOCOL_SMP: case SCU_TASK_CONTEXT_PROTOCOL_SSP: /* SSP/SMP Frame */ - tc->type.ssp.tag = sci_req->io_tag; + tc->type.ssp.tag = ireq->io_tag; tc->type.ssp.target_port_transfer_tag = 0xFFFF; break; case SCU_TASK_CONTEXT_PROTOCOL_STP: /* STP/SATA Frame - * tc->type.stp.ncq_tag = sci_req->ncq_tag; + * tc->type.stp.ncq_tag = ireq->ncq_tag; */ break; @@ -713,28 +703,28 @@ enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req) } /* Add to the post_context the io tag value */ - sci_req->post_context |= ISCI_TAG_TCI(sci_req->io_tag); + ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag); /* Everything is good go ahead and change state */ - sci_change_state(&sci_req->sm, SCI_REQ_STARTED); + sci_change_state(&ireq->sm, SCI_REQ_STARTED); return SCI_SUCCESS; } enum sci_status -scic_sds_io_request_terminate(struct scic_sds_request *sci_req) +scic_sds_io_request_terminate(struct isci_request *ireq) { enum sci_base_request_states state; - state = sci_req->sm.current_state_id; + state = ireq->sm.current_state_id; switch (state) { case SCI_REQ_CONSTRUCTED: - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); return SCI_SUCCESS; case SCI_REQ_STARTED: case SCI_REQ_TASK_WAIT_TC_COMP: @@ -751,54 +741,54 @@ scic_sds_io_request_terminate(struct scic_sds_request *sci_req) case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED: case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG: case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: - sci_change_state(&sci_req->sm, SCI_REQ_ABORTING); + sci_change_state(&ireq->sm, SCI_REQ_ABORTING); return SCI_SUCCESS; case SCI_REQ_TASK_WAIT_TC_RESP: - sci_change_state(&sci_req->sm, SCI_REQ_ABORTING); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_ABORTING); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); return SCI_SUCCESS; case SCI_REQ_ABORTING: - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); return SCI_SUCCESS; case SCI_REQ_COMPLETED: default: - dev_warn(scic_to_dev(sci_req->owning_controller), + dev_warn(scic_to_dev(ireq->owning_controller), "%s: SCIC IO Request requested to abort while in wrong " "state %d\n", __func__, - sci_req->sm.current_state_id); + ireq->sm.current_state_id); break; } return SCI_FAILURE_INVALID_STATE; } -enum sci_status scic_sds_request_complete(struct scic_sds_request *sci_req) +enum sci_status scic_sds_request_complete(struct isci_request *ireq) { enum sci_base_request_states state; - struct scic_sds_controller *scic = sci_req->owning_controller; + struct 
scic_sds_controller *scic = ireq->owning_controller; - state = sci_req->sm.current_state_id; + state = ireq->sm.current_state_id; if (WARN_ONCE(state != SCI_REQ_COMPLETED, "isci: request completion from wrong state (%d)\n", state)) return SCI_FAILURE_INVALID_STATE; - if (sci_req->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) + if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) scic_sds_controller_release_frame(scic, - sci_req->saved_rx_frame_index); + ireq->saved_rx_frame_index); /* XXX can we just stop the machine and remove the 'final' state? */ - sci_change_state(&sci_req->sm, SCI_REQ_FINAL); + sci_change_state(&ireq->sm, SCI_REQ_FINAL); return SCI_SUCCESS; } -enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req, +enum sci_status scic_sds_io_request_event_handler(struct isci_request *ireq, u32 event_code) { enum sci_base_request_states state; - struct scic_sds_controller *scic = sci_req->owning_controller; + struct scic_sds_controller *scic = ireq->owning_controller; - state = sci_req->sm.current_state_id; + state = ireq->sm.current_state_id; if (state != SCI_REQ_STP_PIO_DATA_IN) { dev_warn(scic_to_dev(scic), "%s: (%x) in wrong state %d\n", @@ -812,7 +802,7 @@ enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_r /* We are waiting for data and the SCU has R_ERR the data frame. * Go back to waiting for the D2H Register FIS */ - sci_change_state(&sci_req->sm, SCI_REQ_STP_PIO_WAIT_FRAME); + sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME); return SCI_SUCCESS; default: dev_err(scic_to_dev(scic), @@ -832,15 +822,14 @@ enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_r * @sci_req: This parameter specifies the request object for which to copy * the response data. */ -static void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req) +static void scic_sds_io_request_copy_response(struct isci_request *ireq) { void *resp_buf; u32 len; struct ssp_response_iu *ssp_response; - struct isci_request *ireq = sci_req_to_ireq(sci_req); struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq); - ssp_response = &sci_req->ssp.rsp; + ssp_response = &ireq->ssp.rsp; resp_buf = &isci_tmf->resp.resp_iu; @@ -852,7 +841,7 @@ static void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req) } static enum sci_status -request_started_state_tc_event(struct scic_sds_request *sci_req, +request_started_state_tc_event(struct isci_request *ireq, u32 completion_code) { struct ssp_response_iu *resp_iu; @@ -863,7 +852,7 @@ request_started_state_tc_event(struct scic_sds_request *sci_req, */ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); break; @@ -875,19 +864,19 @@ request_started_state_tc_event(struct scic_sds_request *sci_req, * truly a failed request or a good request that just got * completed early. 
*/ - struct ssp_response_iu *resp = &sci_req->ssp.rsp; + struct ssp_response_iu *resp = &ireq->ssp.rsp; ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); - sci_swab32_cpy(&sci_req->ssp.rsp, - &sci_req->ssp.rsp, + sci_swab32_cpy(&ireq->ssp.rsp, + &ireq->ssp.rsp, word_cnt); if (resp->status == 0) { - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS_IO_DONE_EARLY); } else { - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); } @@ -896,11 +885,11 @@ request_started_state_tc_event(struct scic_sds_request *sci_req, case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): { ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); - sci_swab32_cpy(&sci_req->ssp.rsp, - &sci_req->ssp.rsp, + sci_swab32_cpy(&ireq->ssp.rsp, + &ireq->ssp.rsp, word_cnt); - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); break; @@ -911,15 +900,15 @@ request_started_state_tc_event(struct scic_sds_request *sci_req, * guaranteed to be received before this completion status is * posted? */ - resp_iu = &sci_req->ssp.rsp; + resp_iu = &ireq->ssp.rsp; datapres = resp_iu->datapres; if (datapres == 1 || datapres == 2) { - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_CHECK_RESPONSE, SCI_FAILURE_IO_RESPONSE_VALID); } else - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); break; @@ -935,13 +924,13 @@ request_started_state_tc_event(struct scic_sds_request *sci_req, case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR): - if (sci_req->protocol == SCIC_STP_PROTOCOL) { - scic_sds_request_set_status(sci_req, + if (ireq->protocol == SCIC_STP_PROTOCOL) { + scic_sds_request_set_status(ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT, SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); } else { - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT, SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -959,7 +948,7 @@ request_started_state_tc_event(struct scic_sds_request *sci_req, case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED): case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED): - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT, SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED); @@ -983,7 +972,7 @@ request_started_state_tc_event(struct scic_sds_request *sci_req, case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND): default: scic_sds_request_set_status( - sci_req, + ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code) >> SCU_COMPLETION_TL_STATUS_SHIFT, SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); @@ -995,21 +984,21 @@ request_started_state_tc_event(struct scic_sds_request *sci_req, */ /* In all cases we will treat this as the completion of the IO req. 
*/ - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); return SCI_SUCCESS; } static enum sci_status -request_aborting_state_tc_event(struct scic_sds_request *sci_req, +request_aborting_state_tc_event(struct isci_request *ireq, u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT): - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_TASK_ABORT, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_TASK_ABORT, SCI_FAILURE_IO_TERMINATED); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; default: @@ -1022,15 +1011,15 @@ request_aborting_state_tc_event(struct scic_sds_request *sci_req, return SCI_SUCCESS; } -static enum sci_status ssp_task_request_await_tc_event(struct scic_sds_request *sci_req, +static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq, u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - sci_change_state(&sci_req->sm, SCI_REQ_TASK_WAIT_TC_RESP); + sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); break; case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO): /* Currently, the decision is to simply allow the task request @@ -1038,12 +1027,12 @@ static enum sci_status ssp_task_request_await_tc_event(struct scic_sds_request * * There is a potential for receiving multiple task responses if * we decide to send the task IU again. */ - dev_warn(scic_to_dev(sci_req->owning_controller), + dev_warn(scic_to_dev(ireq->owning_controller), "%s: TaskRequest:0x%p CompletionCode:%x - " - "ACK/NAK timeout\n", __func__, sci_req, + "ACK/NAK timeout\n", __func__, ireq, completion_code); - sci_change_state(&sci_req->sm, SCI_REQ_TASK_WAIT_TC_RESP); + sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP); break; default: /* @@ -1051,11 +1040,11 @@ static enum sci_status ssp_task_request_await_tc_event(struct scic_sds_request * * If a NAK was received, then it is up to the user to retry * the request. */ - scic_sds_request_set_status(sci_req, + scic_sds_request_set_status(ireq, SCU_NORMALIZE_COMPLETION_STATUS(completion_code), SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; } @@ -1063,7 +1052,7 @@ static enum sci_status ssp_task_request_await_tc_event(struct scic_sds_request * } static enum sci_status -smp_request_await_response_tc_event(struct scic_sds_request *sci_req, +smp_request_await_response_tc_event(struct isci_request *ireq, u32 completion_code) { switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { @@ -1072,10 +1061,10 @@ smp_request_await_response_tc_event(struct scic_sds_request *sci_req, * unexpected. but if the TC has success status, we * complete the IO anyway. */ - scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, + scic_sds_request_set_status(ireq, SCU_TASK_DONE_GOOD, SCI_SUCCESS); - sci_change_state(&sci_req->sm, SCI_REQ_COMPLETED); + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); break; case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR): @@ -1089,21 +1078,21 @@ smp_request_await_response_tc_event(struct scic_sds_request *sci_req, |
