
commit 77dfce076c (parent 4679026d78)
Author:    Cong Wang <amwang@redhat.com>
Date:      2011-11-25 23:14:23 +08:00
Committer: Cong Wang

    scsi: remove the second argument of k[un]map_atomic()

    Signed-off-by: Cong Wang <amwang@redhat.com>

19 changed files with 64 additions and 69 deletions
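
The conversion is mechanical: kmap_atomic() and kunmap_atomic() keep their behaviour (the mapping is still atomic, so callers must not sleep between the two calls) but lose the caller-supplied KM_* slot argument, and every call site simply drops it. A minimal sketch of the pattern repeated in the hunks below, assuming a driver-style copy into the first scatterlist element (the helper name and the KM_IRQ0 slot in the comment are illustrative, not taken from any one file):

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Illustrative only -- not part of this commit. */
static void copy_to_first_sg(struct scatterlist *sg, const void *src, size_t len)
{
        void *buffer;

        /* before: buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset; */
        buffer = kmap_atomic(sg_page(sg)) + sg->offset;
        memcpy(buffer, src, len);
        /* unmap via the page-aligned address, as the converted drivers do */
        kunmap_atomic(buffer - sg->offset);
}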

View File

@@ -1736,7 +1736,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
 (uint32_t ) cmd->cmnd[8];
 /* 4 bytes: Areca io control code */
 sg = scsi_sglist(cmd);
-buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
+buffer = kmap_atomic(sg_page(sg)) + sg->offset;
 if (scsi_sg_count(cmd) > 1) {
 retvalue = ARCMSR_MESSAGE_FAIL;
 goto message_out;
@@ -1985,7 +1985,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
 }
 message_out:
 sg = scsi_sglist(cmd);
-kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+kunmap_atomic(buffer - sg->offset);
 return retvalue;
 }
@@ -2035,11 +2035,11 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
 strncpy(&inqdata[32], "R001", 4); /* Product Revision */
 sg = scsi_sglist(cmd);
-buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
+buffer = kmap_atomic(sg_page(sg)) + sg->offset;
 memcpy(buffer, inqdata, sizeof(inqdata));
 sg = scsi_sglist(cmd);
-kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+kunmap_atomic(buffer - sg->offset);
 cmd->scsi_done(cmd);
 }

View File

@@ -322,8 +322,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
 return -ENOMEM;
 }
 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
-cp = kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ)
-+ frag->page_offset;
+cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
 } else {
 cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
 }
@@ -332,7 +331,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
 cp->fcoe_eof = eof;
 cp->fcoe_crc32 = cpu_to_le32(~crc);
 if (skb_is_nonlinear(skb)) {
-kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
+kunmap_atomic(cp);
 cp = NULL;
 }

View File

@@ -1956,12 +1956,11 @@ int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
 /* data fits in the skb's headroom */
 for (i = 0; i < tdata->nr_frags; i++, frag++) {
-char *src = kmap_atomic(frag->page,
-KM_SOFTIRQ0);
+char *src = kmap_atomic(frag->page);
 memcpy(dst, src+frag->offset, frag->size);
 dst += frag->size;
-kunmap_atomic(src, KM_SOFTIRQ0);
+kunmap_atomic(src);
 }
 if (padlen) {
 memset(dst, 0, padlen);

View File

@@ -1515,7 +1515,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
 return -ENOMEM;
 }
 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
-cp = kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ)
+cp = kmap_atomic(skb_frag_page(frag))
 + frag->page_offset;
 } else {
 cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
@@ -1526,7 +1526,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
 cp->fcoe_crc32 = cpu_to_le32(~crc);
 if (skb_is_nonlinear(skb)) {
-kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
+kunmap_atomic(cp);
 cp = NULL;
 }

View File

@@ -210,10 +210,9 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
 while (len > 0) {
 clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
 data = kmap_atomic(
-skb_frag_page(frag) + (off >> PAGE_SHIFT),
-KM_SKB_DATA_SOFTIRQ);
+skb_frag_page(frag) + (off >> PAGE_SHIFT));
 crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
-kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ);
+kunmap_atomic(data);
 off += clen;
 len -= clen;
 }

View File

@@ -2310,10 +2310,10 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
 return;
 }
 local_irq_save(flags);
-address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset;
+address = kmap_atomic(sg_page(sl)) + sl->offset;
 memcpy(address, buffer, cpnow);
 flush_dcache_page(sg_page(sl));
-kunmap_atomic(address, KM_BIO_SRC_IRQ);
+kunmap_atomic(address);
 local_irq_restore(flags);
 if (cpsum == cpcount)
 break;

View File

@@ -1511,14 +1511,14 @@ static int ips_is_passthru(struct scsi_cmnd *SC)
 /* kmap_atomic() ensures addressability of the user buffer.*/
 /* local_irq_save() protects the KM_IRQ0 address slot. */
 local_irq_save(flags);
-buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
+buffer = kmap_atomic(sg_page(sg)) + sg->offset;
 if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
 buffer[2] == 'P' && buffer[3] == 'P') {
-kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+kunmap_atomic(buffer - sg->offset);
 local_irq_restore(flags);
 return 1;
 }
-kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+kunmap_atomic(buffer - sg->offset);
 local_irq_restore(flags);
 }
 return 0;

View File

@@ -1304,9 +1304,9 @@ sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
 struct page *page = sg_page(sg);
 copy_len = min_t(int, total_len, sg_dma_len(sg));
-kaddr = kmap_atomic(page, KM_IRQ0);
+kaddr = kmap_atomic(page);
 memcpy(kaddr + sg->offset, src_addr, copy_len);
-kunmap_atomic(kaddr, KM_IRQ0);
+kunmap_atomic(kaddr);
 total_len -= copy_len;
 src_addr += copy_len;
 sg = sg_next(sg);
@@ -1654,7 +1654,7 @@ sci_io_request_frame_handler(struct isci_request *ireq,
 sci_unsolicited_frame_control_get_header(&ihost->uf_control,
 frame_index,
 &frame_header);
-kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
+kaddr = kmap_atomic(sg_page(sg));
 rsp = kaddr + sg->offset;
 sci_swab32_cpy(rsp, frame_header, 1);
@@ -1691,7 +1691,7 @@ sci_io_request_frame_handler(struct isci_request *ireq,
 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 }
-kunmap_atomic(kaddr, KM_IRQ0);
+kunmap_atomic(kaddr);
 sci_controller_release_frame(ihost, frame_index);
@@ -3023,10 +3023,10 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
 dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
 /* need to swab it back in case the command buffer is re-used */
-kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
+kaddr = kmap_atomic(sg_page(sg));
 smp_req = kaddr + sg->offset;
 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
-kunmap_atomic(kaddr, KM_IRQ0);
+kunmap_atomic(kaddr);
 break;
 }
 default:
@@ -3311,7 +3311,7 @@ sci_io_request_construct_smp(struct device *dev,
 u8 req_len;
 u32 cmd;
-kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
+kaddr = kmap_atomic(sg_page(sg));
 smp_req = kaddr + sg->offset;
 /*
 * Look at the SMP requests' header fields; for certain SAS 1.x SMP
@@ -3337,7 +3337,7 @@ sci_io_request_construct_smp(struct device *dev,
 req_len = smp_req->req_len;
 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
 cmd = *(u32 *) smp_req;
-kunmap_atomic(kaddr, KM_IRQ0);
+kunmap_atomic(kaddr);
 if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
 return SCI_FAILURE;

View File

@@ -485,11 +485,11 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
 copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
-&offset, KM_SOFTIRQ0, NULL);
+&offset, NULL);
 } else {
 crc = crc32(~0, (u8 *) fh, sizeof(*fh));
 copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
-&offset, KM_SOFTIRQ0, &crc);
+&offset, &crc);
 buf = fc_frame_payload_get(fp, 0);
 if (len % 4)
 crc = crc32(crc, buf + len, 4 - (len % 4));
@@ -650,10 +650,10 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
 * The scatterlist item may be bigger than PAGE_SIZE,
 * but we must not cross pages inside the kmap.
 */
-page_addr = kmap_atomic(page, KM_SOFTIRQ0);
+page_addr = kmap_atomic(page);
 memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
 sg_bytes);
-kunmap_atomic(page_addr, KM_SOFTIRQ0);
+kunmap_atomic(page_addr);
 data += sg_bytes;
 }
 offset += sg_bytes;

View File

@@ -105,14 +105,13 @@ module_exit(libfc_exit);
 * @sg: pointer to the pointer of the SG list.
 * @nents: pointer to the remaining number of entries in the SG list.
 * @offset: pointer to the current offset in the SG list.
-* @km_type: dedicated page table slot type for kmap_atomic.
 * @crc: pointer to the 32-bit crc value.
 * If crc is NULL, CRC is not calculated.
 */
 u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
 struct scatterlist *sg,
 u32 *nents, size_t *offset,
-enum km_type km_type, u32 *crc)
+u32 *crc)
 {
 size_t remaining = len;
 u32 copy_len = 0;
@@ -142,12 +141,11 @@ u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
 off = *offset + sg->offset;
 sg_bytes = min(sg_bytes,
 (size_t)(PAGE_SIZE - (off & ~PAGE_MASK)));
-page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT),
-km_type);
+page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT));
 if (crc)
 *crc = crc32(*crc, buf, sg_bytes);
 memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, sg_bytes);
-kunmap_atomic(page_addr, km_type);
+kunmap_atomic(page_addr);
 buf += sg_bytes;
 *offset += sg_bytes;
 remaining -= sg_bytes;

View File

@@ -134,6 +134,6 @@ extern void fc_fc4_conf_lport_params(struct fc_lport *, enum fc_fh_type);
 u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
 struct scatterlist *sg,
 u32 *nents, size_t *offset,
-enum km_type km_type, u32 *crc);
+u32 *crc);
 #endif /* _FC_LIBFC_H_ */
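
For reference, a hedged sketch of a caller after this prototype change; the wrapper name and its arguments are invented for illustration, and the real call sites are the fc_fcp.c hunks above and the fc_lport.c hunk below:

#include <linux/types.h>
#include <linux/scatterlist.h>
#include "fc_libfc.h"        /* declares fc_copy_buffer_to_sglist() */

/* Illustrative only -- mirrors the converted call sites, not code from the commit. */
static u32 copy_rx_payload(void *buf, size_t len, struct scatterlist *sg,
                           u32 nents, size_t start, bool need_crc)
{
        size_t offset = start;
        u32 crc = ~0;

        /* km_type is gone; pass NULL for crc when the frame CRC was already checked */
        return fc_copy_buffer_to_sglist(buf, len, sg, &nents, &offset,
                                        need_crc ? &crc : NULL);
}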

View File

@@ -1698,7 +1698,7 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
 job->reply->reply_payload_rcv_len +=
 fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
-&info->offset, KM_BIO_SRC_IRQ, NULL);
+&info->offset, NULL);
 if (fr_eof(fp) == FC_EOF_T &&
 (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==

View File

@@ -135,7 +135,7 @@ static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
 if (recv) {
 segment->atomic_mapped = true;
-segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
+segment->sg_mapped = kmap_atomic(sg_page(sg));
 } else {
 segment->atomic_mapped = false;
 /* the xmit path can sleep with the page mapped so use kmap */
@@ -149,7 +149,7 @@ void iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
 {
 if (segment->sg_mapped) {
 if (segment->atomic_mapped)
-kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0);
+kunmap_atomic(segment->sg_mapped);
 else
 kunmap(sg_page(segment->sg));
 segment->sg_mapped = NULL;

View File

@@ -246,9 +246,9 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
 }
 local_irq_disable();
-buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio);
+buf = kmap_atomic(bio_page(req->bio));
 memcpy(req_data, buf, blk_rq_bytes(req));
-kunmap_atomic(buf - bio_offset(req->bio), KM_USER0);
+kunmap_atomic(buf - bio_offset(req->bio));
 local_irq_enable();
 if (req_data[0] != SMP_REQUEST)
@@ -361,10 +361,10 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
 }
 local_irq_disable();
-buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio);
+buf = kmap_atomic(bio_page(rsp->bio));
 memcpy(buf, resp_data, blk_rq_bytes(rsp));
 flush_kernel_dcache_page(bio_page(rsp->bio));
-kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0);
+kunmap_atomic(buf - bio_offset(rsp->bio));
 local_irq_enable();
 out:

View File

@@ -670,10 +670,10 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
 struct scatterlist *sg;
 sg = scsi_sglist(cmd);
-buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
+buf = kmap_atomic(sg_page(sg)) + sg->offset;
 memset(buf, 0, cmd->cmnd[4]);
-kunmap_atomic(buf - sg->offset, KM_IRQ0);
+kunmap_atomic(buf - sg->offset);
 cmd->result = (DID_OK << 16);
 cmd->scsi_done(cmd);

View File

@@ -1885,11 +1885,11 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
 case SAS_PROTOCOL_SMP: {
 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
 tstat->stat = SAM_STAT_GOOD;
-to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
+to = kmap_atomic(sg_page(sg_resp));
 memcpy(to + sg_resp->offset,
 slot->response + sizeof(struct mvs_err_info),
 sg_dma_len(sg_resp));
-kunmap_atomic(to, KM_IRQ0);
+kunmap_atomic(to);
 break;
 }

View File

@@ -1778,7 +1778,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
 scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
 int len = min(psgl->length, resid);
-paddr = kmap_atomic(sg_page(psgl), KM_IRQ0) + psgl->offset;
+paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
 memcpy(paddr, dif_storep + dif_offset(sector), len);
 sector += len >> 3;
@@ -1788,7 +1788,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
 sector = do_div(tmp_sec, sdebug_store_sectors);
 }
 resid -= len;
-kunmap_atomic(paddr, KM_IRQ0);
+kunmap_atomic(paddr);
 }
 dix_reads++;
@@ -1881,12 +1881,12 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
 BUG_ON(scsi_sg_count(SCpnt) == 0);
 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
-paddr = kmap_atomic(sg_page(psgl), KM_IRQ1) + psgl->offset;
+paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
 ppage_offset = 0;
 /* For each data page */
 scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
-daddr = kmap_atomic(sg_page(dsgl), KM_IRQ0) + dsgl->offset;
+daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset;
 /* For each sector-sized chunk in data page */
 for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) {
@@ -1895,10 +1895,10 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
 * protection page advance to the next one
 */
 if (ppage_offset >= psgl->length) {
-kunmap_atomic(paddr, KM_IRQ1);
+kunmap_atomic(paddr);
 psgl = sg_next(psgl);
 BUG_ON(psgl == NULL);
-paddr = kmap_atomic(sg_page(psgl), KM_IRQ1)
+paddr = kmap_atomic(sg_page(psgl))
 + psgl->offset;
 ppage_offset = 0;
 }
@@ -1971,10 +1971,10 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
 ppage_offset += sizeof(struct sd_dif_tuple);
 }
-kunmap_atomic(daddr, KM_IRQ0);
+kunmap_atomic(daddr);
 }
-kunmap_atomic(paddr, KM_IRQ1);
+kunmap_atomic(paddr);
 dix_writes++;
@@ -1982,8 +1982,8 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
 out:
 dif_errors++;
-kunmap_atomic(daddr, KM_IRQ0);
-kunmap_atomic(paddr, KM_IRQ1);
+kunmap_atomic(daddr);
+kunmap_atomic(paddr);
 return ret;
 }
@@ -2303,7 +2303,7 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
 offset = 0;
 for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
-kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0);
+kaddr = (unsigned char *)kmap_atomic(sg_page(sg));
 if (!kaddr)
 goto out;
@@ -2311,7 +2311,7 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
 *(kaddr + sg->offset + j) ^= *(buf + offset + j);
 offset += sg->length;
-kunmap_atomic(kaddr, KM_USER0);
+kunmap_atomic(kaddr);
 }
 ret = 0;
 out:

View File

@@ -2567,7 +2567,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
 if (*len > sg_len)
 *len = sg_len;
-return kmap_atomic(page, KM_BIO_SRC_IRQ);
+return kmap_atomic(page);
 }
 EXPORT_SYMBOL(scsi_kmap_atomic_sg);
@@ -2577,6 +2577,6 @@ EXPORT_SYMBOL(scsi_kmap_atomic_sg);
 */
 void scsi_kunmap_atomic_sg(void *virt)
 {
-kunmap_atomic(virt, KM_BIO_SRC_IRQ);
+kunmap_atomic(virt);
 }
 EXPORT_SYMBOL(scsi_kunmap_atomic_sg);

View File

@@ -392,7 +392,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
 virt = bio->bi_integrity->bip_sector & 0xffffffff;
 bip_for_each_vec(iv, bio->bi_integrity, i) {
-sdt = kmap_atomic(iv->bv_page, KM_USER0)
+sdt = kmap_atomic(iv->bv_page)
 + iv->bv_offset;
 for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
@@ -405,7 +405,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
 phys++;
 }
-kunmap_atomic(sdt, KM_USER0);
+kunmap_atomic(sdt);
 }
 bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY);
@@ -414,7 +414,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
 return 0;
 error:
-kunmap_atomic(sdt, KM_USER0);
+kunmap_atomic(sdt);
 sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u, app %4x\n",
 __func__, virt, phys, be32_to_cpu(sdt->ref_tag),
 be16_to_cpu(sdt->app_tag));
@@ -453,13 +453,13 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
 virt = bio->bi_integrity->bip_sector & 0xffffffff;
 bip_for_each_vec(iv, bio->bi_integrity, i) {
-sdt = kmap_atomic(iv->bv_page, KM_USER0)
+sdt = kmap_atomic(iv->bv_page)
 + iv->bv_offset;
 for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
 if (sectors == 0) {
-kunmap_atomic(sdt, KM_USER0);
+kunmap_atomic(sdt);
 return;
 }
@@ -474,7 +474,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
 sectors--;
 }
-kunmap_atomic(sdt, KM_USER0);
+kunmap_atomic(sdt);
 }
 }
 }