
ath9k: Remove the useless do..while loops

The do { } while (0) wrappers in these functions are unnecessary
constructs. This patch removes them from both the RX and TX init
routines.

Signed-off-by: Sujith <Sujith.Manoharan@atheros.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Authored by Sujith on 2009-03-30 15:28:45 +05:30, committed by John W. Linville
parent 4658b98517
commit 797fe5cbef
3 changed files with 58 additions and 69 deletions
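
The construct being removed is a do { ... } while (0) wrapper whose only purpose is to let break jump out to common error handling; the patch replaces it with the kernel's usual goto-to-an-error-label form. Below is a minimal standalone sketch of the two shapes, using hypothetical alloc_a()/alloc_b()/cleanup() helpers rather than the real ath9k calls:

#include <stdio.h>

/* Hypothetical helpers standing in for ath_descdma_setup() and friends. */
static int alloc_a(void) { return 0; }          /* succeeds */
static int alloc_b(void) { return -12; }        /* simulates -ENOMEM */
static void cleanup(void) { puts("cleanup"); }

/* Old shape: the do { ... } while (0) exists only so that "break" can
 * reach the common error handling after the loop. */
static int init_old(void)
{
        int error = 0;

        do {
                error = alloc_a();
                if (error)
                        break;
                error = alloc_b();
                if (error)
                        break;
        } while (0);

        if (error)
                cleanup();
        return error;
}

/* New shape: straight-line code that jumps to an error label, which is
 * the form this patch switches the RX/TX init routines to. */
static int init_new(void)
{
        int error;

        error = alloc_a();
        if (error)
                goto err;
        error = alloc_b();
        if (error)
                goto err;
err:
        if (error)
                cleanup();
        return error;
}

int main(void)
{
        printf("old: %d, new: %d\n", init_old(), init_new());
        return 0;
}

On success the new form simply falls through to the err: label with error still zero, so cleanup() is skipped; this mirrors how the patched ath_rx_init() and ath_tx_init() below fall through to their err: labels.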


@@ -340,7 +340,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an);
 void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an);
 void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq);
 int ath_tx_init(struct ath_softc *sc, int nbufs);
-int ath_tx_cleanup(struct ath_softc *sc);
+void ath_tx_cleanup(struct ath_softc *sc);
 struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb);
 int ath_txq_update(struct ath_softc *sc, int qnum,
                    struct ath9k_tx_queue_info *q);
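
The prototype change above pairs with the last hunk below: ath_tx_cleanup() only ever returned an unconditional 0, so the patch makes it return void and drops the return 0;.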


@@ -283,54 +283,51 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
         struct ath_buf *bf;
         int error = 0;

-        do {
-                spin_lock_init(&sc->rx.rxflushlock);
-                sc->sc_flags &= ~SC_OP_RXFLUSH;
-                spin_lock_init(&sc->rx.rxbuflock);
+        spin_lock_init(&sc->rx.rxflushlock);
+        sc->sc_flags &= ~SC_OP_RXFLUSH;
+        spin_lock_init(&sc->rx.rxbuflock);

-                sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
-                                         min(sc->cachelsz,
-                                             (u16)64));
+        sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
+                                 min(sc->cachelsz, (u16)64));

-                DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
-                        sc->cachelsz, sc->rx.bufsize);
+        DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
+                sc->cachelsz, sc->rx.bufsize);

-                /* Initialize rx descriptors */
+        /* Initialize rx descriptors */

-                error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
-                                          "rx", nbufs, 1);
-                if (error != 0) {
+        error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
+                                  "rx", nbufs, 1);
+        if (error != 0) {
+                DPRINTF(sc, ATH_DBG_FATAL,
+                        "failed to allocate rx descriptors: %d\n", error);
+                goto err;
+        }
+
+        list_for_each_entry(bf, &sc->rx.rxbuf, list) {
+                skb = ath_rxbuf_alloc(sc, sc->rx.bufsize, GFP_KERNEL);
+                if (skb == NULL) {
+                        error = -ENOMEM;
+                        goto err;
+                }
+
+                bf->bf_mpdu = skb;
+                bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
+                                                 sc->rx.bufsize,
+                                                 DMA_FROM_DEVICE);
+                if (unlikely(dma_mapping_error(sc->dev,
+                                               bf->bf_buf_addr))) {
+                        dev_kfree_skb_any(skb);
+                        bf->bf_mpdu = NULL;
                         DPRINTF(sc, ATH_DBG_FATAL,
-                                "failed to allocate rx descriptors: %d\n", error);
-                        break;
+                                "dma_mapping_error() on RX init\n");
+                        error = -ENOMEM;
+                        goto err;
                 }
+                bf->bf_dmacontext = bf->bf_buf_addr;
+        }
+        sc->rx.rxlink = NULL;

-                list_for_each_entry(bf, &sc->rx.rxbuf, list) {
-                        skb = ath_rxbuf_alloc(sc, sc->rx.bufsize, GFP_KERNEL);
-                        if (skb == NULL) {
-                                error = -ENOMEM;
-                                break;
-                        }
-
-                        bf->bf_mpdu = skb;
-                        bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
-                                                         sc->rx.bufsize,
-                                                         DMA_FROM_DEVICE);
-                        if (unlikely(dma_mapping_error(sc->dev,
-                                                       bf->bf_buf_addr))) {
-                                dev_kfree_skb_any(skb);
-                                bf->bf_mpdu = NULL;
-                                DPRINTF(sc, ATH_DBG_FATAL,
-                                        "dma_mapping_error() on RX init\n");
-                                error = -ENOMEM;
-                                break;
-                        }
-                        bf->bf_dmacontext = bf->bf_buf_addr;
-                }
-                sc->rx.rxlink = NULL;
-
-        } while (0);
-
+err:
         if (error)
                 ath_rx_cleanup(sc);
@@ -345,10 +342,8 @@ void ath_rx_cleanup(struct ath_softc *sc)
         list_for_each_entry(bf, &sc->rx.rxbuf, list) {
                 skb = bf->bf_mpdu;
                 if (skb) {
-                        dma_unmap_single(sc->dev,
-                                         bf->bf_buf_addr,
-                                         sc->rx.bufsize,
-                                         DMA_FROM_DEVICE);
+                        dma_unmap_single(sc->dev, bf->bf_buf_addr,
+                                         sc->rx.bufsize, DMA_FROM_DEVICE);
                         dev_kfree_skb(skb);
                 }
         }


@@ -2047,44 +2047,38 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
 {
         int error = 0;

-        do {
-                spin_lock_init(&sc->tx.txbuflock);
+        spin_lock_init(&sc->tx.txbuflock);

-                error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
-                                          "tx", nbufs, 1);
-                if (error != 0) {
-                        DPRINTF(sc, ATH_DBG_FATAL,
-                                "Failed to allocate tx descriptors: %d\n",
-                                error);
-                        break;
-                }
+        error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
+                                  "tx", nbufs, 1);
+        if (error != 0) {
+                DPRINTF(sc, ATH_DBG_FATAL,
+                        "Failed to allocate tx descriptors: %d\n", error);
+                goto err;
+        }

-                error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
-                                          "beacon", ATH_BCBUF, 1);
-                if (error != 0) {
-                        DPRINTF(sc, ATH_DBG_FATAL,
-                                "Failed to allocate beacon descriptors: %d\n",
-                                error);
-                        break;
-                }
-        } while (0);
+        error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
+                                  "beacon", ATH_BCBUF, 1);
+        if (error != 0) {
+                DPRINTF(sc, ATH_DBG_FATAL,
+                        "Failed to allocate beacon descriptors: %d\n", error);
+                goto err;
+        }

+err:
         if (error != 0)
                 ath_tx_cleanup(sc);

         return error;
 }

-int ath_tx_cleanup(struct ath_softc *sc)
+void ath_tx_cleanup(struct ath_softc *sc)
 {
         if (sc->beacon.bdma.dd_desc_len != 0)
                 ath_descdma_cleanup(sc, &sc->beacon.bdma, &sc->beacon.bbuf);

         if (sc->tx.txdma.dd_desc_len != 0)
                 ath_descdma_cleanup(sc, &sc->tx.txdma, &sc->tx.txbuf);
-
-        return 0;
 }

 void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)