Diffstat (limited to 'drivers/staging/cxt1e1')
-rw-r--r--  drivers/staging/cxt1e1/functions.c              18
-rw-r--r--  drivers/staging/cxt1e1/hwprobe.c                16
-rw-r--r--  drivers/staging/cxt1e1/linux.c                  38
-rw-r--r--  drivers/staging/cxt1e1/musycc.c               2678
-rw-r--r--  drivers/staging/cxt1e1/pmc93x6_eeprom.c        550
-rw-r--r--  drivers/staging/cxt1e1/pmcc4_drv.c              47
-rw-r--r--  drivers/staging/cxt1e1/pmcc4_private.h           2
-rw-r--r--  drivers/staging/cxt1e1/sbecom_inline_linux.h    23
-rw-r--r--  drivers/staging/cxt1e1/sbecrc.c                  5
-rw-r--r--  drivers/staging/cxt1e1/sbeproc.c                 2
10 files changed, 1599 insertions, 1780 deletions
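Much of this patch replaces the driver's OS_kmalloc()/OS_kfree() wrappers
with plain kzalloc()/kfree() and keeps ioremap() results as pointers
(checked against and cleared to NULL) instead of casting them through
unsigned long. A minimal sketch of the resulting pattern, using a
hypothetical foo_priv structure and foo_setup()/foo_teardown() helpers
purely for illustration (not part of the patch):

    /* Illustrative only; names below do not exist in the driver. */
    struct foo_priv {
        void __iomem *base;     /* was stored as unsigned long */
    };

    static struct foo_priv *foo_setup(unsigned long phys, unsigned long len)
    {
        struct foo_priv *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);  /* was OS_kmalloc() */
        if (!priv)
            return NULL;

        priv->base = ioremap(phys, len);            /* no cast needed */
        if (!priv->base) {
            kfree(priv);                            /* was OS_kfree() */
            return NULL;
        }
        return priv;
    }

    static void foo_teardown(struct foo_priv *priv)
    {
        iounmap(priv->base);    /* pointer, not (void *)(unsigned long) */
        priv->base = NULL;
        kfree(priv);
    }
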
diff --git a/drivers/staging/cxt1e1/functions.c b/drivers/staging/cxt1e1/functions.c
index ee9d39bbd25..65b6fc36edf 100644
--- a/drivers/staging/cxt1e1/functions.c
+++ b/drivers/staging/cxt1e1/functions.c
@@ -24,24 +24,6 @@
#include "libsbew.h"
#include "pmcc4.h"
-#if defined(CONFIG_SBE_HDLC_V7) || defined(CONFIG_SBE_WAN256T3_HDLC_V7) || \
-defined(CONFIG_SBE_HDLC_V7_MODULE) || \
-defined(CONFIG_SBE_WAN256T3_HDLC_V7_MODULE)
-#define _v7_hdlc_ 1
-#else
-#define _v7_hdlc_ 0
-#endif
-
-#if _v7_hdlc_
-#define V7(x) (x ## _v7)
-extern int hdlc_netif_rx_v7(hdlc_device *, struct sk_buff *);
-extern int register_hdlc_device_v7(hdlc_device *);
-extern int unregister_hdlc_device_v7(hdlc_device *);
-
-#else
-#define V7(x) x
-#endif
-
#ifndef USE_MAX_INT_DELAY
static int dummy = 0;
diff --git a/drivers/staging/cxt1e1/hwprobe.c b/drivers/staging/cxt1e1/hwprobe.c
index 9b4198b1e63..4fa27c8931b 100644
--- a/drivers/staging/cxt1e1/hwprobe.c
+++ b/drivers/staging/cxt1e1/hwprobe.c
@@ -159,8 +159,8 @@ prep_hdw_info(void)
hi->ndev = NULL;
hi->addr[0] = 0L;
hi->addr[1] = 0L;
- hi->addr_mapped[0] = 0L;
- hi->addr_mapped[1] = 0L;
+ hi->addr_mapped[0] = NULL;
+ hi->addr_mapped[1] = NULL;
}
}
@@ -174,14 +174,14 @@ cleanup_ioremap(void)
if (hi->pci_slot == 0xff)
break;
if (hi->addr_mapped[0]) {
- iounmap((void *)(hi->addr_mapped[0]));
+ iounmap(hi->addr_mapped[0]);
release_mem_region((long) hi->addr[0], hi->len[0]);
- hi->addr_mapped[0] = 0;
+ hi->addr_mapped[0] = NULL;
}
if (hi->addr_mapped[1]) {
- iounmap((void *)(hi->addr_mapped[1]));
+ iounmap(hi->addr_mapped[1]);
release_mem_region((long) hi->addr[1], hi->len[1]);
- hi->addr_mapped[1] = 0;
+ hi->addr_mapped[1] = NULL;
}
}
}
@@ -205,7 +205,7 @@ cleanup_devs(void)
#ifdef CONFIG_SBE_PMCC4_NCOMM
free_irq(hi->pdev[1]->irq, hi->ndev);
#endif
- OS_kfree(hi->ndev);
+ kfree(hi->ndev);
}
}
@@ -329,7 +329,7 @@ c4hw_attach_all(void)
return -ENOMEM;
}
- hi->addr_mapped[j] = (unsigned long)ioremap(hi->addr[j], hi->len[j]);
+ hi->addr_mapped[j] = ioremap(hi->addr[j], hi->len[j]);
if (!hi->addr_mapped[j]) {
pr_warning("%s: ioremap fails, addr=0x%lx, len=0x%lx ?\n",
hi->devname, hi->addr[j], hi->len[j]);
diff --git a/drivers/staging/cxt1e1/linux.c b/drivers/staging/cxt1e1/linux.c
index b02f5ade666..09f3d5ca75a 100644
--- a/drivers/staging/cxt1e1/linux.c
+++ b/drivers/staging/cxt1e1/linux.c
@@ -60,7 +60,6 @@ status_t c4_chan_work_init(mpi_t *, mch_t *);
void musycc_wq_chan_restart(void *);
status_t __init c4_init(ci_t *, u_char *, u_char *);
status_t __init c4_init2(ci_t *);
-ci_t *__init c4_new(void *);
int __init c4hw_attach_all(void);
void __init hdw_sn_get(hdw_info_t *, int);
@@ -84,23 +83,6 @@ int musycc_start_xmit(ci_t *, int, void *);
extern ci_t *CI;
extern struct s_hdw_info hdw_info[];
-#if defined(CONFIG_SBE_HDLC_V7) || defined(CONFIG_SBE_WAN256T3_HDLC_V7) || \
- defined(CONFIG_SBE_HDLC_V7_MODULE) || defined(CONFIG_SBE_WAN256T3_HDLC_V7_MODULE)
-#define _v7_hdlc_ 1
-#else
-#define _v7_hdlc_ 0
-#endif
-
-#if _v7_hdlc_
-#define V7(x) (x ## _v7)
-extern int hdlc_netif_rx_v7(hdlc_device *, struct sk_buff *);
-extern int register_hdlc_device_v7(hdlc_device *);
-extern int unregister_hdlc_device_v7(hdlc_device *);
-
-#else
-#define V7(x) x
-#endif
-
int error_flag; /* module load error reporting */
int cxt1e1_log_level = LOG_ERROR;
static int log_level_default = LOG_ERROR;
@@ -418,7 +400,7 @@ create_chan(struct net_device *ndev, ci_t *ci,
struct c4_priv *priv;
/* allocate then fill in private data structure */
- priv = OS_kmalloc(sizeof(struct c4_priv));
+ priv = kzalloc(sizeof(struct c4_priv), GFP_KERNEL);
if (!priv) {
pr_warning("%s: no memory for net_device !\n",
ci->devname);
@@ -428,7 +410,7 @@ create_chan(struct net_device *ndev, ci_t *ci,
if (!dev) {
pr_warning("%s: no memory for hdlc_device !\n",
ci->devname);
- OS_kfree(priv);
+ kfree(priv);
return NULL;
}
priv->ci = ci;
@@ -972,8 +954,8 @@ c4_add_dev(hdw_info_t *hi, int brdno, unsigned long f0, unsigned long f1,
if (register_netdev(ndev) ||
(c4_init(ci, (u_char *) f0, (u_char *) f1) != SBE_DRVR_SUCCESS)) {
- OS_kfree(netdev_priv(ndev));
- OS_kfree(ndev);
+ kfree(netdev_priv(ndev));
+ kfree(ndev);
error_flag = -ENODEV;
return NULL;
}
@@ -998,8 +980,8 @@ c4_add_dev(hdw_info_t *hi, int brdno, unsigned long f0, unsigned long f1,
pr_warning("%s: MUSYCC could not get irq: %d\n",
ndev->name, irq0);
unregister_netdev(ndev);
- OS_kfree(netdev_priv(ndev));
- OS_kfree(ndev);
+ kfree(netdev_priv(ndev));
+ kfree(ndev);
error_flag = -EIO;
return NULL;
}
@@ -1008,8 +990,8 @@ c4_add_dev(hdw_info_t *hi, int brdno, unsigned long f0, unsigned long f1,
pr_warning("%s: EBUS could not get irq: %d\n", hi->devname, irq1);
unregister_netdev(ndev);
free_irq(irq0, ndev);
- OS_kfree(netdev_priv(ndev));
- OS_kfree(ndev);
+ kfree(netdev_priv(ndev));
+ kfree(ndev);
error_flag = -EIO;
return NULL;
}
@@ -1068,8 +1050,8 @@ c4_add_dev(hdw_info_t *hi, int brdno, unsigned long f0, unsigned long f1,
unregister_netdev(ndev);
free_irq(irq1, ndev);
free_irq(irq0, ndev);
- OS_kfree(netdev_priv(ndev));
- OS_kfree(ndev);
+ kfree(netdev_priv(ndev));
+ kfree(ndev);
/* failure, error_flag is set */
return NULL;
}
diff --git a/drivers/staging/cxt1e1/musycc.c b/drivers/staging/cxt1e1/musycc.c
index 7b4f6f2108e..0bcbd8a3fc8 100644
--- a/drivers/staging/cxt1e1/musycc.c
+++ b/drivers/staging/cxt1e1/musycc.c
@@ -1,5 +1,5 @@
-static unsigned int max_intcnt = 0;
-static unsigned int max_bh = 0;
+static unsigned int max_intcnt;
+static unsigned int max_bh;
/*-----------------------------------------------------------------------------
* musycc.c -
@@ -64,132 +64,134 @@ void musycc_update_timeslots(mpi_t *);
/*******************************************************************/
-#if 1
static int
musycc_dump_rxbuffer_ring(mch_t *ch, int lockit)
{
- struct mdesc *m;
- unsigned long flags = 0;
+ struct mdesc *m;
+ unsigned long flags = 0;
- u_int32_t status;
- int n;
+ u_int32_t status;
+ int n;
- if (lockit)
- spin_lock_irqsave(&ch->ch_rxlock, flags);
- if (ch->rxd_num == 0)
- pr_info(" ZERO receive buffers allocated for this channel.");
- else {
- FLUSH_MEM_READ();
- m = &ch->mdr[ch->rxix_irq_srv];
- for (n = ch->rxd_num; n; n--) {
- status = le32_to_cpu(m->status);
- {
- pr_info("%c %08lx[%2d]: sts %08x (%c%c%c%c:%d.) Data [%08x] Next [%08x]\n",
- (m == &ch->mdr[ch->rxix_irq_srv]) ? 'F' : ' ',
- (unsigned long) m, n,
- status,
- m->data ? (status & HOST_RX_OWNED ? 'H' : 'M') : '-',
- status & POLL_DISABLED ? 'P' : '-',
- status & EOBIRQ_ENABLE ? 'b' : '-',
- status & EOMIRQ_ENABLE ? 'm' : '-',
- status & LENGTH_MASK,
- le32_to_cpu(m->data), le32_to_cpu(m->next));
#ifdef RLD_DUMP_BUFDATA
- {
- u_int32_t *dp;
- int len = status & LENGTH_MASK;
+ u_int32_t *dp;
+ int len = 0;
+#endif
+ if (lockit)
+ spin_lock_irqsave(&ch->ch_rxlock, flags);
+ if (ch->rxd_num == 0)
+ pr_info(" ZERO receive buffers allocated for this channel.");
+ else {
+ FLUSH_MEM_READ();
+ m = &ch->mdr[ch->rxix_irq_srv];
+ for (n = ch->rxd_num; n; n--) {
+ status = le32_to_cpu(m->status);
+ pr_info("%c %08lx[%2d]: sts %08x (%c%c%c%c:%d.) Data [%08x] Next [%08x]\n",
+ (m == &ch->mdr[ch->rxix_irq_srv]) ? 'F' : ' ',
+ (unsigned long) m, n,
+ status,
+ m->data ? (status & HOST_RX_OWNED ? 'H' : 'M') : '-',
+ status & POLL_DISABLED ? 'P' : '-',
+ status & EOBIRQ_ENABLE ? 'b' : '-',
+ status & EOMIRQ_ENABLE ? 'm' : '-',
+ status & LENGTH_MASK,
+ le32_to_cpu(m->data), le32_to_cpu(m->next));
+#ifdef RLD_DUMP_BUFDATA
+ len = status & LENGTH_MASK;
#if 1
- if (m->data && (status & HOST_RX_OWNED))
+ if (m->data && (status & HOST_RX_OWNED))
#else
- if (m->data) /* always dump regardless of valid RX
- * data */
+ /* always dump regardless of valid RX data */
+ if (m->data)
#endif
- {
- dp = (u_int32_t *) OS_phystov((void *) (le32_to_cpu(m->data)));
- if (len >= 0x10)
- pr_info(" %x[%x]: %08X %08X %08X %08x\n", (u_int32_t) dp, len,
- *dp, *(dp + 1), *(dp + 2), *(dp + 3));
- else if (len >= 0x08)
- pr_info(" %x[%x]: %08X %08X\n", (u_int32_t) dp, len,
- *dp, *(dp + 1));
- else
- pr_info(" %x[%x]: %08X\n", (u_int32_t) dp, len, *dp);
- }
- }
+ {
+ dp = (u_int32_t *)OS_phystov((void *)(le32_to_cpu(m->data)));
+ if (len >= 0x10)
+ pr_info(" %x[%x]: %08X %08X %08X %08x\n",
+ (u_int32_t)dp, len,
+ *dp, *(dp + 1),
+ *(dp + 2), *(dp + 3));
+ else if (len >= 0x08)
+ pr_info(" %x[%x]: %08X %08X\n",
+ (u_int32_t)dp, len,
+ *dp, *(dp + 1));
+ else
+ pr_info(" %x[%x]: %08X\n",
+ (u_int32_t)dp,
+ len, *dp);
+ }
#endif
- }
- m = m->snext;
+ m = m->snext;
+ }
}
- } /* -for- */
- pr_info("\n");
+ pr_info("\n");
- if (lockit)
- spin_unlock_irqrestore(&ch->ch_rxlock, flags);
- return 0;
+ if (lockit)
+ spin_unlock_irqrestore(&ch->ch_rxlock, flags);
+ return 0;
}
-#endif
-#if 1
static int
musycc_dump_txbuffer_ring(mch_t *ch, int lockit)
{
- struct mdesc *m;
- unsigned long flags = 0;
- u_int32_t status;
- int n;
-
- if (lockit)
- spin_lock_irqsave(&ch->ch_txlock, flags);
- if (ch->txd_num == 0)
- pr_info(" ZERO transmit buffers allocated for this channel.");
- else {
- FLUSH_MEM_READ();
- m = ch->txd_irq_srv;
- for (n = ch->txd_num; n; n--) {
- status = le32_to_cpu(m->status);
- {
- pr_info("%c%c %08lx[%2d]: sts %08x (%c%c%c%c:%d.) Data [%08x] Next [%08x]\n",
- (m == ch->txd_usr_add) ? 'F' : ' ',
- (m == ch->txd_irq_srv) ? 'L' : ' ',
- (unsigned long) m, n,
- status,
- m->data ? (status & MUSYCC_TX_OWNED ? 'M' : 'H') : '-',
- status & POLL_DISABLED ? 'P' : '-',
- status & EOBIRQ_ENABLE ? 'b' : '-',
- status & EOMIRQ_ENABLE ? 'm' : '-',
- status & LENGTH_MASK,
- le32_to_cpu(m->data), le32_to_cpu(m->next));
+ struct mdesc *m;
+ unsigned long flags = 0;
+ u_int32_t status;
+ int n;
#ifdef RLD_DUMP_BUFDATA
- {
- u_int32_t *dp;
- int len = status & LENGTH_MASK;
-
- if (m->data) {
- dp = (u_int32_t *) OS_phystov((void *) (le32_to_cpu(m->data)));
- if (len >= 0x10)
- pr_info(" %x[%x]: %08X %08X %08X %08x\n", (u_int32_t) dp, len,
- *dp, *(dp + 1), *(dp + 2), *(dp + 3));
- else if (len >= 0x08)
- pr_info(" %x[%x]: %08X %08X\n", (u_int32_t) dp, len,
- *dp, *(dp + 1));
- else
- pr_info(" %x[%x]: %08X\n", (u_int32_t) dp, len, *dp);
- }
- }
+ u_int32_t *dp;
+ int len = 0;
#endif
- }
- m = m->snext;
- }
- } /* -for- */
- pr_info("\n");
- if (lockit)
- spin_unlock_irqrestore(&ch->ch_txlock, flags);
- return 0;
-}
+ if (lockit)
+ spin_lock_irqsave(&ch->ch_txlock, flags);
+ if (ch->txd_num == 0)
+ pr_info(" ZERO transmit buffers allocated for this channel.");
+ else {
+ FLUSH_MEM_READ();
+ m = ch->txd_irq_srv;
+ for (n = ch->txd_num; n; n--) {
+ status = le32_to_cpu(m->status);
+ pr_info("%c%c %08lx[%2d]: sts %08x (%c%c%c%c:%d.) Data [%08x] Next [%08x]\n",
+ (m == ch->txd_usr_add) ? 'F' : ' ',
+ (m == ch->txd_irq_srv) ? 'L' : ' ',
+ (unsigned long) m, n,
+ status,
+ m->data ? (status & MUSYCC_TX_OWNED ? 'M' : 'H') : '-',
+ status & POLL_DISABLED ? 'P' : '-',
+ status & EOBIRQ_ENABLE ? 'b' : '-',
+ status & EOMIRQ_ENABLE ? 'm' : '-',
+ status & LENGTH_MASK,
+ le32_to_cpu(m->data), le32_to_cpu(m->next));
+#ifdef RLD_DUMP_BUFDATA
+ len = status & LENGTH_MASK;
+
+ if (m->data) {
+ dp = (u_int32_t *)OS_phystov((void *)(le32_to_cpu(m->data)));
+ if (len >= 0x10)
+ pr_info(" %x[%x]: %08X %08X %08X %08x\n",
+ (u_int32_t) dp, len,
+ *dp, *(dp + 1),
+ *(dp + 2), *(dp + 3));
+ else if (len >= 0x08)
+ pr_info(" %x[%x]: %08X %08X\n",
+ (u_int32_t)dp, len,
+ *dp, *(dp + 1));
+ else
+ pr_info(" %x[%x]: %08X\n",
+ (u_int32_t)dp, len, *dp);
+ }
#endif
+ m = m->snext;
+ }
+ } /* -for- */
+ pr_info("\n");
+ if (lockit)
+ spin_unlock_irqrestore(&ch->ch_txlock, flags);
+ return 0;
+}
/*
* The following supports a backdoor debug facility which can be used to
@@ -199,12 +201,11 @@ musycc_dump_txbuffer_ring(mch_t *ch, int lockit)
status_t
musycc_dump_ring(ci_t *ci, unsigned int chan)
{
- mch_t *ch;
+ mch_t *ch;
+ int bh;
- if (chan >= MAX_CHANS_USED)
- return SBE_DRVR_FAIL; /* E2BIG */
- {
- int bh;
+ if (chan >= MAX_CHANS_USED)
+ return SBE_DRVR_FAIL; /* E2BIG */
bh = atomic_read(&ci->bh_pending);
pr_info(">> bh_pend %d [%d] ihead %d itail %d [%d] th_cnt %d bh_cnt %d wdcnt %d note %d\n",
@@ -214,40 +215,43 @@ musycc_dump_ring(ci_t *ci, unsigned int chan)
ci->wdcount, ci->wd_notify);
max_bh = 0; /* reset counter */
max_intcnt = 0; /* reset counter */
- }
-
- ch = sd_find_chan(dummy, chan);
- if (!ch) {
- pr_info(">> musycc_dump_ring: channel %d not up.\n", chan);
- return ENOENT;
- }
- pr_info(">> CI %p CHANNEL %3d @ %p: state %x status/p %x/%x\n", ci, chan, ch, ch->state,
- ch->status, ch->p.status);
- pr_info("--------------------------------\nTX Buffer Ring - Channel %d, txd_num %d. (bd/ch pend %d %d), TXD required %d, txpkt %lu\n",
- chan, ch->txd_num,
- (u_int32_t) atomic_read(&ci->tx_pending), (u_int32_t) atomic_read(&ch->tx_pending), ch->txd_required, ch->s.tx_packets);
- pr_info("++ User 0x%p IRQ_SRV 0x%p USR_ADD 0x%p QStopped %x, start_tx %x tx_full %d txd_free %d mode %x\n",
- ch->user, ch->txd_irq_srv, ch->txd_usr_add,
- sd_queue_stopped(ch->user),
- ch->ch_start_tx, ch->tx_full, ch->txd_free, ch->p.chan_mode);
- musycc_dump_txbuffer_ring(ch, 1);
- pr_info("RX Buffer Ring - Channel %d, rxd_num %d. IRQ_SRV[%d] 0x%p, start_rx %x rxpkt %lu\n",
- chan, ch->rxd_num, ch->rxix_irq_srv,
- &ch->mdr[ch->rxix_irq_srv], ch->ch_start_rx, ch->s.rx_packets);
- musycc_dump_rxbuffer_ring(ch, 1);
-
- return SBE_DRVR_SUCCESS;
+
+ ch = sd_find_chan(dummy, chan);
+ if (!ch) {
+ pr_info(">> musycc_dump_ring: channel %d not up.\n", chan);
+ return ENOENT;
+ }
+ pr_info(">> CI %p CHANNEL %3d @ %p: state %x status/p %x/%x\n",
+ ci, chan, ch, ch->state,
+ ch->status, ch->p.status);
+ pr_info("--------------------------------\n");
+ pr_info("TX Buffer Ring - Channel %d, txd_num %d. (bd/ch pend %d %d), TXD required %d, txpkt %lu\n",
+ chan, ch->txd_num,
+ (u_int32_t)atomic_read(&ci->tx_pending),
+ (u_int32_t)atomic_read(&ch->tx_pending),
+ ch->txd_required, ch->s.tx_packets);
+ pr_info("++ User 0x%p IRQ_SRV 0x%p USR_ADD 0x%p QStopped %x, start_tx %x tx_full %d txd_free %d mode %x\n",
+ ch->user, ch->txd_irq_srv, ch->txd_usr_add,
+ sd_queue_stopped(ch->user),
+ ch->ch_start_tx, ch->tx_full, ch->txd_free, ch->p.chan_mode);
+ musycc_dump_txbuffer_ring(ch, 1);
+ pr_info("RX Buffer Ring - Channel %d, rxd_num %d. IRQ_SRV[%d] 0x%p, start_rx %x rxpkt %lu\n",
+ chan, ch->rxd_num, ch->rxix_irq_srv,
+ &ch->mdr[ch->rxix_irq_srv], ch->ch_start_rx, ch->s.rx_packets);
+ musycc_dump_rxbuffer_ring(ch, 1);
+
+ return SBE_DRVR_SUCCESS;
}
status_t
musycc_dump_rings(ci_t *ci, unsigned int start_chan)
{
- unsigned int chan;
+ unsigned int chan;
- for (chan = start_chan; chan < (start_chan + 5); chan++)
- musycc_dump_ring(ci, chan);
- return SBE_DRVR_SUCCESS;
+ for (chan = start_chan; chan < (start_chan + 5); chan++)
+ musycc_dump_ring(ci, chan);
+ return SBE_DRVR_SUCCESS;
}
@@ -259,22 +263,22 @@ musycc_dump_rings(ci_t *ci, unsigned int start_chan)
void
musycc_init_mdt(mpi_t *pi)
{
- u_int32_t *addr, cfg;
- int i;
-
- /*
- * This Idle Code insertion takes effect prior to channel's first
- * transmitted message. After that, each message contains its own Idle
- * Code information which is to be issued after the message is
- * transmitted (Ref.MUSYCC 5.2.2.3: MCENBL bit in Group Configuration
- * Descriptor).
- */
-
- addr = (u_int32_t *) ((u_long) pi->reg + MUSYCC_MDT_BASE03_ADDR);
- cfg = CFG_CH_FLAG_7E << IDLE_CODE;
-
- for (i = 0; i < 32; addr++, i++)
- pci_write_32(addr, cfg);
+ u_int32_t *addr, cfg;
+ int i;
+
+ /*
+ * This Idle Code insertion takes effect prior to channel's first
+ * transmitted message. After that, each message contains its own Idle
+ * Code information which is to be issued after the message is
+ * transmitted (Ref.MUSYCC 5.2.2.3: MCENBL bit in Group Configuration
+ * Descriptor).
+ */
+
+ addr = (u_int32_t *) ((u_long) pi->reg + MUSYCC_MDT_BASE03_ADDR);
+ cfg = CFG_CH_FLAG_7E << IDLE_CODE;
+
+ for (i = 0; i < 32; addr++, i++)
+ pci_write_32(addr, cfg);
}
@@ -283,44 +287,45 @@ musycc_init_mdt(mpi_t *pi)
void
musycc_update_tx_thp(mch_t *ch)
{
- struct mdesc *md;
- unsigned long flags;
+ struct mdesc *md;
+ unsigned long flags;
- spin_lock_irqsave(&ch->ch_txlock, flags);
- while (1) {
- md = ch->txd_irq_srv;
- FLUSH_MEM_READ();
- if (!md->data) {
- /* No MDs with buffers to process */
- spin_unlock_irqrestore(&ch->ch_txlock, flags);
- return;
+ spin_lock_irqsave(&ch->ch_txlock, flags);
+ while (1) {
+ md = ch->txd_irq_srv;
+ FLUSH_MEM_READ();
+ if (!md->data) {
+ /* No MDs with buffers to process */
+ spin_unlock_irqrestore(&ch->ch_txlock, flags);
+ return;
+ }
+ if ((le32_to_cpu(md->status)) & MUSYCC_TX_OWNED) {
+ /* this is the MD to restart TX with */
+ break;
+ }
+ /*
+ * Otherwise, we have a valid, host-owned message descriptor which
+ * has been successfully transmitted and whose buffer can be freed,
+ * so... process this MD, it's owned by the host. (This might give
+ * as a new, updated txd_irq_srv.)
+ */
+ musycc_bh_tx_eom(ch->up, ch->gchan);
}
- if ((le32_to_cpu(md->status)) & MUSYCC_TX_OWNED) {
- /* this is the MD to restart TX with */
- break;
+ md = ch->txd_irq_srv;
+ ch->up->regram->thp[ch->gchan] = cpu_to_le32(OS_vtophys(md));
+ FLUSH_MEM_WRITE();
+
+ if (ch->tx_full) {
+ ch->tx_full = 0;
+ ch->txd_required = 0;
+ sd_enable_xmit(ch->user); /* re-enable to catch flow controlled
+ * channel */
}
- /*
- * Otherwise, we have a valid, host-owned message descriptor which
- * has been successfully transmitted and whose buffer can be freed,
- * so... process this MD, it's owned by the host. (This might give
- * as a new, updated txd_irq_srv.)
- */
- musycc_bh_tx_eom(ch->up, ch->gchan);
- }
- md = ch->txd_irq_srv;
- ch->up->regram->thp[ch->gchan] = cpu_to_le32(OS_vtophys(md));
- FLUSH_MEM_WRITE();
-
- if (ch->tx_full) {
- ch->tx_full = 0;
- ch->txd_required = 0;
- sd_enable_xmit(ch->user); /* re-enable to catch flow controlled
- * channel */
- }
- spin_unlock_irqrestore(&ch->ch_txlock, flags);
+ spin_unlock_irqrestore(&ch->ch_txlock, flags);
#ifdef RLD_TRANS_DEBUG
- pr_info("++ musycc_update_tx_thp[%d]: setting thp = %p, sts %x\n", ch->channum, md, md->status);
+ pr_info("++ musycc_update_tx_thp[%d]: setting thp = %p, sts %x\n",
+ ch->channum, md, md->status);
#endif
}
@@ -337,96 +342,88 @@ musycc_update_tx_thp(mch_t *ch)
void
musycc_wq_chan_restart(void *arg) /* channel private structure */
{
- mch_t *ch;
- mpi_t *pi;
- struct mdesc *md;
-#if 0
- unsigned long flags;
+ mch_t *ch;
+ mpi_t *pi;
+ struct mdesc *md;
+
+#if defined(RLD_TRANS_DEBUG) || defined(RLD_RXACT_DEBUG)
+ static int hereb4 = 7;
#endif
- ch = container_of(arg, struct c4_chan_info, ch_work);
- pi = ch->up;
+ ch = container_of(arg, struct c4_chan_info, ch_work);
+ pi = ch->up;
#ifdef RLD_TRANS_DEBUG
- pr_info("wq_chan_restart[%d]: start_RT[%d/%d] status %x\n",
- ch->channum, ch->ch_start_rx, ch->ch_start_tx, ch->status);
+ pr_info("wq_chan_restart[%d]: start_RT[%d/%d] status %x\n",
+ ch->channum, ch->ch_start_rx, ch->ch_start_tx, ch->status);
#endif
- /**********************************/
- /** check for RX restart request **/
- /**********************************/
+ /**********************************/
+ /** check for RX restart request **/
+ /**********************************/
- if ((ch->ch_start_rx) && (ch->status & RX_ENABLED)) {
+ if ((ch->ch_start_rx) && (ch->status & RX_ENABLED)) {
- ch->ch_start_rx = 0;
+ ch->ch_start_rx = 0;
#if defined(RLD_TRANS_DEBUG) || defined(RLD_RXACT_DEBUG)
- {
- static int hereb4 = 7;
-
- if (hereb4) { /* RLD DEBUG */
- hereb4--;
+ if (hereb4) { /* RLD DEBUG */
+ hereb4--;
#ifdef RLD_TRANS_DEBUG
- md = &ch->mdr[ch->rxix_irq_srv];
- pr_info("++ musycc_wq_chan_restart[%d] CHAN RX ACTIVATE: rxix_irq_srv %d, md %p sts %x, rxpkt %lu\n",
- ch->channum, ch->rxix_irq_srv, md, le32_to_cpu(md->status),
- ch->s.rx_packets);
+ md = &ch->mdr[ch->rxix_irq_srv];
+ pr_info("++ musycc_wq_chan_restart[%d] CHAN RX ACTIVATE: rxix_irq_srv %d, md %p sts %x, rxpkt %lu\n",
+ ch->channum, ch->rxix_irq_srv, md,
+ le32_to_cpu(md->status), ch->s.rx_packets);
#elif defined(RLD_RXACT_DEBUG)
- md = &ch->mdr[ch->rxix_irq_srv];
- pr_info("++ musycc_wq_chan_restart[%d] CHAN RX ACTIVATE: rxix_irq_srv %d, md %p sts %x, rxpkt %lu\n",
- ch->channum, ch->rxix_irq_srv, md, le32_to_cpu(md->status),
- ch->s.rx_packets);
- musycc_dump_rxbuffer_ring(ch, 1); /* RLD DEBUG */
+ md = &ch->mdr[ch->rxix_irq_srv];
+ pr_info("++ musycc_wq_chan_restart[%d] CHAN RX ACTIVATE: rxix_irq_srv %d, md %p sts %x, rxpkt %lu\n",
+ ch->channum, ch->rxix_irq_srv,
+ md, le32_to_cpu(md->status),
+ ch->s.rx_packets);
+ musycc_dump_rxbuffer_ring(ch, 1); /* RLD DEBUG */
#endif
- }
- }
+ }
#endif
- musycc_serv_req(pi, SR_CHANNEL_ACTIVATE | SR_RX_DIRECTION | ch->gchan);
- }
- /**********************************/
- /** check for TX restart request **/
- /**********************************/
+ musycc_serv_req(pi, SR_CHANNEL_ACTIVATE |
+ SR_RX_DIRECTION | ch->gchan);
+ }
+ /**********************************/
+ /** check for TX restart request **/
+ /**********************************/
- if ((ch->ch_start_tx) && (ch->status & TX_ENABLED)) {
- /* find next unprocessed message, then set TX thp to it */
- musycc_update_tx_thp(ch);
+ if ((ch->ch_start_tx) && (ch->status & TX_ENABLED)) {
+ /* find next unprocessed message, then set TX thp to it */
+ musycc_update_tx_thp(ch);
-#if 0
- spin_lock_irqsave(&ch->ch_txlock, flags);
-#endif
- md = ch->txd_irq_srv;
- if (!md) {
+ md = ch->txd_irq_srv;
+ if (!md) {
#ifdef RLD_TRANS_DEBUG
- pr_info("-- musycc_wq_chan_restart[%d]: WARNING, starting NULL md\n", ch->channum);
-#endif
-#if 0
- spin_unlock_irqrestore(&ch->ch_txlock, flags);
-#endif
- } else if (md->data && ((le32_to_cpu(md->status)) & MUSYCC_TX_OWNED)) {
- ch->ch_start_tx = 0;
-#if 0
- spin_unlock_irqrestore(&ch->ch_txlock, flags); /* allow interrupts for service request */
+ pr_info("-- musycc_wq_chan_restart[%d]: WARNING, starting NULL md\n",
+ ch->channum);
#endif
+ } else if (md->data && ((le32_to_cpu(md->status)) &
+ MUSYCC_TX_OWNED)) {
+ ch->ch_start_tx = 0;
+
#ifdef RLD_TRANS_DEBUG
- pr_info("++ musycc_wq_chan_restart() CHAN TX ACTIVATE: chan %d txd_irq_srv %p = sts %x, txpkt %lu\n",
- ch->channum, ch->txd_irq_srv, ch->txd_irq_srv->status, ch->s.tx_packets);
+ pr_info("++ musycc_wq_chan_restart() CHAN TX ACTIVATE: chan %d txd_irq_srv %p = sts %x, txpkt %lu\n",
+ ch->channum, ch->txd_irq_srv,
+ ch->txd_irq_srv->status, ch->s.tx_packets);
#endif
- musycc_serv_req(pi, SR_CHANNEL_ACTIVATE | SR_TX_DIRECTION | ch->gchan);
- }
+ musycc_serv_req(pi, SR_CHANNEL_ACTIVATE |
+ SR_TX_DIRECTION | ch->gchan);
+ }
#ifdef RLD_RESTART_DEBUG
- else {
- /* retain request to start until retried and we have data to xmit */
- pr_info("-- musycc_wq_chan_restart[%d]: DELAYED due to md %p sts %x data %x, start_tx %x\n",
- ch->channum, md,
- le32_to_cpu(md->status),
- le32_to_cpu(md->data), ch->ch_start_tx);
- musycc_dump_txbuffer_ring(ch, 0);
-#if 0
- spin_unlock_irqrestore(&ch->ch_txlock, flags); /* allow interrupts for service request */
+ else {
+ /* retain request to start until retried and we have data to xmit */
+ pr_info("-- musycc_wq_chan_restart[%d]: DELAYED due to md %p sts %x data %x, start_tx %x\n",
+ ch->channum, md,
+ le32_to_cpu(md->status),
+ le32_to_cpu(md->data), ch->ch_start_tx);
+ musycc_dump_txbuffer_ring(ch, 0);
+ }
#endif
}
-#endif
- }
}
@@ -439,31 +436,32 @@ void
musycc_chan_restart(mch_t *ch)
{
#ifdef RLD_RESTART_DEBUG
- pr_info("++ musycc_chan_restart[%d]: txd_irq_srv @ %p = sts %x\n",
- ch->channum, ch->txd_irq_srv, ch->txd_irq_srv->status);
+ pr_info("++ musycc_chan_restart[%d]: txd_irq_srv @ %p = sts %x\n",
+ ch->channum, ch->txd_irq_srv, ch->txd_irq_srv->status);
#endif
- /* 2.6 - find next unprocessed message, then set TX thp to it */
+ /* 2.6 - find next unprocessed message, then set TX thp to it */
#ifdef RLD_RESTART_DEBUG
- pr_info(">> musycc_chan_restart: scheduling Chan %x workQ @ %p\n", ch->channum, &ch->ch_work);
+ pr_info(">> musycc_chan_restart: scheduling Chan %x workQ @ %p\n",
+ ch->channum, &ch->ch_work);
#endif
- c4_wk_chan_restart(ch); /* work queue mechanism fires off: Ref:
- * musycc_wq_chan_restart () */
-
+ c4_wk_chan_restart(ch); /* work queue mechanism fires off: Ref:
+ * musycc_wq_chan_restart () */
}
void
rld_put_led(mpi_t *pi, u_int32_t ledval)
{
- static u_int32_t led = 0;
+ static u_int32_t led;
- if (ledval == 0)
- led = 0;
- else
- led |= ledval;
+ if (ledval == 0)
+ led = 0;
+ else
+ led |= ledval;
- pci_write_32((u_int32_t *) &pi->up->cpldbase->leds, led); /* RLD DEBUG TRANHANG */
+ /* RLD DEBUG TRANHANG */
+ pci_write_32((u_int32_t *) &pi->up->cpldbase->leds, led);
}
@@ -472,100 +470,110 @@ rld_put_led(mpi_t *pi, u_int32_t ledval)
void
musycc_serv_req(mpi_t *pi, u_int32_t req)
{
- volatile u_int32_t r;
- int rcnt;
+ volatile u_int32_t r;
+ int rcnt;
- /*
- * PORT NOTE: Semaphore protect service loop guarantees only a single
- * operation at a time. Per MUSYCC Manual - "Issuing service requests to
- * the same channel group without first receiving ACK from each request
- * may cause the host to lose track of which service request has been
- * acknowledged."
- */
+ /*
+ * PORT NOTE: Semaphore protect service loop guarantees only a single
+ * operation at a time. Per MUSYCC Manual - "Issuing service requests to
+ * the same channel group without first receiving ACK from each request
+ * may cause the host to lose track of which service request has been
+ * acknowledged."
+ */
- SD_SEM_TAKE(&pi->sr_sem_busy, "serv"); /* only 1 thru here, per
- * group */
+ SD_SEM_TAKE(&pi->sr_sem_busy, "serv"); /* only 1 thru here, per
+ * group */
- if (pi->sr_last == req) {
+ if (pi->sr_last == req) {
#ifdef RLD_TRANS_DEBUG
- pr_info(">> same SR, Port %d Req %x\n", pi->portnum, req);
+ pr_info(">> same SR, Port %d Req %x\n", pi->portnum, req);
#endif
- /*
- * The most likely repeated request is the channel activation command
- * which follows the occurrence of a Transparent mode TX ONR or a
- * BUFF error. If the previous command was a CHANNEL ACTIVATE,
- * precede it with a NOOP command in order maintain coherent control
- * of this current (re)ACTIVATE.
- */
+ /*
+ * The most likely repeated request is the channel activation command
+ * which follows the occurrence of a Transparent mode TX ONR or a
+ * BUFF error. If the previous command was a CHANNEL ACTIVATE,
+ * precede it with a NOOP command in order maintain coherent control
+ * of this current (re)ACTIVATE.
+ */
- r = (pi->sr_last & ~SR_GCHANNEL_MASK);
- if ((r == (SR_CHANNEL_ACTIVATE | SR_TX_DIRECTION)) ||
- (r == (SR_CHANNEL_ACTIVATE | SR_RX_DIRECTION))) {
+ r = (pi->sr_last & ~SR_GCHANNEL_MASK);
+ if ((r == (SR_CHANNEL_ACTIVATE | SR_TX_DIRECTION)) ||
+ (r == (SR_CHANNEL_ACTIVATE | SR_RX_DIRECTION))) {
#ifdef RLD_TRANS_DEBUG
- pr_info(">> same CHAN ACT SR, Port %d Req %x => issue SR_NOOP CMD\n", pi->portnum, req);
+ pr_info(">> same CHAN ACT SR, Port %d Req %x => issue SR_NOOP CMD\n", pi->portnum, req);
#endif
- SD_SEM_GIVE(&pi->sr_sem_busy); /* allow this next request */
- musycc_serv_req(pi, SR_NOOP);
- SD_SEM_TAKE(&pi->sr_sem_busy, "serv"); /* relock & continue w/
- * original req */
- } else if (req == SR_NOOP) {
- /* no need to issue back-to-back SR_NOOP commands at this time */
+ /* allow this next request */
+ SD_SEM_GIVE(&pi->sr_sem_busy);
+ musycc_serv_req(pi, SR_NOOP);
+ /* relock & continue w/ original req */
+ SD_SEM_TAKE(&pi->sr_sem_busy, "serv");
+ } else if (req == SR_NOOP) {
+ /* no need to issue back-to-back
+ * SR_NOOP commands at this time
+ */
#ifdef RLD_TRANS_DEBUG
- pr_info(">> same Port SR_NOOP skipped, Port %d\n", pi->portnum);
+ pr_info(">> same Port SR_NOOP skipped, Port %d\n",
+ pi->portnum);
#endif
- SD_SEM_GIVE(&pi->sr_sem_busy); /* allow this next request */
- return;
+ /* allow this next request */
+ SD_SEM_GIVE(&pi->sr_sem_busy);
+ return;
+ }
}
- }
- rcnt = 0;
- pi->sr_last = req;
+ rcnt = 0;
+ pi->sr_last = req;
rewrite:
- pci_write_32((u_int32_t *) &pi->reg->srd, req);
- FLUSH_MEM_WRITE();
-
- /*
- * Per MUSYCC Manual, Section 6.1,2 - "When writing an SCR service
- * request, the host must ensure at least one PCI bus clock cycle has
- * elapsed before writing another service request. To meet this minimum
- * elapsed service request write timing interval, it is recommended that
- * the host follow any SCR write with another operation which reads from
- * the same address."
- */
- r = pci_read_32((u_int32_t *) &pi->reg->srd); /* adhere to write
- * timing imposition */
-
-
- if ((r != req) && (req != SR_CHIP_RESET) && (++rcnt <= MUSYCC_SR_RETRY_CNT)) {
- if (cxt1e1_log_level >= LOG_MONITOR)
- pr_info("%s: %d - reissue srv req/last %x/%x (hdw reads %x), Chan %d.\n",
- pi->up->devname, rcnt, req, pi->sr_last, r,
- (pi->portnum * MUSYCC_NCHANS) + (req & 0x1f));
- OS_uwait_dummy(); /* this delay helps reduce reissue counts
- * (reason not yet researched) */
- goto rewrite;
- }
- if (rcnt > MUSYCC_SR_RETRY_CNT) {
- pr_warning("%s: failed service request (#%d)= %x, group %d.\n",
- pi->up->devname, MUSYCC_SR_RETRY_CNT, req, pi->portnum);
- SD_SEM_GIVE(&pi->sr_sem_busy); /* allow any next request */
- return;
- }
- if (req == SR_CHIP_RESET) {
+ pci_write_32((u_int32_t *) &pi->reg->srd, req);
+ FLUSH_MEM_WRITE();
+
/*
- * PORT NOTE: the CHIP_RESET command is NOT ack'd by the MUSYCC, thus
- * the upcoming delay is used. Though the MUSYCC documentation
- * suggests a read-after-write would supply the required delay, it's
- * unclear what CPU/BUS clock speeds might have been assumed when
- * suggesting this 'lack of ACK' workaround. Thus the use of uwait.
+ * Per MUSYCC Manual, Section 6.1,2 - "When writing an SCR service
+ * request, the host must ensure at least one PCI bus clock cycle has
+ * elapsed before writing another service request. To meet this minimum
+ * elapsed service request write timing interval, it is recommended that
+ * the host follow any SCR write with another operation which reads from
+ * the same address."
*/
- OS_uwait(100000, "icard"); /* 100ms */
- } else {
- FLUSH_MEM_READ();
- SD_SEM_TAKE(&pi->sr_sem_wait, "sakack"); /* sleep until SACK
- * interrupt occurs */
- }
- SD_SEM_GIVE(&pi->sr_sem_busy); /* allow any next request */
+
+ /* adhere to write timing imposition */
+ r = pci_read_32((u_int32_t *) &pi->reg->srd);
+
+
+ if ((r != req) && (req != SR_CHIP_RESET) &&
+ (++rcnt <= MUSYCC_SR_RETRY_CNT)) {
+ if (cxt1e1_log_level >= LOG_MONITOR)
+ pr_info("%s: %d - reissue srv req/last %x/%x (hdw reads %x), Chan %d.\n",