author | Sage Ahn <syahn@gctsemi.com> | 2012-05-15 13:20:36 +0900
---|---|---
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2012-05-15 08:49:35 -0700
commit | 247e9cffdce024fec5f55f76a8592f2fa8b3aa7b (patch) |
tree | dbc4204e78315eff10b2b2300cf0b918fa7bf295 |
parent | 93c66ee1186ad31f767f1b7cbd0ecaef7375b8ed (diff) |
staging: gdm72xx: Add GCT GDM72xx WiMAX driver.
This patch adds the kernel driver for the GDM72xx WiMAX chips
developed by GCT Semiconductor, Inc., enabling mobile WiMAX
connectivity on Linux hosts.
Signed-off-by: Sage Ahn <syahn@gctsemi.com>
Cc: Ben Chan <benchan@chromium.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
22 files changed, 4621 insertions, 0 deletions
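As a quick usage sketch before the patch itself: assuming the Kconfig symbols introduced below, a kernel .config fragment along the following lines would build the driver as the gdmwm module with the USB interface selected. It is illustrative only, not a required configuration, and the option names are exactly those added by this patch:

# Staging drivers must be enabled for anything under drivers/staging/
CONFIG_STAGING=y
# Build the GDM72xx driver (gdmwm.o) as a module
CONFIG_WIMAX_GDM72XX=m
# Exactly one interface is taken from the USB/SDIO choice block
CONFIG_WIMAX_GDM72XX_USB=y
# Optional features (QoS, K mode, WiMAX2, USB PM) default to n
# CONFIG_WIMAX_GDM72XX_QOS is not set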
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index d6417d118a9..7e0e5e4317a 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -130,4 +130,6 @@ source "drivers/staging/ozwpan/Kconfig"
 
 source "drivers/staging/ipack/Kconfig"
 
+source "drivers/staging/gdm72xx/Kconfig"
+
 endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index fd8b7ce3a22..30901057bd1 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -57,3 +57,4 @@ obj-$(CONFIG_ANDROID) += android/
 obj-$(CONFIG_PHONE) += telephony/
 obj-$(CONFIG_RAMSTER) += ramster/
 obj-$(CONFIG_USB_WPAN_HCD) += ozwpan/
+obj-$(CONFIG_WIMAX_GDM72XX) += gdm72xx/
diff --git a/drivers/staging/gdm72xx/Kconfig b/drivers/staging/gdm72xx/Kconfig
new file mode 100644
index 00000000000..5c37cba1148
--- /dev/null
+++ b/drivers/staging/gdm72xx/Kconfig
@@ -0,0 +1,45 @@
+#
+# GCT GDM72xx WiMAX driver configuration
+#
+
+menuconfig WIMAX_GDM72XX
+	tristate "GCT GDM72xx WiMAX support"
+	help
+	  Support for the GCT GDM72xx WiMAX chip
+
+if WIMAX_GDM72XX
+
+config WIMAX_GDM72XX_QOS
+	bool "Enable QoS support"
+	default n
+
+config WIMAX_GDM72XX_K_MODE
+	bool "Enable K mode"
+	default n
+
+config WIMAX_GDM72XX_WIMAX2
+	bool "Enable WIMAX2 support"
+	default n
+
+choice
+	prompt "Select interface"
+
+config WIMAX_GDM72XX_USB
+	bool "USB interface"
+	depends on USB
+
+config WIMAX_GDM72XX_SDIO
+	bool "SDIO interface"
+	depends on MMC
+
+endchoice
+
+if WIMAX_GDM72XX_USB
+
+config WIMAX_GDM72XX_USB_PM
+	bool "Enable power managerment support"
+	default n
+
+endif # WIMAX_GDM72XX_USB
+
+endif # WIMAX_GDM72XX
diff --git a/drivers/staging/gdm72xx/Makefile b/drivers/staging/gdm72xx/Makefile
new file mode 100644
index 00000000000..35da7b90b19
--- /dev/null
+++ b/drivers/staging/gdm72xx/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_WIMAX_GDM72XX) := gdmwm.o
+
+gdmwm-y += gdm_wimax.o netlink_k.o
+gdmwm-$(CONFIG_WIMAX_GDM72XX_QOS) += gdm_qos.o
+gdmwm-$(CONFIG_WIMAX_GDM72XX_SDIO) += gdm_sdio.o sdio_boot.o
+gdmwm-$(CONFIG_WIMAX_GDM72XX_USB) += gdm_usb.o usb_boot.o
diff --git a/drivers/staging/gdm72xx/TODO b/drivers/staging/gdm72xx/TODO
new file mode 100644
index 00000000000..30ac01ab972
--- /dev/null
+++ b/drivers/staging/gdm72xx/TODO
@@ -0,0 +1,5 @@
+TODO:
+- Replace kernel_thread with kthread in gdm_usb.c
+- Replace hard-coded firmware paths with request_firmware in
+  sdio_boot.c and usb_boot.c
+- Clean up coding style to meet kernel standard.
diff --git a/drivers/staging/gdm72xx/gdm_qos.c b/drivers/staging/gdm72xx/gdm_qos.c
new file mode 100644
index 00000000000..0217680ec54
--- /dev/null
+++ b/drivers/staging/gdm72xx/gdm_qos.c
@@ -0,0 +1,460 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/version.h>
+#include <linux/etherdevice.h>
+#include <asm/byteorder.h>
+
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/if_ether.h>
+
+#include "gdm_wimax.h"
+#include "hci.h"
+#include "gdm_qos.h"
+
+#define B2H(x) __be16_to_cpu(x)
+
+#undef dprintk
+#define dprintk(fmt, args ...) \
+	printk(KERN_DEBUG "[QoS] " fmt, ## args)
+#undef wprintk
+#define wprintk(fmt, args ...) \
+	printk(KERN_WARNING "[QoS WARNING] " fmt, ## args)
+#undef eprintk
+#define eprintk(fmt, args ...) printk(KERN_ERR "[QoS ERROR] " fmt, ## args)
+
+
+#define MAX_FREE_LIST_CNT 32
+static struct {
+	struct list_head head;
+	int cnt;
+	spinlock_t lock;
+} qos_free_list;
+
+static void init_qos_entry_list(void)
+{
+	qos_free_list.cnt = 0;
+	INIT_LIST_HEAD(&qos_free_list.head);
+	spin_lock_init(&qos_free_list.lock);
+}
+
+static void *alloc_qos_entry(void)
+{
+	struct qos_entry_s *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qos_free_list.lock, flags);
+	if (qos_free_list.cnt) {
+		entry = list_entry(qos_free_list.head.prev, struct qos_entry_s,
+				   list);
+		list_del(&entry->list);
+		qos_free_list.cnt--;
+		spin_unlock_irqrestore(&qos_free_list.lock, flags);
+		return entry;
+	}
+	spin_unlock_irqrestore(&qos_free_list.lock, flags);
+
+	entry = kmalloc(sizeof(struct qos_entry_s), GFP_ATOMIC);
+	return entry;
+}
+
+static void free_qos_entry(void *entry)
+{
+	struct qos_entry_s *qentry = (struct qos_entry_s *) entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qos_free_list.lock, flags);
+	if (qos_free_list.cnt < MAX_FREE_LIST_CNT) {
+		list_add(&qentry->list, &qos_free_list.head);
+		qos_free_list.cnt++;
+		spin_unlock_irqrestore(&qos_free_list.lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&qos_free_list.lock, flags);
+
+	kfree(entry);
+}
+
+static void free_qos_entry_list(struct list_head *free_list)
+{
+	struct qos_entry_s *entry, *n;
+	int total_free = 0;
+
+	list_for_each_entry_safe(entry, n, free_list, list) {
+		list_del(&entry->list);
+		kfree(entry);
+		total_free++;
+	}
+
+	dprintk("%s: total_free_cnt=%d\n", __func__, total_free);
+}
+
+void gdm_qos_init(void *nic_ptr)
+{
+	struct nic *nic = nic_ptr;
+	struct qos_cb_s *qcb = &nic->qos;
+	int i;
+
+	for (i = 0 ; i < QOS_MAX; i++) {
+		INIT_LIST_HEAD(&qcb->qos_list[i]);
+		qcb->csr[i].QoSBufCount = 0;
+		qcb->csr[i].Enabled = 0;
+	}
+
+	qcb->qos_list_cnt = 0;
+	qcb->qos_null_idx = QOS_MAX-1;
+	qcb->qos_limit_size = 255;
+
+	spin_lock_init(&qcb->qos_lock);
+
+	init_qos_entry_list();
+}
+
+void gdm_qos_release_list(void *nic_ptr)
+{
+	struct nic *nic = nic_ptr;
+	struct qos_cb_s *qcb = &nic->qos;
+	unsigned long flags;
+	struct qos_entry_s *entry, *n;
+	struct list_head free_list;
+	int i;
+
+	INIT_LIST_HEAD(&free_list);
+
+	spin_lock_irqsave(&qcb->qos_lock, flags);
+
+	for (i = 0; i < QOS_MAX; i++) {
+		qcb->csr[i].QoSBufCount = 0;
+		qcb->csr[i].Enabled = 0;
+	}
+
+	qcb->qos_list_cnt = 0;
+	qcb->qos_null_idx = QOS_MAX-1;
+
+	for (i = 0; i < QOS_MAX; i++) {
+		list_for_each_entry_safe(entry, n, &qcb->qos_list[i], list) {
+			list_move_tail(&entry->list, &free_list);
+		}
+	}
+	spin_unlock_irqrestore(&qcb->qos_lock, flags);
+	free_qos_entry_list(&free_list);
+}
+
+static u32 chk_ipv4_rule(struct gdm_wimax_csr_s *csr, u8 *Stream, u8 *port)
+{
+	int i;
+
+	if (csr->ClassifierRuleEnable&IPTYPEOFSERVICE) {
+		if (((Stream[1] & csr->IPToSMask) < csr->IPToSLow) ||
+		    ((Stream[1] & csr->IPToSMask) > csr->IPToSHigh))
+			return 1;
+	}
+
+	if (csr->ClassifierRuleEnable&PROTOCOL) {
+		if (Stream[9] != csr->Protocol)
+			return 1;
+	}
+
+	if (csr->ClassifierRuleEnable&IPMASKEDSRCADDRESS) {
+		for (i = 0; i < 4; i++) {
+			if ((Stream[12 + i] & csr->IPSrcAddrMask[i]) !=
+			    (csr->IPSrcAddr[i] & csr->IPSrcAddrMask[i]))
+				return 1;
+		}
+	}
+
+	if (csr->ClassifierRuleEnable&IPMASKEDDSTADDRESS) {
+		for (i = 0; i < 4; i++) {
+			if ((Stream[16 + i] & csr->IPDstAddrMask[i]) !=
+			    (csr->IPDstAddr[i] & csr->IPDstAddrMask[i]))
+				return 1;
+		}
+	}
+
+	if (csr->ClassifierRuleEnable&PROTOCOLSRCPORTRANGE) {
+		i = ((port[0]<<8)&0xff00)+port[1];
+		if ((i < csr->SrcPortLow) || (i > csr->SrcPortHigh))
+			return 1;
+	}
+
+	if (csr->ClassifierRuleEnable&PROTOCOLDSTPORTRANGE) {
+		i = ((port[2]<<8)&0xff00)+port[3];
+		if ((i < csr->DstPortLow) || (i > csr->DstPortHigh))
+			return 1;
+	}
+
+	return 0;
+}
+
+static u32 get_qos_index(struct nic *nic, u8* iph, u8* tcpudph)
+{
+	u32 IP_Ver, Header_Len, i;
+	struct qos_cb_s *qcb = &nic->qos;
+
+	if (iph == NULL || tcpudph == NULL)
+		return -1;
+
+	IP_Ver = (iph[0]>>4)&0xf;
+	Header_Len = iph[0]&0xf;
+
+	if (IP_Ver == 4) {
+		for (i = 0; i < QOS_MAX; i++) {
+			if (qcb->csr[i].Enabled) {
+				if (qcb->csr[i].ClassifierRuleEnable) {
+					if (chk_ipv4_rule(&qcb->csr[i], iph,
+							  tcpudph) == 0)
+						return i;
+				}
+			}
+		}
+	}
+
+	return -1;
+}
+
+static u32 extract_qos_list(struct nic *nic, struct list_head *head)
+{
+	struct qos_cb_s *qcb = &nic->qos;
+	struct qos_entry_s *entry;
+	int i;
+
+	INIT_LIST_HEAD(head);
+
+	for (i = 0; i < QOS_MAX; i++) {
+		if (qcb->csr[i].Enabled) {
+			if (qcb->csr[i].QoSBufCount < qcb->qos_limit_size) {
+				if (!list_empty(&qcb->qos_list[i])) {
+					entry = list_entry(
+						qcb->qos_list[i].prev,
+						struct qos_entry_s, list);
+					list_move_tail(&entry->list, head);
+					qcb->csr[i].QoSBufCount++;
+
+					if (!list_empty(&qcb->qos_list[i]))
+						wprintk("QoS Index(%d) "
+							"is piled!!\n", i);
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void send_qos_list(struct nic *nic, struct list_head *head)
+{
+	struct qos_entry_s *entry, *n;
+
+	list_for_each_entry_safe(entry, n, head, list) {
+		list_del(&entry->list);
+		free_qos_entry(entry);
+		gdm_wimax_send_tx(entry->skb, entry->dev);
+	}
+}
+
+int gdm_qos_send_hci_pkt(struct sk_buff *skb, struct net_device *dev)
+{
+	struct nic *nic = netdev_priv(dev);
+	int index;
+	struct qos_cb_s *qcb = &nic->qos;
+	unsigned long flags;
+	struct ethhdr *ethh = (struct ethhdr *) (skb->data + HCI_HEADER_SIZE);
+	struct iphdr *iph = (struct iphdr *) ((char *) ethh + ETH_HLEN);
+	struct tcphdr *tcph;
+	struct qos_entry_s *entry = NULL;
+	struct list_head send_list;
+	int ret = 0;
+
+	tcph = (struct tcphdr *) iph + iph->ihl*4;
+
+	if (B2H(ethh->h_proto) == ETH_P_IP) {
+		if (qcb->qos_list_cnt && !qos_free_list.cnt) {
+			entry = alloc_qos_entry();
+			entry->skb = skb;
+			entry->dev = dev;
+			dprintk("qcb->qos_list_cnt=%d\n", qcb->qos_list_cnt);
+		}
+
+		spin_lock_irqsave(&qcb->qos_lock, flags);
+		if (qcb->qos_list_cnt) {
+			index = get_qos_index(nic, (u8 *)iph, (u8 *) tcph);
+			if (index == -1)
+				index = qcb->qos_null_idx;
+
+			if (!entry) {
+				entry = alloc_qos_entry();
+				entry->skb = skb;
+				entry->dev = dev;
+			}
+
+			list_add_tail(&entry->list, &qcb->qos_list[index]);
+			extract_qos_list(nic, &send_list);
+			spin_unlock_irqrestore(&qcb->qos_lock, flags);
+			send_qos_list(nic, &send_list);
+			goto out;
+		}
+		spin_unlock_irqrestore(&qcb->qos_lock, flags);
+		if (entry)
+			free_qos_entry(entry);
+	}
+
+	ret = gdm_wimax_send_tx(skb, dev);
+out:
+	return ret;
+}
+
+static u32 get_csr(struct qos_cb_s *qcb, u32 SFID, int mode)
+{
+	int i;
+
+	for (i = 0; i < qcb->qos_list_cnt; i++) {
+		if (qcb->csr[i].SFID == SFID)
+			return i;
+	}
+
+	if (mode) {
+		for (i = 0; i < QOS_MAX; i++) {
+			if (qcb->csr[i].Enabled == 0) {
+				qcb->csr[i].Enabled = 1;
+				qcb->qos_list_cnt++;
+				return i;
+			}
+		}
+	}
+	return -1;
+}
+
+#define QOS_CHANGE_DEL 0xFC
+#define QOS_ADD 0xFD
+#define QOS_REPORT 0xFE
+
+void gdm_recv_qos_hci_packet(void *nic_ptr, u8 *buf, int size)
+{
+	struct nic *nic = nic_ptr;
+	u32 i, SFID, index, pos;
+	u8 subCmdEvt;
+	u8 len;
+	struct qos_cb_s *qcb = &nic->qos;
+	struct qos_entry_s *entry, *n;
+	struct list_head send_list;
+	struct list_head free_list;
+	unsigned long flags;
+
+	subCmdEvt = (u8)buf[4];
+
+	if (subCmdEvt == QOS_REPORT) {
+		len = (u8)buf[5];
+
+		spin_lock_irqsave(&qcb->qos_lock, flags);
+		for (i = 0; i < qcb->qos_list_cnt; i++) {
+			SFID = ((buf[(i*5)+6]<<24)&0xff000000);
+			SFID += ((buf[(i*5)+7]<<16)&0xff0000);
+			SFID += ((buf[(i*5)+8]<<8)&0xff00);
+			SFID += (buf[(i*5)+9]);
+			index = get_csr(qcb, SFID, 0);
+			if (index == -1) {
+				spin_unlock_irqrestore(&qcb->qos_lock, flags);
+				eprintk("QoS ERROR: No SF\n");
+				return;
+			}
+			qcb->csr[index].QoSBufCount = buf[(i*5)+10];
+		}
+
+		extract_qos_list(nic, &send_list);
+		spin_unlock_irqrestore(&qcb->qos_lock, flags);
+		send_qos_list(nic, &send_list);
+		return;
+	} else if (subCmdEvt == QOS_ADD) {
+		pos = 5;
+		len = (u8)buf[pos++];
+
+		SFID = ((buf[pos++]<<24)&0xff000000);
+		SFID += ((buf[pos++]<<16)&0xff0000);
+		SFID += ((buf[pos++]<<8)&0xff00);
+		SFID += (buf[pos++]);
+
+		index = get_csr(qcb, SFID, 1);
+		if (index == -1) {
+			eprintk("QoS ERROR: csr Update Error\n");
+			return;
+		}
+
+		dprintk("QOS_ADD SFID = 0x%x, index=%d\n", SFID, index);
+
+		spin_lock_irqsave(&qcb->qos_lock, flags);
+		qcb->csr[index].SFID = SFID;
+		qcb->csr[index].ClassifierRuleEnable = ((buf[pos++]<<8)&0xff00);
+		qcb->csr[index].ClassifierRuleEnable += buf[pos++];
+		if (qcb->csr[index].ClassifierRuleEnable == 0)
+			qcb->qos_null_idx = index;
+		qcb->csr[index].IPToSMask = buf[pos++];
+		qcb->csr[index].IPToSLow = buf[pos++];
+		qcb->csr[index].IPToSHigh = buf[pos++];
+		qcb->csr[index].Protocol = buf[pos++];
+		qcb->csr[index].IPSrcAddrMask[0] = buf[pos++];
+		qcb->csr[index].IPSrcAddrMask[1] = buf[pos++];
+		qcb->csr[index].IPSrcAddrMask[2] = buf[pos++];
+		qcb->csr[index].IPSrcAddrMask[3] = buf[pos++];
+		qcb->csr[index].IPSrcAddr[0] = buf[pos++];
+		qcb->csr[index].IPSrcAddr[1] = buf[pos++];
+		qcb->csr[index].IPSrcAddr[2] = buf[pos++];
+		qcb->csr[index].IPSrcAddr[3] = buf[pos++];
+		qcb->csr[index].IPDstAddrMask[0] = buf[pos++];
+		qcb->csr[index].IPDstAddrMask[1] = buf[pos++];
+		qcb->csr[index].IPDstAddrMask[2] = buf[pos++];
+		qcb->csr[index].IPDstAddrMask[3] = buf[pos++];
+		qcb->csr[index].IPDstAddr[0] = buf[pos++];
+		qcb->csr[index].IPDstAddr[1] = buf[pos++];
+		qcb->csr[index].IPDstAddr[2] = buf[pos++];
+		qcb->csr[index].IPDstAddr[3] = buf[pos++];
+		qcb->csr[index].SrcPortLow = ((buf[pos++]<<8)&0xff00);
+		qcb->csr[index].SrcPortLow += buf[pos++];
+		qcb->csr[index].SrcPortHigh = ((buf[pos++]<<8)&0xff00);
+		qcb->csr[index].SrcPortHigh += buf[pos++];
+		qcb->csr[index].DstPortLow = ((buf[pos++]<<8)&0xff00);
+		qcb->csr[index].DstPortLow += buf[pos++];
+		qcb->csr[index].DstPortHigh = ((buf[pos++]<<8)&0xff00);
+		qcb->csr[index].DstPortHigh += buf[pos++];
+
+		qcb->qos_limit_size = 254/qcb->qos_list_cnt;
+		spin_unlock_irqrestore(&qcb->qos_lock, flags);
+	} else if (subCmdEvt == QOS_CHANGE_DEL) {
+		pos = 5;
+		len = (u8)buf[pos++];
+		SFID = ((buf[pos++]<<24)&0xff000000);
+		SFID += ((buf[pos++]<<16)&0xff0000);
+		SFID += ((buf[pos++]<<8)&0xff00);
+		SFID += (buf[pos++]);
+		index = get_csr(qcb, SFID, 1);
+		if (index == -1) {
+			eprintk("QoS ERROR: Wrong index(%d)\n", index);
+			return;
+		}
+
+		dprintk("QOS_CHANGE_DEL SFID = 0x%x, index=%d\n", SFID, index);
+
+		INIT_LIST_HEAD(&free_list);
+
+		spin_lock_irqsave(&qcb->qos_lock, flags);
+		qcb->csr[index].Enabled = 0;
+		qcb->qos_list_cnt--;
+		qcb->qos_limit_size = 254/qcb->qos_list_cnt;
+
+		list_for_each_entry_safe(entry, n, &qcb->qos_list[index],
+					 list) {
+			list_move_tail(&entry->list, &free_list);
+		}
+		spin_unlock_irqrestore(&qcb->qos_lock, flags);
+		free_qos_entry_list(&free_list);
+	}
+}
diff --git a/drivers/staging/gdm72xx/gdm_qos.h b/drivers/staging/gdm72xx/gdm_qos.h
new file mode 100644
index 00000000000..33f2bd4cee3
--- /dev/null
+++ b/drivers/staging/gdm72xx/gdm_qos.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(GDM_QOS_H_20090403)
+#define GDM_QOS_H_20090403
+
+#include <linux/types.h>
+#include <linux/usb.h>
+#include <linux/list.h>
+
+#define BOOLEAN u8
+
+#define QOS_MAX 16
+#define IPTYPEOFSERVICE 0x8000
+#define PROTOCOL 0x4000
+#define IPMASKEDSRCADDRESS 0x2000
+#define IPMASKEDDSTADDRESS 0x1000
+#define PROTOCOLSRCPORTRANGE 0x800
+#define PROTOCOLDSTPORTRANGE 0x400
+#define DSTMACADDR 0x200
+#define SRCMACADDR 0x100
+#define ETHERTYPE 0x80
+#define IEEE802_1DUSERPRIORITY 0x40
+#define IEEE802_1QVLANID 0x10
+
+struct gdm_wimax_csr_s {
+	/* union{
+		U16 all;
+		struct _CS_CLASSIFIER_RULE_ENABLE{
+			IPTypeOfService:1,
+			Protocol:1,
+			IPMaskedSrcAddress:1,
+			IPMaskedDstAddress:1,
+			ProtocolSrcPortRange:1,
+			ProtocolDstPortRange:1,
+			DstMacAddr:1,
+			SrcMacAddr:1,
+			Ethertype:1,
+			IEEE802_1DUserPriority:1,
+			IEEE802_1QVLANID:1,
+			Reserved:5;
+		} fields;
+	} */
+	BOOLEAN Enabled;
+	u32 SFID;
+	u8 QoSBufCount;
+	u16 ClassifierRuleEnable;
+	u8 IPToSLow;
+	u8 IPToSHigh;
+	u8 IPToSMask;
+	u8 Protocol;
+	u8 IPSrcAddr[16];
+	u8 IPSrcAddrMask[16];
+	u8 IPDstAddr[16];
+	u8 IPDstAddrMask[16];
+	u16 SrcPortLow;
+	u16 SrcPortHigh;
+	u16 DstPortLow;
+	u16 DstPortHigh;
+};
+
+struct qos_entry_s {
+	struct list_head list;
+	struct sk_buff *skb;
+	struct net_device *dev;
+
+};
+
+struct qos_cb_s {
+	struct list_head qos_list[QOS_MAX];
+	u32 qos_list_cnt;
+	u32 qos_null_idx;
+	struct gdm_wimax_csr_s csr[QOS_MAX];
+	spinlock_t qos_lock;
+	u32 qos_limit_size;
+};
+
+void gdm_qos_init(void *nic_ptr);
+void gdm_qos_release_list(void *nic_ptr);
+int gdm_qos_send_hci_pkt(struct sk_buff *skb, struct net_device *dev);
+void gdm_recv_qos_hci_packet(void *nic_ptr, u8 *buf, int size);
+
+#endif
diff --git a/drivers/staging/gdm72xx/gdm_sdio.c b/drivers/staging/gdm72xx/gdm_sdio.c
new file mode 100644
index 00000000000..1ef466e718a
--- /dev/null
+++ b/drivers/staging/gdm72xx/gdm_sdio.c
@@ -0,0 +1,754 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+
+#include "gdm_sdio.h"
+#include "gdm_wimax.h"
+#include "sdio_boot.h"
+#include "hci.h"
+
+#define TYPE_A_HEADER_SIZE 4
+#define TYPE_A_LOOKAHEAD_SIZE 16
+
+#define MAX_NR_RX_BUF 4
+
+#define SDU_TX_BUF_SIZE 2048
+#define TX_BUF_SIZE 2048
+#define TX_CHUNK_SIZE (2048 - TYPE_A_HEADER_SIZE)
+#define RX_BUF_SIZE (25*1024)
+
+#define TX_HZ 2000
+#define TX_INTERVAL (1000000/TX_HZ)
+
+/*#define DEBUG*/
+
+static int init_sdio(struct sdiowm_dev *sdev);
+static void release_sdio(struct sdiowm_dev *sdev);
+
+#ifdef DEBUG
+static void hexdump(char *title, u8 *data, int len)
+{
+	int i;
+
+	printk(KERN_DEBUG "%s: length = %d\n", title, len);
+	for (i = 0; i < len; i++) {
+		printk(KERN_DEBUG "%02x ", data[i]);
+		if ((i & 0xf) == 0xf)
+			printk(KERN_DEBUG "\n");
+	}
+	printk(KERN_DEBUG "\n");
+}
+#endif
+
+static struct sdio_tx *alloc_tx_struct(struct tx_cxt *tx)
+{
+	struct sdio_tx *t = NULL;
+
+	t = kmalloc(sizeof(*t), GFP_ATOMIC);
+	if (t == NULL)
+		goto out;
+
+	memset(t, 0, sizeof(*t));
+
+	t->buf = kmalloc(TX_BUF_SIZE, GFP_ATOMIC);
+	if (t->buf == NULL)
+		goto out;
+
+	t->tx_cxt = tx;
+
+	return t;
+out:
+	if (t) {
+		kfree(t->buf);
+		kfree(t);
+	}
+	return NULL;
+}
+
+static void free_tx_struct(struct sdio_tx *t)
+{
+	if (t) {
+		kfree(t->buf);
+		kfree(t);
+	}
+}
+
+static struct sdio_rx *alloc_rx_struct(struct rx_cxt *rx)
+{
+	struct sdio_rx *r = NULL;
+
+	r = kmalloc(sizeof(*r), GFP_ATOMIC);
+	if (r == NULL)
+		goto out;
+
+	memset(r, 0, sizeof(*r));
+
+	r->rx_cxt = rx;
+
+	return r;
+out:
+	kfree(r);
+	return NULL;
+}
+
+static void free_rx_struct(struct sdio_rx *r)
+{
+	kfree(r);
+}
+
+/* Before this function is called, spin lock should be locked. */
+static struct sdio_tx *get_tx_struct(struct tx_cxt *tx, int *no_spc)
+{
+	struct sdio_tx *t;
+
+	if (list_empty(&tx->free_list))
+		return NULL;
+
+	t = list_entry(tx->free_list.prev, struct sdio_tx, list);
+	list_del(&t->list);
+
+	*no_spc = list_empty(&tx->free_list) ? 1 : 0;
+
+	return t;
+}
+
+/* Before this function is called, spin lock should be locked. */
+static void put_tx_struct(struct tx_cxt *tx, struct sdio_tx *t)
+{
+	list_add_tail(&t->list, &tx->free_list);
+}
+
+/* Before this function is called, spin lock should be locked. */
+static struct sdio_rx *get_rx_struct(struct rx_cxt *rx)
+{
+	struct sdio_rx *r;
+
+	if (list_empty(&rx->free_list))
+		return NULL;
+
+	r = list_entry(rx->free_list.prev, struct sdio_rx, list);
+	list_del(&r->list);
+
+	return r;
+}
+
+/* Before this function is called, spin lock should be locked. */
+static void put_rx_struct(struct rx_cxt *rx, struct sdio_rx *r)
+{
+	list_add_tail(&r->list, &rx->free_list);
+}
+
+static int init_sdio(struct sdiowm_dev *sdev)
+{
+	int ret = 0, i;
+	struct tx_cxt *tx = &sdev->tx;
+	struct rx_cxt *rx = &sdev->rx;
+	struct sdio_tx *t;
+	struct sdio_rx *r;
+
+	INIT_LIST_HEAD(&tx->free_list);
+	INIT_LIST_HEAD(&tx->sdu_list);
+	INIT_LIST_HEAD(&tx->hci_list);
+
+	spin_lock_init(&tx->lock);
+
+	tx->sdu_buf = kmalloc(SDU_TX_BUF_SIZE, GFP_KERNEL);
+	if (tx->sdu_buf == NULL) {
+		printk(KERN_ERR "Failed to allocate SDU tx buffer.\n");
+		goto fail;
+	}
+
+	for (i = 0; i < MAX_NR_SDU_BUF; i++) {
+		t = alloc_tx_struct(tx);
+		if (t == NULL) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+		list_add(&t->list, &tx->free_list);
+	}
+
+	INIT_LIST_HEAD(&rx->free_list);
+	INIT_LIST_HEAD(&rx->req_list);
+
+	spin_lock_init(&rx->lock);
+
+	for (i = 0; i < MAX_NR_RX_BUF; i++) {
+		r = alloc_rx_struct(rx);
+		if (r == NULL) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+		list_add(&r->list, &rx->free_list);
+	}
+
+	rx->rx_buf = kmalloc(RX_BUF_SIZE, GFP_KERNEL);
+	if (rx->rx_buf == NULL) {
+		printk(KERN_ERR "Failed to allocate rx buffer.\n");
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	release_sdio(sdev);
+	return ret;
+}
+
+static void release_sdio(struct sdiowm_dev *sdev)
+{
+	struct tx_cxt *tx = &sdev->tx;
+	struct rx_cxt *rx = &sdev->rx;
+	struct sdio_tx *t, *t_next;
+	struct sdio_rx *r, *r_next;
+
+	kfree(tx->sdu_buf);
+
+	list_for_each_entry_safe(t, t_next, &tx->free_list, list) {
+		list_del(&t->list);
+		free_tx_struct(t);
+	}
+
+	list_for_each_entry_safe(t, t_next, &tx->sdu_list, list) {
+		list_del(&t->list);
+		free_tx_struct(t);
+	}
+
+	list_for_each_entry_safe(t, t_next, &tx->hci_list, list) {
+		list_del(&t->list);
+		free_tx_struct(t);
+	}
+
+	kfree(rx->rx_buf);
+
+	list_for_each_entry_safe(r, r_next, &rx->free_list, list) {
+		list_del(&r->list);
+		free_rx_struct(r);
+	}
+
+	list_for_each_entry_safe(r, r_next, &rx->req_list, list) {
+		list_del(&r->list);
+		free_rx_struct(r);
+	}
+}
+
+static void send_sdio_pkt(struct sdio_func *func, u8 *data, int len)
+{
+	int n, blocks, ret, remain;
+
+	sdio_claim_host(func);
+
+	blocks = len / func->cur_blksize;
+	n = blocks * func->cur_blksize;
+	if (blocks) {
+		ret = sdio_memcpy_toio(func, 0, data, n);
+		if (ret < 0) {
+			if (ret != -ENOMEDIUM)
+				printk(KERN_ERR "gdmwms: %s error: ret = %d\n",
+					__func__, ret);
+			goto end_io;
+		}
+	}
+
+	remain = len - n;
+	remain = (remain + 3) & ~3;
+
+	if (remain) {
+		ret = sdio_memcpy_toio(func, 0, data + n, remain);
+		if (ret < 0) {
+			if (ret != -ENOMEDIUM)
+				printk(KERN_ERR "gdmwms: %s error: ret = %d\n",
+					__func__, ret);
+			goto end_io;
+		}
+	}
+
+end_io:
+	sdio_release_host(func);
+}
+
+static void send_sdu(struct sdio_func *func, struct tx_cxt *tx)
+{
+	struct list_head *l, *next;
+	struct hci_s *hci;
+	struct sdio_tx *t;
+	int pos, len, i, estlen, aggr_num = 0, aggr_len;
+	u8 *buf;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tx->lock, flags);
+
+	pos = TYPE_A_HEADER_SIZE + HCI_HEADER_SIZE;
+	list_for_each_entry(t, &tx->sdu_list, list) {
+		estlen = ((t->len + 3) & ~3) + 4;
+		if ((pos + estlen) > SDU_TX_BUF_SIZE)
+			break;
+
+		aggr_num++;
+		memcpy(tx->sdu_buf + pos, t->buf, t->len);
+		memset(tx->sdu_buf + pos + t->len, 0, estlen - t->len);
+		pos += estlen;
+	}
+	aggr_len = pos;
+
+	hci = (struct hci_s *)(tx->sdu_buf + TYPE_A_HEADER_SIZE);
+	hci->cmd_evt = H2B(WIMAX_TX_SDU_AGGR);
+	hci->length = H2B(aggr_len - TYPE_A_HEADER_SIZE - HCI_HEADER_SIZE);
+
+	spin_unlock_irqrestore(&tx->lock, flags);
+
+#ifdef DEBUG
hexdump("sdio_send", tx->sdu_buf + TYPE_A_HEADER_SIZE, + aggr_len - TYPE_A_HEADER_SIZE); +#endif + + for (pos = TYPE_A_HEADER_SIZE; pos < aggr_len; pos += TX_CHUNK_SIZE) { + len = aggr_len - pos; + len = len > TX_CHUNK_SIZE ? TX_CHUNK_SIZE : len; + buf = tx->sdu_buf + pos - TYPE_A_HEADER_SIZE; + + buf[0] = len & 0xff; + buf[1] = (len >> 8) & 0xff; + buf[2] = (len >> 16) & 0xff; + buf[3] = (pos + len) >= aggr_len ? 0 : 1; + send_sdio_pkt(func, buf, len + TYPE_A_HEADER_SIZE); + } + + spin_lock_irqsave(&tx->lock, flags); + + for (l = tx->sdu_list.next, i = 0; i < aggr_num; i++, l = next) { + next = l->next; + t = list_entry(l, struct sdio_tx, list); + if (t->callback) + t->callback(t->cb_data); + + list_del(l); + put_tx_struct(t->tx_cxt, t); + } + + do_gettimeofday(&tx->sdu_stamp); + spin_unlock_irqrestore(&tx->lock, flags); +} + +static void send_hci(struct sdio_func *func, struct tx_cxt *tx, + struct sdio_tx *t) +{ + unsigned long flags; + +#ifdef DEBUG + hexdump("sdio_send", t->buf + TYPE_A_HEADER_SIZE, + t->len - TYPE_A_HEADER_SIZE); +#endif + send_sdio_pkt(func, t->buf, t->len); + + spin_lock_irqsave(&tx->lock, flags); + if (t->callback) + t->callback(t->cb_data); + free_tx_struct(t); + spin_unlock_irqrestore(&tx->lock, flags); +} + +static void do_tx(struct work_struct *work) +{ + struct sdiowm_dev *sdev = container_of(work, struct sdiowm_dev, ws); + struct sdio_func *func = sdev->func; + struct tx_cxt *tx = &sdev->tx; + struct sdio_tx *t = NULL; + struct timeval now, *before; |