/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sd.h>
#include "htc_hif.h"
#include "hif-ops.h"
#include "target.h"
#include "debug.h"
#include "cfg80211.h"
struct ath6kl_sdio {
struct sdio_func *func;
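	/* protects the bus request free list below (an assumption from how the list is used) */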
spinlock_t lock;
/* free list */
struct list_head bus_req_freeq;
/* available bus requests */
struct bus_request bus_req[BUS_REQUEST_MAX_NUM];
struct ath6kl *ar;
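	/* bounce buffer for requests that are not word aligned or not DMA-able */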
u8 *dma_buffer;
/* scatter request list head */
struct list_head scat_req;
spinlock_t scat_lock;
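	/* true when the SDIO function has been disabled */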
bool is_disabled;
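	/* non-zero while the SDIO interrupt handler is running */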
atomic_t irq_handling;
const struct sdio_device_id *id;
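	/* asynchronous write requests deferred to a workqueue */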
struct work_struct wr_async_work;
struct list_head wr_asyncq;
spinlock_t wr_async_lock;
};
#define CMD53_ARG_READ 0
#define CMD53_ARG_WRITE 1
#define CMD53_ARG_BLOCK_BASIS 1
#define CMD53_ARG_FIXED_ADDRESS 0
#define CMD53_ARG_INCR_ADDRESS 1
static inline struct ath6kl_sdio *ath6kl_sdio_priv(struct ath6kl *ar)
{
return ar->hif_priv;
}
/*
 * Check whether a buffer is word (4-byte) aligned and DMA-able.
 * Most host controllers assume the buffer is DMA-capable and will
 * bug check otherwise (e.g. for buffers on the stack), and
 * virt_addr_valid() fails for stack memory.
 */
static inline bool buf_needs_bounce(u8 *buf)
{
return ((unsigned long) buf & 0x3) || !virt_addr_valid(buf);
}
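
/*
 * Fill in the mailbox addresses and sizes that the HTC layer uses to
 * exchange messages with the target over SDIO.
 */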
static void ath6kl_sdio_set_mbox_info(struct ath6kl *ar)
{
struct ath6kl_mbox_info *mbox_info = &ar->mbox_info;
/* EP1 has an extended range */
mbox_info->htc_addr = HIF_MBOX_BASE_ADDR;
mbox_info->htc_ext_addr = HIF_MBOX0_EXT_BASE_ADDR;
mbox_info->htc_ext_sz = HIF_MBOX0_EXT_WIDTH;
mbox_info->block_size = HIF_MBOX_BLOCK_SIZE;
mbox_info->gmbox_addr = HIF_GMBOX_BASE_ADDR;
mbox_info->gmbox_sz = HIF_GMBOX_WIDTH;
}
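
/*
 * Pack the 32-bit argument of CMD53 (IO_RW_EXTENDED) as defined by the
 * SDIO spec: bit 31 R/W flag, bits 30:28 function number, bit 27 block
 * mode, bit 26 op code (incrementing address), bits 25:9 register
 * address, bits 8:0 byte/block count.
 */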
static inline void ath6kl_sdio_set_cmd53_arg(u32 *arg, u8 rw, u8 func,
u8 mode, u8 opcode, u32 addr,
u16 blksz)
{
*arg = (((rw & 1) << 31) |
((func & 0x7) << 28) |
((mode & 1) << 27) |
((opcode & 1) << 26) |
((addr & 0x1FFFF) << 9) |
(blksz & 0x1FF));
}
static inline void ath6kl_sdio_set_cmd52_arg(u32 *arg, u8 write, u8 raw,
unsigned int address,
unsigned char val)
{
const u8 func = 0;
*arg = ((write & 1) << 31) |
((func & 0x7) << 28) |
((raw & 1) << 27) |
(1 << 26) |
((address & 0x1FFFF) << 9) |
(1 << 8) |
(val & 0xFF);
}
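
/*
 * Write a single byte to a function 0 register with a raw CMD52
 * (IO_RW_DIRECT) issued via mmc_wait_for_cmd(), presumably because the
 * generic sdio_f0_writeb() helper only permits writes to the
 * vendor-specific CCCR range on most cards.
 */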
static int ath6kl_sdio_func0_cmd52_wr_byte(struct mmc_card *card,
unsigned int address,
unsigned char byte)
{
struct mmc_command io_cmd;
memset(&io_cmd, 0, sizeof(io_cmd));
ath6kl_sdio_set_cmd52_arg(&io_cmd.arg, 1, 0, address, byte);
io_cmd.opcode = SD_IO_RW_DIRECT;
io_cmd.flags = MMC_RSP_R5 | MMC_CMD_AC;
return mmc_wait_for_cmd(card->host, &io_cmd, 0);
}
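
/*
 * Perform a synchronous read or write to the target. HIF_FIXED_ADDRESS
 * requests use FIFO-style transfers (sdio_readsb()/sdio_writesb());
 * otherwise incrementing-address transfers (sdio_memcpy_fromio()/
 * sdio_memcpy_toio()) are used.
 */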
static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
u8 *buf, u32 len)
{
int ret = 0;
if (request & HIF_WRITE) {
		/*
		 * FIXME: ugly workaround. The write address is apparently
		 * shifted so that the last byte of the transfer lands on
		 * the end of the mailbox address range, which the target
		 * seems to treat as the end-of-message marker.
		 */
		if (addr >= HIF_MBOX_BASE_ADDR &&
		    addr <= HIF_MBOX_END_ADDR)
			addr += (HIF_MBOX_WIDTH - len);

		/* FIXME: the same workaround for the extended mailbox region */
		if (addr == HIF_MBOX0_EXT_BASE_ADDR)
			addr += HIF_MBOX0_EXT_WIDTH - len;
if (request & HIF_FIXED_ADDRESS)
ret = sdio_writesb(func, addr, buf, len);
else
ret = sdio_memcpy_toio(func, addr,