author    David S. Miller <davem@davemloft.net>  2011-07-14 07:56:40 -0700
committer David S. Miller <davem@davemloft.net>  2011-07-14 07:56:40 -0700
commit    6a7ebdf2fd15417e87b4fd02ff411aeaca34da5f (patch)
tree      86b15d8cd3e25c97b348b5a61bdb16c02726a480 /drivers
parent    f6b72b6217f8c24f2a54988e58af858b4e66024d (diff)
parent    51414d41084496aaefd06d7f19eb8206e8bfac2d (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
        net/bluetooth/l2cap_core.c
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/memory.c | 1
-rw-r--r--  drivers/base/syscore.c | 8
-rw-r--r--  drivers/block/drbd/drbd_actlog.c | 2
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c | 37
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 6
-rw-r--r--  drivers/block/drbd/drbd_worker.c | 7
-rw-r--r--  drivers/char/agp/intel-agp.h | 7
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c | 2
-rw-r--r--  drivers/firewire/ohci.c | 6
-rw-r--r--  drivers/gpio/langwell_gpio.c | 2
-rw-r--r--  drivers/gpio/tps65910-gpio.c | 2
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 45
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 32
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 80
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 118
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 17
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 167
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_blit_kms.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 4
-rw-r--r--  drivers/gpu/drm/radeon/ni.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/nid.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 1
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 21
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_reg.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 7
-rw-r--r--  drivers/hwmon/Kconfig | 2
-rw-r--r--  drivers/hwmon/adm1275.c | 16
-rw-r--r--  drivers/hwmon/emc6w201.c | 58
-rw-r--r--  drivers/hwmon/f71882fg.c | 19
-rw-r--r--  drivers/hwmon/hwmon-vid.c | 2
-rw-r--r--  drivers/hwmon/lm95241.c | 22
-rw-r--r--  drivers/hwmon/pmbus.c | 19
-rw-r--r--  drivers/hwmon/pmbus_core.c | 11
-rw-r--r--  drivers/hwmon/sch5627.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-bfin-twi.c | 8
-rw-r--r--  drivers/i2c/busses/i2c-s3c2410.c | 7
-rw-r--r--  drivers/i2c/busses/i2c-tegra.c | 8
-rw-r--r--  drivers/infiniband/core/cm.c | 3
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 3
-rw-r--r--  drivers/input/keyboard/pmic8xxx-keypad.c | 3
-rw-r--r--  drivers/input/misc/pmic8xxx-pwrkey.c | 3
-rw-r--r--  drivers/leds/leds-pca9532.c | 2
-rw-r--r--  drivers/media/rc/fintek-cir.c | 5
-rw-r--r--  drivers/media/rc/imon.c | 19
-rw-r--r--  drivers/media/rc/ir-raw.c | 4
-rw-r--r--  drivers/media/rc/ite-cir.c | 12
-rw-r--r--  drivers/media/rc/ite-cir.h | 3
-rw-r--r--  drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c | 58
-rw-r--r--  drivers/media/rc/lirc_dev.c | 37
-rw-r--r--  drivers/media/rc/mceusb.c | 80
-rw-r--r--  drivers/media/rc/nuvoton-cir.c | 2
-rw-r--r--  drivers/media/rc/nuvoton-cir.h | 1
-rw-r--r--  drivers/media/rc/rc-main.c | 48
-rw-r--r--  drivers/media/video/bt8xx/bttv-driver.c | 2
-rw-r--r--  drivers/media/video/cx18/cx18-ioctl.c | 8
-rw-r--r--  drivers/media/video/ivtv/ivtv-ioctl.c | 8
-rw-r--r--  drivers/media/video/m5mols/m5mols.h | 57
-rw-r--r--  drivers/media/video/m5mols/m5mols_capture.c | 22
-rw-r--r--  drivers/media/video/m5mols/m5mols_controls.c | 6
-rw-r--r--  drivers/media/video/m5mols/m5mols_core.c | 144
-rw-r--r--  drivers/media/video/m5mols/m5mols_reg.h | 21
-rw-r--r--  drivers/media/video/msp3400-driver.c | 12
-rw-r--r--  drivers/media/video/mx1_camera.c | 10
-rw-r--r--  drivers/media/video/omap/omap_vout.c | 18
-rw-r--r--  drivers/media/video/omap/omap_voutlib.c | 6
-rw-r--r--  drivers/media/video/omap3isp/isp.c | 2
-rw-r--r--  drivers/media/video/pvrusb2/pvrusb2-hdw.c | 4
-rw-r--r--  drivers/media/video/pwc/pwc-ctrl.c | 2
-rw-r--r--  drivers/media/video/pwc/pwc-if.c | 152
-rw-r--r--  drivers/media/video/pwc/pwc.h | 4
-rw-r--r--  drivers/media/video/s5p-fimc/fimc-capture.c | 21
-rw-r--r--  drivers/media/video/s5p-fimc/fimc-core.c | 28
-rw-r--r--  drivers/media/video/s5p-fimc/fimc-core.h | 29
-rw-r--r--  drivers/media/video/saa7134/saa7134-input.c | 2
-rw-r--r--  drivers/media/video/tuner-core.c | 229
-rw-r--r--  drivers/media/video/uvc/uvc_entity.c | 34
-rw-r--r--  drivers/media/video/uvc/uvc_queue.c | 2
-rw-r--r--  drivers/media/video/uvc/uvc_video.c | 4
-rw-r--r--  drivers/media/video/v4l2-dev.c | 39
-rw-r--r--  drivers/media/video/v4l2-ioctl.c | 18
-rw-r--r--  drivers/media/video/videobuf2-core.c | 14
-rw-r--r--  drivers/media/video/videobuf2-dma-sg.c | 2
-rw-r--r--  drivers/mfd/Kconfig | 3
-rw-r--r--  drivers/mfd/Makefile | 1
-rw-r--r--  drivers/mfd/asic3.c | 1
-rw-r--r--  drivers/mfd/htc-pasic3.c | 1
-rw-r--r--  drivers/mfd/omap-usb-host.c | 131
-rw-r--r--  drivers/mfd/tps65911-comparator.c | 2
-rw-r--r--  drivers/mmc/core/mmc.c | 77
-rw-r--r--  drivers/mmc/host/mmci.c | 2
-rw-r--r--  drivers/mmc/host/mmci.h | 5
-rw-r--r--  drivers/net/natsemi.c | 6
-rw-r--r--  drivers/net/sh_eth.c | 6
-rw-r--r--  drivers/net/slip.c | 2
-rw-r--r--  drivers/net/usb/hso.c | 7
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c | 3
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h | 5
-rw-r--r--  drivers/net/wireless/ath/ath5k/desc.c | 3
-rw-r--r--  drivers/net/wireless/ath/ath5k/pci.c | 7
-rw-r--r--  drivers/net/wireless/ath/ath5k/sysfs.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 3
-rw-r--r--  drivers/net/wireless/ath/carl9170/usb.c | 2
-rw-r--r--  drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | 1
-rw-r--r--  drivers/pci/pci.c | 2
-rw-r--r--  drivers/pci/pci.h | 2
-rw-r--r--  drivers/pci/setup-bus.c | 15
-rw-r--r--  drivers/pcmcia/pxa2xx_vpac270.c | 4
-rw-r--r--  drivers/platform/x86/acer-wmi.c | 47
-rw-r--r--  drivers/platform/x86/asus-wmi.c | 1
-rw-r--r--  drivers/platform/x86/compal-laptop.c | 4
-rw-r--r--  drivers/platform/x86/dell-laptop.c | 30
-rw-r--r--  drivers/platform/x86/hp-wmi.c | 11
-rw-r--r--  drivers/platform/x86/intel_oaktrail.c | 1
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 72
-rw-r--r--  drivers/regulator/db8500-prcmu.c | 14
-rw-r--r--  drivers/regulator/max8952.c | 2
-rw-r--r--  drivers/regulator/max8997.c | 55
-rw-r--r--  drivers/scsi/Kconfig | 13
-rw-r--r--  drivers/scsi/Makefile | 1
-rw-r--r--  drivers/scsi/hpsa.c | 16
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 4
-rw-r--r--  drivers/scsi/isci/Makefile | 8
-rw-r--r--  drivers/scsi/isci/firmware/Makefile | 19
-rw-r--r--  drivers/scsi/isci/firmware/README | 36
-rw-r--r--  drivers/scsi/isci/firmware/create_fw.c | 99
-rw-r--r--  drivers/scsi/isci/firmware/create_fw.h | 77
-rw-r--r--  drivers/scsi/isci/host.c | 2751
-rw-r--r--  drivers/scsi/isci/host.h | 542
-rw-r--r--  drivers/scsi/isci/init.c | 565
-rw-r--r--  drivers/scsi/isci/isci.h | 538
-rw-r--r--  drivers/scsi/isci/phy.c | 1312
-rw-r--r--  drivers/scsi/isci/phy.h | 504
-rw-r--r--  drivers/scsi/isci/port.c | 1757
-rw-r--r--  drivers/scsi/isci/port.h | 306
-rw-r--r--  drivers/scsi/isci/port_config.c | 754
-rw-r--r--  drivers/scsi/isci/probe_roms.c | 243
-rw-r--r--  drivers/scsi/isci/probe_roms.h | 249
-rw-r--r--  drivers/scsi/isci/registers.h | 1934
-rw-r--r--  drivers/scsi/isci/remote_device.c | 1501
-rw-r--r--  drivers/scsi/isci/remote_device.h | 352
-rw-r--r--  drivers/scsi/isci/remote_node_context.c | 627
-rw-r--r--  drivers/scsi/isci/remote_node_context.h | 224
-rw-r--r--  drivers/scsi/isci/remote_node_table.c | 598
-rw-r--r--  drivers/scsi/isci/remote_node_table.h | 188
-rw-r--r--  drivers/scsi/isci/request.c | 3391
-rw-r--r--  drivers/scsi/isci/request.h | 448
-rw-r--r--  drivers/scsi/isci/sas.h | 219
-rw-r--r--  drivers/scsi/isci/scu_completion_codes.h | 283
-rw-r--r--  drivers/scsi/isci/scu_event_codes.h | 336
-rw-r--r--  drivers/scsi/isci/scu_remote_node_context.h | 229
-rw-r--r--  drivers/scsi/isci/scu_task_context.h | 942
-rw-r--r--  drivers/scsi/isci/task.c | 1676
-rw-r--r--  drivers/scsi/isci/task.h | 367
-rw-r--r--  drivers/scsi/isci/unsolicited_frame_control.c | 225
-rw-r--r--  drivers/scsi/isci/unsolicited_frame_control.h | 278
-rw-r--r--  drivers/spi/spi_s3c64xx.c | 4
-rw-r--r--  drivers/ssb/driver_pcicore.c | 18
-rw-r--r--  drivers/staging/lirc/lirc_imon.c | 10
-rw-r--r--  drivers/staging/lirc/lirc_serial.c | 44
-rw-r--r--  drivers/staging/lirc/lirc_sir.c | 11
-rw-r--r--  drivers/staging/lirc/lirc_zilog.c | 4
-rw-r--r--  drivers/tty/serial/atmel_serial.c | 3
-rw-r--r--  drivers/usb/core/message.c | 11
-rw-r--r--  drivers/usb/gadget/fsl_udc_core.c | 36
-rw-r--r--  drivers/video/amba-clcd.c | 2
-rw-r--r--  drivers/video/fsl-diu-fb.c | 16
-rw-r--r--  drivers/video/geode/gx1fb_core.c | 14
-rw-r--r--  drivers/video/hecubafb.c | 3
-rw-r--r--  drivers/video/sh_mobile_meram.c | 2
-rw-r--r--  drivers/video/sm501fb.c | 2
-rw-r--r--  drivers/video/udlfb.c | 8
-rw-r--r--  drivers/video/vesafb.c | 1
-rw-r--r--  drivers/w1/masters/ds1wm.c | 5
182 files changed, 25202 insertions(+), 1151 deletions(-)
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 9f9b2359f71..45d7c8fc73b 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -30,7 +30,6 @@
static DEFINE_MUTEX(mem_sysfs_mutex);
#define MEMORY_CLASS_NAME "memory"
-#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS)
static int sections_per_block;
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index c126db3cb7d..e8d11b6630e 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -9,6 +9,7 @@
#include <linux/syscore_ops.h>
#include <linux/mutex.h>
#include <linux/module.h>
+#include <linux/interrupt.h>
static LIST_HEAD(syscore_ops_list);
static DEFINE_MUTEX(syscore_ops_lock);
@@ -48,6 +49,13 @@ int syscore_suspend(void)
struct syscore_ops *ops;
int ret = 0;
+ pr_debug("Checking wakeup interrupts\n");
+
+ /* Return error code if there are any wakeup interrupts pending. */
+ ret = check_wakeup_irqs();
+ if (ret)
+ return ret;
+
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled before system core suspend.\n");
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 09ef9a878ef..cf0e63dd97d 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -79,7 +79,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
md_io.error = 0;
if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
- rw |= REQ_FUA;
+ rw |= REQ_FUA | REQ_FLUSH;
rw |= REQ_SYNC;
bio = bio_alloc(GFP_NOIO, 1);
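The drbd_actlog.c change adds REQ_FLUSH next to REQ_FUA for meta-data writes. A rough sketch of the distinction, using the 2.6/3.0-era block-layer flag names (illustrative, not drbd's code):

/* REQ_FLUSH: drain the device's volatile write cache *before* this
 * request, making earlier completed writes durable.
 * REQ_FUA:   this request's own data must reach stable media before
 * completion is signalled.
 * Meta-data writes want both windows covered. */
int rw = WRITE | REQ_FLUSH | REQ_FUA | REQ_SYNC;
submit_bio(rw, bio);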
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index f440a02dfdb..7b976296b56 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -112,9 +112,6 @@ struct drbd_bitmap {
struct task_struct *bm_task;
};
-static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
- unsigned long e, int val, const enum km_type km);
-
#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
static void __bm_print_lock_info(struct drbd_conf *mdev, const char *func)
{
@@ -994,6 +991,9 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
bio_endio(bio, -EIO);
} else {
submit_bio(rw, bio);
+ /* this should not count as user activity and cause the
+ * resync to throttle -- see drbd_rs_should_slow_down(). */
+ atomic_add(len >> 9, &mdev->rs_sect_ev);
}
}
@@ -1256,7 +1256,7 @@ unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_f
* expected to be called for only a few bits (e - s about BITS_PER_LONG).
* Must hold bitmap lock already. */
static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
- unsigned long e, int val, const enum km_type km)
+ unsigned long e, int val)
{
struct drbd_bitmap *b = mdev->bitmap;
unsigned long *p_addr = NULL;
@@ -1274,14 +1274,14 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
if (page_nr != last_page_nr) {
if (p_addr)
- __bm_unmap(p_addr, km);
+ __bm_unmap(p_addr, KM_IRQ1);
if (c < 0)
bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
else if (c > 0)
bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
changed_total += c;
c = 0;
- p_addr = __bm_map_pidx(b, page_nr, km);
+ p_addr = __bm_map_pidx(b, page_nr, KM_IRQ1);
last_page_nr = page_nr;
}
if (val)
@@ -1290,7 +1290,7 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
}
if (p_addr)
- __bm_unmap(p_addr, km);
+ __bm_unmap(p_addr, KM_IRQ1);
if (c < 0)
bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
else if (c > 0)
@@ -1318,7 +1318,7 @@ static int bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
bm_print_lock_info(mdev);
- c = __bm_change_bits_to(mdev, s, e, val, KM_IRQ1);
+ c = __bm_change_bits_to(mdev, s, e, val);
spin_unlock_irqrestore(&b->bm_lock, flags);
return c;
@@ -1343,16 +1343,17 @@ static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
{
int i;
int bits;
- unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_USER0);
+ unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_IRQ1);
for (i = first_word; i < last_word; i++) {
bits = hweight_long(paddr[i]);
paddr[i] = ~0UL;
b->bm_set += BITS_PER_LONG - bits;
}
- kunmap_atomic(paddr, KM_USER0);
+ kunmap_atomic(paddr, KM_IRQ1);
}
-/* Same thing as drbd_bm_set_bits, but without taking the spin_lock_irqsave.
+/* Same thing as drbd_bm_set_bits,
+ * but more efficient for a large bit range.
* You must first drbd_bm_lock().
* Can be called to set the whole bitmap in one go.
* Sets bits from s to e _inclusive_. */
@@ -1366,6 +1367,7 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
* Do not use memset, because we must account for changes,
* so we need to loop over the words with hweight() anyways.
*/
+ struct drbd_bitmap *b = mdev->bitmap;
unsigned long sl = ALIGN(s,BITS_PER_LONG);
unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
int first_page;
@@ -1376,15 +1378,19 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
if (e - s <= 3*BITS_PER_LONG) {
/* don't bother; el and sl may even be wrong. */
- __bm_change_bits_to(mdev, s, e, 1, KM_USER0);
+ spin_lock_irq(&b->bm_lock);
+ __bm_change_bits_to(mdev, s, e, 1);
+ spin_unlock_irq(&b->bm_lock);
return;
}
/* difference is large enough that we can trust sl and el */
+ spin_lock_irq(&b->bm_lock);
+
/* bits filling the current long */
if (sl)
- __bm_change_bits_to(mdev, s, sl-1, 1, KM_USER0);
+ __bm_change_bits_to(mdev, s, sl-1, 1);
first_page = sl >> (3 + PAGE_SHIFT);
last_page = el >> (3 + PAGE_SHIFT);
@@ -1397,8 +1403,10 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
/* first and full pages, unless first page == last page */
for (page_nr = first_page; page_nr < last_page; page_nr++) {
bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
+ spin_unlock_irq(&b->bm_lock);
cond_resched();
first_word = 0;
+ spin_lock_irq(&b->bm_lock);
}
/* last page (respectively only page, for first page == last page) */
@@ -1411,7 +1419,8 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi
* it would trigger an assert in __bm_change_bits_to()
*/
if (el <= e)
- __bm_change_bits_to(mdev, el, e, 1, KM_USER0);
+ __bm_change_bits_to(mdev, el, e, 1);
+ spin_unlock_irq(&b->bm_lock);
}
/* returns bit state
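The bitmap changes also move locking out of __bm_change_bits_to() into its callers and add an unlock/relock around cond_resched() in the page loop. A sketch of that lock-dropping pattern, with a hypothetical helper name (set_full_words stands in for bm_set_full_words_within_one_page):

spin_lock_irq(&b->bm_lock);
for (page_nr = first_page; page_nr < last_page; page_nr++) {
        set_full_words(b, page_nr, first_word, last_word);
        /* Drop the lock so a long run of pages cannot stall other
         * CPUs or trigger soft-lockups; loop state must stay valid
         * across the window (first_word is re-derived below). */
        spin_unlock_irq(&b->bm_lock);
        cond_resched();
        first_word = 0;
        spin_lock_irq(&b->bm_lock);
}
spin_unlock_irq(&b->bm_lock);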
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 25d32c5aa50..43beaca5317 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -4602,6 +4602,11 @@ int drbd_asender(struct drbd_thread *thi)
dev_err(DEV, "meta connection shut down by peer.\n");
goto reconnect;
} else if (rv == -EAGAIN) {
+ /* If the data socket received something meanwhile,
+ * that is good enough: peer is still alive. */
+ if (time_after(mdev->last_received,
+ jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
+ continue;
if (ping_timeout_active) {
dev_err(DEV, "PingAck did not arrive in time.\n");
goto reconnect;
@@ -4637,6 +4642,7 @@ int drbd_asender(struct drbd_thread *thi)
goto reconnect;
}
if (received == expect) {
+ mdev->last_received = jiffies;
D_ASSERT(cmd != NULL);
if (!cmd->process(mdev, h))
goto reconnect;
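The -EAGAIN branch above treats recent traffic on the data socket as proof the peer is alive, so a late PingAck alone no longer forces a reconnect. The comparison has to be wraparound-safe; a minimal sketch:

#include <linux/jiffies.h>

/* True if something was received within the last 'timeout' jiffies.
 * time_after() handles jiffies wraparound; a plain '>' does not. */
static bool peer_seen_recently(unsigned long last_received,
                               unsigned long timeout)
{
        return time_after(last_received, jiffies - timeout);
}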
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 4d76b06b6b2..4d3e6f6213b 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -536,12 +536,7 @@ static int w_make_resync_request(struct drbd_conf *mdev,
return 1;
}
- /* starting with drbd 8.3.8, we can handle multi-bio EEs,
- * if it should be necessary */
- max_bio_size =
- mdev->agreed_pro_version < 94 ? queue_max_hw_sectors(mdev->rq_queue) << 9 :
- mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_BIO_SIZE;
-
+ max_bio_size = queue_max_hw_sectors(mdev->rq_queue) << 9;
number = drbd_rs_number_requests(mdev);
if (number == 0)
goto requeue;
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 999803ce10d..5da67f165af 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -90,9 +90,10 @@
#define G4x_GMCH_SIZE_MASK (0xf << 8)
#define G4x_GMCH_SIZE_1M (0x1 << 8)
#define G4x_GMCH_SIZE_2M (0x3 << 8)
-#define G4x_GMCH_SIZE_VT_1M (0x9 << 8)
-#define G4x_GMCH_SIZE_VT_1_5M (0xa << 8)
-#define G4x_GMCH_SIZE_VT_2M (0xc << 8)
+#define G4x_GMCH_SIZE_VT_EN (0x8 << 8)
+#define G4x_GMCH_SIZE_VT_1M (G4x_GMCH_SIZE_1M | G4x_GMCH_SIZE_VT_EN)
+#define G4x_GMCH_SIZE_VT_1_5M ((0x2 << 8) | G4x_GMCH_SIZE_VT_EN)
+#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
#define GFX_FLSH_CNTL 0x2170 /* 915+ */
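Working the rewritten macros out numerically (a standalone C11 check, not kernel code): VT_1M and VT_1_5M keep their old values, while VT_2M becomes 0xb << 8 instead of 0xc << 8 — bit 11 is now documented as the VT enable bit, and the 2M-with-VT encoding changes value, presumably correcting the earlier constant:

#define G4x_GMCH_SIZE_1M        (0x1 << 8)
#define G4x_GMCH_SIZE_2M        (0x3 << 8)
#define G4x_GMCH_SIZE_VT_EN     (0x8 << 8)

_Static_assert((G4x_GMCH_SIZE_1M | G4x_GMCH_SIZE_VT_EN) == (0x9 << 8),
               "VT_1M unchanged");
_Static_assert(((0x2 << 8) | G4x_GMCH_SIZE_VT_EN) == (0xa << 8),
               "VT_1_5M unchanged");
_Static_assert((G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN) == (0xb << 8),
               "VT_2M was 0xc << 8 before this patch");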
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 4e04e127438..596d5dd32f4 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -759,7 +759,7 @@ static void __exit acpi_cpufreq_exit(void)
cpufreq_unregister_driver(&acpi_cpufreq_driver);
- free_percpu(acpi_perf_data);
+ free_acpi_perf_data();
}
module_param(acpi_pstate_strict, uint, 0644);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 438e6c83117..ebb897329c1 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -264,6 +264,7 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define PCI_DEVICE_ID_AGERE_FW643 0x5901
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22 0x8009
+#define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd
#define QUIRK_CYCLE_TIMER 1
#define QUIRK_RESET_PACKET 2
@@ -3190,6 +3191,11 @@ static int __devinit pci_probe(struct pci_dev *dev,
int i, err;
size_t size;
+ if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
+ dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
+ return -ENOSYS;
+ }
+
ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
if (ohci == NULL) {
err = -ENOMEM;
diff --git a/drivers/gpio/langwell_gpio.c b/drivers/gpio/langwell_gpio.c
index bd6571e0097..644ba1255d3 100644
--- a/drivers/gpio/langwell_gpio.c
+++ b/drivers/gpio/langwell_gpio.c
@@ -223,7 +223,7 @@ static void lnw_irq_handler(unsigned irq, struct irq_desc *desc)
gedr = gpio_reg(&lnw->chip, base, GEDR);
pending = readl(gedr);
while (pending) {
- gpio = __ffs(pending) - 1;
+ gpio = __ffs(pending);
mask = BIT(gpio);
pending &= ~mask;
/* Clear before handling so we can't lose an edge */
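This one-liner fixes an off-by-one that came from mixing up the kernel's two find-first-bit helpers: ffs() is 1-based (ffs(0x8) == 4, ffs(0) == 0) while __ffs() is 0-based (__ffs(0x8) == 3, undefined for 0). So __ffs(pending) already yields the bit index, and the old "- 1" pointed one GPIO too low. The corrected walk-the-set-bits loop, with a hypothetical handler name:

while (pending) {
        unsigned gpio = __ffs(pending); /* lowest set bit, 0-based */
        pending &= ~BIT(gpio);
        handle_one_gpio(gpio);          /* hypothetical per-bit handler */
}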
diff --git a/drivers/gpio/tps65910-gpio.c b/drivers/gpio/tps65910-gpio.c
index 8d1ddfdd63e..15097ca616d 100644
--- a/drivers/gpio/tps65910-gpio.c
+++ b/drivers/gpio/tps65910-gpio.c
@@ -81,8 +81,10 @@ void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base)
switch(tps65910_chip_id(tps65910)) {
case TPS65910:
tps65910->gpio.ngpio = 6;
+ break;
case TPS65911:
tps65910->gpio.ngpio = 9;
+ break;
default:
return;
}
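The tps65910 fix adds two missing break statements. Without them, every matched case fell through into "default: return;", so the function always bailed out before registering the gpio chip, and a TPS65910 additionally had ngpio overwritten from 6 to 9 on the way down:

switch (tps65910_chip_id(tps65910)) {
case TPS65910:
        tps65910->gpio.ngpio = 6;
        break;          /* previously missing: fell into TPS65911 */
case TPS65911:
        tps65910->gpio.ngpio = 9;
        break;          /* previously missing: fell into default */
default:
        return;
}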
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 21058e6ad2b..82db1850666 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -886,9 +886,6 @@ int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
total_objects += dev->mode_config.num_connector;
total_objects += dev->mode_config.num_encoder;
- if (total_objects == 0)
- return -EINVAL;
-
group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
if (!group->id_list)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4d46441cbe2..0a893f7400f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1207,13 +1207,17 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (ret)
return ret;
- seq_printf(m, "power context ");
- describe_obj(m, dev_priv->pwrctx);
- seq_printf(m, "\n");
+ if (dev_priv->pwrctx) {
+ seq_printf(m, "power context ");
+ describe_obj(m, dev_priv->pwrctx);
+ seq_printf(m, "\n");
+ }
- seq_printf(m, "render context ");
- describe_obj(m, dev_priv->renderctx);
- seq_printf(m, "\n");
+ if (dev_priv->renderctx) {
+ seq_printf(m, "render context ");
+ describe_obj(m, dev_priv->renderctx);
+ seq_printf(m, "\n");
+ }
mutex_unlock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2b79588541e..296fbd66f0e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1266,30 +1266,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_modeset_gem_init(dev);
- if (IS_IVYBRIDGE(dev)) {
- /* Share pre & uninstall handlers with ILK/SNB */
- dev->driver->irq_handler = ivybridge_irq_handler;
- dev->driver->irq_preinstall = ironlake_irq_preinstall;
- dev->driver->irq_postinstall = ivybridge_irq_postinstall;
- dev->driver->irq_uninstall = ironlake_irq_uninstall;
- dev->driver->enable_vblank = ivybridge_enable_vblank;
- dev->driver->disable_vblank = ivybridge_disable_vblank;
- } else if (HAS_PCH_SPLIT(dev)) {
- dev->driver->irq_handler = ironlake_irq_handler;
- dev->driver->irq_preinstall = ironlake_irq_preinstall;
- dev->driver->irq_postinstall = ironlake_irq_postinstall;
- dev->driver->irq_uninstall = ironlake_irq_uninstall;
- dev->driver->enable_vblank = ironlake_enable_vblank;
- dev->driver->disable_vblank = ironlake_disable_vblank;
- } else {
- dev->driver->irq_preinstall = i915_driver_irq_preinstall;
- dev->driver->irq_postinstall = i915_driver_irq_postinstall;
- dev->driver->irq_uninstall = i915_driver_irq_uninstall;
- dev->driver->irq_handler = i915_driver_irq_handler;
- dev->driver->enable_vblank = i915_enable_vblank;
- dev->driver->disable_vblank = i915_disable_vblank;
- }
-
ret = drm_irq_install(dev);
if (ret)
goto cleanup_gem;
@@ -1967,7 +1943,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!dev_priv->mm.gtt) {
DRM_ERROR("Failed to initialize GTT\n");
ret = -ENODEV;
- goto out_iomapfree;
+ goto out_rmmap;
}
agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
@@ -2011,18 +1987,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (dev_priv->wq == NULL) {
DRM_ERROR("Failed to create our workqueue.\n");
ret = -ENOMEM;
- goto out_iomapfree;
+ goto out_mtrrfree;
}
/* enable GEM by default */
dev_priv->has_gem = 1;
- dev->driver->get_vblank_counter = i915_get_vblank_counter;
- dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
- dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
- dev->driver->get_vblank_counter = gm45_get_vblank_counter;
- }
+ intel_irq_init(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
@@ -2103,13 +2074,21 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
out_gem_unload:
+ if (dev_priv->mm.inactive_shrinker.shrink)
+ unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
destroy_workqueue(dev_priv->wq);
-out_iomapfree:
+out_mtrrfree:
+ if (dev_priv->mm.gtt_mtrr >= 0) {
+ mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
+ dev->agp->agp_info.aper_size * 1024 * 1024);
+ dev_priv->mm.gtt_mtrr = -1;
+ }
io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
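The i915_dma.c error-path rework follows the usual kernel unwind idiom: each acquisition gets a label, failures jump to the label that releases everything obtained so far in reverse order, and new resources (here the GTT MTRR) get their own unwind step. A generic sketch with hypothetical names:

int example_probe(void)
{
        int ret;

        ret = setup_a();
        if (ret)
                return ret;
        ret = setup_b();
        if (ret)
                goto undo_a;
        ret = setup_c();
        if (ret)
                goto undo_b;
        return 0;

undo_b:
        teardown_b();   /* reverse order of acquisition */
undo_a:
        teardown_a();
        return ret;
}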
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 609358faaa9..eb91e2dd791 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -52,7 +52,7 @@ module_param_named(powersave, i915_powersave, int, 0600);
unsigned int i915_semaphores = 0;
module_param_named(semaphores, i915_semaphores, int, 0600);
-unsigned int i915_enable_rc6 = 1;
+unsigned int i915_enable_rc6 = 0;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
unsigned int i915_enable_fbc = 0;
@@ -577,6 +577,7 @@ int i915_reset(struct drm_device *dev, u8 flags)
if (get_seconds() - dev_priv->last_gpu_reset < 5) {
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
} else switch (INTEL_INFO(dev)->gen) {
+ case 7:
case 6:
ret = gen6_do_reset(dev, flags);
/* If reset with a user forcewake, try to restore */
@@ -765,14 +766,6 @@ static struct drm_driver driver = {
.resume = i915_resume,
.device_is_agp = i915_driver_device_is_agp,
- .enable_vblank = i915_enable_vblank,
- .disable_vblank = i915_disable_vblank,
- .get_vblank_timestamp = i915_get_vblank_timestamp,
- .get_scanout_position = i915_get_crtc_scanoutpos,
- .irq_preinstall = i915_driver_irq_preinstall,
- .irq_postinstall = i915_driver_irq_postinstall,
- .irq_uninstall = i915_driver_irq_uninstall,
- .irq_handler = i915_driver_irq_handler,
.reclaim_buffers = drm_core_reclaim_buffers,
.master_create = i915_master_create,
.master_destroy = i915_master_destroy,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index eddabf68e97..f245c588ae9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -997,8 +997,6 @@ extern unsigned int i915_enable_fbc;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
-extern void i915_save_display(struct drm_device *dev);
-extern void i915_restore_display(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
@@ -1033,33 +1031,12 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
extern int i915_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
-extern void i915_driver_irq_preinstall(struct drm_device * dev);
-extern int i915_driver_irq_postinstall(struct drm_device *dev);
-extern void i915_driver_irq_uninstall(struct drm_device * dev);
-
-extern irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS);
-extern void ironlake_irq_preinstall(struct drm_device *dev);
-extern int ironlake_irq_postinstall(struct drm_device *dev);
-extern void ironlake_irq_uninstall(struct drm_device *dev);
-
-extern irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS);
-extern void ivybridge_irq_preinstall(struct drm_device *dev);
-extern int ivybridge_irq_postinstall(struct drm_device *dev);
-extern void ivybridge_irq_uninstall(struct drm_device *dev);
+extern void intel_irq_init(struct drm_device *dev);
extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int i915_enable_vblank(struct drm_device *dev, int crtc);
-extern void i915_disable_vblank(struct drm_device *dev, int crtc);
-extern int ironlake_enable_vblank(struct drm_device *dev, int crtc);
-extern void ironlake_disable_vblank(struct drm_device *dev, int crtc);
-extern int ivybridge_enable_vblank(struct drm_device *dev, int crtc);
-extern void ivybridge_disable_vblank(struct drm_device *dev, int crtc);
-extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
-extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
extern int i915_vblank_swap(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1070,13 +1047,6 @@ void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void intel_enable_asle (struct drm_device *dev);
-int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
- int *max_error,
- struct timeval *vblank_time,
- unsigned flags);
-
-int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
- int *vpos, int *hpos);
#ifdef CONFIG_DEBUG_FS
extern void i915_destroy_error_state(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index ae2b49969b9..3b03f85ea62 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -152,7 +152,7 @@ i915_pipe_enabled(struct drm_device *dev, int pipe)
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
-u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
+static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long high_frame;
@@ -184,7 +184,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
return (high1 << 8) | low;
}
-u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
+static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int reg = PIPE_FRMCOUNT_GM45(pipe);
@@ -198,7 +198,7 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
return I915_READ(reg);
}
-int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
int *vpos, int *hpos)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -264,7 +264,7 @@ int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
return ret;
}
-int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
+static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
int *max_error,
struct timeval *vblank_time,
unsigned flags)
@@ -462,7 +462,7 @@ static void pch_irq_handler(struct drm_device *dev)
DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
}
-irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -550,7 +550,7 @@ done:
return ret;
}
-irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1209,7 +1209,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
}
}
-irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
+static irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1454,7 +1454,7 @@ int i915_irq_wait(struct drm_device *dev, void *data,
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
-int i915_enable_vblank(struct drm_device *dev, int pipe)
+static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -1478,7 +1478,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
return 0;
}
-int ironlake_enable_vblank(struct drm_device *dev, int pipe)
+static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -1494,7 +1494,7 @@ int ironlake_enable_vblank(struct drm_device *dev, int pipe)
return 0;
}
-int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
+static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -1513,7 +1513,7 @@ int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
/* Called from drm generic code, passed 'crtc' which
* we use as a pipe index
*/
-void i915_disable_vblank(struct drm_device *dev, int pipe)
+static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -1529,7 +1529,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-void ironlake_disable_vblank(struct drm_device *dev, int pipe)
+static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -1540,7 +1540,7 @@ void ironlake_disable_vblank(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
-void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
+static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
@@ -1728,7 +1728,7 @@ repeat:
/* drm_dma.h hooks
*/
-void ironlake_irq_preinstall(struct drm_device *dev)
+static void ironlake_irq_preinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1740,7 +1740,7 @@ void ironlake_irq_preinstall(struct drm_device *dev)
INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
I915_WRITE(HWSTAM, 0xeffe);
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
/* Workaround stalls observed on Sandy Bridge GPUs by
* making the blitter command streamer generate a
* write to the Hardware Status Page for
@@ -1769,7 +1769,7 @@ void ironlake_irq_preinstall(struct drm_device *dev)
POSTING_READ(SDEIER);
}
-int ironlake_irq_postinstall(struct drm_device *dev)
+static int ironlake_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
@@ -1841,7 +1841,7 @@ int ironlake_irq_postinstall(struct drm_device *dev)
return 0;
}
-int ivybridge_irq_postinstall(struct drm_device *dev)
+static int ivybridge_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
@@ -1891,7 +1891,7 @@ int ivybridge_irq_postinstall(struct drm_device *dev)
return 0;
}
-void i915_driver_irq_preinstall(struct drm_device * dev)
+static void i915_driver_irq_preinstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
@@ -1918,7 +1918,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
* Must be called after intel_modeset_init or hotplug interrupts won't be
* enabled correctly.
*/
-int i915_driver_irq_postinstall(struct drm_device *dev)
+static int i915_driver_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
@@ -1994,7 +1994,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
return 0;
}
-void ironlake_irq_uninstall(struct drm_device *dev)
+static void ironlake_irq_uninstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2014,7 +2014,7 @@ void ironlake_irq_uninstall(struct drm_device *dev)
I915_WRITE(GTIIR, I915_READ(GTIIR));
}
-void i915_driver_irq_uninstall(struct drm_device * dev)
+static void i915_driver_irq_uninstall(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
@@ -2040,3 +2040,41 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
I915_WRITE(IIR, I915_READ(IIR));
}
+
+void intel_irq_init(struct drm_device *dev)
+{
+ dev->driver->get_vblank_counter = i915_get_vblank_counter;
+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+ if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev) || IS_IVYBRIDGE(dev)) {
+ dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
+ dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+ }
+
+
+ dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
+ dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
+
+ if (IS_IVYBRIDGE(dev)) {
+ /* Share pre & uninstall handlers with ILK/SNB */
+ dev->driver->irq_handler = ivybridge_irq_handler;
+ dev->driver->irq_preinstall = ironlake_irq_preinstall;
+ dev->driver->irq_postinstall = ivybridge_irq_postinstall;
+ dev->driver->irq_uninstall = ironlake_irq_uninstall;
+ dev->driver->enable_vblank = ivybridge_enable_vblank;
+ dev->driver->disable_vblank = ivybridge_disable_vblank;
+ } else if (HAS_PCH_SPLIT(dev)) {
+ dev->driver->irq_handler = ironlake_irq_handler;
+ dev->driver->irq_preinstall = ironlake_irq_preinstall;
+ dev->driver->irq_postinstall = ironlake_irq_postinstall;
+ dev->driver->irq_uninstall = ironlake_irq_uninstall;
+ dev->driver->enable_vblank = ironlake_enable_vblank;
+ dev->driver->disable_vblank = ironlake_disable_vblank;
+ } else {
+ dev->driver->irq_preinstall = i915_driver_irq_preinstall;
+ dev->driver->irq_postinstall = i915_driver_irq_postinstall;
+ dev->driver->irq_uninstall = i915_driver_irq_uninstall;
+ dev->driver->irq_handler = i915_driver_irq_handler;
+ dev->driver->enable_vblank = i915_enable_vblank;
+ dev->driver->disable_vblank = i915_disable_vblank;
+ }
+}
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index e8152d23d5b..5257cfc34c3 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -597,7 +597,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
return;
}
-void i915_save_display(struct drm_device *dev)
+static void i915_save_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -678,7 +678,6 @@ void i915_save_display(struct drm_device *dev)
}
/* VGA state */
- mutex_lock(&dev->struct_mutex);
dev_priv->saveVGA0 = I915_READ(VGA0);
dev_priv->saveVGA1 = I915_READ(VGA1);
dev_priv->saveVGA_PD = I915_READ(VGA_PD);
@@ -688,10 +687,9 @@ void i915_save_display(struct drm_device *dev)
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
i915_save_vga(dev);
- mutex_unlock(&dev->struct_mutex);
}
-void i915_restore_display(struct drm_device *dev)
+static void i915_restore_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -783,7 +781,6 @@ void i915_restore_display(struct drm_device *dev)
else
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
- mutex_lock(&dev->struct_mutex);
I915_WRITE(VGA0, dev_priv->saveVGA0);
I915_WRITE(VGA1, dev_priv->saveVGA1);
I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
@@ -791,7 +788,6 @@ void i915_restore_display(struct drm_device *dev)
udelay(150);
i915_restore_vga(dev);
- mutex_unlock(&dev->struct_mutex);
}
int i915_save_state(struct drm_device *dev)
@@ -801,6 +797,8 @@ int i915_save_state(struct drm_device *dev)
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
+ mutex_lock(&dev->struct_mutex);
+
/* Hardware status page */
dev_priv->saveHWS = I915_READ(HWS_PGA);
@@ -840,6 +838,8 @@ int i915_save_state(struct drm_device *dev)
for (i = 0; i < 3; i++)
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+ mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -850,6 +850,8 @@ int i915_restore_state(struct drm_device *dev)
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
+ mutex_lock(&dev->struct_mutex);
+
/* Hardware status page */
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
@@ -867,6 +869,7 @@ int i915_restore_state(struct drm_device *dev)
I915_WRITE(IER, dev_priv->saveIER);
I915_WRITE(IMR, dev_priv->saveIMR);
}
+ mutex_unlock(&dev->struct_mutex);
intel_init_clock_gating(dev);
@@ -878,6 +881,8 @@ int i915_restore_state(struct drm_device *dev)
if (IS_GEN6(dev))
gen6_enable_rps(dev_priv);
+ mutex_lock(&dev->struct_mutex);
+
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
@@ -891,6 +896,8 @@ int i915_restore_state(struct drm_device *dev)
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+ mutex_unlock(&dev->struct_mutex);
+
intel_i2c_reset(dev);
return 0;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 391b55f1cc7..e2aced6eec4 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -50,7 +50,6 @@ struct intel_dp {
bool has_audio;
int force_audio;
uint32_t color_range;
- int dpms_mode;
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[4];
@@ -138,8 +137,8 @@ intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
int max_lane_count = 4;
- if (intel_dp->dpcd[0] >= 0x11) {
- max_lane_count = intel_dp->dpcd[2] & 0x1f;
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
+ max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
switch (max_lane_count) {
case 1: case 2: case 4:
break;
@@ -153,7 +152,7 @@ intel_dp_max_lane_count(struct intel_dp *intel_dp)
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
- int max_link_bw = intel_dp->dpcd[1];
+ int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
switch (max_link_bw) {
case DP_LINK_BW_1_62:
@@ -774,7 +773,8 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
/*
* Check for DPCD version > 1.1 and enhanced framing support
*/
- if (intel_dp->dpcd[0] >= 0x11 && (intel_dp->dpcd[2] & DP_ENHANCED_FRAME_CAP)) {
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
intel_dp->DP |= DP_ENHANCED_FRAMING;
}
@@ -942,11 +942,44 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder)
udelay(200);
}
+/* If the sink supports it, try to set the power state appropriately */
+static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
+{
+ int ret, i;
+
+ /* Should have a valid DPCD by this point */
+ if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
+ return;
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
+ DP_SET_POWER_D3);
+ if (ret != 1)
+ DRM_DEBUG_DRIVER("failed to write sink power state\n");
+ } else {
+ /*
+ * When turning on, we need to retry for 1ms to give the sink
+ * time to wake up.
+ */
+ for (i = 0; i < 3; i++) {
+ ret = intel_dp_aux_native_write_1(intel_dp,
+ DP_SET_POWER,
+ DP_SET_POWER_D0);
+ if (ret == 1)
+ break;
+ msleep(1);
+ }
+ }
+}
+
static void intel_dp_prepare(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_device *dev = encoder->dev;
+ /* Wake up the sink first */
+ intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+
if (is_edp(intel_dp)) {
ironlake_edp_backlight_off(dev);
ironlake_edp_panel_off(dev);
@@ -990,6 +1023,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
if (mode != DRM_MODE_DPMS_ON) {
if (is_edp(intel_dp))
ironlake_edp_backlight_off(dev);
+ intel_dp_sink_dpms(intel_dp, mode);
intel_dp_link_down(intel_dp);
if (is_edp(intel_dp))
ironlake_edp_panel_off(dev);
@@ -998,6 +1032,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
} else {
if (is_edp(intel_dp))
ironlake_edp_panel_vdd_on(intel_dp);
+ intel_dp_sink_dpms(intel_dp, mode);
if (!(dp_reg & DP_PORT_EN)) {
intel_dp_start_link_train(intel_dp);
if (is_edp(intel_dp)) {
@@ -1009,7 +1044,31 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
if (is_edp(intel_dp))
ironlake_edp_backlight_on(dev);
}
- intel_dp->dpms_mode = mode;
+}
+
+/*
+ * Native read with retry for link status and receiver capability reads for
+ * cases where the sink may still be asleep.
+ */
+static bool
+intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
+ uint8_t *recv, int recv_bytes)
+{
+ int ret, i;
+
+ /*
+ * Sinks are *supposed* to come up within 1ms from an off state,
+ * but we're also supposed to retry 3 times per the spec.
+ */
+ for (i = 0; i < 3; i++) {
+ ret = intel_dp_aux_native_read(intel_dp, address, recv,
+ recv_bytes);
+ if (ret == recv_bytes)
+ return true;
+ msleep(1);
+ }
+
+ return false;
}
/*
@@ -1019,14 +1078,10 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp)
{
- int ret;
-
- ret = intel_dp_aux_native_read(intel_dp,
- DP_LANE0_1_STATUS,
- intel_dp->link_status, DP_LINK_STATUS_SIZE);
- if (ret != DP_LINK_STATUS_SIZE)
- return false;
- return true;
+ return intel_dp_aux_native_read_retry(intel_dp,
+ DP_LANE0_1_STATUS,
+ intel_dp->link_status,
+ DP_LINK_STATUS_SIZE);
}
static uint8_t
@@ -1515,6 +1570,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
+ int ret;
+
if (!intel_dp->base.base.crtc)
return;
@@ -1523,6 +1580,15 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
return;
}
+ /* Try to read receiver status if the link appears to be up */
+ ret = intel_dp_aux_native_read(intel_dp,
+ 0x000, intel_dp->dpcd,
+ sizeof (intel_dp->dpcd));
+ if (ret != sizeof(intel_dp->dpcd)) {
+ intel_dp_link_down(intel_dp);
+ return;
+ }
+
if (!intel_channel_eq_ok(intel_dp)) {
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
@@ -1533,6 +1599,7 @@ static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
enum drm_connector_status status;
+ bool ret;
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp)) {
@@ -1543,13 +1610,11 @@ ironlake_dp_detect(struct intel_dp *intel_dp)
}
status = connector_status_disconnected;
- if (intel_dp_aux_native_read(intel_dp,
- 0x000, intel_dp->dpcd,
- sizeof (intel_dp->dpcd))
- == sizeof(intel_dp->dpcd)) {
- if (intel_dp->dpcd[0] != 0)
- status = connector_status_connected;
- }
+ ret = intel_dp_aux_native_read_retry(intel_dp,
+ 0x000, intel_dp->dpcd,
+ sizeof (intel_dp->dpcd));
+ if (ret && intel_dp->dpcd[DP_DPCD_REV] != 0)
+ status = connector_status_connected;
DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0],
intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]);
return status;
@@ -1586,7 +1651,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
if (intel_dp_aux_native_read(intel_dp, 0x000, intel_dp->dpcd,
sizeof (intel_dp->dpcd)) == sizeof (intel_dp->dpcd))
{
- if (intel_dp->dpcd[0] != 0)
+ if (intel_dp->dpcd[DP_DPCD_REV] != 0)
status = connector_status_connected;
}
@@ -1790,8 +1855,7 @@ intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
- if (intel_dp->dpms_mode == DRM_MODE_DPMS_ON)
- intel_dp_check_link_status(intel_dp);
+ intel_dp_check_link_status(intel_dp);
}
/* Return which DP Port should be selected for Transcoder DP control */
@@ -1859,7 +1923,6 @@ intel_dp_init(struct drm_device *dev, int output_reg)
return;
intel_dp->output_reg = output_reg;
- intel_dp->dpms_mode = -1;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
@@ -1954,8 +2017,9 @@ intel_dp_init(struct drm_device *dev, int output_reg)
sizeof(intel_dp->dpcd));
ironlake_edp_panel_vdd_off(intel_dp);
if (ret == sizeof(intel_dp->dpcd)) {
- if (intel_dp->dpcd[0] >= 0x11)
- dev_priv->no_aux_handshake = intel_dp->dpcd[3] &
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
+ dev_priv->no_aux_handshake =
+ intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
} else {
/* if this fails, presume the device is a ghost */
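Throughout the intel_dp.c changes, bare dpcd[] indices are replaced with named DPCD offsets from the DisplayPort spec. The bytes read are the same; values as defined in drm_dp_helper.h:

#define DP_DPCD_REV             0x000   /* was dpcd[0] */
#define DP_MAX_LINK_RATE        0x001   /* was dpcd[1] */
#define DP_MAX_LANE_COUNT       0x002   /* was dpcd[2]; bit 7 = DP_ENHANCED_FRAME_CAP */
#define DP_MAX_DOWNSPREAD       0x003   /* was dpcd[3]; bit 6 = DP_NO_AUX_HANDSHAKE_LINK_TRAINING */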
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 56a8e2aea19..9e2959bc91c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1409,6 +1409,11 @@ void intel_setup_overlay(struct drm_device *dev)
overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
if (!overlay)
return;
+
+ mutex_lock(&dev->struct_mutex);
+ if (WARN_ON(dev_priv->overlay))
+ goto out_free;
+
overlay->dev = dev;
reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
@@ -1416,8 +1421,6 @@ void intel_setup_overlay(struct drm_device *dev)
goto out_free;
overlay->reg_bo = reg_bo;
- mutex_lock(&dev->struct_mutex);
-
if (OVERLAY_NEEDS_PHYSICAL(dev)) {
ret = i915_gem_attach_phys_object(dev, reg_bo,
I915_GEM_PHYS_OVERLAY_REGS,
@@ -1442,8 +1445,6 @@ void intel_setup_overlay(struct drm_device *dev)
}
}
- mutex_unlock(&dev->struct_mutex);
-
/* init all values */
overlay->color_key = 0x0101fe;
overlay->brightness = -19;
@@ -1452,7 +1453,7 @@ void intel_setup_overlay(struct drm_device *dev)
regs = intel_overlay_map_regs(overlay);
if (!regs)
- goto out_free_bo;
+ goto out_unpin_bo;
memset(regs, 0, sizeof(struct overlay_registers));
update_polyphase_filter(regs);
@@ -1461,15 +1462,17 @@ void intel_setup_overlay(struct drm_device *dev)
intel_overlay_unmap_regs(overlay, regs);
dev_priv->overlay = overlay;
+ mutex_unlock(&dev->struct_mutex);
DRM_INFO("initialized overlay support\n");
return;
out_unpin_bo:
- i915_gem_object_unpin(reg_bo);
+ if (!OVERLAY_NEEDS_PHYSICAL(dev))
+ i915_gem_object_unpin(reg_bo);
out_free_bo:
drm_gem_object_unreference(&reg_bo->base);
- mutex_unlock(&dev->struct_mutex);
out_free:
+ mutex_unlock(&dev->struct_mutex);
kfree(overlay);
return;
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c0e0ee63fbf..39ac2b634ae 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -165,7 +165,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
{
- return intel_wait_ring_buffer(ring, ring->space - 8);
+ return intel_wait_ring_buffer(ring, ring->size - 8);
}
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
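A note on the one-line ring fix above, using the driver's usual space accounting (a sketch; 8 bytes stay reserved so head == tail can only ever mean "empty"):

/* Free space in the ring:
 *
 *      space = (head - tail - 8) mod size
 *
 * so an idle (empty) ring has space == size - 8, the constant the wait
 * now targets. The old target, ring->space - 8, compared free space
 * against itself minus 8 and could succeed with commands still queued. */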
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 144f79a350a..731acea865b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -371,7 +371,6 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->vram.flags_valid = nv50_vram_flags_valid;
break;
case 0xC0:
- case 0xD0:
engine->instmem.init = nvc0_instmem_init;
engine->instmem.takedown = nvc0_instmem_takedown;
engine->instmem.suspend = nvc0_instmem_suspend;
@@ -923,7 +922,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
dev_priv->card_type = NV_50;
break;
case 0xc0:
- case 0xd0:
dev_priv->card_type = NV_C0;
break;
default:
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 12d2fdc5241..15bd0477a3e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -985,17 +985,19 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
{
save->vga_control[0] = RREG32(D1VGA_CONTROL);
save->vga_control[1] = RREG32(D2VGA_CONTROL);
- save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
- save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
- save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
- save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
+ save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
+ save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ }
+ if (rdev->num_crtc >= 6) {
+ save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
+ save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
}
@@ -1004,35 +1006,45 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
WREG32(VGA_RENDER_CONTROL, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
}
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
WREG32(D1VGA_CONTROL, 0);
WREG32(D2VGA_CONTROL, 0);
- WREG32(EVERGREEN_D3VGA_CONTROL, 0);
- WREG32(EVERGREEN_D4VGA_CONTROL, 0);
- WREG32(EVERGREEN_D5VGA_CONTROL, 0);
- WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+ if (rdev->num_crtc >= 4) {
+ WREG32(EVERGREEN_D3VGA_CONTROL, 0);
+ WREG32(EVERGREEN_D4VGA_CONTROL, 0);
+ }
+ if (rdev->num_crtc >= 6) {
+ WREG32(EVERGREEN_D5VGA_CONTROL, 0);
+ WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+ }
}
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
@@ -1055,7 +1067,7 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
@@ -1073,7 +1085,8 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
-
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
@@ -1101,31 +1114,41 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
/* Restore video state */
WREG32(D1VGA_CONTROL, save->vga_control[0]);
WREG32(D2VGA_CONTROL, save->vga_control[1]);
- WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
- WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
- WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
- WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
+ if (rdev->num_crtc >= 4) {
+ WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
+ WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
+ }
+ if (rdev->num_crtc >= 6) {
+ WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
+ WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
+ }
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
}
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
}
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
@@ -1977,7 +2000,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
gb_backend_map = 0x66442200;
break;
case CHIP_JUNIPER:
- gb_backend_map = 0x00006420;
+ gb_backend_map = 0x00002200;
break;
default:
gb_backend_map =
@@ -2248,7 +2271,10 @@ int evergreen_mc_init(struct radeon_device *rdev)
/* Get VRAM information */
rdev->mc.vram_is_ddr = true;
- tmp = RREG32(MC_ARB_RAMCFG);
+ if (rdev->flags & RADEON_IS_IGP)
+ tmp = RREG32(FUS_MC_ARB_RAMCFG);
+ else
+ tmp = RREG32(MC_ARB_RAMCFG);
if (tmp & CHANSIZE_OVERRIDE) {
chansize = 16;
} else if (tmp & CHANSIZE_MASK) {
@@ -2414,18 +2440,22 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
@@ -2544,19 +2574,25 @@ int evergreen_irq_set(struct radeon_device *rdev)
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
- if (!(rdev->flags & RADEON_IS_IGP)) {
+ if (rdev->num_crtc >= 4) {
WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
+ }
+ if (rdev->num_crtc >= 6) {
WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
}
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
- WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+ if (rdev->num_crtc >= 4) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
+ }
+ if (rdev->num_crtc >= 6) {
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
+ WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
+ }
WREG32(DC_HPD1_INT_CONTROL, hpd1);
WREG32(DC_HPD2_INT_CONTROL, hpd2);
@@ -2580,53 +2616,57 @@ static inline void evergreen_irq_ack(struct radeon_device *rdev)
rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
- rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
- rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
- rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
- rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ if (rdev->num_crtc >= 4) {
+ rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ }
+ if (rdev->num_crtc >= 6) {
+ rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ }
if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
- if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
- WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
- if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
- WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
- if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
- WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
- if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
- WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
-
if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
-
if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
- WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
- if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
- WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
-
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
- WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
- if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
- WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
-
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
- WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
- if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
- WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
-
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
- WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
- if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
- WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+ if (rdev->num_crtc >= 4) {
+ if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
+ }
+
+ if (rdev->num_crtc >= 6) {
+ if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
+ WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
+ WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
+ if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
+ WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
+ }
if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
tmp = RREG32(DC_HPD1_INT_CONTROL);
@@ -3234,6 +3274,7 @@ void evergreen_fini(struct radeon_device *rdev)
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
radeon_gem_fini(rdev);
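The recurring change in evergreen.c above replaces the old two-way IGP test with tiered guards on rdev->num_crtc, so the same blackout/save/resume code serves two-, four-, and six-CRTC parts without touching registers the smaller chips lack. A compilable sketch of just that gating pattern, with a stubbed register write and placeholder offsets rather than the real evergreend.h values:

#include <stdint.h>
#include <stdio.h>

/* Stub for the driver's WREG32(); the real one is an MMIO store. */
static void wreg32(uint32_t reg, uint32_t val)
{
	printf("WREG32(0x%05x, 0x%08x)\n", (unsigned)reg, (unsigned)val);
}

/* Placeholder per-CRTC offsets and register address, illustration only. */
static const uint32_t crtc_offset[6] = {
	0x0000, 0x0800, 0x1000, 0x1800, 0x2000, 0x2800,
};
#define CRTC_UPDATE_LOCK 0x6ef8

static void set_update_lock(int num_crtc, uint32_t lock)
{
	/* CRTC0/1 exist on every Evergreen-class part */
	wreg32(CRTC_UPDATE_LOCK + crtc_offset[0], lock);
	wreg32(CRTC_UPDATE_LOCK + crtc_offset[1], lock);
	/* CRTC2/3 only on parts with at least four display controllers */
	if (num_crtc >= 4) {
		wreg32(CRTC_UPDATE_LOCK + crtc_offset[2], lock);
		wreg32(CRTC_UPDATE_LOCK + crtc_offset[3], lock);
	}
	/* CRTC4/5 only on the six-controller parts */
	if (num_crtc >= 6) {
		wreg32(CRTC_UPDATE_LOCK + crtc_offset[4], lock);
		wreg32(CRTC_UPDATE_LOCK + crtc_offset[5], lock);
	}
}

int main(void)
{
	set_update_lock(2, 1);	/* a two-CRTC IGP touches only CRTC0/1 */
	return 0;
}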
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 57f3bc17b87..2eb251858e7 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -252,7 +252,7 @@ draw_auto(struct radeon_device *rdev)
}
-/* emits 36 */
+/* emits 39 */
static void
set_default_state(struct radeon_device *rdev)
{
@@ -531,6 +531,11 @@ set_default_state(struct radeon_device *rdev)
radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(rdev, 0);
+ /* setup LDS */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(rdev, 0x10001000);
+
/* SQ config */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
@@ -773,7 +778,7 @@ int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
/* set default + shaders */
- ring_size += 52; /* shaders + def state */
+ ring_size += 55; /* shaders + def state */
ring_size += 10; /* fence emit for VB IB */
ring_size += 5; /* done copy */
ring_size += 10; /* fence emit for done copy */
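The two counter bumps above are coupled: the new LDS setup is one three-dword packet (PACKET3 header, register offset, value), so the set_default_state() emit count grows from 36 to 39 and the "shaders + def state" ring budget grows from 52 to 55 by the same amount. A throwaway check of that accounting:

#include <assert.h>

int main(void)
{
	int lds_packet = 1 + 1 + 1;	/* PACKET3 header + reg offset + value */

	assert(36 + lds_packet == 39);	/* set_default_state() emits */
	assert(52 + lds_packet == 55);	/* shaders + default state budget */
	return 0;
}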
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 1636e344982..b7b2714f0b3 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -466,7 +466,7 @@
#define IH_RB_WPTR_ADDR_LO 0x3e14
#define IH_CNTL 0x3e18
# define ENABLE_INTR (1 << 0)
-# define IH_MC_SWAP(x) ((x) << 2)
+# define IH_MC_SWAP(x) ((x) << 1)
# define IH_MC_SWAP_NONE 0
# define IH_MC_SWAP_16BIT 1
# define IH_MC_SWAP_32BIT 2
@@ -547,7 +547,7 @@
# define LB_D5_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD5_INTERRUPT (1 << 17)
# define DC_HPD5_RX_INTERRUPT (1 << 18)
-#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6050
+#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6150
# define LB_D6_VLINE_INTERRUPT (1 << 2)
# define LB_D6_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD6_INTERRUPT (1 << 17)
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 16caafeadf5..559dbd41290 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1581,6 +1581,7 @@ void cayman_fini(struct radeon_device *rdev)
cayman_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
cayman_pcie_gart_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 9736746da2d..4672869cdb2 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -320,7 +320,7 @@
#define CGTS_USER_TCC_DISABLE 0x914C
#define TCC_DISABLE_MASK 0xFFFF0000
#define TCC_DISABLE_SHIFT 16
-#define CGTS_SM_CTRL_REG 0x915C
+#define CGTS_SM_CTRL_REG 0x9150
#define OVERRIDE (1 << 21)
#define TA_CNTL_AUX 0x9508
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f79d2ccb675..bc54b26cb32 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2628,6 +2628,7 @@ void r600_fini(struct radeon_device *rdev)
r600_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
radeon_agp_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index f140a0d5cb5..0245ae6c204 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -536,7 +536,7 @@
#define IH_RB_WPTR_ADDR_LO 0x3e14
#define IH_CNTL 0x3e18
# define ENABLE_INTR (1 << 0)
-# define IH_MC_SWAP(x) ((x) << 2)
+# define IH_MC_SWAP(x) ((x) << 1)
# define IH_MC_SWAP_NONE 0
# define IH_MC_SWAP_16BIT 1
# define IH_MC_SWAP_32BIT 2
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 3fc5fa1aefd..229a20f10e2 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -331,7 +331,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
viph_control = RREG32(RADEON_VIPH_CONTROL);
- bus_cntl = RREG32(RADEON_BUS_CNTL);
+ bus_cntl = RREG32(RV370_BUS_CNTL);
d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
@@ -350,7 +350,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
/* enable the rom */
- WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+ WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
/* Disable VGA mode */
WREG32(AVIVO_D1VGA_CONTROL,
@@ -367,7 +367,7 @@ static bool avivo_read_disabled_bios(struct radeon_device *rdev)
/* restore regs */
WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
WREG32(RADEON_VIPH_CONTROL, viph_control);
- WREG32(RADEON_BUS_CNTL, bus_cntl);
+ WREG32(RV370_BUS_CNTL, bus_cntl);
WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
@@ -390,7 +390,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
seprom_cntl1 = RREG32(RADEON_SEPROM_CNTL1);
viph_control = RREG32(RADEON_VIPH_CONTROL);
- bus_cntl = RREG32(RADEON_BUS_CNTL);
+ if (rdev->flags & RADEON_IS_PCIE)
+ bus_cntl = RREG32(RV370_BUS_CNTL);
+ else
+ bus_cntl = RREG32(RADEON_BUS_CNTL);
crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
crtc2_gen_cntl = 0;
crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
@@ -412,7 +415,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
/* enable the rom */
- WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+ if (rdev->flags & RADEON_IS_PCIE)
+ WREG32(RV370_BUS_CNTL, (bus_cntl & ~RV370_BUS_BIOS_DIS_ROM));
+ else
+ WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
/* Turn off mem requests and CRTC for both controllers */
WREG32(RADEON_CRTC_GEN_CNTL,
@@ -439,7 +445,10 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev)
/* restore regs */
WREG32(RADEON_SEPROM_CNTL1, seprom_cntl1);
WREG32(RADEON_VIPH_CONTROL, viph_control);
- WREG32(RADEON_BUS_CNTL, bus_cntl);
+ if (rdev->flags & RADEON_IS_PCIE)
+ WREG32(RV370_BUS_CNTL, bus_cntl);
+ else
+ WREG32(RADEON_BUS_CNTL, bus_cntl);
WREG32(RADEON_CRTC_GEN_CNTL, crtc_gen_cntl);
if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cbfca3a24fd..9792d4ffdc8 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -52,6 +52,12 @@ void radeon_connector_hotplug(struct drm_connector *connector)
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+ /* bail if the connector does not have an hpd pin, e.g.,
+ * VGA, TV, etc.
+ */
+ if (radeon_connector->hpd.hpd == RADEON_HPD_NONE)
+ return;
+
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
/* powering up/down the eDP panel generates hpd events which
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index ec93a75369e..bc44a3d35ec 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -300,6 +300,8 @@
# define RADEON_BUS_READ_BURST (1 << 30)
#define RADEON_BUS_CNTL1 0x0034
# define RADEON_BUS_WAIT_ON_LOCK_EN (1 << 4)
+#define RV370_BUS_CNTL 0x004c
+# define RV370_BUS_BIOS_DIS_ROM (1 << 2)
/* rv370/rv380, rv410, r423/r430/r480, r5xx */
#define RADEON_MSI_REARM_EN 0x0160
# define RV370_MSI_REARM_EN (1 << 0)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 6e3b11e5abb..1f5850e473c 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -426,7 +426,7 @@ int rs600_gart_init(struct radeon_device *rdev)
return radeon_gart_table_vram_alloc(rdev);
}
-int rs600_gart_enable(struct radeon_device *rdev)
+static int rs600_gart_enable(struct radeon_device *rdev)
{
u32 tmp;
int r, i;
@@ -440,8 +440,8 @@ int rs600_gart_enable(struct radeon_device *rdev)
return r;
radeon_gart_restore(rdev);
/* Enable bus master */
- tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
- WREG32(R_00004C_BUS_CNTL, tmp);
+ tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
+ WREG32(RADEON_BUS_CNTL, tmp);
/* FIXME: setup default page */
WREG32_MC(R_000100_MC_PT0_CNTL,
(S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 6f508ffd103..4de51891aa6 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -575,6 +575,12 @@ static void rv770_program_channel_remap(struct radeon_device *rdev)
else
tcp_chan_steer = 0x00fac688;
+ /* RV770 CE has special chremap setup */
+ if (rdev->pdev->device == 0x944e) {
+ tcp_chan_steer = 0x00b08b08;
+ mc_shared_chremap = 0x00b08b08;
+ }
+
WREG32(TCP_CHAN_STEER, tcp_chan_steer);
WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
}
@@ -1362,6 +1368,7 @@ void rv770_fini(struct radeon_device *rdev)
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
rv770_vram_scratch_fini(rdev);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 16db83c83c8..5f888f7e7dc 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -333,7 +333,7 @@ config SENSORS_F71882FG
F71858FG
F71862FG
F71863FG
- F71869F/E
+ F71869F/E/A
F71882FG
F71883FG
F71889FG/ED/A
diff --git a/drivers/hwmon/adm1275.c b/drivers/hwmon/adm1275.c
index c2ee2048ab9..b9b7caf4a1d 100644
--- a/drivers/hwmon/adm1275.c
+++ b/drivers/hwmon/adm1275.c
@@ -32,6 +32,7 @@ static int adm1275_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int config;
+ int ret;
struct pmbus_driver_info *info;
if (!i2c_check_functionality(client->adapter,
@@ -43,8 +44,10 @@ static int adm1275_probe(struct i2c_client *client,
return -ENOMEM;
config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
- if (config < 0)
- return config;
+ if (config < 0) {
+ ret = config;
+ goto err_mem;
+ }
info->pages = 1;
info->direct[PSC_VOLTAGE_IN] = true;
@@ -76,7 +79,14 @@ static int adm1275_probe(struct i2c_client *client,
else
info->func[0] |= PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT;
- return pmbus_do_probe(client, id, info);
+ ret = pmbus_do_probe(client, id, info);
+ if (ret)
+ goto err_mem;
+ return 0;
+
+err_mem:
+ kfree(info);
+ return ret;
}
static int adm1275_remove(struct i2c_client *client)
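The adm1275 rework above is the standard goto-unwind idiom: once info is allocated, every later failure, including a failed pmbus_do_probe(), exits through one label that frees it, where the old early returns leaked. A reduced, userspace-compilable model of the same shape; the two helpers merely stand in for the SMBus config read and the PMBus registration:

#include <errno.h>
#include <stdlib.h>

static int read_config(void)	    { return -EIO; }	/* simulated failure */
static int do_register(void *info)  { (void)info; return 0; }

static int probe(void)
{
	void *info;
	int ret;

	info = calloc(1, 64);
	if (!info)
		return -ENOMEM;

	ret = read_config();
	if (ret < 0)
		goto err_mem;		/* every post-allocation failure... */

	ret = do_register(info);
	if (ret)
		goto err_mem;		/* ...funnels through one free */
	return 0;			/* on success, ownership moves on */

err_mem:
	free(info);
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}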
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index e0ef32378ac..0064432f361 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -78,8 +78,9 @@ static u16 emc6w201_read16(struct i2c_client *client, u8 reg)
lsb = i2c_smbus_read_byte_data(client, reg);
msb = i2c_smbus_read_byte_data(client, reg + 1);
- if (lsb < 0 || msb < 0) {
- dev_err(&client->dev, "16-bit read failed at 0x%02x\n", reg);
+ if (unlikely(lsb < 0 || msb < 0)) {
+ dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+ 16, "read", reg);
return 0xFFFF; /* Arbitrary value */
}
@@ -95,10 +96,39 @@ static int emc6w201_write16(struct i2c_client *client, u8 reg, u16 val)
int err;
err = i2c_smbus_write_byte_data(client, reg, val & 0xff);
- if (!err)
+ if (likely(!err))
err = i2c_smbus_write_byte_data(client, reg + 1, val >> 8);
- if (err < 0)
- dev_err(&client->dev, "16-bit write failed at 0x%02x\n", reg);
+ if (unlikely(err < 0))
+ dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+ 16, "write", reg);
+
+ return err;
+}
+
+/* Read 8-bit value from register */
+static u8 emc6w201_read8(struct i2c_client *client, u8 reg)
+{
+ int val;
+
+ val = i2c_smbus_read_byte_data(client, reg);
+ if (unlikely(val < 0)) {
+ dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+ 8, "read", reg);
+ return 0x00; /* Arbitrary value */
+ }
+
+ return val;
+}
+
+/* Write 8-bit value to register */
+static int emc6w201_write8(struct i2c_client *client, u8 reg, u8 val)
+{
+ int err;
+
+ err = i2c_smbus_write_byte_data(client, reg, val);
+ if (unlikely(err < 0))
+ dev_err(&client->dev, "%d-bit %s failed at 0x%02x\n",
+ 8, "write", reg);
return err;
}
@@ -114,25 +144,25 @@ static struct emc6w201_data *emc6w201_update_device(struct device *dev)
if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
for (nr = 0; nr < 6; nr++) {
data->in[input][nr] =
- i2c_smbus_read_byte_data(client,
+ emc6w201_read8(client,
EMC6W201_REG_IN(nr));
data->in[min][nr] =
- i2c_smbus_read_byte_data(client,
+ emc6w201_read8(client,
EMC6W201_REG_IN_LOW(nr));
data->in[max][nr] =
- i2c_smbus_read_byte_data(client,
+ emc6w201_read8(client,
EMC6W201_REG_IN_HIGH(nr));
}
for (nr = 0; nr < 6; nr++) {
data->temp[input][nr] =
- i2c_smbus_read_byte_data(client,
+ emc6w201_read8(client,
EMC6W201_REG_TEMP(nr));
data->temp[min][nr] =
- i2c_smbus_read_byte_data(client,
+ emc6w201_read8(client,
EMC6W201_REG_TEMP_LOW(nr));
data->temp[max][nr] =
- i2c_smbus_read_byte_data(client,
+ emc6w201_read8(client,
EMC6W201_REG_TEMP_HIGH(nr));
}
@@ -192,7 +222,7 @@ static ssize_t set_in(struct device *dev, struct device_attribute *devattr,
mutex_lock(&data->update_lock);
data->in[sf][nr] = SENSORS_LIMIT(val, 0, 255);
- err = i2c_smbus_write_byte_data(client, reg, data->in[sf][nr]);
+ err = emc6w201_write8(client, reg, data->in[sf][nr]);
mutex_unlock(&data->update_lock);
return err < 0 ? err : count;
@@ -229,7 +259,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
mutex_lock(&data->update_lock);
data->temp[sf][nr] = SENSORS_LIMIT(val, -127, 128);
- err = i2c_smbus_write_byte_data(client, reg, data->temp[sf][nr]);
+ err = emc6w201_write8(client, reg, data->temp[sf][nr]);
mutex_unlock(&data->update_lock);
return err < 0 ? err : count;
@@ -444,7 +474,7 @@ static int emc6w201_detect(struct i2c_client *client,
/* Check configuration */
config = i2c_smbus_read_byte_data(client, EMC6W201_REG_CONFIG);
- if ((config & 0xF4) != 0x04)
+ if (config < 0 || (config & 0xF4) != 0x04)
return -ENODEV;
if (!(config & 0x01)) {
dev_err(&client->dev, "Monitoring not enabled\n");
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index a4a94a096c9..2d96ed2bf8e 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -52,6 +52,7 @@
#define SIO_F71858_ID 0x0507 /* Chipset ID */
#define SIO_F71862_ID 0x0601 /* Chipset ID */
#define SIO_F71869_ID 0x0814 /* Chipset ID */
+#define SIO_F71869A_ID 0x1007 /* Chipset ID */
#define SIO_F71882_ID 0x0541 /* Chipset ID */
#define SIO_F71889_ID 0x0723 /* Chipset ID */
#define SIO_F71889E_ID 0x0909 /* Chipset ID */
@@ -108,8 +109,8 @@ static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
-enum chips { f71808e, f71808a, f71858fg, f71862fg, f71869, f71882fg, f71889fg,
- f71889ed, f71889a, f8000, f81865f };
+enum chips { f71808e, f71808a, f71858fg, f71862fg, f71869, f71869a, f71882fg,
+ f71889fg, f71889ed, f71889a, f8000, f81865f };
static const char *f71882fg_names[] = {
"f71808e",
@@ -117,6 +118,7 @@ static const char *f71882fg_names[] = {
"f71858fg",
"f71862fg",
"f71869", /* Both f71869f and f71869e, reg. compatible and same id */
+ "f71869a",
"f71882fg",
"f71889fg", /* f81801u too, same id */
"f71889ed",
@@ -131,6 +133,7 @@ static const char f71882fg_has_in[][F71882FG_MAX_INS] = {
[f71858fg] = { 1, 1, 1, 0, 0, 0, 0, 0, 0 },
[f71862fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
[f71869] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
+ [f71869a] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
[f71882fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
[f71889fg] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
[f71889ed] = { 1, 1, 1, 1, 1, 1, 1, 1, 1 },
@@ -145,6 +148,7 @@ static const char f71882fg_has_in1_alarm[] = {
[f71858fg] = 0,
[f71862fg] = 0,
[f71869] = 0,
+ [f71869a] = 0,
[f71882fg] = 1,
[f71889fg] = 1,
[f71889ed] = 1,
@@ -159,6 +163,7 @@ static const char f71882fg_fan_has_beep[] = {
[f71858fg] = 0,
[f71862fg] = 1,
[f71869] = 1,
+ [f71869a] = 1,
[f71882fg] = 1,
[f71889fg] = 1,
[f71889ed] = 1,
@@ -173,6 +178,7 @@ static const char f71882fg_nr_fans[] = {
[f71858fg] = 3,
[f71862fg] = 3,
[f71869] = 3,
+ [f71869a] = 3,
[f71882fg] = 4,
[f71889fg] = 3,
[f71889ed] = 3,
@@ -187,6 +193,7 @@ static const char f71882fg_temp_has_beep[] = {
[f71858fg] = 0,
[f71862fg] = 1,
[f71869] = 1,
+ [f71869a] = 1,
[f71882fg] = 1,
[f71889fg] = 1,
[f71889ed] = 1,
@@ -201,6 +208,7 @@ static const char f71882fg_nr_temps[] = {
[f71858fg] = 3,
[f71862fg] = 3,
[f71869] = 3,
+ [f71869a] = 3,
[f71882fg] = 3,
[f71889fg] = 3,
[f71889ed] = 3,
@@ -2243,6 +2251,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
case f71808e:
case f71808a:
case f71869:
+ case f71869a:
/* These always have signed auto point temps */
data->auto_point_temp_signed = 1;
/* Fall through to select correct fan/pwm reg bank! */
@@ -2305,6 +2314,7 @@ static int __devinit f71882fg_probe(struct platform_device *pdev)
case f71808e:
case f71808a:
case f71869:
+ case f71869a:
case f71889fg:
case f71889ed:
case f71889a:
@@ -2528,6 +2538,9 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
case SIO_F71869_ID:
sio_data->type = f71869;
break;
+ case SIO_F71869A_ID:
+ sio_data->type = f71869a;
+ break;
case SIO_F71882_ID:
sio_data->type = f71882fg;
break;
@@ -2662,7 +2675,7 @@ static void __exit f71882fg_exit(void)
}
MODULE_DESCRIPTION("F71882FG Hardware Monitoring Driver");
-MODULE_AUTHOR("Hans Edgington, Hans de Goede (hdegoede@redhat.com)");
+MODULE_AUTHOR("Hans Edgington, Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
module_init(f71882fg_init);
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index 2582bfef6cc..c8195a077da 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -202,7 +202,7 @@ static struct vrm_model vrm_models[] = {
{X86_VENDOR_CENTAUR, 0x6, 0x7, ANY, 85}, /* Eden ESP/Ezra */
{X86_VENDOR_CENTAUR, 0x6, 0x8, 0x7, 85}, /* Ezra T */
- {X86_VENDOR_CENTAUR, 0x6, 0x9, 0x7, 85}, /* Nemiah */
+ {X86_VENDOR_CENTAUR, 0x6, 0x9, 0x7, 85}, /* Nehemiah */
{X86_VENDOR_CENTAUR, 0x6, 0x9, ANY, 17}, /* C3-M, Eden-N */
{X86_VENDOR_CENTAUR, 0x6, 0xA, 0x7, 0}, /* No information */
{X86_VENDOR_CENTAUR, 0x6, 0xA, ANY, 13}, /* C7, Esther */
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 1a6dfb6df1e..d3b464b74ce 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -98,11 +98,16 @@ struct lm95241_data {
};
/* Conversions */
-static int TempFromReg(u8 val_h, u8 val_l)
+static int temp_from_reg_signed(u8 val_h, u8 val_l)
{
- if (val_h & 0x80)
- return val_h - 0x100;
- return val_h * 1000 + val_l * 1000 / 256;
+ s16 val_hl = (val_h << 8) | val_l;
+ return val_hl * 1000 / 256;
+}
+
+static int temp_from_reg_unsigned(u8 val_h, u8 val_l)
+{
+ u16 val_hl = (val_h << 8) | val_l;
+ return val_hl * 1000 / 256;
}
static struct lm95241_data *lm95241_update_device(struct device *dev)
@@ -135,10 +140,13 @@ static ssize_t show_input(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct lm95241_data *data = lm95241_update_device(dev);
+ int index = to_sensor_dev_attr(attr)->index;
return snprintf(buf, PAGE_SIZE - 1, "%d\n",
- TempFromReg(data->temp[to_sensor_dev_attr(attr)->index],
- data->temp[to_sensor_dev_attr(attr)->index + 1]));
+ index == 0 || (data->config & (1 << (index / 2))) ?
+ temp_from_reg_signed(data->temp[index], data->temp[index + 1]) :
+ temp_from_reg_unsigned(data->temp[index],
+ data->temp[index + 1]));
}
static ssize_t show_type(struct device *dev, struct device_attribute *attr,
@@ -339,7 +347,7 @@ static int lm95241_detect(struct i2c_client *new_client,
if ((i2c_smbus_read_byte_data(new_client, LM95241_REG_R_MAN_ID)
== MANUFACTURER_ID)
&& (i2c_smbus_read_byte_data(new_client, LM95241_REG_R_CHIP_ID)
- >= DEFAULT_REVISION)) {
+ == DEFAULT_REVISION)) {
name = DEVNAME;
} else {
dev_dbg(&adapter->dev, "LM95241 detection failed at 0x%02x\n",
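The conversion rewrite above fixes two defects in the old TempFromReg(): a negative reading came back in whole degrees (val_h - 0x100) rather than millidegrees, dropping the fractional low byte, and channels configured as unsigned were still decoded as signed. Joining both bytes into one 16-bit value and scaling by 1000/256 handles both, with show_input() picking the signed or unsigned variant per channel from the config register. A standalone check of the arithmetic; the sample register values are made up:

#include <stdint.h>
#include <stdio.h>

static int temp_from_reg_signed(uint8_t h, uint8_t l)
{
	int16_t v = (int16_t)((h << 8) | l);
	return v * 1000 / 256;
}

static int temp_from_reg_unsigned(uint8_t h, uint8_t l)
{
	uint16_t v = (uint16_t)((h << 8) | l);
	return v * 1000 / 256;
}

int main(void)
{
	/* 0x1a40 is 26.25 degC, reported in millidegrees */
	printf("%d\n", temp_from_reg_signed(0x1a, 0x40));	/* 26250 */
	/* 0xe000 is -32 degC when signed, 224 degC when unsigned */
	printf("%d\n", temp_from_reg_signed(0xe0, 0x00));	/* -32000 */
	printf("%d\n", temp_from_reg_unsigned(0xe0, 0x00));	/* 224000 */
	return 0;
}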
diff --git a/drivers/hwmon/pmbus.c b/drivers/hwmon/pmbus.c
index 98e2e28899e..9b1f0c37ef7 100644
--- a/drivers/hwmon/pmbus.c
+++ b/drivers/hwmon/pmbus.c
@@ -47,22 +47,29 @@ static void pmbus_find_sensor_groups(struct i2c_client *client,
if (info->func[0]
&& pmbus_check_byte_register(client, 0, PMBUS_STATUS_INPUT))
info->func[0] |= PMBUS_HAVE_STATUS_INPUT;
- if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_1)) {
+ if (pmbus_check_byte_register(client, 0, PMBUS_FAN_CONFIG_12) &&
+ pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_1)) {
info->func[0] |= PMBUS_HAVE_FAN12;
if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_12))
info->func[0] |= PMBUS_HAVE_STATUS_FAN12;
}
- if (pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_3)) {
+ if (pmbus_check_byte_register(client, 0, PMBUS_FAN_CONFIG_34) &&
+ pmbus_check_word_register(client, 0, PMBUS_READ_FAN_SPEED_3)) {
info->func[0] |= PMBUS_HAVE_FAN34;
if (pmbus_check_byte_register(client, 0, PMBUS_STATUS_FAN_34))
info->func[0] |= PMBUS_HAVE_STATUS_FAN34;
}
- if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_1)) {
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_1))
info->func[0] |= PMBUS_HAVE_TEMP;
- if (pmbus_check_byte_register(client, 0,
- PMBUS_STATUS_TEMPERATURE))
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_2))
+ info->func[0] |= PMBUS_HAVE_TEMP2;
+ if (pmbus_check_word_register(client, 0, PMBUS_READ_TEMPERATURE_3))
+ info->func[0] |= PMBUS_HAVE_TEMP3;
+ if (info->func[0] & (PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2
+ | PMBUS_HAVE_TEMP3)
+ && pmbus_check_byte_register(client, 0,
+ PMBUS_STATUS_TEMPERATURE))
info->func[0] |= PMBUS_HAVE_STATUS_TEMP;
- }
/* Sensors detected on all pages */
for (page = 0; page < info->pages; page++) {
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c
index 354770ed318..744672c1f26 100644
--- a/drivers/hwmon/pmbus_core.c
+++ b/drivers/hwmon/pmbus_core.c
@@ -1430,14 +1430,9 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
- /*
- * Bail out if status register or PMBus revision register
- * does not exist.
- */
- if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0
- || i2c_smbus_read_byte_data(client, PMBUS_REVISION) < 0) {
- dev_err(&client->dev,
- "Status or revision register not found\n");
+ /* Bail out if PMBus status register does not exist. */
+ if (i2c_smbus_read_byte_data(client, PMBUS_STATUS_BYTE) < 0) {
+ dev_err(&client->dev, "PMBus status register not found\n");
ret = -ENODEV;
goto out_data;
}
diff --git a/drivers/hwmon/sch5627.c b/drivers/hwmon/sch5627.c
index 020c87273ea..3494a4cce41 100644
--- a/drivers/hwmon/sch5627.c
+++ b/drivers/hwmon/sch5627.c
@@ -887,7 +887,7 @@ static void __exit sch5627_exit(void)
}
MODULE_DESCRIPTION("SMSC SCH5627 Hardware Monitoring Driver");
-MODULE_AUTHOR("Hans de Goede (hdegoede@redhat.com)");
+MODULE_AUTHOR("Hans de Goede <hdegoede@redhat.com>");
MODULE_LICENSE("GPL");
module_init(sch5627_init);
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index 52b545a795f..cbc98aea5b0 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -193,7 +193,13 @@ static void bfin_twi_handle_interrupt(struct bfin_twi_iface *iface,
return;
}
if (twi_int_status & MCOMP) {
- if (iface->cur_mode == TWI_I2C_MODE_COMBINED) {
+ if ((read_MASTER_CTL(iface) & MEN) == 0 &&
+ (iface->cur_mode == TWI_I2C_MODE_REPEAT ||
+ iface->cur_mode == TWI_I2C_MODE_COMBINED)) {
+ iface->result = -1;
+ write_INT_MASK(iface, 0);
+ write_MASTER_CTL(iface, 0);
+ } else if (iface->cur_mode == TWI_I2C_MODE_COMBINED) {
if (iface->readNum == 0) {
/* set the read number to 1 and ask for manual
* stop in block combine mode
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 6c00c107ebf..f84a63c6dd9 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -248,12 +248,12 @@ static inline int is_msgend(struct s3c24xx_i2c *i2c)
return i2c->msg_ptr >= i2c->msg->len;
}
-/* i2s_s3c_irq_nextbyte
+/* i2c_s3c_irq_nextbyte
*
* process an interrupt and work out what to do
*/
-static int i2s_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
+static int i2c_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
{
unsigned long tmp;
unsigned char byte;
@@ -264,7 +264,6 @@ static int i2s_s3c_irq_nextbyte(struct s3c24xx_i2c *i2c, unsigned long iicstat)
case STATE_IDLE:
dev_err(i2c->dev, "%s: called in STATE_IDLE\n", __func__);
goto out;
- break;
case STATE_STOP:
dev_err(i2c->dev, "%s: called in STATE_STOP\n", __func__);
@@ -444,7 +443,7 @@ static irqreturn_t s3c24xx_i2c_irq(int irqno, void *dev_id)
/* pretty much this leaves us with the fact that we've
* transmitted or received whatever byte we last sent */
- i2s_s3c_irq_nextbyte(i2c, status);
+ i2c_s3c_irq_nextbyte(i2c, status);
out:
return IRQ_HANDLED;
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 4d9319665e3..fb3b4f8f815 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -40,8 +40,10 @@
#define I2C_CNFG_NEW_MASTER_FSM (1<<11)
#define I2C_STATUS 0x01C
#define I2C_SL_CNFG 0x020
+#define I2C_SL_CNFG_NACK (1<<1)
#define I2C_SL_CNFG_NEWSL (1<<2)
#define I2C_SL_ADDR1 0x02c
+#define I2C_SL_ADDR2 0x030
#define I2C_TX_FIFO 0x050
#define I2C_RX_FIFO 0x054
#define I2C_PACKET_TRANSFER_STATUS 0x058
@@ -337,7 +339,11 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev)
if (!i2c_dev->is_dvc) {
u32 sl_cfg = i2c_readl(i2c_dev, I2C_SL_CNFG);
- i2c_writel(i2c_dev, sl_cfg | I2C_SL_CNFG_NEWSL, I2C_SL_CNFG);
+ sl_cfg |= I2C_SL_CNFG_NACK | I2C_SL_CNFG_NEWSL;
+ i2c_writel(i2c_dev, sl_cfg, I2C_SL_CNFG);
+ i2c_writel(i2c_dev, 0xfc, I2C_SL_ADDR1);
+ i2c_writel(i2c_dev, 0x00, I2C_SL_ADDR2);
+
}
val = 7 << I2C_FIFO_CONTROL_TX_TRIG_SHIFT |
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f62f52fb9ec..fc0f2bd9ca8 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3641,7 +3641,8 @@ static struct kobj_type cm_port_obj_type = {
static char *cm_devnode(struct device *dev, mode_t *mode)
{
- *mode = 0666;
+ if (mode)
+ *mode = 0666;
return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index e49a85f8a44..56898b6578a 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -826,7 +826,8 @@ static void ib_uverbs_remove_one(struct ib_device *device)
static char *uverbs_devnode(struct device *dev, mode_t *mode)
{
- *mode = 0666;
+ if (mode)
+ *mode = 0666;
return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
index 40b02ae96f8..6229c3e8e78 100644
--- a/drivers/input/keyboard/pmic8xxx-keypad.c
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -520,7 +520,8 @@ static void pmic8xxx_kp_close(struct input_dev *dev)
*/
static int __devinit pmic8xxx_kp_probe(struct platform_device *pdev)
{
- const struct pm8xxx_keypad_platform_data *pdata = mfd_get_data(pdev);
+ const struct pm8xxx_keypad_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
const struct matrix_keymap_data *keymap_data;
struct pmic8xxx_kp *kp;
int rc;
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index 97e07e786e4..b3cfb9c71e6 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -90,7 +90,8 @@ static int __devinit pmic8xxx_pwrkey_probe(struct platform_device *pdev)
unsigned int delay;
u8 pon_cntl;
struct pmic8xxx_pwrkey *pwrkey;
- const struct pm8xxx_pwrkey_platform_data *pdata = mfd_get_data(pdev);
+ const struct pm8xxx_pwrkey_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "power key platform data not supplied\n");
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index d8d3a1e910a..a2c874623e3 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -88,7 +88,7 @@ static const struct pca9532_chip_info pca9532_chip_info_tbl[] = {
static struct i2c_driver pca9532_driver = {
.driver = {
- .name = "pca953x",
+ .name = "leds-pca953x",
},
.probe = pca9532_probe,
.remove = pca9532_remove,
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c
index 8fa539dde1b..7f7079b12f2 100644
--- a/drivers/media/rc/fintek-cir.c
+++ b/drivers/media/rc/fintek-cir.c
@@ -597,12 +597,17 @@ static void __devexit fintek_remove(struct pnp_dev *pdev)
static int fintek_suspend(struct pnp_dev *pdev, pm_message_t state)
{
struct fintek_dev *fintek = pnp_get_drvdata(pdev);
+ unsigned long flags;
fit_dbg("%s called", __func__);
+ spin_lock_irqsave(&fintek->fintek_lock, flags);
+
/* disable all CIR interrupts */
fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS);
+ spin_unlock_irqrestore(&fintek->fintek_lock, flags);
+
fintek_config_mode_enable(fintek);
/* disable cir logical dev */
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 3f3c7071626..6bc35eeb653 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -307,6 +307,14 @@ static const struct {
/* 0xffdc iMON MCE VFD */
{ 0x00010000ffffffeell, KEY_VOLUMEUP },
{ 0x01000000ffffffeell, KEY_VOLUMEDOWN },
+ { 0x00000001ffffffeell, KEY_MUTE },
+ { 0x0000000fffffffeell, KEY_MEDIA },
+ { 0x00000012ffffffeell, KEY_UP },
+ { 0x00000013ffffffeell, KEY_DOWN },
+ { 0x00000014ffffffeell, KEY_LEFT },
+ { 0x00000015ffffffeell, KEY_RIGHT },
+ { 0x00000016ffffffeell, KEY_ENTER },
+ { 0x00000017ffffffeell, KEY_ESC },
/* iMON Knob values */
{ 0x000100ffffffffeell, KEY_VOLUMEUP },
{ 0x010000ffffffffeell, KEY_VOLUMEDOWN },
@@ -1582,16 +1590,16 @@ static void imon_incoming_packet(struct imon_context *ictx,
/* Only panel type events left to process now */
spin_lock_irqsave(&ictx->kc_lock, flags);
+ do_gettimeofday(&t);
/* KEY_MUTE repeats from knob need to be suppressed */
if (ictx->kc == KEY_MUTE && ictx->kc == ictx->last_keycode) {
- do_gettimeofday(&t);
msec = tv2int(&t, &prev_time);
- prev_time = t;
if (msec < ictx->idev->rep[REP_DELAY]) {
spin_unlock_irqrestore(&ictx->kc_lock, flags);
return;
}
}
+ prev_time = t;
kc = ictx->kc;
spin_unlock_irqrestore(&ictx->kc_lock, flags);
@@ -1603,7 +1611,9 @@ static void imon_incoming_packet(struct imon_context *ictx,
input_report_key(ictx->idev, kc, 0);
input_sync(ictx->idev);
+ spin_lock_irqsave(&ictx->kc_lock, flags);
ictx->last_keycode = kc;
+ spin_unlock_irqrestore(&ictx->kc_lock, flags);
return;
@@ -1740,6 +1750,8 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
detected_display_type = IMON_DISPLAY_TYPE_VFD;
break;
/* iMON VFD, MCE IR */
+ case 0x46:
+ case 0x7e:
case 0x9e:
dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR");
detected_display_type = IMON_DISPLAY_TYPE_VFD;
@@ -1755,6 +1767,9 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
dev_info(ictx->dev, "Unknown 0xffdc device, "
"defaulting to VFD and iMON IR");
detected_display_type = IMON_DISPLAY_TYPE_VFD;
+ /* We don't know which one it is, allow user to set the
+ * RC6 one from userspace if OTHER wasn't correct. */
+ allowed_protos |= RC_TYPE_RC6;
break;
}
diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c
index 11c19d8d0ee..423ed45d6c5 100644
--- a/drivers/media/rc/ir-raw.c
+++ b/drivers/media/rc/ir-raw.c
@@ -114,18 +114,20 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
s64 delta; /* ns */
DEFINE_IR_RAW_EVENT(ev);
int rc = 0;
+ int delay;
if (!dev->raw)
return -EINVAL;
now = ktime_get();
delta = ktime_to_ns(ktime_sub(now, dev->raw->last_event));
+ delay = MS_TO_NS(dev->input_dev->rep[REP_DELAY]);
/* Check for a long duration since last event or if we're
* being called for the first time, note that delta can't
* possibly be negative.
*/
- if (delta > IR_MAX_DURATION || !dev->raw->last_type)
+ if (delta > delay || !dev->raw->last_type)
type |= IR_START_EVENT;
else
ev.duration = delta;
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index e716b931cf7..ecd3d028076 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -1347,6 +1347,7 @@ static const struct ite_dev_params ite_dev_descs[] = {
{ /* 0: ITE8704 */
.model = "ITE8704 CIR transceiver",
.io_region_size = IT87_IOREG_LENGTH,
+ .io_rsrc_no = 0,
.hw_tx_capable = true,
.sample_period = (u32) (1000000000ULL / 115200),
.tx_carrier_freq = 38000,
@@ -1371,6 +1372,7 @@ static const struct ite_dev_params ite_dev_descs[] = {
{ /* 1: ITE8713 */
.model = "ITE8713 CIR transceiver",
.io_region_size = IT87_IOREG_LENGTH,
+ .io_rsrc_no = 0,
.hw_tx_capable = true,
.sample_period = (u32) (1000000000ULL / 115200),
.tx_carrier_freq = 38000,
@@ -1395,6 +1397,7 @@ static const struct ite_dev_params ite_dev_descs[] = {
{ /* 2: ITE8708 */
.model = "ITE8708 CIR transceiver",
.io_region_size = IT8708_IOREG_LENGTH,
+ .io_rsrc_no = 0,
.hw_tx_capable = true,
.sample_period = (u32) (1000000000ULL / 115200),
.tx_carrier_freq = 38000,
@@ -1420,6 +1423,7 @@ static const struct ite_dev_params ite_dev_descs[] = {
{ /* 3: ITE8709 */
.model = "ITE8709 CIR transceiver",
.io_region_size = IT8709_IOREG_LENGTH,
+ .io_rsrc_no = 2,
.hw_tx_capable = true,
.sample_period = (u32) (1000000000ULL / 115200),
.tx_carrier_freq = 38000,
@@ -1461,6 +1465,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
struct rc_dev *rdev = NULL;
int ret = -ENOMEM;
int model_no;
+ int io_rsrc_no;
ite_dbg("%s called", __func__);
@@ -1490,10 +1495,11 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
/* get the description for the device */
dev_desc = &ite_dev_descs[model_no];
+ io_rsrc_no = dev_desc->io_rsrc_no;
/* validate pnp resources */
- if (!pnp_port_valid(pdev, 0) ||
- pnp_port_len(pdev, 0) != dev_desc->io_region_size) {
+ if (!pnp_port_valid(pdev, io_rsrc_no) ||
+ pnp_port_len(pdev, io_rsrc_no) != dev_desc->io_region_size) {
dev_err(&pdev->dev, "IR PNP Port not valid!\n");
goto failure;
}
@@ -1504,7 +1510,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
}
/* store resource values */
- itdev->cir_addr = pnp_port_start(pdev, 0);
+ itdev->cir_addr = pnp_port_start(pdev, io_rsrc_no);
itdev->cir_irq = pnp_irq(pdev, 0);
/* initialize spinlocks */
diff --git a/drivers/media/rc/ite-cir.h b/drivers/media/rc/ite-cir.h
index 16a19f5fd71..aa899a0b975 100644
--- a/drivers/media/rc/ite-cir.h
+++ b/drivers/media/rc/ite-cir.h
@@ -57,6 +57,9 @@ struct ite_dev_params {
/* size of the I/O region */
int io_region_size;
+ /* IR pnp I/O resource number */
+ int io_rsrc_no;
+
/* true if the hardware supports transmission */
bool hw_tx_capable;
diff --git a/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c b/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
index bb10ffe086b..8d558ae6345 100644
--- a/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
+++ b/drivers/media/rc/keymaps/rc-pinnacle-pctv-hd.c
@@ -15,43 +15,39 @@
/* Pinnacle PCTV HD 800i mini remote */
static struct rc_map_table pinnacle_pctv_hd[] = {
-
- { 0x0f, KEY_1 },
- { 0x15, KEY_2 },
- { 0x10, KEY_3 },
- { 0x18, KEY_4 },
- { 0x1b, KEY_5 },
- { 0x1e, KEY_6 },
- { 0x11, KEY_7 },
- { 0x21, KEY_8 },
- { 0x12, KEY_9 },
- { 0x27, KEY_0 },
-
- { 0x24, KEY_ZOOM },
- { 0x2a, KEY_SUBTITLE },
-
- { 0x00, KEY_MUTE },
- { 0x01, KEY_ENTER }, /* Pinnacle Logo */
- { 0x39, KEY_POWER },
-
- { 0x03, KEY_VOLUMEUP },
- { 0x09, KEY_VOLUMEDOWN },
- { 0x06, KEY_CHANNELUP },
- { 0x0c, KEY_CHANNELDOWN },
-
- { 0x2d, KEY_REWIND },
- { 0x30, KEY_PLAYPAUSE },
- { 0x33, KEY_FASTFORWARD },
- { 0x3c, KEY_STOP },
- { 0x36, KEY_RECORD },
- { 0x3f, KEY_EPG }, /* Labeled "?" */
+ /* Key codes for the tiny Pinnacle remote */
+ { 0x0700, KEY_MUTE },
+ { 0x0701, KEY_MENU }, /* Pinnacle logo */
+ { 0x0739, KEY_POWER },
+ { 0x0703, KEY_VOLUMEUP },
+ { 0x0709, KEY_VOLUMEDOWN },
+ { 0x0706, KEY_CHANNELUP },
+ { 0x070c, KEY_CHANNELDOWN },
+ { 0x070f, KEY_1 },
+ { 0x0715, KEY_2 },
+ { 0x0710, KEY_3 },
+ { 0x0718, KEY_4 },
+ { 0x071b, KEY_5 },
+ { 0x071e, KEY_6 },
+ { 0x0711, KEY_7 },
+ { 0x0721, KEY_8 },
+ { 0x0712, KEY_9 },
+ { 0x0727, KEY_0 },
+ { 0x0724, KEY_ZOOM }, /* 'Square' key */
+ { 0x072a, KEY_SUBTITLE }, /* 'T' key */
+ { 0x072d, KEY_REWIND },
+ { 0x0730, KEY_PLAYPAUSE },
+ { 0x0733, KEY_FASTFORWARD },
+ { 0x0736, KEY_RECORD },
+ { 0x073c, KEY_STOP },
+ { 0x073f, KEY_HELP }, /* '?' key */
};
static struct rc_map_list pinnacle_pctv_hd_map = {
.map = {
.scan = pinnacle_pctv_hd,
.size = ARRAY_SIZE(pinnacle_pctv_hd),
- .rc_type = RC_TYPE_UNKNOWN, /* Legacy IR type */
+ .rc_type = RC_TYPE_RC5,
.name = RC_MAP_PINNACLE_PCTV_HD,
}
};
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index fd237ab120b..27997a9ceb0 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -55,6 +55,8 @@ struct irctl {
struct lirc_buffer *buf;
unsigned int chunk_size;
+ struct cdev *cdev;
+
struct task_struct *task;
long jiffies_to_wait;
};
@@ -62,7 +64,6 @@ struct irctl {
static DEFINE_MUTEX(lirc_dev_lock);
static struct irctl *irctls[MAX_IRCTL_DEVICES];
-static struct cdev cdevs[MAX_IRCTL_DEVICES];
/* Only used for sysfs but defined to void otherwise */
static struct class *lirc_class;
@@ -167,9 +168,13 @@ static struct file_operations lirc_dev_fops = {
static int lirc_cdev_add(struct irctl *ir)
{
- int retval;
+ int retval = -ENOMEM;
struct lirc_driver *d = &ir->d;
- struct cdev *cdev = &cdevs[d->minor];
+ struct cdev *cdev;
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ goto err_out;
if (d->fops) {
cdev_init(cdev, d->fops);
@@ -180,12 +185,20 @@ static int lirc_cdev_add(struct irctl *ir)
}
retval = kobject_set_name(&cdev->kobj, "lirc%d", d->minor);
if (retval)
- return retval;
+ goto err_out;
retval = cdev_add(cdev, MKDEV(MAJOR(lirc_base_dev), d->minor), 1);
- if (retval)
+ if (retval) {
kobject_put(&cdev->kobj);
+ goto err_out;
+ }
+
+ ir->cdev = cdev;
+
+ return 0;
+err_out:
+ kfree(cdev);
return retval;
}
@@ -214,7 +227,7 @@ int lirc_register_driver(struct lirc_driver *d)
if (MAX_IRCTL_DEVICES <= d->minor) {
dev_err(d->dev, "lirc_dev: lirc_register_driver: "
"\"minor\" must be between 0 and %d (%d)!\n",
- MAX_IRCTL_DEVICES-1, d->minor);
+ MAX_IRCTL_DEVICES - 1, d->minor);
err = -EBADRQC;
goto out;
}
@@ -369,7 +382,7 @@ int lirc_unregister_driver(int minor)
if (minor < 0 || minor >= MAX_IRCTL_DEVICES) {
printk(KERN_ERR "lirc_dev: %s: minor (%d) must be between "
- "0 and %d!\n", __func__, minor, MAX_IRCTL_DEVICES-1);
+ "0 and %d!\n", __func__, minor, MAX_IRCTL_DEVICES - 1);
return -EBADRQC;
}
@@ -380,7 +393,7 @@ int lirc_unregister_driver(int minor)
return -ENOENT;
}
- cdev = &cdevs[minor];
+ cdev = ir->cdev;
mutex_lock(&lirc_dev_lock);
@@ -410,6 +423,7 @@ int lirc_unregister_driver(int minor)
} else {
lirc_irctl_cleanup(ir);
cdev_del(cdev);
+ kfree(cdev);
kfree(ir);
irctls[minor] = NULL;
}
@@ -453,7 +467,7 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
goto error;
}
- cdev = &cdevs[iminor(inode)];
+ cdev = ir->cdev;
if (try_module_get(cdev->owner)) {
ir->open++;
retval = ir->d.set_use_inc(ir->d.data);
@@ -484,13 +498,15 @@ EXPORT_SYMBOL(lirc_dev_fop_open);
int lirc_dev_fop_close(struct inode *inode, struct file *file)
{
struct irctl *ir = irctls[iminor(inode)];
- struct cdev *cdev = &cdevs[iminor(inode)];
+ struct cdev *cdev;
if (!ir) {
printk(KERN_ERR "%s: called with invalid irctl\n", __func__);
return -EINVAL;
}
+ cdev = ir->cdev;
+
dev_dbg(ir->d.dev, LOGHEAD "close called\n", ir->d.name, ir->d.minor);
WARN_ON(mutex_lock_killable(&lirc_dev_lock));
@@ -503,6 +519,7 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
lirc_irctl_cleanup(ir);
cdev_del(cdev);
irctls[ir->d.minor] = NULL;
+ kfree(cdev);
kfree(ir);
}
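The lirc_dev changes above move each cdev out of the static cdevs[] array into a per-device allocation hung off struct irctl, so its lifetime follows the device: allocated and registered in lirc_cdev_add(), then deleted and freed on whichever of unregister or final close tears the device down. A reduced model of that ownership flow; the stub types and helpers are illustrative, not the kernel API:

#include <errno.h>
#include <stdlib.h>

struct cdev  { int registered; };
struct irctl { struct cdev *cdev; };

static int  cdev_add_stub(struct cdev *c) { c->registered = 1; return 0; }
static void cdev_del_stub(struct cdev *c) { c->registered = 0; }

static int irctl_cdev_add(struct irctl *ir)
{
	int ret = -ENOMEM;
	struct cdev *cdev = calloc(1, sizeof(*cdev));

	if (!cdev)
		goto err_out;
	ret = cdev_add_stub(cdev);
	if (ret)
		goto err_out;
	ir->cdev = cdev;	/* the device now owns the allocation */
	return 0;

err_out:
	free(cdev);		/* free(NULL) is a harmless no-op */
	return ret;
}

static void irctl_teardown(struct irctl *ir)
{
	cdev_del_stub(ir->cdev);
	free(ir->cdev);		/* mirrors the kfree(cdev) in the patch */
	ir->cdev = NULL;
}

int main(void)
{
	struct irctl ir = { 0 };

	if (!irctl_cdev_add(&ir))
		irctl_teardown(&ir);
	return 0;
}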
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index ad927fcaa02..06dfe0957b5 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -108,6 +108,12 @@ static int debug = 1;
static int debug;
#endif
+#define mce_dbg(dev, fmt, ...) \
+ do { \
+ if (debug) \
+ dev_info(dev, fmt, ## __VA_ARGS__); \
+ } while (0)
+
/* general constants */
#define SEND_FLAG_IN_PROGRESS 1
#define SEND_FLAG_COMPLETE 2
@@ -246,6 +252,9 @@ static struct usb_device_id mceusb_dev_table[] = {
.driver_info = MCE_GEN2_TX_INV },
/* SMK eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_SMK, 0x0338) },
+ /* SMK/I-O Data GV-MC7/RCKIT Receiver */
+ { USB_DEVICE(VENDOR_SMK, 0x0353),
+ .driver_info = MCE_GEN2_NO_TX },
/* Tatung eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_TATUNG, 0x9150) },
/* Shuttle eHome Infrared Transceiver */
@@ -606,12 +615,15 @@ static void mce_async_callback(struct urb *urb, struct pt_regs *regs)
if (ir) {
len = urb->actual_length;
- dev_dbg(ir->dev, "callback called (status=%d len=%d)\n",
+ mce_dbg(ir->dev, "callback called (status=%d len=%d)\n",
urb->status, len);
mceusb_dev_printdata(ir, urb->transfer_buffer, 0, len, true);
}
+ /* the transfer buffer and urb were allocated in mce_request_packet */
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
}
/* request incoming or send outgoing usb packet - used to initialize remote */
@@ -655,17 +667,17 @@ static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data,
return;
}
- dev_dbg(dev, "receive request called (size=%#x)\n", size);
+ mce_dbg(dev, "receive request called (size=%#x)\n", size);
async_urb->transfer_buffer_length = size;
async_urb->dev = ir->usbdev;
res = usb_submit_urb(async_urb, GFP_ATOMIC);
if (res) {
- dev_dbg(dev, "receive request FAILED! (res=%d)\n", res);
+ mce_dbg(dev, "receive request FAILED! (res=%d)\n", res);
return;
}
- dev_dbg(dev, "receive request complete (res=%d)\n", res);
+ mce_dbg(dev, "receive request complete (res=%d)\n", res);
}
static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
@@ -673,9 +685,9 @@ static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
mce_request_packet(ir, data, size, MCEUSB_TX);
}
-static void mce_sync_in(struct mceusb_dev *ir, unsigned char *data, int size)
+static void mce_flush_rx_buffer(struct mceusb_dev *ir, int size)
{
- mce_request_packet(ir, data, size, MCEUSB_RX);
+ mce_request_packet(ir, NULL, size, MCEUSB_RX);
}
/* Send data out the IR blaster port(s) */
@@ -794,7 +806,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
ir->carrier = carrier;
cmdbuf[2] = MCE_CMD_SIG_END;
cmdbuf[3] = MCE_IRDATA_TRAILER;
- dev_dbg(ir->dev, "%s: disabling carrier "
+ mce_dbg(ir->dev, "%s: disabling carrier "
"modulation\n", __func__);
mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
return carrier;
@@ -806,7 +818,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
ir->carrier = carrier;
cmdbuf[2] = prescaler;
cmdbuf[3] = divisor;
- dev_dbg(ir->dev, "%s: requesting %u HZ "
+ mce_dbg(ir->dev, "%s: requesting %u HZ "
"carrier\n", __func__, carrier);
/* Transmit new carrier to mce device */
@@ -879,7 +891,7 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK)
* US_TO_NS(MCE_TIME_UNIT);
- dev_dbg(ir->dev, "Storing %s with duration %d\n",
+ mce_dbg(ir->dev, "Storing %s with duration %d\n",
rawir.pulse ? "pulse" : "space",
rawir.duration);
@@ -911,7 +923,7 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
if (ir->parser_state != CMD_HEADER && !ir->rem)
ir->parser_state = CMD_HEADER;
}
- dev_dbg(ir->dev, "processed IR data, calling ir_raw_event_handle\n");
+ mce_dbg(ir->dev, "processed IR data, calling ir_raw_event_handle\n");
ir_raw_event_handle(ir->rc);
}
@@ -933,7 +945,7 @@ static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)
if (ir->send_flags == RECV_FLAG_IN_PROGRESS) {
ir->send_flags = SEND_FLAG_COMPLETE;
- dev_dbg(ir->dev, "setup answer received %d bytes\n",
+ mce_dbg(ir->dev, "setup answer received %d bytes\n",
buf_len);
}
@@ -951,7 +963,7 @@ static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)
case -EPIPE:
default:
- dev_dbg(ir->dev, "Error: urb status = %d\n", urb->status);
+ mce_dbg(ir->dev, "Error: urb status = %d\n", urb->status);
break;
}
@@ -961,7 +973,6 @@ static void mceusb_dev_recv(struct urb *urb, struct pt_regs *regs)
static void mceusb_gen1_init(struct mceusb_dev *ir)
{
int ret;
- int maxp = ir->len_in;
struct device *dev = ir->dev;
char *data;
@@ -978,8 +989,8 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
ret = usb_control_msg(ir->usbdev, usb_rcvctrlpipe(ir->usbdev, 0),
USB_REQ_SET_ADDRESS, USB_TYPE_VENDOR, 0, 0,
data, USB_CTRL_MSG_SZ, HZ * 3);
- dev_dbg(dev, "%s - ret = %d\n", __func__, ret);
- dev_dbg(dev, "%s - data[0] = %d, data[1] = %d\n",
+ mce_dbg(dev, "%s - ret = %d\n", __func__, ret);
+ mce_dbg(dev, "%s - data[0] = %d, data[1] = %d\n",
__func__, data[0], data[1]);
/* set feature: bit rate 38400 bps */
@@ -987,71 +998,56 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
USB_REQ_SET_FEATURE, USB_TYPE_VENDOR,
0xc04e, 0x0000, NULL, 0, HZ * 3);
- dev_dbg(dev, "%s - ret = %d\n", __func__, ret);
+ mce_dbg(dev, "%s - ret = %d\n", __func__, ret);
/* bRequest 4: set char length to 8 bits */
ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
4, USB_TYPE_VENDOR,
0x0808, 0x0000, NULL, 0, HZ * 3);
- dev_dbg(dev, "%s - retB = %d\n", __func__, ret);
+ mce_dbg(dev, "%s - retB = %d\n", __func__, ret);
/* bRequest 2: set handshaking to use DTR/DSR */
ret = usb_control_msg(ir->usbdev, usb_sndctrlpipe(ir->usbdev, 0),
2, USB_TYPE_VENDOR,
0x0000, 0x0100, NULL, 0, HZ * 3);
- dev_dbg(dev, "%s - retC = %d\n", __func__, ret);
+ mce_dbg(dev, "%s - retC = %d\n", __func__, ret);
/* device reset */
mce_async_out(ir, DEVICE_RESET, sizeof(DEVICE_RESET));
- mce_sync_in(ir, NULL, maxp);
/* get hw/sw revision? */
mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
- mce_sync_in(ir, NULL, maxp);
kfree(data);
};
static void mceusb_gen2_init(struct mceusb_dev *ir)
{
- int maxp = ir->len_in;
-
/* device reset */
mce_async_out(ir, DEVICE_RESET, sizeof(DEVICE_RESET));
- mce_sync_in(ir, NULL, maxp);
/* get hw/sw revision? */
mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
- mce_sync_in(ir, NULL, maxp);
/* unknown what the next two actually return... */
mce_async_out(ir, GET_UNKNOWN, sizeof(GET_UNKNOWN));
- mce_sync_in(ir, NULL, maxp);
mce_async_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2));
- mce_sync_in(ir, NULL, maxp);
}
static void mceusb_get_parameters(struct mceusb_dev *ir)
{
- int maxp = ir->len_in;
-
/* get the carrier and frequency */
mce_async_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
- mce_sync_in(ir, NULL, maxp);
- if (!ir->flags.no_tx) {
+ if (!ir->flags.no_tx)
/* get the transmitter bitmask */
mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
- mce_sync_in(ir, NULL, maxp);
- }
/* get receiver timeout value */
mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
- mce_sync_in(ir, NULL, maxp);
/* get receiver sensor setting */
mce_async_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR));
- mce_sync_in(ir, NULL, maxp);
}
static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
@@ -1122,7 +1118,7 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
bool tx_mask_normal;
int ir_intfnum;
- dev_dbg(&intf->dev, "%s called\n", __func__);
+ mce_dbg(&intf->dev, "%s called\n", __func__);
idesc = intf->cur_altsetting;
@@ -1150,7 +1146,7 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
ep_in = ep;
ep_in->bmAttributes = USB_ENDPOINT_XFER_INT;
ep_in->bInterval = 1;
- dev_dbg(&intf->dev, "acceptable inbound endpoint "
+ mce_dbg(&intf->dev, "acceptable inbound endpoint "
"found\n");
}
@@ -1165,12 +1161,12 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
ep_out = ep;
ep_out->bmAttributes = USB_ENDPOINT_XFER_INT;
ep_out->bInterval = 1;
- dev_dbg(&intf->dev, "acceptable outbound endpoint "
+ mce_dbg(&intf->dev, "acceptable outbound endpoint "
"found\n");
}
}
if (ep_in == NULL) {
- dev_dbg(&intf->dev, "inbound and/or outbound endpoint not found\n");
+ mce_dbg(&intf->dev, "inbound and/or outbound endpoint not found\n");
return -ENODEV;
}
@@ -1215,16 +1211,16 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
if (!ir->rc)
goto rc_dev_fail;
- /* flush buffers on the device */
- mce_sync_in(ir, NULL, maxp);
- mce_sync_in(ir, NULL, maxp);
-
/* wire up inbound data handler */
usb_fill_int_urb(ir->urb_in, dev, pipe, ir->buf_in,
maxp, (usb_complete_t) mceusb_dev_recv, ir, ep_in->bInterval);
ir->urb_in->transfer_dma = ir->dma_in;
ir->urb_in->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ /* flush buffers on the device */
+ mce_dbg(&intf->dev, "Flushing receive buffers\n");
+ mce_flush_rx_buffer(ir, maxp);
+
/* initialize device */
if (ir->flags.microsoft_gen1)
mceusb_gen1_init(ir);
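The hunks above convert mceusb's dev_dbg() calls to mce_dbg(); the macro itself is introduced earlier in this patch and is not visible here. As a rough sketch of the idea (the gating variable name "debug" is an assumption for illustration), such a wrapper routes debug output through dev_info() only when a module parameter enables it:

/* Sketch only: the real definition lives earlier in the patch. */
static int debug;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable mceusb debug output");

#define mce_dbg(dev, fmt, ...)					\
	do {							\
		if (debug)					\
			dev_info(dev, fmt, ## __VA_ARGS__);	\
	} while (0)

A parameter-gated wrapper keeps the messages reachable on kernels where dev_dbg() compiles away or stays silent without dynamic debug.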
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index bf3060ea610..565f24c20d7 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -991,7 +991,6 @@ static int nvt_open(struct rc_dev *dev)
unsigned long flags;
spin_lock_irqsave(&nvt->nvt_lock, flags);
- nvt->in_use = true;
nvt_enable_cir(nvt);
spin_unlock_irqrestore(&nvt->nvt_lock, flags);
@@ -1004,7 +1003,6 @@ static void nvt_close(struct rc_dev *dev)
unsigned long flags;
spin_lock_irqsave(&nvt->nvt_lock, flags);
- nvt->in_use = false;
nvt_disable_cir(nvt);
spin_unlock_irqrestore(&nvt->nvt_lock, flags);
}
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 379795d61ea..1241fc89a36 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -70,7 +70,6 @@ struct nvt_dev {
struct ir_raw_event rawir;
spinlock_t nvt_lock;
- bool in_use;
/* for rx */
u8 buf[RX_BUF_LEN];
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index f57cd5677ac..3186ac7c2c1 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -522,18 +522,20 @@ EXPORT_SYMBOL_GPL(rc_g_keycode_from_table);
/**
* ir_do_keyup() - internal function to signal the release of a keypress
* @dev: the struct rc_dev descriptor of the device
+ * @sync: whether or not to call input_sync
*
* This function is used internally to release a keypress; it must be
* called with keylock held.
*/
-static void ir_do_keyup(struct rc_dev *dev)
+static void ir_do_keyup(struct rc_dev *dev, bool sync)
{
if (!dev->keypressed)
return;
IR_dprintk(1, "keyup key 0x%04x\n", dev->last_keycode);
input_report_key(dev->input_dev, dev->last_keycode, 0);
- input_sync(dev->input_dev);
+ if (sync)
+ input_sync(dev->input_dev);
dev->keypressed = false;
}
@@ -549,7 +551,7 @@ void rc_keyup(struct rc_dev *dev)
unsigned long flags;
spin_lock_irqsave(&dev->keylock, flags);
- ir_do_keyup(dev);
+ ir_do_keyup(dev, true);
spin_unlock_irqrestore(&dev->keylock, flags);
}
EXPORT_SYMBOL_GPL(rc_keyup);
@@ -578,7 +580,7 @@ static void ir_timer_keyup(unsigned long cookie)
*/
spin_lock_irqsave(&dev->keylock, flags);
if (time_is_before_eq_jiffies(dev->keyup_jiffies))
- ir_do_keyup(dev);
+ ir_do_keyup(dev, true);
spin_unlock_irqrestore(&dev->keylock, flags);
}
@@ -597,6 +599,7 @@ void rc_repeat(struct rc_dev *dev)
spin_lock_irqsave(&dev->keylock, flags);
input_event(dev->input_dev, EV_MSC, MSC_SCAN, dev->last_scancode);
+ input_sync(dev->input_dev);
if (!dev->keypressed)
goto out;
@@ -622,29 +625,28 @@ EXPORT_SYMBOL_GPL(rc_repeat);
static void ir_do_keydown(struct rc_dev *dev, int scancode,
u32 keycode, u8 toggle)
{
- input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode);
-
- /* Repeat event? */
- if (dev->keypressed &&
- dev->last_scancode == scancode &&
- dev->last_toggle == toggle)
- return;
+ bool new_event = !dev->keypressed ||
+ dev->last_scancode != scancode ||
+ dev->last_toggle != toggle;
- /* Release old keypress */
- ir_do_keyup(dev);
+ if (new_event && dev->keypressed)
+ ir_do_keyup(dev, false);
- dev->last_scancode = scancode;
- dev->last_toggle = toggle;
- dev->last_keycode = keycode;
+ input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode);
- if (keycode == KEY_RESERVED)
- return;
+ if (new_event && keycode != KEY_RESERVED) {
+ /* Register a keypress */
+ dev->keypressed = true;
+ dev->last_scancode = scancode;
+ dev->last_toggle = toggle;
+ dev->last_keycode = keycode;
+
+ IR_dprintk(1, "%s: key down event, "
+ "key 0x%04x, scancode 0x%04x\n",
+ dev->input_name, keycode, scancode);
+ input_report_key(dev->input_dev, keycode, 1);
+ }
- /* Register a keypress */
- dev->keypressed = true;
- IR_dprintk(1, "%s: key down event, key 0x%04x, scancode 0x%04x\n",
- dev->input_name, keycode, scancode);
- input_report_key(dev->input_dev, dev->last_keycode, 1);
input_sync(dev->input_dev);
}
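The rewritten ir_do_keydown() above collapses the repeat check into a single new_event flag: the scancode is now always reported, a still-held key is released without an intermediate input_sync(), and one sync closes the whole batch, so userspace sees keyup, scancode and keydown as a single evdev event group. A condensed restatement of the flow:

/* Condensed restatement of ir_do_keydown() after this patch. */
static void keydown_sketch(struct rc_dev *dev, int scancode,
			   u32 keycode, u8 toggle)
{
	bool new_event = !dev->keypressed ||
			 dev->last_scancode != scancode ||
			 dev->last_toggle != toggle;

	if (new_event && dev->keypressed)
		ir_do_keyup(dev, false);	/* release, but defer the sync */

	input_event(dev->input_dev, EV_MSC, MSC_SCAN, scancode);

	if (new_event && keycode != KEY_RESERVED) {
		dev->keypressed = true;		/* register the new keypress */
		dev->last_scancode = scancode;
		dev->last_toggle = toggle;
		dev->last_keycode = keycode;
		input_report_key(dev->input_dev, keycode, 1);
	}

	input_sync(dev->input_dev);		/* one sync for the whole batch */
}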
diff --git a/drivers/media/video/bt8xx/bttv-driver.c b/drivers/media/video/bt8xx/bttv-driver.c
index a97cf2750bd..834a48394bc 100644
--- a/drivers/media/video/bt8xx/bttv-driver.c
+++ b/drivers/media/video/bt8xx/bttv-driver.c
@@ -3474,7 +3474,7 @@ static int radio_s_tuner(struct file *file, void *priv,
if (0 != t->index)
return -EINVAL;
- bttv_call_all(btv, tuner, g_tuner, t);
+ bttv_call_all(btv, tuner, s_tuner, t);
return 0;
}
diff --git a/drivers/media/video/cx18/cx18-ioctl.c b/drivers/media/video/cx18/cx18-ioctl.c
index 1933d4d11bf..e80134f52ef 100644
--- a/drivers/media/video/cx18/cx18-ioctl.c
+++ b/drivers/media/video/cx18/cx18-ioctl.c
@@ -695,14 +695,10 @@ static int cx18_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
cx18_call_all(cx, tuner, g_tuner, vt);
- if (test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) {
+ if (vt->type == V4L2_TUNER_RADIO)
strlcpy(vt->name, "cx18 Radio Tuner", sizeof(vt->name));
- vt->type = V4L2_TUNER_RADIO;
- } else {
+ else
strlcpy(vt->name, "cx18 TV Tuner", sizeof(vt->name));
- vt->type = V4L2_TUNER_ANALOG_TV;
- }
-
return 0;
}
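cx18 above, and ivtv and msp3400 below, switch g_tuner from overwriting vt->type out of driver state to honouring the type the caller filled in; the v4l2 core (or the bridge driver) initializes vt->type before the call. A minimal sketch of the pattern, with a hypothetical driver name:

/* Sketch of the g_tuner convention after this patch: the caller sets
 * vt->type; the driver only fills in type-specific fields. */
static int example_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
{
	if (vt->type == V4L2_TUNER_RADIO)
		strlcpy(vt->name, "example Radio Tuner", sizeof(vt->name));
	else
		strlcpy(vt->name, "example TV Tuner", sizeof(vt->name));
	return 0;
}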
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index f9e347dae73..120c7d8e089 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -1184,14 +1184,10 @@ static int ivtv_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
ivtv_call_all(itv, tuner, g_tuner, vt);
- if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) {
+ if (vt->type == V4L2_TUNER_RADIO)
strlcpy(vt->name, "ivtv Radio Tuner", sizeof(vt->name));
- vt->type = V4L2_TUNER_RADIO;
- } else {
+ else
strlcpy(vt->name, "ivtv TV Tuner", sizeof(vt->name));
- vt->type = V4L2_TUNER_ANALOG_TV;
- }
-
return 0;
}
diff --git a/drivers/media/video/m5mols/m5mols.h b/drivers/media/video/m5mols/m5mols.h
index 10b55c85448..89d09a8914f 100644
--- a/drivers/media/video/m5mols/m5mols.h
+++ b/drivers/media/video/m5mols/m5mols.h
@@ -2,10 +2,10 @@
* Header for M-5MOLS 8M Pixel camera sensor with ISP
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
*
* Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -106,23 +106,23 @@ struct m5mols_capture {
* The values for each scene mode follow the recommendations in the documentation.
*/
struct m5mols_scenemode {
- u32 metering;
- u32 ev_bias;
- u32 wb_mode;
- u32 wb_preset;
- u32 chroma_en;
- u32 chroma_lvl;
- u32 edge_en;
- u32 edge_lvl;
- u32 af_range;
- u32 fd_mode;
- u32 mcc;
- u32 light;
- u32 flash;
- u32 tone;
- u32 iso;
- u32 capt_mode;
- u32 wdr;
+ u8 metering;
+ u8 ev_bias;
+ u8 wb_mode;
+ u8 wb_preset;
+ u8 chroma_en;
+ u8 chroma_lvl;
+ u8 edge_en;
+ u8 edge_lvl;
+ u8 af_range;
+ u8 fd_mode;
+ u8 mcc;
+ u8 light;
+ u8 flash;
+ u8 tone;
+ u8 iso;
+ u8 capt_mode;
+ u8 wdr;
};
/**
@@ -154,7 +154,6 @@ struct m5mols_version {
u8 str[VERSION_STRING_SIZE];
u8 af;
};
-#define VERSION_SIZE sizeof(struct m5mols_version)
/**
* struct m5mols_info - M-5MOLS driver data structure
@@ -216,9 +215,9 @@ struct m5mols_info {
bool lock_ae;
bool lock_awb;
u8 resolution;
- u32 interrupt;
- u32 mode;
- u32 mode_save;
+ u8 interrupt;
+ u8 mode;
+ u8 mode_save;
int (*set_power)(struct device *dev, int on);
};
@@ -256,9 +255,11 @@ struct m5mols_info {
* +-------+---+----------+-----+------+------+------+------+
* - d[0..3]: according to size1
*/
-int m5mols_read(struct v4l2_subdev *sd, u32 reg_comb, u32 *val);
+int m5mols_read_u8(struct v4l2_subdev *sd, u32 reg_comb, u8 *val);
+int m5mols_read_u16(struct v4l2_subdev *sd, u32 reg_comb, u16 *val);
+int m5mols_read_u32(struct v4l2_subdev *sd, u32 reg_comb, u32 *val);
int m5mols_write(struct v4l2_subdev *sd, u32 reg_comb, u32 val);
-int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 value);
+int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u8 value);
/*
* Mode operation of the M-5MOLS
@@ -280,12 +281,12 @@ int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 value);
* The available execution order between the modes is as follows:
* PARAMETER <---> MONITOR <---> CAPTURE
*/
-int m5mols_mode(struct m5mols_info *info, u32 mode);
+int m5mols_mode(struct m5mols_info *info, u8 mode);
-int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg);
+int m5mols_enable_interrupt(struct v4l2_subdev *sd, u8 reg);
int m5mols_sync_controls(struct m5mols_info *info);
int m5mols_start_capture(struct m5mols_info *info);
-int m5mols_do_scenemode(struct m5mols_info *info, u32 mode);
+int m5mols_do_scenemode(struct m5mols_info *info, u8 mode);
int m5mols_lock_3a(struct m5mols_info *info, bool lock);
int m5mols_set_ctrl(struct v4l2_ctrl *ctrl);
diff --git a/drivers/media/video/m5mols/m5mols_capture.c b/drivers/media/video/m5mols/m5mols_capture.c
index d71a3903b60..d9471928369 100644
--- a/drivers/media/video/m5mols/m5mols_capture.c
+++ b/drivers/media/video/m5mols/m5mols_capture.c
@@ -2,10 +2,10 @@
* The Capture code for Fujitsu M-5MOLS ISP
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
*
* Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -58,9 +58,9 @@ static int m5mols_read_rational(struct v4l2_subdev *sd, u32 addr_num,
{
u32 num, den;
- int ret = m5mols_read(sd, addr_num, &num);
+ int ret = m5mols_read_u32(sd, addr_num, &num);
if (!ret)
- ret = m5mols_read(sd, addr_den, &den);
+ ret = m5mols_read_u32(sd, addr_den, &den);
if (ret)
return ret;
*val = den == 0 ? 0 : num / den;
@@ -99,20 +99,20 @@ static int m5mols_capture_info(struct m5mols_info *info)
if (ret)
return ret;
- ret = m5mols_read(sd, EXIF_INFO_ISO, (u32 *)&exif->iso_speed);
+ ret = m5mols_read_u16(sd, EXIF_INFO_ISO, &exif->iso_speed);
if (!ret)
- ret = m5mols_read(sd, EXIF_INFO_FLASH, (u32 *)&exif->flash);
+ ret = m5mols_read_u16(sd, EXIF_INFO_FLASH, &exif->flash);
if (!ret)
- ret = m5mols_read(sd, EXIF_INFO_SDR, (u32 *)&exif->sdr);
+ ret = m5mols_read_u16(sd, EXIF_INFO_SDR, &exif->sdr);
if (!ret)
- ret = m5mols_read(sd, EXIF_INFO_QVAL, (u32 *)&exif->qval);
+ ret = m5mols_read_u16(sd, EXIF_INFO_QVAL, &exif->qval);
if (ret)
return ret;
if (!ret)
- ret = m5mols_read(sd, CAPC_IMAGE_SIZE, &info->cap.main);
+ ret = m5mols_read_u32(sd, CAPC_IMAGE_SIZE, &info->cap.main);
if (!ret)
- ret = m5mols_read(sd, CAPC_THUMB_SIZE, &info->cap.thumb);
+ ret = m5mols_read_u32(sd, CAPC_THUMB_SIZE, &info->cap.thumb);
if (!ret)
info->cap.total = info->cap.main + info->cap.thumb;
@@ -122,7 +122,7 @@ static int m5mols_capture_info(struct m5mols_info *info)
int m5mols_start_capture(struct m5mols_info *info)
{
struct v4l2_subdev *sd = &info->sd;
- u32 resolution = info->resolution;
+ u8 resolution = info->resolution;
int timeout;
int ret;
diff --git a/drivers/media/video/m5mols/m5mols_controls.c b/drivers/media/video/m5mols/m5mols_controls.c
index 817c16fec36..d135d20d09c 100644
--- a/drivers/media/video/m5mols/m5mols_controls.c
+++ b/drivers/media/video/m5mols/m5mols_controls.c
@@ -2,10 +2,10 @@
* Controls for M-5MOLS 8M Pixel camera sensor with ISP
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
*
* Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -130,7 +130,7 @@ static struct m5mols_scenemode m5mols_default_scenemode[] = {
*
* WARNING: The execution order is important. Do not change the order.
*/
-int m5mols_do_scenemode(struct m5mols_info *info, u32 mode)
+int m5mols_do_scenemode(struct m5mols_info *info, u8 mode)
{
struct v4l2_subdev *sd = &info->sd;
struct m5mols_scenemode scenemode = m5mols_default_scenemode[mode];
diff --git a/drivers/media/video/m5mols/m5mols_core.c b/drivers/media/video/m5mols/m5mols_core.c
index 76eac26e84a..43c68f51c5c 100644
--- a/drivers/media/video/m5mols/m5mols_core.c
+++ b/drivers/media/video/m5mols/m5mols_core.c
@@ -2,10 +2,10 @@
* Driver for M-5MOLS 8M Pixel camera sensor with ISP
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
*
* Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -133,13 +133,13 @@ static u32 m5mols_swap_byte(u8 *data, u8 length)
/**
* m5mols_read - I2C read function
* @reg: combination of size, category and command for the I2C packet
+ * @size: desired size of I2C packet
* @val: read value
*/
-int m5mols_read(struct v4l2_subdev *sd, u32 reg, u32 *val)
+static int m5mols_read(struct v4l2_subdev *sd, u32 size, u32 reg, u32 *val)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
u8 rbuf[M5MOLS_I2C_MAX_SIZE + 1];
- u8 size = I2C_SIZE(reg);
u8 category = I2C_CATEGORY(reg);
u8 cmd = I2C_COMMAND(reg);
struct i2c_msg msg[2];
@@ -149,11 +149,6 @@ int m5mols_read(struct v4l2_subdev *sd, u32 reg, u32 *val)
if (!client->adapter)
return -ENODEV;
- if (size != 1 && size != 2 && size != 4) {
- v4l2_err(sd, "Wrong data size\n");
- return -EINVAL;
- }
-
msg[0].addr = client->addr;
msg[0].flags = 0;
msg[0].len = 5;
@@ -184,6 +179,52 @@ int m5mols_read(struct v4l2_subdev *sd, u32 reg, u32 *val)
return 0;
}
+int m5mols_read_u8(struct v4l2_subdev *sd, u32 reg, u8 *val)
+{
+ u32 val_32;
+ int ret;
+
+ if (I2C_SIZE(reg) != 1) {
+ v4l2_err(sd, "Wrong data size\n");
+ return -EINVAL;
+ }
+
+ ret = m5mols_read(sd, I2C_SIZE(reg), reg, &val_32);
+ if (ret)
+ return ret;
+
+ *val = (u8)val_32;
+ return ret;
+}
+
+int m5mols_read_u16(struct v4l2_subdev *sd, u32 reg, u16 *val)
+{
+ u32 val_32;
+ int ret;
+
+ if (I2C_SIZE(reg) != 2) {
+ v4l2_err(sd, "Wrong data size\n");
+ return -EINVAL;
+ }
+
+ ret = m5mols_read(sd, I2C_SIZE(reg), reg, &val_32);
+ if (ret)
+ return ret;
+
+ *val = (u16)val_32;
+ return ret;
+}
+
+int m5mols_read_u32(struct v4l2_subdev *sd, u32 reg, u32 *val)
+{
+ if (I2C_SIZE(reg) != 4) {
+ v4l2_err(sd, "Wrong data size\n");
+ return -EINVAL;
+ }
+
+ return m5mols_read(sd, I2C_SIZE(reg), reg, val);
+}
+
/**
* m5mols_write - I2C command write function
* @reg: combination of size, category and command for the I2C packet
@@ -231,13 +272,14 @@ int m5mols_write(struct v4l2_subdev *sd, u32 reg, u32 val)
return 0;
}
-int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 mask)
+int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u8 mask)
{
- u32 busy, i;
+ u8 busy;
+ int i;
int ret;
for (i = 0; i < M5MOLS_I2C_CHECK_RETRY; i++) {
- ret = m5mols_read(sd, I2C_REG(category, cmd, 1), &busy);
+ ret = m5mols_read_u8(sd, I2C_REG(category, cmd, 1), &busy);
if (ret < 0)
return ret;
if ((busy & mask) == mask)
@@ -252,14 +294,14 @@ int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 mask)
* Before writing the desired interrupt value, the INT_FACTOR register should
* be read to clear pending interrupts.
*/
-int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg)
+int m5mols_enable_interrupt(struct v4l2_subdev *sd, u8 reg)
{
struct m5mols_info *info = to_m5mols(sd);
- u32 mask = is_available_af(info) ? REG_INT_AF : 0;
- u32 dummy;
+ u8 mask = is_available_af(info) ? REG_INT_AF : 0;
+ u8 dummy;
int ret;
- ret = m5mols_read(sd, SYSTEM_INT_FACTOR, &dummy);
+ ret = m5mols_read_u8(sd, SYSTEM_INT_FACTOR, &dummy);
if (!ret)
ret = m5mols_write(sd, SYSTEM_INT_ENABLE, reg & ~mask);
return ret;
@@ -271,7 +313,7 @@ int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg)
* Changing the M-5MOLS mode always incurs a small delay, so the current
* busy status needs to be checked to guarantee the right mode.
*/
-static int m5mols_reg_mode(struct v4l2_subdev *sd, u32 mode)
+static int m5mols_reg_mode(struct v4l2_subdev *sd, u8 mode)
{
int ret = m5mols_write(sd, SYSTEM_SYSMODE, mode);
@@ -286,16 +328,16 @@ static int m5mols_reg_mode(struct v4l2_subdev *sd, u32 mode)
* can be guaranteed only when the sensor is operating in the mode to which
* a command belongs.
*/
-int m5mols_mode(struct m5mols_info *info, u32 mode)
+int m5mols_mode(struct m5mols_info *info, u8 mode)
{
struct v4l2_subdev *sd = &info->sd;
int ret = -EINVAL;
- u32 reg;
+ u8 reg;
if (mode < REG_PARAMETER && mode > REG_CAPTURE)
return ret;
- ret = m5mols_read(sd, SYSTEM_SYSMODE, &reg);
+ ret = m5mols_read_u8(sd, SYSTEM_SYSMODE, &reg);
if ((!ret && reg == mode) || ret)
return ret;
@@ -344,41 +386,37 @@ int m5mols_mode(struct m5mols_info *info, u32 mode)
static int m5mols_get_version(struct v4l2_subdev *sd)
{
struct m5mols_info *info = to_m5mols(sd);
- union {
- struct m5mols_version ver;
- u8 bytes[VERSION_SIZE];
- } version;
- u32 *value;
- u8 cmd = CAT0_VER_CUSTOMER;
+ struct m5mols_version *ver = &info->ver;
+ u8 *str = ver->str;
+ int i;
int ret;
- do {
- value = (u32 *)&version.bytes[cmd];
- ret = m5mols_read(sd, SYSTEM_CMD(cmd), value);
- if (ret)
- return ret;
- } while (cmd++ != CAT0_VER_AWB);
+ ret = m5mols_read_u8(sd, SYSTEM_VER_CUSTOMER, &ver->customer);
+ if (!ret)
+ ret = m5mols_read_u8(sd, SYSTEM_VER_PROJECT, &ver->project);
+ if (!ret)
+ ret = m5mols_read_u16(sd, SYSTEM_VER_FIRMWARE, &ver->fw);
+ if (!ret)
+ ret = m5mols_read_u16(sd, SYSTEM_VER_HARDWARE, &ver->hw);
+ if (!ret)
+ ret = m5mols_read_u16(sd, SYSTEM_VER_PARAMETER, &ver->param);
+ if (!ret)
+ ret = m5mols_read_u16(sd, SYSTEM_VER_AWB, &ver->awb);
+ if (!ret)
+ ret = m5mols_read_u8(sd, AF_VERSION, &ver->af);
+ if (ret)
+ return ret;
- do {
- value = (u32 *)&version.bytes[cmd];
- ret = m5mols_read(sd, SYSTEM_VER_STRING, value);
+ for (i = 0; i < VERSION_STRING_SIZE; i++) {
+ ret = m5mols_read_u8(sd, SYSTEM_VER_STRING, &str[i]);
if (ret)
return ret;
- if (cmd >= VERSION_SIZE - 1)
- return -EINVAL;
- } while (version.bytes[cmd++]);
-
- value = (u32 *)&version.bytes[cmd];
- ret = m5mols_read(sd, AF_VERSION, value);
- if (ret)
- return ret;
+ }
- /* store version information swapped for being readable */
- info->ver = version.ver;
- info->ver.fw = be16_to_cpu(info->ver.fw);
- info->ver.hw = be16_to_cpu(info->ver.hw);
- info->ver.param = be16_to_cpu(info->ver.param);
- info->ver.awb = be16_to_cpu(info->ver.awb);
+ ver->fw = be16_to_cpu(ver->fw);
+ ver->hw = be16_to_cpu(ver->hw);
+ ver->param = be16_to_cpu(ver->param);
+ ver->awb = be16_to_cpu(ver->awb);
v4l2_info(sd, "Manufacturer\t[%s]\n",
is_manufacturer(info, REG_SAMSUNG_ELECTRO) ?
@@ -722,7 +760,7 @@ static int m5mols_init_controls(struct m5mols_info *info)
int ret;
/* Determine value's range & step of controls for various FW version */
- ret = m5mols_read(sd, AE_MAX_GAIN_MON, (u32 *)&max_exposure);
+ ret = m5mols_read_u16(sd, AE_MAX_GAIN_MON, &max_exposure);
if (!ret)
step_zoom = is_manufacturer(info, REG_SAMSUNG_OPTICS) ? 31 : 1;
if (ret)
@@ -842,18 +880,18 @@ static void m5mols_irq_work(struct work_struct *work)
struct m5mols_info *info =
container_of(work, struct m5mols_info, work_irq);
struct v4l2_subdev *sd = &info->sd;
- u32 reg;
+ u8 reg;
int ret;
if (!is_powered(info) ||
- m5mols_read(sd, SYSTEM_INT_FACTOR, &info->interrupt))
+ m5mols_read_u8(sd, SYSTEM_INT_FACTOR, &info->interrupt))
return;
switch (info->interrupt & REG_INT_MASK) {
case REG_INT_AF:
if (!is_available_af(info))
break;
- ret = m5mols_read(sd, AF_STATUS, &reg);
+ ret = m5mols_read_u8(sd, AF_STATUS, &reg);
v4l2_dbg(2, m5mols_debug, sd, "AF %s\n",
reg == REG_AF_FAIL ? "Failed" :
reg == REG_AF_SUCCESS ? "Success" :
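With m5mols_read() now private, every caller goes through a wrapper whose width must agree with the size encoded in the register handle, so a mismatch is caught as -EINVAL instead of silently writing past the destination. A hypothetical caller, using handles defined in m5mols_reg.h (SYSTEM_SYSMODE is declared as a 1-byte register, SYSTEM_VER_FIRMWARE as a 2-byte one):

/* Hypothetical usage of the typed read wrappers added above. */
u8 mode;
u16 fw;
int ret;

ret = m5mols_read_u8(sd, SYSTEM_SYSMODE, &mode);
if (!ret)
	ret = m5mols_read_u16(sd, SYSTEM_VER_FIRMWARE, &fw);
/* m5mols_read_u16(sd, SYSTEM_SYSMODE, &fw) would fail with -EINVAL,
 * because the handle declares a 1-byte transfer. */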
diff --git a/drivers/media/video/m5mols/m5mols_reg.h b/drivers/media/video/m5mols/m5mols_reg.h
index b83e36fc6ac..c755bd6edfe 100644
--- a/drivers/media/video/m5mols/m5mols_reg.h
+++ b/drivers/media/video/m5mols/m5mols_reg.h
@@ -2,10 +2,10 @@
* Register map for M-5MOLS 8M Pixel camera sensor with ISP
*
* Copyright (C) 2011 Samsung Electronics Co., Ltd.
- * Author: HeungJun Kim, riverful.kim@samsung.com
+ * Author: HeungJun Kim <riverful.kim@samsung.com>
*
* Copyright (C) 2009 Samsung Electronics Co., Ltd.
- * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com
+ * Author: Dongsoo Nathaniel Kim <dongsoo45.kim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -56,13 +56,24 @@
* more specific contents, see the definitions in file m5mols.h.
*/
#define CAT0_VER_CUSTOMER 0x00 /* customer version */
-#define CAT0_VER_AWB 0x09 /* Auto WB version */
+#define CAT0_VER_PROJECT 0x01 /* project version */
+#define CAT0_VER_FIRMWARE 0x02 /* Firmware version */
+#define CAT0_VER_HARDWARE 0x04 /* Hardware version */
+#define CAT0_VER_PARAMETER 0x06 /* Parameter version */
+#define CAT0_VER_AWB 0x08 /* Auto WB version */
#define CAT0_VER_STRING 0x0a /* string including M-5MOLS */
#define CAT0_SYSMODE 0x0b /* SYSTEM mode register */
#define CAT0_STATUS 0x0c /* SYSTEM mode status register */
#define CAT0_INT_FACTOR 0x10 /* interrupt pending register */
#define CAT0_INT_ENABLE 0x11 /* interrupt enable register */
+#define SYSTEM_VER_CUSTOMER I2C_REG(CAT_SYSTEM, CAT0_VER_CUSTOMER, 1)
+#define SYSTEM_VER_PROJECT I2C_REG(CAT_SYSTEM, CAT0_VER_PROJECT, 1)
+#define SYSTEM_VER_FIRMWARE I2C_REG(CAT_SYSTEM, CAT0_VER_FIRMWARE, 2)
+#define SYSTEM_VER_HARDWARE I2C_REG(CAT_SYSTEM, CAT0_VER_HARDWARE, 2)
+#define SYSTEM_VER_PARAMETER I2C_REG(CAT_SYSTEM, CAT0_VER_PARAMETER, 2)
+#define SYSTEM_VER_AWB I2C_REG(CAT_SYSTEM, CAT0_VER_AWB, 2)
+
#define SYSTEM_SYSMODE I2C_REG(CAT_SYSTEM, CAT0_SYSMODE, 1)
#define REG_SYSINIT 0x00 /* SYSTEM mode */
#define REG_PARAMETER 0x01 /* PARAMETER mode */
@@ -382,8 +393,8 @@
#define REG_CAP_START_MAIN 0x01
#define REG_CAP_START_THUMB 0x03
-#define CAPC_IMAGE_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_IMAGE_SIZE, 1)
-#define CAPC_THUMB_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_THUMB_SIZE, 1)
+#define CAPC_IMAGE_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_IMAGE_SIZE, 4)
+#define CAPC_THUMB_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_THUMB_SIZE, 4)
/*
* Category F - Flash
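The new SYSTEM_VER_* handles carry the transfer size in the I2C_REG() encoding itself, which is what lets the typed wrappers in m5mols_core.c validate their callers. The exact bit layout lives elsewhere in m5mols_reg.h; a plausible sketch of the shape (the shifts here are an assumption, not the driver's actual values):

/* Assumed shape of the register-handle encoding, for illustration only. */
#define I2C_REG(cat, cmd, size)	(((size) << 16) | ((cat) << 8) | (cmd))
#define I2C_SIZE(reg)		(((reg) >> 16) & 0xff)
#define I2C_CATEGORY(reg)	(((reg) >> 8) & 0xff)
#define I2C_COMMAND(reg)	((reg) & 0xff)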
diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
index de5d481b032..c43c81f5f97 100644
--- a/drivers/media/video/msp3400-driver.c
+++ b/drivers/media/video/msp3400-driver.c
@@ -480,12 +480,14 @@ static int msp_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
struct msp_state *state = to_state(sd);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (state->radio)
+ if (vt->type != V4L2_TUNER_ANALOG_TV)
return 0;
- if (state->opmode == OPMODE_AUTOSELECT)
- msp_detect_stereo(client);
- vt->audmode = state->audmode;
- vt->rxsubchans = state->rxsubchans;
+ if (!state->radio) {
+ if (state->opmode == OPMODE_AUTOSELECT)
+ msp_detect_stereo(client);
+ vt->rxsubchans = state->rxsubchans;
+ }
+ vt->audmode = state->audmode;
vt->capability |= V4L2_TUNER_CAP_STEREO |
V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
return 0;
diff --git a/drivers/media/video/mx1_camera.c b/drivers/media/video/mx1_camera.c
index bc0c23a1009..63f8a0cc33d 100644
--- a/drivers/media/video/mx1_camera.c
+++ b/drivers/media/video/mx1_camera.c
@@ -444,12 +444,9 @@ static int mx1_camera_add_device(struct soc_camera_device *icd)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct mx1_camera_dev *pcdev = ici->priv;
- int ret;
- if (pcdev->icd) {
- ret = -EBUSY;
- goto ebusy;
- }
+ if (pcdev->icd)
+ return -EBUSY;
dev_info(icd->dev.parent, "MX1 Camera driver attached to camera %d\n",
icd->devnum);
@@ -458,8 +455,7 @@ static int mx1_camera_add_device(struct soc_camera_device *icd)
pcdev->icd = icd;
-ebusy:
- return ret;
+ return 0;
}
static void mx1_camera_remove_device(struct soc_camera_device *icd)
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index 4ada9be1d43..4d07c584440 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -982,6 +982,14 @@ static int omap_vout_buffer_setup(struct videobuf_queue *q, unsigned int *count,
startindex = (vout->vid == OMAP_VIDEO1) ?
video1_numbuffers : video2_numbuffers;
+ /* Check the size of the buffer */
+ if (*size > vout->buffer_size) {
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "buffer allocation mismatch [%u] [%u]\n",
+ *size, vout->buffer_size);
+ return -ENOMEM;
+ }
+
for (i = startindex; i < *count; i++) {
vout->buffer_size = *size;
@@ -1228,6 +1236,14 @@ static int omap_vout_mmap(struct file *file, struct vm_area_struct *vma)
(vma->vm_pgoff << PAGE_SHIFT));
return -EINVAL;
}
+ /* Check the size of the buffer */
+ if (size > vout->buffer_size) {
+ v4l2_err(&vout->vid_dev->v4l2_dev,
+ "insufficient memory [%lu] [%u]\n",
+ size, vout->buffer_size);
+ return -ENOMEM;
+ }
+
q->bufs[i]->baddr = vma->vm_start;
vma->vm_flags |= VM_RESERVED;
@@ -2391,7 +2407,7 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
/* Register the Video device with V4L2
*/
vfd = vout->vfd;
- if (video_register_device(vfd, VFL_TYPE_GRABBER, k + 1) < 0) {
+ if (video_register_device(vfd, VFL_TYPE_GRABBER, -1) < 0) {
dev_err(&pdev->dev, ": Could not register "
"Video for Linux device\n");
vfd->minor = -1;
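Both omap_vout hunks above add the same guard: a request must not exceed the buffer_size recorded at allocation time. In the mmap path the requested size is presumably derived from the vma bounds; condensed:

/* Condensed form of the mmap-side check added above; the derivation
 * of "size" from the vma is an assumption for illustration. */
unsigned long size = vma->vm_end - vma->vm_start;

if (size > vout->buffer_size) {
	v4l2_err(&vout->vid_dev->v4l2_dev,
		 "insufficient memory [%lu] [%u]\n",
		 size, vout->buffer_size);
	return -ENOMEM;		/* refuse mappings larger than the buffer */
}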
diff --git a/drivers/media/video/omap/omap_voutlib.c b/drivers/media/video/omap/omap_voutlib.c
index 2aa6a76c5e5..8ae74817a11 100644
--- a/drivers/media/video/omap/omap_voutlib.c
+++ b/drivers/media/video/omap/omap_voutlib.c
@@ -193,7 +193,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
return -EINVAL;
if (cpu_is_omap24xx()) {
- if (crop->height != win->w.height) {
+ if (try_crop.height != win->w.height) {
/* If we're resizing vertically, we can't support a
* crop width wider than 768 pixels.
*/
@@ -202,7 +202,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
}
}
/* vertical resizing */
- vresize = (1024 * crop->height) / win->w.height;
+ vresize = (1024 * try_crop.height) / win->w.height;
if (cpu_is_omap24xx() && (vresize > 2048))
vresize = 2048;
else if (cpu_is_omap34xx() && (vresize > 4096))
@@ -221,7 +221,7 @@ int omap_vout_new_crop(struct v4l2_pix_format *pix,
try_crop.height = 2;
}
/* horizontal resizing */
- hresize = (1024 * crop->width) / win->w.width;
+ hresize = (1024 * try_crop.width) / win->w.width;
if (cpu_is_omap24xx() && (hresize > 2048))
hresize = 2048;
else if (cpu_is_omap34xx() && (hresize > 4096))
diff --git a/drivers/media/video/omap3isp/isp.c b/drivers/media/video/omap3isp/isp.c
index c9fd04ee70a..94b6ed89e19 100644
--- a/drivers/media/video/omap3isp/isp.c
+++ b/drivers/media/video/omap3isp/isp.c
@@ -1748,7 +1748,7 @@ static int isp_register_entities(struct isp_device *isp)
goto done;
/* Register external entities */
- for (subdevs = pdata->subdevs; subdevs->subdevs; ++subdevs) {
+ for (subdevs = pdata->subdevs; subdevs && subdevs->subdevs; ++subdevs) {
struct v4l2_subdev *sensor;
struct media_entity *input;
unsigned int flags;
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index 9d0dd08f57f..e98d3821279 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -3046,6 +3046,8 @@ static void pvr2_subdev_update(struct pvr2_hdw *hdw)
if (hdw->input_dirty || hdw->audiomode_dirty || hdw->force_dirty) {
struct v4l2_tuner vt;
memset(&vt, 0, sizeof(vt));
+ vt.type = (hdw->input_val == PVR2_CVAL_INPUT_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
vt.audmode = hdw->audiomode_val;
v4l2_device_call_all(&hdw->v4l2_dev, 0, tuner, s_tuner, &vt);
}
@@ -5171,6 +5173,8 @@ void pvr2_hdw_status_poll(struct pvr2_hdw *hdw)
{
struct v4l2_tuner *vtp = &hdw->tuner_signal_info;
memset(vtp, 0, sizeof(*vtp));
+ vtp->type = (hdw->input_val == PVR2_CVAL_INPUT_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
hdw->tuner_signal_stale = 0;
/* Note: There apparently is no replacement for VIDIOC_CROPCAP
using v4l2-subdev - therefore we can't support that AT ALL right
diff --git a/drivers/media/video/pwc/pwc-ctrl.c b/drivers/media/video/pwc/pwc-ctrl.c
index 1593f8deb81..760b4de13ad 100644
--- a/drivers/media/video/pwc/pwc-ctrl.c
+++ b/drivers/media/video/pwc/pwc-ctrl.c
@@ -1414,7 +1414,7 @@ long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
{
ARG_DEF(struct pwc_probe, probe)
- strcpy(ARGR(probe).name, pdev->vdev->name);
+ strcpy(ARGR(probe).name, pdev->vdev.name);
ARGR(probe).type = pdev->type;
ARG_OUT(probe)
break;
diff --git a/drivers/media/video/pwc/pwc-if.c b/drivers/media/video/pwc/pwc-if.c
index 356cd42b593..b0bde5a87c8 100644
--- a/drivers/media/video/pwc/pwc-if.c
+++ b/drivers/media/video/pwc/pwc-if.c
@@ -40,7 +40,7 @@
Oh yes, convention: to distinguish between all the various pointers to
device-structures, I use these names for the pointer variables:
udev: struct usb_device *
- vdev: struct video_device *
+ vdev: struct video_device (member of pwc_dev)
pdev: struct pwc_device *
*/
@@ -152,6 +152,7 @@ static ssize_t pwc_video_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos);
static unsigned int pwc_video_poll(struct file *file, poll_table *wait);
static int pwc_video_mmap(struct file *file, struct vm_area_struct *vma);
+static void pwc_video_release(struct video_device *vfd);
static const struct v4l2_file_operations pwc_fops = {
.owner = THIS_MODULE,
@@ -164,42 +165,12 @@ static const struct v4l2_file_operations pwc_fops = {
};
static struct video_device pwc_template = {
.name = "Philips Webcam", /* Filled in later */
- .release = video_device_release,
+ .release = pwc_video_release,
.fops = &pwc_fops,
+ .ioctl_ops = &pwc_ioctl_ops,
};
/***************************************************************************/
-
-/* Okay, this is some magic that I worked out and the reasoning behind it...
-
- The biggest problem with any USB device is of course: "what to do
- when the user unplugs the device while it is in use by an application?"
- We have several options:
- 1) Curse them with the 7 plagues when they do (requires divine intervention)
- 2) Tell them not to (won't work: they'll do it anyway)
- 3) Oops the kernel (this will have a negative effect on a user's uptime)
- 4) Do something sensible.
-
- Of course, we go for option 4.
-
- It happens that this device will be linked to two times, once from
- usb_device and once from the video_device in their respective 'private'
- pointers. This is done when the device is probed() and all initialization
- succeeded. The pwc_device struct links back to both structures.
-
- When a device is unplugged while in use it will be removed from the
- list of known USB devices; I also de-register it as a V4L device, but
- unfortunately I can't free the memory since the struct is still in use
- by the file descriptor. This free-ing is then deferend until the first
- opportunity. Crude, but it works.
-
- A small 'advantage' is that if a user unplugs the cam and plugs it back
- in, it should get assigned the same video device minor, but unfortunately
- it's non-trivial to re-link the cam back to the video device... (that
- would surely be magic! :))
-*/
-
-/***************************************************************************/
/* Private functions */
/* Here we want the physical address of the memory.
@@ -1016,16 +987,15 @@ static ssize_t show_snapshot_button_status(struct device *class_dev,
static DEVICE_ATTR(button, S_IRUGO | S_IWUSR, show_snapshot_button_status,
NULL);
-static int pwc_create_sysfs_files(struct video_device *vdev)
+static int pwc_create_sysfs_files(struct pwc_device *pdev)
{
- struct pwc_device *pdev = video_get_drvdata(vdev);
int rc;
- rc = device_create_file(&vdev->dev, &dev_attr_button);
+ rc = device_create_file(&pdev->vdev.dev, &dev_attr_button);
if (rc)
goto err;
if (pdev->features & FEATURE_MOTOR_PANTILT) {
- rc = device_create_file(&vdev->dev, &dev_attr_pan_tilt);
+ rc = device_create_file(&pdev->vdev.dev, &dev_attr_pan_tilt);
if (rc)
goto err_button;
}
@@ -1033,19 +1003,17 @@ static int pwc_create_sysfs_files(struct video_device *vdev)
return 0;
err_button:
- device_remove_file(&vdev->dev, &dev_attr_button);
+ device_remove_file(&pdev->vdev.dev, &dev_attr_button);
err:
PWC_ERROR("Could not create sysfs files.\n");
return rc;
}
-static void pwc_remove_sysfs_files(struct video_device *vdev)
+static void pwc_remove_sysfs_files(struct pwc_device *pdev)
{
- struct pwc_device *pdev = video_get_drvdata(vdev);
-
if (pdev->features & FEATURE_MOTOR_PANTILT)
- device_remove_file(&vdev->dev, &dev_attr_pan_tilt);
- device_remove_file(&vdev->dev, &dev_attr_button);
+ device_remove_file(&pdev->vdev.dev, &dev_attr_pan_tilt);
+ device_remove_file(&pdev->vdev.dev, &dev_attr_button);
}
#ifdef CONFIG_USB_PWC_DEBUG
@@ -1106,7 +1074,7 @@ static int pwc_video_open(struct file *file)
if (ret >= 0)
{
PWC_DEBUG_OPEN("This %s camera is equipped with a %s (%d).\n",
- pdev->vdev->name,
+ pdev->vdev.name,
pwc_sensor_type_to_string(i), i);
}
}
@@ -1180,16 +1148,15 @@ static int pwc_video_open(struct file *file)
return 0;
}
-
-static void pwc_cleanup(struct pwc_device *pdev)
+static void pwc_video_release(struct video_device *vfd)
{
- pwc_remove_sysfs_files(pdev->vdev);
- video_unregister_device(pdev->vdev);
+ struct pwc_device *pdev = container_of(vfd, struct pwc_device, vdev);
+ int hint;
-#ifdef CONFIG_USB_PWC_INPUT_EVDEV
- if (pdev->button_dev)
- input_unregister_device(pdev->button_dev);
-#endif
+ /* search device_hint[] table if we occupy a slot, by any chance */
+ for (hint = 0; hint < MAX_DEV_HINTS; hint++)
+ if (device_hint[hint].pdev == pdev)
+ device_hint[hint].pdev = NULL;
kfree(pdev);
}
@@ -1199,7 +1166,7 @@ static int pwc_video_close(struct file *file)
{
struct video_device *vdev = file->private_data;
struct pwc_device *pdev;
- int i, hint;
+ int i;
PWC_DEBUG_OPEN(">> video_close called(vdev = 0x%p).\n", vdev);
@@ -1234,12 +1201,6 @@ static int pwc_video_close(struct file *file)
}
pdev->vopen--;
PWC_DEBUG_OPEN("<< video_close() vopen=%d\n", pdev->vopen);
- } else {
- pwc_cleanup(pdev);
- /* search device_hint[] table if we occupy a slot, by any chance */
- for (hint = 0; hint < MAX_DEV_HINTS; hint++)
- if (device_hint[hint].pdev == pdev)
- device_hint[hint].pdev = NULL;
}
return 0;
@@ -1715,19 +1676,12 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
init_waitqueue_head(&pdev->frameq);
pdev->vcompression = pwc_preferred_compression;
- /* Allocate video_device structure */
- pdev->vdev = video_device_alloc();
- if (!pdev->vdev) {
- PWC_ERROR("Err, cannot allocate video_device struture. Failing probe.");
- rc = -ENOMEM;
- goto err_free_mem;
- }
- memcpy(pdev->vdev, &pwc_template, sizeof(pwc_template));
- pdev->vdev->parent = &intf->dev;
- pdev->vdev->lock = &pdev->modlock;
- pdev->vdev->ioctl_ops = &pwc_ioctl_ops;
- strcpy(pdev->vdev->name, name);
- video_set_drvdata(pdev->vdev, pdev);
+ /* Init video_device structure */
+ memcpy(&pdev->vdev, &pwc_template, sizeof(pwc_template));
+ pdev->vdev.parent = &intf->dev;
+ pdev->vdev.lock = &pdev->modlock;
+ strcpy(pdev->vdev.name, name);
+ video_set_drvdata(&pdev->vdev, pdev);
pdev->release = le16_to_cpu(udev->descriptor.bcdDevice);
PWC_DEBUG_PROBE("Release: %04x\n", pdev->release);
@@ -1746,8 +1700,6 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
}
}
- pdev->vdev->release = video_device_release;
-
/* occupy slot */
if (hint < MAX_DEV_HINTS)
device_hint[hint].pdev = pdev;
@@ -1759,16 +1711,16 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
pwc_set_leds(pdev, 0, 0);
pwc_camera_power(pdev, 0);
- rc = video_register_device(pdev->vdev, VFL_TYPE_GRABBER, video_nr);
+ rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, video_nr);
if (rc < 0) {
PWC_ERROR("Failed to register as video device (%d).\n", rc);
- goto err_video_release;
+ goto err_free_mem;
}
- rc = pwc_create_sysfs_files(pdev->vdev);
+ rc = pwc_create_sysfs_files(pdev);
if (rc)
goto err_video_unreg;
- PWC_INFO("Registered as %s.\n", video_device_node_name(pdev->vdev));
+ PWC_INFO("Registered as %s.\n", video_device_node_name(&pdev->vdev));
#ifdef CONFIG_USB_PWC_INPUT_EVDEV
/* register webcam snapshot button input device */
@@ -1776,7 +1728,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
if (!pdev->button_dev) {
PWC_ERROR("Err, insufficient memory for webcam snapshot button device.");
rc = -ENOMEM;
- pwc_remove_sysfs_files(pdev->vdev);
+ pwc_remove_sysfs_files(pdev);
goto err_video_unreg;
}
@@ -1794,7 +1746,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
if (rc) {
input_free_device(pdev->button_dev);
pdev->button_dev = NULL;
- pwc_remove_sysfs_files(pdev->vdev);
+ pwc_remove_sysfs_files(pdev);
goto err_video_unreg;
}
#endif
@@ -1804,10 +1756,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
err_video_unreg:
if (hint < MAX_DEV_HINTS)
device_hint[hint].pdev = NULL;
- video_unregister_device(pdev->vdev);
- pdev->vdev = NULL; /* So we don't try to release it below */
-err_video_release:
- video_device_release(pdev->vdev);
+ video_unregister_device(&pdev->vdev);
err_free_mem:
kfree(pdev);
return rc;
@@ -1816,10 +1765,8 @@ err_free_mem:
/* The user yanked out the cable... */
static void usb_pwc_disconnect(struct usb_interface *intf)
{
- struct pwc_device *pdev;
- int hint;
+ struct pwc_device *pdev = usb_get_intfdata(intf);
- pdev = usb_get_intfdata (intf);
mutex_lock(&pdev->modlock);
usb_set_intfdata (intf, NULL);
if (pdev == NULL) {
@@ -1836,30 +1783,25 @@ static void usb_pwc_disconnect(struct usb_interface *intf)
}
/* We got unplugged; this is signalled by an EPIPE error code */
- if (pdev->vopen) {
- PWC_INFO("Disconnected while webcam is in use!\n");
- pdev->error_status = EPIPE;
- }
+ pdev->error_status = EPIPE;
+ pdev->unplugged = 1;
/* Alert waiting processes */
wake_up_interruptible(&pdev->frameq);
- /* Wait until device is closed */
- if (pdev->vopen) {
- pdev->unplugged = 1;
- pwc_iso_stop(pdev);
- } else {
- /* Device is closed, so we can safely unregister it */
- PWC_DEBUG_PROBE("Unregistering video device in disconnect().\n");
-disconnect_out:
- /* search device_hint[] table if we occupy a slot, by any chance */
- for (hint = 0; hint < MAX_DEV_HINTS; hint++)
- if (device_hint[hint].pdev == pdev)
- device_hint[hint].pdev = NULL;
- }
+ /* No need to keep the urbs around after disconnection */
+ pwc_isoc_cleanup(pdev);
+disconnect_out:
mutex_unlock(&pdev->modlock);
- pwc_cleanup(pdev);
+
+ pwc_remove_sysfs_files(pdev);
+ video_unregister_device(&pdev->vdev);
+
+#ifdef CONFIG_USB_PWC_INPUT_EVDEV
+ if (pdev->button_dev)
+ input_unregister_device(pdev->button_dev);
+#endif
}
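The pwc conversion above follows the standard v4l2 lifetime pattern: embed struct video_device in the driver struct, give it a .release callback, and free the containing object only when the last reference to the device node is dropped, so an open file descriptor keeps the memory alive across an unplug. The essential shape (condensed; the real callback also clears the device_hint[] slot):

/* Embed-and-release lifetime pattern this patch switches pwc to. */
struct pwc_device {
	struct video_device vdev;	/* embedded, not a pointer */
	/* ... remaining driver state ... */
};

static void pwc_video_release(struct video_device *vfd)
{
	struct pwc_device *pdev = container_of(vfd, struct pwc_device, vdev);

	kfree(pdev);	/* safe: no open file handles remain */
}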
diff --git a/drivers/media/video/pwc/pwc.h b/drivers/media/video/pwc/pwc.h
index e947766337d..083f8b15df7 100644
--- a/drivers/media/video/pwc/pwc.h
+++ b/drivers/media/video/pwc/pwc.h
@@ -162,9 +162,9 @@ struct pwc_imgbuf
struct pwc_device
{
- struct video_device *vdev;
+ struct video_device vdev;
- /* Pointer to our usb_device */
+ /* Pointer to our usb_device, may be NULL after unplug */
struct usb_device *udev;
int type; /* type of cam (645, 646, 675, 680, 690, 720, 730, 740, 750) */
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index d142b40ea64..81b4a826ee5 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -1,7 +1,7 @@
/*
- * Samsung S5P SoC series camera interface (camera capture) driver
+ * Samsung S5P/EXYNOS4 SoC series camera interface (camera capture) driver
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd
+ * Copyright (C) 2010 - 2011 Samsung Electronics Co., Ltd.
* Author: Sylwester Nawrocki, <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -262,12 +262,7 @@ static unsigned int get_plane_size(struct fimc_frame *fr, unsigned int plane)
{
if (!fr || plane >= fr->fmt->memplanes)
return 0;
-
- dbg("%s: w: %d. h: %d. depth[%d]: %d",
- __func__, fr->width, fr->height, plane, fr->fmt->depth[plane]);
-
return fr->f_width * fr->f_height * fr->fmt->depth[plane] / 8;
-
}
static int queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
@@ -283,24 +278,14 @@ static int queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
*num_planes = fmt->memplanes;
- dbg("%s, buffer count=%d, plane count=%d",
- __func__, *num_buffers, *num_planes);
-
for (i = 0; i < fmt->memplanes; i++) {
sizes[i] = get_plane_size(&ctx->d_frame, i);
- dbg("plane: %u, plane_size: %lu", i, sizes[i]);
allocators[i] = ctx->fimc_dev->alloc_ctx;
}
return 0;
}
-static int buffer_init(struct vb2_buffer *vb)
-{
- /* TODO: */
- return 0;
-}
-
static int buffer_prepare(struct vb2_buffer *vb)
{
struct vb2_queue *vq = vb->vb2_queue;
@@ -380,7 +365,6 @@ static struct vb2_ops fimc_capture_qops = {
.queue_setup = queue_setup,
.buf_prepare = buffer_prepare,
.buf_queue = buffer_queue,
- .buf_init = buffer_init,
.wait_prepare = fimc_unlock,
.wait_finish = fimc_lock,
.start_streaming = start_streaming,
@@ -903,6 +887,7 @@ err_vd_reg:
err_v4l2_reg:
v4l2_device_unregister(v4l2_dev);
err_info:
+ kfree(ctx);
dev_err(&fimc->pdev->dev, "failed to install\n");
return ret;
}
diff --git a/drivers/media/video/s5p-fimc/fimc-core.c b/drivers/media/video/s5p-fimc/fimc-core.c
index dc91a8511af..bdf19ada917 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.c
+++ b/drivers/media/video/s5p-fimc/fimc-core.c
@@ -1,9 +1,8 @@
/*
- * S5P camera interface (video postprocessor) driver
+ * Samsung S5P/EXYNOS4 SoC series camera interface (video postprocessor) driver
*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd
- *
- * Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ * Copyright (C) 2010-2011 Samsung Electronics Co., Ltd.
+ * Contact: Sylwester Nawrocki, <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published
@@ -42,7 +41,6 @@ static struct fimc_fmt fimc_formats[] = {
.color = S5P_FIMC_RGB565,
.memplanes = 1,
.colplanes = 1,
- .mbus_code = V4L2_MBUS_FMT_RGB565_2X8_BE,
.flags = FMT_FLAGS_M2M,
}, {
.name = "BGR666",
@@ -232,11 +230,7 @@ static int fimc_get_scaler_factor(u32 src, u32 tar, u32 *ratio, u32 *shift)
return 0;
}
}
-
*shift = 0, *ratio = 1;
-
- dbg("s: %d, t: %d, shift: %d, ratio: %d",
- src, tar, *shift, *ratio);
return 0;
}
@@ -268,10 +262,8 @@ int fimc_set_scaler_info(struct fimc_ctx *ctx)
err("invalid source size: %d x %d", sx, sy);
return -EINVAL;
}
-
sc->real_width = sx;
sc->real_height = sy;
- dbg("sx= %d, sy= %d, tx= %d, ty= %d", sx, sy, tx, ty);
ret = fimc_get_scaler_factor(sx, tx, &sc->pre_hratio, &sc->hfactor);
if (ret)
@@ -711,22 +703,18 @@ static int fimc_queue_setup(struct vb2_queue *vq, unsigned int *num_buffers,
f = ctx_get_frame(ctx, vq->type);
if (IS_ERR(f))
return PTR_ERR(f);
-
/*
* Return number of non-contiguous planes (plane buffers)
* depending on the configured color format.
*/
- if (f->fmt)
- *num_planes = f->fmt->memplanes;
+ if (!f->fmt)
+ return -EINVAL;
+ *num_planes = f->fmt->memplanes;
for (i = 0; i < f->fmt->memplanes; i++) {
- sizes[i] = (f->width * f->height * f->fmt->depth[i]) >> 3;
+ sizes[i] = (f->f_width * f->f_height * f->fmt->depth[i]) / 8;
allocators[i] = ctx->fimc_dev->alloc_ctx;
}
-
- if (*num_buffers == 0)
- *num_buffers = 1;
-
return 0;
}
@@ -852,7 +840,7 @@ struct fimc_fmt *find_format(struct v4l2_format *f, unsigned int mask)
for (i = 0; i < ARRAY_SIZE(fimc_formats); ++i) {
fmt = &fimc_formats[i];
- if (fmt->fourcc == f->fmt.pix.pixelformat &&
+ if (fmt->fourcc == f->fmt.pix_mp.pixelformat &&
(fmt->flags & mask))
break;
}
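The queue_setup change above computes plane sizes from the full frame geometry (f_width x f_height) and the per-plane bit depth. A worked example with a hypothetical two-plane NV12-style format (depth[] = {8, 4} bits per pixel) at 1280x720:

/* Hypothetical format: memplanes = 2, depth[0] = 8, depth[1] = 4. */
sizes[0] = (1280 * 720 * 8) / 8;	/* 921600 bytes, luma plane   */
sizes[1] = (1280 * 720 * 4) / 8;	/* 460800 bytes, chroma plane */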
diff --git a/drivers/media/video/s5p-fimc/fimc-core.h b/drivers/media/video/s5p-fimc/fimc-core.h
index 3beb1e5320c..1f70772daaf 100644
--- a/drivers/media/video/s5p-fimc/fimc-core.h
+++ b/drivers/media/video/s5p-fimc/fimc-core.h
@@ -1,7 +1,5 @@
/*
- * Copyright (c) 2010 Samsung Electronics
- *
- * Sylwester Nawrocki, <s.nawrocki@samsung.com>
+ * Copyright (C) 2010 - 2011 Samsung Electronics Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -135,9 +133,10 @@ enum fimc_color_fmt {
* @name: format description
* @fourcc: the fourcc code for this format, 0 if not applicable
* @color: the corresponding fimc_color_fmt
- * @depth: per plane driver's private 'number of bits per pixel'
* @memplanes: number of physically non-contiguous data planes
* @colplanes: number of physically contiguous data planes
+ * @depth: per plane driver's private 'number of bits per pixel'
+ * @flags: flags indicating which operation mode format applies to
*/
struct fimc_fmt {
enum v4l2_mbus_pixelcode mbus_code;
@@ -171,7 +170,7 @@ struct fimc_dma_offset {
};
/**
- * struct fimc_effect - the configuration data for the "Arbitrary" image effect
+ * struct fimc_effect - color effect information
* @type: effect type
* @pat_cb: cb value when type is "arbitrary"
* @pat_cr: cr value when type is "arbitrary"
@@ -184,7 +183,6 @@ struct fimc_effect {
/**
* struct fimc_scaler - the configuration data for FIMC internal scaler
- *
* @scaleup_h: flag indicating scaling up horizontally
* @scaleup_v: flag indicating scaling up vertically
* @copy_mode: flag indicating transparent DMA transfer (no scaling
@@ -220,7 +218,6 @@ struct fimc_scaler {
/**
* struct fimc_addr - the FIMC physical address set for DMA
- *
* @y: luminance plane physical address
* @cb: Cb plane physical address
* @cr: Cr plane physical address
@@ -234,6 +231,7 @@ struct fimc_addr {
/**
* struct fimc_vid_buffer - the driver's video buffer
* @vb: v4l videobuf buffer
+ * @list: linked list structure for buffer queue
* @paddr: precalculated physical address set
* @index: buffer index for the output DMA engine
*/
@@ -254,11 +252,10 @@ struct fimc_vid_buffer {
* @offs_v: image vertical pixel offset
* @width: image pixel width
* @height: image pixel height
- * @paddr: image frame buffer physical addresses
- * @buf_cnt: number of buffers depending on a color format
* @payload: image size in bytes (w x h x bpp)
- * @color: color format
+ * @paddr: image frame buffer physical addresses
* @dma_offset: DMA offset in bytes
+ * @fmt: fimc color format pointer
*/
struct fimc_frame {
u32 f_width;
@@ -390,21 +387,22 @@ struct fimc_ctx;
/**
* struct fimc_dev - abstraction for FIMC entity
- *
* @slock: the spinlock protecting this data structure
* @lock: the mutex protecting this data structure
* @pdev: pointer to the FIMC platform device
* @pdata: pointer to the device platform data
+ * @variant: the IP variant information
* @id: FIMC device index (0..FIMC_MAX_DEVS)
* @num_clocks: the number of clocks managed by this device instance
- * @clock[]: the clocks required for FIMC operation
+ * @clock: clocks required for FIMC operation
* @regs: the mapped hardware registers
* @regs_res: the resource claimed for IO registers
- * @irq: interrupt number of the FIMC subdevice
- * @irq_queue:
+ * @irq: FIMC interrupt number
+ * @irq_queue: interrupt handler waitqueue
* @m2m: memory-to-memory V4L2 device information
* @vid_cap: camera capture device information
* @state: flags used to synchronize m2m and capture mode operation
+ * @alloc_ctx: videobuf2 memory allocator context
*/
struct fimc_dev {
spinlock_t slock;
@@ -427,8 +425,7 @@ struct fimc_dev {
/**
* fimc_ctx - the device context data
- *
- * @lock: mutex protecting this data structure
+ * @slock: spinlock protecting this data structure
* @s_frame: source frame properties
* @d_frame: destination frame properties
* @out_order_1p: output 1-plane YCBCR order
diff --git a/drivers/media/video/saa7134/saa7134-input.c b/drivers/media/video/saa7134/saa7134-input.c
index ff6c0e97563..d4ee24bf692 100644
--- a/drivers/media/video/saa7134/saa7134-input.c
+++ b/drivers/media/video/saa7134/saa7134-input.c
@@ -963,7 +963,7 @@ static int saa7134_raw_decode_irq(struct saa7134_dev *dev)
* to work with other protocols.
*/
if (!ir->active) {
- timeout = jiffies + jiffies_to_msecs(15);
+ timeout = jiffies + msecs_to_jiffies(15);
mod_timer(&ir->timer, timeout);
ir->active = true;
}
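The saa7134 one-liner above fixes a classic unit inversion: mod_timer() expects jiffies, so a 15 ms timeout must be converted from milliseconds to jiffies, not the reverse. The two helpers only coincide when HZ = 1000:

/* Why the conversion direction matters, e.g. with HZ = 250 (4 ms/jiffy):
 *   msecs_to_jiffies(15) ->  4 jiffies  (~16 ms, as intended)
 *   jiffies_to_msecs(15) -> 60          (15 jiffies in ms; misused as
 *                                        jiffies this arms ~240 ms out)
 */
timeout = jiffies + msecs_to_jiffies(15);
mod_timer(&ir->timer, timeout);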
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index 9363ed91a4c..cfa9f7efe93 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -724,19 +724,15 @@ static inline int check_mode(struct tuner *t, enum v4l2_tuner_type mode)
}
/**
- * set_mode_freq - Switch tuner to other mode.
- * @client: struct i2c_client pointer
+ * set_mode - Switch tuner to other mode.
* @t: a pointer to the module's internal struct_tuner
* @mode: enum v4l2_type (radio or TV)
- * @freq: frequency to set (0 means to use the previous one)
*
* If tuner doesn't support the needed mode (radio or TV), prints a
* debug message and returns -EINVAL, changing its state to standby.
- * Otherwise, changes the state and sets frequency to the last value, if
- * the tuner can sleep or if it supports both Radio and TV.
+ * Otherwise, changes the mode and returns 0.
*/
-static int set_mode_freq(struct i2c_client *client, struct tuner *t,
- enum v4l2_tuner_type mode, unsigned int freq)
+static int set_mode(struct tuner *t, enum v4l2_tuner_type mode)
{
struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
@@ -752,17 +748,27 @@ static int set_mode_freq(struct i2c_client *client, struct tuner *t,
t->mode = mode;
tuner_dbg("Changing to mode %d\n", mode);
}
+ return 0;
+}
+
+/**
+ * set_freq - Set the tuner to the desired frequency.
+ * @t: a pointer to the module's internal struct_tuner
+ * @freq: frequency to set (0 means to use the current frequency)
+ */
+static void set_freq(struct tuner *t, unsigned int freq)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&t->sd);
+
if (t->mode == V4L2_TUNER_RADIO) {
- if (freq)
- t->radio_freq = freq;
- set_radio_freq(client, t->radio_freq);
+ if (!freq)
+ freq = t->radio_freq;
+ set_radio_freq(client, freq);
} else {
- if (freq)
- t->tv_freq = freq;
- set_tv_freq(client, t->tv_freq);
+ if (!freq)
+ freq = t->tv_freq;
+ set_tv_freq(client, freq);
}
-
- return 0;
}
/*
@@ -817,7 +823,8 @@ static void set_tv_freq(struct i2c_client *c, unsigned int freq)
/**
* tuner_fixup_std - force a given video standard variant
*
- * @t: tuner internal struct
+ * @t: tuner internal struct
+ * @std: TV standard
*
* A few devices or drivers have problems detecting some standard variations.
* On other operating systems, the drivers generally have a per-country
@@ -827,57 +834,39 @@ static void set_tv_freq(struct i2c_client *c, unsigned int freq)
* to distinguish all video standard variations, a modprobe parameter can
* be used to force a video standard match.
*/
-static int tuner_fixup_std(struct tuner *t)
+static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std)
{
- if ((t->std & V4L2_STD_PAL) == V4L2_STD_PAL) {
+ if (pal[0] != '-' && (std & V4L2_STD_PAL) == V4L2_STD_PAL) {
switch (pal[0]) {
case '6':
- tuner_dbg("insmod fixup: PAL => PAL-60\n");
- t->std = V4L2_STD_PAL_60;
- break;
+ return V4L2_STD_PAL_60;
case 'b':
case 'B':
case 'g':
case 'G':
- tuner_dbg("insmod fixup: PAL => PAL-BG\n");
- t->std = V4L2_STD_PAL_BG;
- break;
+ return V4L2_STD_PAL_BG;
case 'i':
case 'I':
- tuner_dbg("insmod fixup: PAL => PAL-I\n");
- t->std = V4L2_STD_PAL_I;
- break;
+ return V4L2_STD_PAL_I;
case 'd':
case 'D':
case 'k':
case 'K':
- tuner_dbg("insmod fixup: PAL => PAL-DK\n");
- t->std = V4L2_STD_PAL_DK;
- break;
+ return V4L2_STD_PAL_DK;
case 'M':
case 'm':
- tuner_dbg("insmod fixup: PAL => PAL-M\n");
- t->std = V4L2_STD_PAL_M;
- break;
+ return V4L2_STD_PAL_M;
case 'N':
case 'n':
- if (pal[1] == 'c' || pal[1] == 'C') {
- tuner_dbg("insmod fixup: PAL => PAL-Nc\n");
- t->std = V4L2_STD_PAL_Nc;
- } else {
- tuner_dbg("insmod fixup: PAL => PAL-N\n");
- t->std = V4L2_STD_PAL_N;
- }
- break;
- case '-':
- /* default parameter, do nothing */
- break;
+ if (pal[1] == 'c' || pal[1] == 'C')
+ return V4L2_STD_PAL_Nc;
+ return V4L2_STD_PAL_N;
default:
tuner_warn("pal= argument not recognised\n");
break;
}
}
- if ((t->std & V4L2_STD_SECAM) == V4L2_STD_SECAM) {
+ if (secam[0] != '-' && (std & V4L2_STD_SECAM) == V4L2_STD_SECAM) {
switch (secam[0]) {
case 'b':
case 'B':
@@ -885,63 +874,42 @@ static int tuner_fixup_std(struct tuner *t)
case 'G':
case 'h':
case 'H':
- tuner_dbg("insmod fixup: SECAM => SECAM-BGH\n");
- t->std = V4L2_STD_SECAM_B |
- V4L2_STD_SECAM_G |
- V4L2_STD_SECAM_H;
- break;
+ return V4L2_STD_SECAM_B |
+ V4L2_STD_SECAM_G |
+ V4L2_STD_SECAM_H;
case 'd':
case 'D':
case 'k':
case 'K':
- tuner_dbg("insmod fixup: SECAM => SECAM-DK\n");
- t->std = V4L2_STD_SECAM_DK;
- break;
+ return V4L2_STD_SECAM_DK;
case 'l':
case 'L':
- if ((secam[1] == 'C') || (secam[1] == 'c')) {
- tuner_dbg("insmod fixup: SECAM => SECAM-L'\n");
- t->std = V4L2_STD_SECAM_LC;
- } else {
- tuner_dbg("insmod fixup: SECAM => SECAM-L\n");
- t->std = V4L2_STD_SECAM_L;
- }
- break;
- case '-':
- /* default parameter, do nothing */
- break;
+ if ((secam[1] == 'C') || (secam[1] == 'c'))
+ return V4L2_STD_SECAM_LC;
+ return V4L2_STD_SECAM_L;
default:
tuner_warn("secam= argument not recognised\n");
break;
}
}
- if ((t->std & V4L2_STD_NTSC) == V4L2_STD_NTSC) {
+ if (ntsc[0] != '-' && (std & V4L2_STD_NTSC) == V4L2_STD_NTSC) {
switch (ntsc[0]) {
case 'm':
case 'M':
- tuner_dbg("insmod fixup: NTSC => NTSC-M\n");
- t->std = V4L2_STD_NTSC_M;
- break;
+ return V4L2_STD_NTSC_M;
case 'j':
case 'J':
- tuner_dbg("insmod fixup: NTSC => NTSC_M_JP\n");
- t->std = V4L2_STD_NTSC_M_JP;
- break;
+ return V4L2_STD_NTSC_M_JP;
case 'k':
case 'K':
- tuner_dbg("insmod fixup: NTSC => NTSC_M_KR\n");
- t->std = V4L2_STD_NTSC_M_KR;
- break;
- case '-':
- /* default parameter, do nothing */
- break;
+ return V4L2_STD_NTSC_M_KR;
default:
tuner_info("ntsc= argument not recognised\n");
break;
}
}
- return 0;
+ return std;
}
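tuner_fixup_std() is now a pure function of the requested standard, with the '-' default handled by the pal[0]/secam[0]/ntsc[0] guards rather than per-switch cases. A usage sketch of the module-parameter override (assuming the tuner module is loaded with pal=i):

/* With "modprobe tuner pal=i", a request for the whole PAL family
 * narrows to PAL-I; with the default parameter (leading '-') the
 * requested standard comes back unchanged. */
v4l2_std_id requested = V4L2_STD_PAL;
v4l2_std_id effective = tuner_fixup_std(t, requested);	/* V4L2_STD_PAL_I */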
/*
@@ -1058,10 +1026,9 @@ static void tuner_status(struct dvb_frontend *fe)
static int tuner_s_radio(struct v4l2_subdev *sd)
{
struct tuner *t = to_tuner(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (set_mode_freq(client, t, V4L2_TUNER_RADIO, 0) == -EINVAL)
- return 0;
+ if (set_mode(t, V4L2_TUNER_RADIO) == 0)
+ set_freq(t, 0);
return 0;
}
@@ -1072,16 +1039,20 @@ static int tuner_s_radio(struct v4l2_subdev *sd)
/**
* tuner_s_power - controls the power state of the tuner
* @sd: pointer to struct v4l2_subdev
- * @on: a zero value puts the tuner to sleep
+ * @on: a zero value puts the tuner to sleep, non-zero wakes it up
*/
static int tuner_s_power(struct v4l2_subdev *sd, int on)
{
struct tuner *t = to_tuner(sd);
struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
- /* FIXME: Why this function don't wake the tuner if on != 0 ? */
- if (on)
+ if (on) {
+ if (t->standby && set_mode(t, t->mode) == 0) {
+ tuner_dbg("Waking up tuner\n");
+ set_freq(t, 0);
+ }
return 0;
+ }
tuner_dbg("Putting tuner to sleep\n");
t->standby = true;
@@ -1093,28 +1064,36 @@ static int tuner_s_power(struct v4l2_subdev *sd, int on)
static int tuner_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
struct tuner *t = to_tuner(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (set_mode_freq(client, t, V4L2_TUNER_ANALOG_TV, 0) == -EINVAL)
+ if (set_mode(t, V4L2_TUNER_ANALOG_TV))
return 0;
- t->std = std;
- tuner_fixup_std(t);
-
+ t->std = tuner_fixup_std(t, std);
+ if (t->std != std)
+ tuner_dbg("Fixup standard %llx to %llx\n", std, t->std);
+ set_freq(t, 0);
return 0;
}
static int tuner_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
{
struct tuner *t = to_tuner(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
-
- if (set_mode_freq(client, t, f->type, f->frequency) == -EINVAL)
- return 0;
+ if (set_mode(t, f->type) == 0)
+ set_freq(t, f->frequency);
return 0;
}
+/**
+ * tuner_g_frequency - Get the tuned frequency for the tuner
+ * @sd: pointer to struct v4l2_subdev
+ * @f: pointer to struct v4l2_frequency
+ *
+ * On return, the structure f will be filled with the tuner frequency
+ * if the tuner matches f->type.
+ * Note: f->type should be initialized before calling this function.
+ * This is done either by video_ioctl2 or by the bridge driver.
+ */
static int tuner_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
{
struct tuner *t = to_tuner(sd);
@@ -1122,8 +1101,7 @@ static int tuner_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
if (check_mode(t, f->type) == -EINVAL)
return 0;
- f->type = t->mode;
- if (fe_tuner_ops->get_frequency && !t->standby) {
+ if (f->type == t->mode && fe_tuner_ops->get_frequency && !t->standby) {
u32 abs_freq;
fe_tuner_ops->get_frequency(&t->fe, &abs_freq);
@@ -1131,12 +1109,22 @@ static int tuner_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
DIV_ROUND_CLOSEST(abs_freq * 2, 125) :
DIV_ROUND_CLOSEST(abs_freq, 62500);
} else {
- f->frequency = (V4L2_TUNER_RADIO == t->mode) ?
+ f->frequency = (V4L2_TUNER_RADIO == f->type) ?
t->radio_freq : t->tv_freq;
}
return 0;
}
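/*
 * Illustrative caller sketch, not part of this patch: per the kernel-doc
 * above, f->type must be filled in before the op is invoked, typically by
 * video_ioctl2 or by the bridge driver (example_* is a made-up name):
 */
static int example_get_radio_freq(struct v4l2_subdev *sd, u32 *freq)
{
	struct v4l2_frequency f = { .type = V4L2_TUNER_RADIO };
	int ret = v4l2_subdev_call(sd, tuner, g_frequency, &f);

	if (ret == 0)
		*freq = f.frequency;
	return ret;
}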
+/**
+ * tuner_g_tuner - Fill in tuner information
+ * @sd: pointer to struct v4l2_subdev
+ * @vt: pointer to struct v4l2_tuner
+ *
+ * On return, the structure vt will be filled with the tuner information
+ * if the tuner matches vt->type.
+ * Note: vt->type should be initialized before calling this function.
+ * This is done either by video_ioctl2 or by the bridge driver.
+ */
static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
struct tuner *t = to_tuner(sd);
@@ -1145,48 +1133,58 @@ static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
if (check_mode(t, vt->type) == -EINVAL)
return 0;
- vt->type = t->mode;
- if (analog_ops->get_afc)
+ if (vt->type == t->mode && analog_ops->get_afc)
vt->afc = analog_ops->get_afc(&t->fe);
- if (t->mode == V4L2_TUNER_ANALOG_TV)
+ if (vt->type == V4L2_TUNER_ANALOG_TV)
vt->capability |= V4L2_TUNER_CAP_NORM;
- if (t->mode != V4L2_TUNER_RADIO) {
+ if (vt->type != V4L2_TUNER_RADIO) {
vt->rangelow = tv_range[0] * 16;
vt->rangehigh = tv_range[1] * 16;
return 0;
}
/* radio mode */
- vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
- if (fe_tuner_ops->get_status) {
- u32 tuner_status;
-
- fe_tuner_ops->get_status(&t->fe, &tuner_status);
- vt->rxsubchans =
- (tuner_status & TUNER_STATUS_STEREO) ?
- V4L2_TUNER_SUB_STEREO :
- V4L2_TUNER_SUB_MONO;
+ if (vt->type == t->mode) {
+ vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
+ if (fe_tuner_ops->get_status) {
+ u32 tuner_status;
+
+ fe_tuner_ops->get_status(&t->fe, &tuner_status);
+ vt->rxsubchans =
+ (tuner_status & TUNER_STATUS_STEREO) ?
+ V4L2_TUNER_SUB_STEREO :
+ V4L2_TUNER_SUB_MONO;
+ }
+ if (analog_ops->has_signal)
+ vt->signal = analog_ops->has_signal(&t->fe);
+ vt->audmode = t->audmode;
}
- if (analog_ops->has_signal)
- vt->signal = analog_ops->has_signal(&t->fe);
vt->capability |= V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
- vt->audmode = t->audmode;
vt->rangelow = radio_range[0] * 16000;
vt->rangehigh = radio_range[1] * 16000;
return 0;
}
+/**
+ * tuner_s_tuner - Set the tuner's audio mode
+ * @sd: pointer to struct v4l2_subdev
+ * @vt: pointer to struct v4l2_tuner
+ *
+ * Sets the audio mode if the tuner matches vt->type.
+ * Note: vt->type should be initialized before calling this function.
+ * This is done either by video_ioctl2 or by the bridge driver.
+ */
static int tuner_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt)
{
struct tuner *t = to_tuner(sd);
- struct i2c_client *client = v4l2_get_subdevdata(sd);
- if (set_mode_freq(client, t, vt->type, 0) == -EINVAL)
+ if (set_mode(t, vt->type))
return 0;
if (t->mode == V4L2_TUNER_RADIO)
t->audmode = vt->audmode;
+ set_freq(t, 0);
return 0;
}
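/*
 * Illustrative caller sketch, not part of this patch: setting the radio
 * audio mode through the subdev op, with vt->type initialized by the
 * caller as the kernel-doc above requires (example_* is a made-up name):
 */
static int example_set_stereo(struct v4l2_subdev *sd)
{
	struct v4l2_tuner vt = {
		.type = V4L2_TUNER_RADIO,
		.audmode = V4L2_TUNER_MODE_STEREO,
	};

	return v4l2_subdev_call(sd, tuner, s_tuner, &vt);
}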
@@ -1221,7 +1219,8 @@ static int tuner_resume(struct i2c_client *c)
tuner_dbg("resume\n");
if (!t->standby)
- set_mode_freq(c, t, t->type, 0);
+ if (set_mode(t, t->mode) == 0)
+ set_freq(t, 0);
return 0;
}
diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c
index c3ab0c813be..48fea373c25 100644
--- a/drivers/media/video/uvc/uvc_entity.c
+++ b/drivers/media/video/uvc/uvc_entity.c
@@ -27,14 +27,20 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
struct uvc_entity *entity)
{
const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE;
- struct uvc_entity *remote;
+ struct media_entity *sink;
unsigned int i;
- u8 remote_pad;
- int ret = 0;
+ int ret;
+
+ sink = (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING)
+ ? (entity->vdev ? &entity->vdev->entity : NULL)
+ : &entity->subdev.entity;
+ if (sink == NULL)
+ return 0;
for (i = 0; i < entity->num_pads; ++i) {
struct media_entity *source;
- struct media_entity *sink;
+ struct uvc_entity *remote;
+ u8 remote_pad;
if (!(entity->pads[i].flags & MEDIA_PAD_FL_SINK))
continue;
@@ -43,10 +49,11 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
if (remote == NULL)
return -EINVAL;
- source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING)
- ? &remote->vdev->entity : &remote->subdev.entity;
- sink = (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING)
- ? &entity->vdev->entity : &entity->subdev.entity;
+ source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING)
+ ? (remote->vdev ? &remote->vdev->entity : NULL)
+ : &remote->subdev.entity;
+ if (source == NULL)
+ continue;
remote_pad = remote->num_pads - 1;
ret = media_entity_create_link(source, remote_pad,
@@ -55,11 +62,10 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
return ret;
}
- if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING)
- ret = v4l2_device_register_subdev(&chain->dev->vdev,
- &entity->subdev);
+ if (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING)
+ return 0;
- return ret;
+ return v4l2_device_register_subdev(&chain->dev->vdev, &entity->subdev);
}
static struct v4l2_subdev_ops uvc_subdev_ops = {
@@ -84,9 +90,11 @@ static int uvc_mc_init_entity(struct uvc_entity *entity)
ret = media_entity_init(&entity->subdev.entity,
entity->num_pads, entity->pads, 0);
- } else
+ } else if (entity->vdev != NULL) {
ret = media_entity_init(&entity->vdev->entity,
entity->num_pads, entity->pads, 0);
+ } else
+ ret = 0;
return ret;
}
diff --git a/drivers/media/video/uvc/uvc_queue.c b/drivers/media/video/uvc/uvc_queue.c
index 109a06384a8..f90ce9fce53 100644
--- a/drivers/media/video/uvc/uvc_queue.c
+++ b/drivers/media/video/uvc/uvc_queue.c
@@ -104,6 +104,8 @@ static int __uvc_free_buffers(struct uvc_video_queue *queue)
}
if (queue->count) {
+ uvc_queue_cancel(queue, 0);
+ INIT_LIST_HEAD(&queue->mainqueue);
vfree(queue->mem);
queue->count = 0;
}
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index fc766b9f24c..49994793cc7 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -1255,8 +1255,10 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)
/* Commit the streaming parameters. */
ret = uvc_commit_video(stream, &stream->ctrl);
- if (ret < 0)
+ if (ret < 0) {
+ uvc_queue_enable(&stream->queue, 0);
return ret;
+ }
return uvc_init_video(stream, GFP_KERNEL);
}
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c
index 19d5ae29378..06f14008b34 100644
--- a/drivers/media/video/v4l2-dev.c
+++ b/drivers/media/video/v4l2-dev.c
@@ -167,6 +167,12 @@ static void v4l2_device_release(struct device *cd)
mutex_unlock(&videodev_lock);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
+ vdev->vfl_type != VFL_TYPE_SUBDEV)
+ media_device_unregister_entity(&vdev->entity);
+#endif
+
/* Release video_device and perform other
cleanups as needed. */
vdev->release(vdev);
@@ -389,9 +395,6 @@ static int v4l2_mmap(struct file *filp, struct vm_area_struct *vm)
static int v4l2_open(struct inode *inode, struct file *filp)
{
struct video_device *vdev;
-#if defined(CONFIG_MEDIA_CONTROLLER)
- struct media_entity *entity = NULL;
-#endif
int ret = 0;
/* Check if the video device is available */
@@ -405,17 +408,6 @@ static int v4l2_open(struct inode *inode, struct file *filp)
/* and increase the device refcount */
video_get(vdev);
mutex_unlock(&videodev_lock);
-#if defined(CONFIG_MEDIA_CONTROLLER)
- if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
- vdev->vfl_type != VFL_TYPE_SUBDEV) {
- entity = media_entity_get(&vdev->entity);
- if (!entity) {
- ret = -EBUSY;
- video_put(vdev);
- return ret;
- }
- }
-#endif
if (vdev->fops->open) {
if (vdev->lock && mutex_lock_interruptible(vdev->lock)) {
ret = -ERESTARTSYS;
@@ -431,14 +423,8 @@ static int v4l2_open(struct inode *inode, struct file *filp)
err:
/* decrease the refcount in case of an error */
- if (ret) {
-#if defined(CONFIG_MEDIA_CONTROLLER)
- if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
- vdev->vfl_type != VFL_TYPE_SUBDEV)
- media_entity_put(entity);
-#endif
+ if (ret)
video_put(vdev);
- }
return ret;
}
@@ -455,11 +441,6 @@ static int v4l2_release(struct inode *inode, struct file *filp)
if (vdev->lock)
mutex_unlock(vdev->lock);
}
-#if defined(CONFIG_MEDIA_CONTROLLER)
- if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
- vdev->vfl_type != VFL_TYPE_SUBDEV)
- media_entity_put(&vdev->entity);
-#endif
/* decrease the refcount unconditionally since the release()
return value is ignored. */
video_put(vdev);
@@ -754,12 +735,6 @@ void video_unregister_device(struct video_device *vdev)
if (!vdev || !video_is_registered(vdev))
return;
-#if defined(CONFIG_MEDIA_CONTROLLER)
- if (vdev->v4l2_dev && vdev->v4l2_dev->mdev &&
- vdev->vfl_type != VFL_TYPE_SUBDEV)
- media_device_unregister_entity(&vdev->entity);
-#endif
-
mutex_lock(&videodev_lock);
/* This must be in a critical section to prevent a race with v4l2_open.
* Once this bit has been cleared video_get may never be called again.
diff --git a/drivers/media/video/v4l2-ioctl.c b/drivers/media/video/v4l2-ioctl.c
index 506edcc2dde..69e8c6ffcc4 100644
--- a/drivers/media/video/v4l2-ioctl.c
+++ b/drivers/media/video/v4l2-ioctl.c
@@ -1822,6 +1822,8 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_g_tuner)
break;
+ p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
ret = ops->vidioc_g_tuner(file, fh, p);
if (!ret)
dbgarg(cmd, "index=%d, name=%s, type=%d, "
@@ -1840,6 +1842,8 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_s_tuner)
break;
+ p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
dbgarg(cmd, "index=%d, name=%s, type=%d, "
"capability=0x%x, rangelow=%d, "
"rangehigh=%d, signal=%d, afc=%d, "
@@ -1858,6 +1862,8 @@ static long __video_do_ioctl(struct file *file,
if (!ops->vidioc_g_frequency)
break;
+ p->type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
ret = ops->vidioc_g_frequency(file, fh, p);
if (!ret)
dbgarg(cmd, "tuner=%d, type=%d, frequency=%d\n",
@@ -1940,13 +1946,19 @@ static long __video_do_ioctl(struct file *file,
case VIDIOC_S_HW_FREQ_SEEK:
{
struct v4l2_hw_freq_seek *p = arg;
+ enum v4l2_tuner_type type;
if (!ops->vidioc_s_hw_freq_seek)
break;
+ type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+ V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
dbgarg(cmd,
- "tuner=%d, type=%d, seek_upward=%d, wrap_around=%d\n",
- p->tuner, p->type, p->seek_upward, p->wrap_around);
- ret = ops->vidioc_s_hw_freq_seek(file, fh, p);
+ "tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u\n",
+ p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing);
+ if (p->type != type)
+ ret = -EINVAL;
+ else
+ ret = ops->vidioc_s_hw_freq_seek(file, fh, p);
break;
}
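/*
 * Illustrative userspace sketch, not part of this patch: with the type
 * check above, a seek issued on a radio node must carry a matching type
 * or the ioctl fails with -EINVAL (assumes <sys/ioctl.h> and
 * <linux/videodev2.h>; example_* is a made-up name):
 */
static int example_seek_upward(int fd)
{
	struct v4l2_hw_freq_seek seek = {
		.tuner = 0,
		.type = V4L2_TUNER_RADIO,	/* must match the device node */
		.seek_upward = 1,
		.wrap_around = 1,
	};

	return ioctl(fd, VIDIOC_S_HW_FREQ_SEEK, &seek);
}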
case VIDIOC_ENUM_FRAMESIZES:
diff --git a/drivers/media/video/videobuf2-core.c b/drivers/media/video/videobuf2-core.c
index 6ba1461d51e..3015e600094 100644
--- a/drivers/media/video/videobuf2-core.c
+++ b/drivers/media/video/videobuf2-core.c
@@ -492,13 +492,6 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
return -EINVAL;
}
- /*
- * If the same number of buffers and memory access method is requested
- * then return immediately.
- */
- if (q->memory == req->memory && req->count == q->num_buffers)
- return 0;
-
if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
/*
* We already have buffers allocated, so first check if they
@@ -539,9 +532,9 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
/* Finally, allocate buffers and video memory */
ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes,
plane_sizes);
- if (ret < 0) {
- dprintk(1, "Memory allocation failed with error: %d\n", ret);
- return ret;
+ if (ret == 0) {
+ dprintk(1, "Memory allocation failed\n");
+ return -ENOMEM;
}
/*
@@ -1196,6 +1189,7 @@ static void __vb2_queue_cancel(struct vb2_queue *q)
* has not already dequeued before initiating cancel.
*/
INIT_LIST_HEAD(&q->done_list);
+ atomic_set(&q->queued_count, 0);
wake_up_all(&q->done_wq);
/*
diff --git a/drivers/media/video/videobuf2-dma-sg.c b/drivers/media/video/videobuf2-dma-sg.c
index b2d9485aac7..10a20d9509d 100644
--- a/drivers/media/video/videobuf2-dma-sg.c
+++ b/drivers/media/video/videobuf2-dma-sg.c
@@ -62,7 +62,7 @@ static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size)
goto fail_pages_array_alloc;
for (i = 0; i < buf->sg_desc.num_pages; ++i) {
- buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ buf->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN);
if (NULL == buf->pages[i])
goto fail_pages_alloc;
sg_set_page(&buf->sg_desc.sglist[i],
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 0f09c057e79..6ca938a6bf9 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -728,6 +728,9 @@ config MFD_TPS65910
if you say yes here you get support for the TPS65910 series of
Power Management chips.
+config TPS65911_COMPARATOR
+ tristate
+
endif # MFD_SUPPORT
menu "Multimedia Capabilities Port drivers"
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index efe3cc33ed9..d7d47d2a4c7 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -94,3 +94,4 @@ obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o
obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o
obj-$(CONFIG_MFD_PM8XXX_IRQ) += pm8xxx-irq.o
obj-$(CONFIG_MFD_TPS65910) += tps65910.o tps65910-irq.o
+obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index c27fd1fc3b8..c71ae09430c 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -619,6 +619,7 @@ static void asic3_clk_disable(struct asic3 *asic, struct asic3_clk *clk)
/* MFD cells (SPI, PWM, LED, DS1WM, MMC) */
static struct ds1wm_driver_data ds1wm_pdata = {
.active_high = 1,
+ .reset_recover_delay = 1,
};
static struct resource ds1wm_resources[] = {
diff --git a/drivers/mfd/htc-pasic3.c b/drivers/mfd/htc-pasic3.c
index 2808bd125d1..04c7093d649 100644
--- a/drivers/mfd/htc-pasic3.c
+++ b/drivers/mfd/htc-pasic3.c
@@ -99,6 +99,7 @@ static int ds1wm_disable(struct platform_device *pdev)
static struct ds1wm_driver_data ds1wm_pdata = {
.active_high = 0,
+ .reset_recover_delay = 1,
};
static struct resource ds1wm_resources[] __initdata = {
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 855219526cc..1717144fe7f 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -26,7 +26,6 @@
#include <linux/spinlock.h>
#include <linux/gpio.h>
#include <plat/usb.h>
-#include <linux/pm_runtime.h>
#define USBHS_DRIVER_NAME "usbhs-omap"
#define OMAP_EHCI_DEVICE "ehci-omap"
@@ -147,6 +146,9 @@
struct usbhs_hcd_omap {
+ struct clk *usbhost_ick;
+ struct clk *usbhost_hs_fck;
+ struct clk *usbhost_fs_fck;
struct clk *xclk60mhsp1_ck;
struct clk *xclk60mhsp2_ck;
struct clk *utmi_p1_fck;
@@ -156,6 +158,8 @@ struct usbhs_hcd_omap {
struct clk *usbhost_p2_fck;
struct clk *usbtll_p2_fck;
struct clk *init_60m_fclk;
+ struct clk *usbtll_fck;
+ struct clk *usbtll_ick;
void __iomem *uhh_base;
void __iomem *tll_base;
@@ -349,13 +353,46 @@ static int __devinit usbhs_omap_probe(struct platform_device *pdev)
omap->platdata.ehci_data = pdata->ehci_data;
omap->platdata.ohci_data = pdata->ohci_data;
- pm_runtime_enable(&pdev->dev);
+ omap->usbhost_ick = clk_get(dev, "usbhost_ick");
+ if (IS_ERR(omap->usbhost_ick)) {
+ ret = PTR_ERR(omap->usbhost_ick);
+ dev_err(dev, "usbhost_ick failed error:%d\n", ret);
+ goto err_end;
+ }
+
+ omap->usbhost_hs_fck = clk_get(dev, "hs_fck");
+ if (IS_ERR(omap->usbhost_hs_fck)) {
+ ret = PTR_ERR(omap->usbhost_hs_fck);
+ dev_err(dev, "usbhost_hs_fck failed error:%d\n", ret);
+ goto err_usbhost_ick;
+ }
+
+ omap->usbhost_fs_fck = clk_get(dev, "fs_fck");
+ if (IS_ERR(omap->usbhost_fs_fck)) {
+ ret = PTR_ERR(omap->usbhost_fs_fck);
+ dev_err(dev, "usbhost_fs_fck failed error:%d\n", ret);
+ goto err_usbhost_hs_fck;
+ }
+
+ omap->usbtll_fck = clk_get(dev, "usbtll_fck");
+ if (IS_ERR(omap->usbtll_fck)) {
+ ret = PTR_ERR(omap->usbtll_fck);
+ dev_err(dev, "usbtll_fck failed error:%d\n", ret);
+ goto err_usbhost_fs_fck;
+ }
+
+ omap->usbtll_ick = clk_get(dev, "usbtll_ick");
+ if (IS_ERR(omap->usbtll_ick)) {
+ ret = PTR_ERR(omap->usbtll_ick);
+ dev_err(dev, "usbtll_ick failed error:%d\n", ret);
+ goto err_usbtll_fck;
+ }
omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
if (IS_ERR(omap->utmi_p1_fck)) {
ret = PTR_ERR(omap->utmi_p1_fck);
dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
- goto err_end;
+ goto err_usbtll_ick;
}
omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
@@ -485,8 +522,22 @@ err_xclk60mhsp1_ck:
err_utmi_p1_fck:
clk_put(omap->utmi_p1_fck);
+err_usbtll_ick:
+ clk_put(omap->usbtll_ick);
+
+err_usbtll_fck:
+ clk_put(omap->usbtll_fck);
+
+err_usbhost_fs_fck:
+ clk_put(omap->usbhost_fs_fck);
+
+err_usbhost_hs_fck:
+ clk_put(omap->usbhost_hs_fck);
+
+err_usbhost_ick:
+ clk_put(omap->usbhost_ick);
+
err_end:
- pm_runtime_disable(&pdev->dev);
kfree(omap);
end_probe:
@@ -520,7 +571,11 @@ static int __devexit usbhs_omap_remove(struct platform_device *pdev)
clk_put(omap->utmi_p2_fck);
clk_put(omap->xclk60mhsp1_ck);
clk_put(omap->utmi_p1_fck);
- pm_runtime_disable(&pdev->dev);
+ clk_put(omap->usbtll_ick);
+ clk_put(omap->usbtll_fck);
+ clk_put(omap->usbhost_fs_fck);
+ clk_put(omap->usbhost_hs_fck);
+ clk_put(omap->usbhost_ick);
kfree(omap);
return 0;
@@ -640,6 +695,7 @@ static int usbhs_enable(struct device *dev)
struct usbhs_omap_platform_data *pdata = &omap->platdata;
unsigned long flags = 0;
int ret = 0;
+ unsigned long timeout;
unsigned reg;
dev_dbg(dev, "starting TI HSUSB Controller\n");
@@ -652,7 +708,11 @@ static int usbhs_enable(struct device *dev)
if (omap->count > 0)
goto end_count;
- pm_runtime_get_sync(dev);
+ clk_enable(omap->usbhost_ick);
+ clk_enable(omap->usbhost_hs_fck);
+ clk_enable(omap->usbhost_fs_fck);
+ clk_enable(omap->usbtll_fck);
+ clk_enable(omap->usbtll_ick);
if (pdata->ehci_data->phy_reset) {
if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) {
@@ -676,6 +736,50 @@ static int usbhs_enable(struct device *dev)
omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
+ /* perform TLL soft reset, and wait until reset is complete */
+ usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
+ OMAP_USBTLL_SYSCONFIG_SOFTRESET);
+
+ /* Wait for TLL reset to complete */
+ timeout = jiffies + msecs_to_jiffies(1000);
+ while (!(usbhs_read(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
+ & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout)) {
+ dev_dbg(dev, "operation timed out\n");
+ ret = -EINVAL;
+ goto err_tll;
+ }
+ }
+
+ dev_dbg(dev, "TLL RESET DONE\n");
+
+ /* (1<<3) = no idle mode only for initial debugging */
+ usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
+ OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
+ OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
+ OMAP_USBTLL_SYSCONFIG_AUTOIDLE);
+
+ /* Put UHH in NoIdle/NoStandby mode */
+ reg = usbhs_read(omap->uhh_base, OMAP_UHH_SYSCONFIG);
+ if (is_omap_usbhs_rev1(omap)) {
+ reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
+ | OMAP_UHH_SYSCONFIG_SIDLEMODE
+ | OMAP_UHH_SYSCONFIG_CACTIVITY
+ | OMAP_UHH_SYSCONFIG_MIDLEMODE);
+ reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
+
+
+ } else if (is_omap_usbhs_rev2(omap)) {
+ reg &= ~OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR;
+ reg |= OMAP4_UHH_SYSCONFIG_NOIDLE;
+ reg &= ~OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR;
+ reg |= OMAP4_UHH_SYSCONFIG_NOSTDBY;
+ }
+
+ usbhs_write(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
+
reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
/* setup ULPI bypass and burst configurations */
reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
@@ -815,8 +919,6 @@ end_count:
return 0;
err_tll:
- pm_runtime_put_sync(dev);
- spin_unlock_irqrestore(&omap->lock, flags);
if (pdata->ehci_data->phy_reset) {
if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
gpio_free(pdata->ehci_data->reset_gpio_port[0]);
@@ -824,6 +926,13 @@ err_tll:
if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
gpio_free(pdata->ehci_data->reset_gpio_port[1]);
}
+
+ clk_disable(omap->usbtll_ick);
+ clk_disable(omap->usbtll_fck);
+ clk_disable(omap->usbhost_fs_fck);
+ clk_disable(omap->usbhost_hs_fck);
+ clk_disable(omap->usbhost_ick);
+ spin_unlock_irqrestore(&omap->lock, flags);
return ret;
}
@@ -896,7 +1005,11 @@ static void usbhs_disable(struct device *dev)
clk_disable(omap->utmi_p1_fck);
}
- pm_runtime_put_sync(dev);
+ clk_disable(omap->usbtll_ick);
+ clk_disable(omap->usbtll_fck);
+ clk_disable(omap->usbhost_fs_fck);
+ clk_disable(omap->usbhost_hs_fck);
+ clk_disable(omap->usbhost_ick);
/* The gpio_free might sleep; so unlock the spinlock */
spin_unlock_irqrestore(&omap->lock, flags);
diff --git a/drivers/mfd/tps65911-comparator.c b/drivers/mfd/tps65911-comparator.c
index 3d2dc56a3d4..283ac675975 100644
--- a/drivers/mfd/tps65911-comparator.c
+++ b/drivers/mfd/tps65911-comparator.c
@@ -125,7 +125,7 @@ static DEVICE_ATTR(comp2_threshold, S_IRUGO, comp_threshold_show, NULL);
static __devinit int tps65911_comparator_probe(struct platform_device *pdev)
{
struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
- struct tps65910_platform_data *pdata = dev_get_platdata(tps65910->dev);
+ struct tps65910_board *pdata = dev_get_platdata(tps65910->dev);
int ret;
ret = comp_threshold_set(tps65910, COMP1, pdata->vmbch_threshold);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 2a7e43bc796..aa7d1d79b8c 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -247,12 +247,12 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
return 0;
/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
+ card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
if (card->csd.structure == 3) {
- int ext_csd_struct = ext_csd[EXT_CSD_STRUCTURE];
- if (ext_csd_struct > 2) {
+ if (card->ext_csd.raw_ext_csd_structure > 2) {
printk(KERN_ERR "%s: unrecognised EXT_CSD structure "
"version %d\n", mmc_hostname(card->host),
- ext_csd_struct);
+ card->ext_csd.raw_ext_csd_structure);
err = -EINVAL;
goto out;
}
@@ -266,6 +266,10 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
goto out;
}
+ card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
+ card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
+ card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
+ card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
if (card->ext_csd.rev >= 2) {
card->ext_csd.sectors =
ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
@@ -277,7 +281,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
mmc_card_set_blockaddr(card);
}
-
+ card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
EXT_CSD_CARD_TYPE_26:
@@ -307,6 +311,11 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
mmc_hostname(card->host));
}
+ card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
+ card->ext_csd.raw_erase_timeout_mult =
+ ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
+ card->ext_csd.raw_hc_erase_grp_size =
+ ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
if (card->ext_csd.rev >= 3) {
u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
@@ -334,6 +343,16 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->ext_csd.boot_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
}
+ card->ext_csd.raw_hc_erase_gap_size =
+ ext_csd[EXT_CSD_PARTITION_ATTRIBUTE];
+ card->ext_csd.raw_sec_trim_mult =
+ ext_csd[EXT_CSD_SEC_TRIM_MULT];
+ card->ext_csd.raw_sec_erase_mult =
+ ext_csd[EXT_CSD_SEC_ERASE_MULT];
+ card->ext_csd.raw_sec_feature_support =
+ ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
+ card->ext_csd.raw_trim_mult =
+ ext_csd[EXT_CSD_TRIM_MULT];
if (card->ext_csd.rev >= 4) {
/*
* Enhanced area feature support -- check whether the eMMC
@@ -341,7 +360,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
* area offset and size to user by adding sysfs interface.
*/
if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
- (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
+ (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
u8 hc_erase_grp_sz =
ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
u8 hc_wp_grp_sz =
@@ -401,17 +420,17 @@ static inline void mmc_free_ext_csd(u8 *ext_csd)
}
-static int mmc_compare_ext_csds(struct mmc_card *card, u8 *ext_csd,
- unsigned bus_width)
+static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
{
u8 *bw_ext_csd;
int err;
+ if (bus_width == MMC_BUS_WIDTH_1)
+ return 0;
+
err = mmc_get_ext_csd(card, &bw_ext_csd);
- if (err)
- return err;
- if ((ext_csd == NULL || bw_ext_csd == NULL)) {
+ if (err || bw_ext_csd == NULL) {
if (bus_width != MMC_BUS_WIDTH_1)
err = -EINVAL;
goto out;
@@ -421,35 +440,40 @@ static int mmc_compare_ext_csds(struct mmc_card *card, u8 *ext_csd,
goto out;
/* only compare read only fields */
- err = (!(ext_csd[EXT_CSD_PARTITION_SUPPORT] ==
+ err = (!(card->ext_csd.raw_partition_support ==
bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
- (ext_csd[EXT_CSD_ERASED_MEM_CONT] ==
+ (card->ext_csd.raw_erased_mem_count ==
bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
- (ext_csd[EXT_CSD_REV] ==
+ (card->ext_csd.rev ==
bw_ext_csd[EXT_CSD_REV]) &&
- (ext_csd[EXT_CSD_STRUCTURE] ==
+ (card->ext_csd.raw_ext_csd_structure ==
bw_ext_csd[EXT_CSD_STRUCTURE]) &&
- (ext_csd[EXT_CSD_CARD_TYPE] ==
+ (card->ext_csd.raw_card_type ==
bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
- (ext_csd[EXT_CSD_S_A_TIMEOUT] ==
+ (card->ext_csd.raw_s_a_timeout ==
bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
- (ext_csd[EXT_CSD_HC_WP_GRP_SIZE] ==
+ (card->ext_csd.raw_hc_erase_gap_size ==
bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
- (ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT] ==
+ (card->ext_csd.raw_erase_timeout_mult ==
bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
- (ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] ==
+ (card->ext_csd.raw_hc_erase_grp_size ==
bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
- (ext_csd[EXT_CSD_SEC_TRIM_MULT] ==
+ (card->ext_csd.raw_sec_trim_mult ==
bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
- (ext_csd[EXT_CSD_SEC_ERASE_MULT] ==
+ (card->ext_csd.raw_sec_erase_mult ==
bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
- (ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT] ==
+ (card->ext_csd.raw_sec_feature_support ==
bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
- (ext_csd[EXT_CSD_TRIM_MULT] ==
+ (card->ext_csd.raw_trim_mult ==
bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
- memcmp(&ext_csd[EXT_CSD_SEC_CNT],
- &bw_ext_csd[EXT_CSD_SEC_CNT],
- 4) != 0);
+ (card->ext_csd.raw_sectors[0] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
+ (card->ext_csd.raw_sectors[1] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
+ (card->ext_csd.raw_sectors[2] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
+ (card->ext_csd.raw_sectors[3] ==
+ bw_ext_csd[EXT_CSD_SEC_CNT + 3]));
if (err)
err = -EINVAL;
@@ -770,7 +794,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
*/
if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
err = mmc_compare_ext_csds(card,
- ext_csd,
bus_width);
else
err = mmc_bus_test(card, bus_width);
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 7721de942c6..fe140724a02 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -582,6 +582,8 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
data->error = -EILSEQ;
} else if (status & MCI_DATATIMEOUT) {
data->error = -ETIMEDOUT;
+ } else if (status & MCI_STARTBITERR) {
+ data->error = -ECOMM;
} else if (status & MCI_TXUNDERRUN) {
data->error = -EIO;
} else if (status & MCI_RXOVERRUN) {
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index bb32e21c09d..2164e8c6476 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -86,6 +86,7 @@
#define MCI_CMDRESPEND (1 << 6)
#define MCI_CMDSENT (1 << 7)
#define MCI_DATAEND (1 << 8)
+#define MCI_STARTBITERR (1 << 9)
#define MCI_DATABLOCKEND (1 << 10)
#define MCI_CMDACTIVE (1 << 11)
#define MCI_TXACTIVE (1 << 12)
@@ -112,6 +113,7 @@
#define MCI_CMDRESPENDCLR (1 << 6)
#define MCI_CMDSENTCLR (1 << 7)
#define MCI_DATAENDCLR (1 << 8)
+#define MCI_STARTBITERRCLR (1 << 9)
#define MCI_DATABLOCKENDCLR (1 << 10)
/* Extended status bits for the ST Micro variants */
#define MCI_ST_SDIOITC (1 << 22)
@@ -127,6 +129,7 @@
#define MCI_CMDRESPENDMASK (1 << 6)
#define MCI_CMDSENTMASK (1 << 7)
#define MCI_DATAENDMASK (1 << 8)
+#define MCI_STARTBITERRMASK (1 << 9)
#define MCI_DATABLOCKENDMASK (1 << 10)
#define MCI_CMDACTIVEMASK (1 << 11)
#define MCI_TXACTIVEMASK (1 << 12)
@@ -150,7 +153,7 @@
#define MCI_IRQENABLE \
(MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK| \
MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK| \
- MCI_CMDRESPENDMASK|MCI_CMDSENTMASK)
+ MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_STARTBITERRMASK)
/* These interrupts are directed to IRQ1 when two IRQ lines are available */
#define MCI_IRQ1MASK \
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 8f8b65af9ed..60f46bc2bf6 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -140,7 +140,7 @@ MODULE_LICENSE("GPL");
module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
-module_param(dspcfg_workaround, int, 1);
+module_param(dspcfg_workaround, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
@@ -2028,8 +2028,8 @@ static void drain_rx(struct net_device *dev)
np->rx_ring[i].cmd_status = 0;
np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
if (np->rx_skbuff[i]) {
- pci_unmap_single(np->pci_dev,
- np->rx_dma[i], buflen,
+ pci_unmap_single(np->pci_dev, np->rx_dma[i],
+ buflen + NATSEMI_PADDING,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(np->rx_skbuff[i]);
}
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 3da168a859c..ad35c210b83 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -139,6 +139,8 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
.tpauser = 1,
.hw_swap = 1,
.no_ade = 1,
+ .rpadir = 1,
+ .rpadir_value = 2 << 16,
};
#define SH_GIGA_ETH_BASE 0xfee00000
@@ -1185,8 +1187,8 @@ static void sh_eth_adjust_link(struct net_device *ndev)
mdp->cd->set_rate(ndev);
}
if (mdp->link == PHY_DOWN) {
- sh_eth_write(ndev, (sh_eth_read(ndev, ECMR) & ~ECMR_TXF)
- | ECMR_DM, ECMR);
+ sh_eth_write(ndev,
+ (sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
new_state = 1;
mdp->link = phydev->link;
}
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 11c9ab3b1e1..8befe697bd7 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -182,11 +182,11 @@ static int sl_alloc_bufs(struct slip *sl, int mtu)
#ifdef SL_INCLUDE_CSLIP
cbuff = xchg(&sl->cbuff, cbuff);
slcomp = xchg(&sl->slcomp, slcomp);
+#endif
#ifdef CONFIG_SLIP_MODE_SLIP6
sl->xdata = 0;
sl->xbits = 0;
#endif
-#endif
spin_unlock_bh(&sl->lock);
err = 0;
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 387ca43f26f..304fe78ff60 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2421,10 +2421,8 @@ static void hso_free_net_device(struct hso_device *hso_dev)
remove_net_device(hso_net->parent);
- if (hso_net->net) {
+ if (hso_net->net)
unregister_netdev(hso_net->net);
- free_netdev(hso_net->net);
- }
/* start freeing */
for (i = 0; i < MUX_BULK_RX_BUF_COUNT; i++) {
@@ -2436,6 +2434,9 @@ static void hso_free_net_device(struct hso_device *hso_dev)
kfree(hso_net->mux_bulk_tx_buf);
hso_net->mux_bulk_tx_buf = NULL;
+ if (hso_net->net)
+ free_netdev(hso_net->net);
+
kfree(hso_dev);
}
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index fabcded7c6a..009277e1084 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2897,6 +2897,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
else
#endif
num_rx_queues = 1;
+ num_rx_queues = rounddown_pow_of_two(num_rx_queues);
if (enable_mq)
num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
@@ -2904,6 +2905,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
else
num_tx_queues = 1;
+ num_tx_queues = rounddown_pow_of_two(num_tx_queues);
netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
max(num_tx_queues, num_rx_queues));
printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
@@ -3088,6 +3090,7 @@ vmxnet3_remove_device(struct pci_dev *pdev)
else
#endif
num_rx_queues = 1;
+ num_rx_queues = rounddown_pow_of_two(num_rx_queues);
cancel_work_sync(&adapter->work);
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index a9cb3fabb17..b18eac1dcca 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -56,6 +56,7 @@
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
+#include <linux/log2.h>
#include "vmxnet3_defs.h"
@@ -69,10 +70,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.1.14.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.1.18.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01010E00
+#define VMXNET3_DRIVER_VERSION_NUM 0x01011200
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index 62172d58572..f82383b3ed3 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -107,10 +107,13 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
case AR5K_PKT_TYPE_BEACON:
case AR5K_PKT_TYPE_PROBE_RESP:
frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY;
+ break;
case AR5K_PKT_TYPE_PIFS:
frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS;
+ break;
default:
frame_type = type;
+ break;
}
tx_ctl->tx_control_0 |=
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index 296c316a834..f2c0c236392 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -297,7 +297,9 @@ ath5k_pci_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int ath5k_pci_suspend(struct device *dev)
{
- struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ath5k_softc *sc = hw->priv;
ath5k_led_off(sc);
return 0;
@@ -306,7 +308,8 @@ static int ath5k_pci_suspend(struct device *dev)
static int ath5k_pci_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
- struct ath5k_softc *sc = pci_get_drvdata(pdev);
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct ath5k_softc *sc = hw->priv;
/*
* Suspend/Resume resets the PCI configuration space, so we have to
diff --git a/drivers/net/wireless/ath/ath5k/sysfs.c b/drivers/net/wireless/ath/ath5k/sysfs.c
index 929c68cdf8a..a073cdce1f1 100644
--- a/drivers/net/wireless/ath/ath5k/sysfs.c
+++ b/drivers/net/wireless/ath/ath5k/sysfs.c
@@ -10,7 +10,8 @@ static ssize_t ath5k_attr_show_##name(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
- struct ath5k_softc *sc = dev_get_drvdata(dev); \
+ struct ieee80211_hw *hw = dev_get_drvdata(dev); \
+ struct ath5k_softc *sc = hw->priv; \
return snprintf(buf, PAGE_SIZE, "%d\n", get); \
} \
\
@@ -18,7 +19,8 @@ static ssize_t ath5k_attr_store_##name(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
- struct ath5k_softc *sc = dev_get_drvdata(dev); \
+ struct ieee80211_hw *hw = dev_get_drvdata(dev); \
+ struct ath5k_softc *sc = hw->priv; \
int val; \
\
val = (int)simple_strtoul(buf, NULL, 10); \
@@ -33,7 +35,8 @@ static ssize_t ath5k_attr_show_##name(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
- struct ath5k_softc *sc = dev_get_drvdata(dev); \
+ struct ieee80211_hw *hw = dev_get_drvdata(dev); \
+ struct ath5k_softc *sc = hw->priv; \
return snprintf(buf, PAGE_SIZE, "%d\n", get); \
} \
static DEVICE_ATTR(name, S_IRUGO, ath5k_attr_show_##name, NULL)
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 52dadfc3884..6eb58b16ab0 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -662,7 +662,8 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
* TODO - this could be improved to be dependent on the rate.
* The hardware can keep up at lower rates, but not higher rates
*/
- if (fi->keyix != ATH9K_TXKEYIX_INVALID)
+ if ((fi->keyix != ATH9K_TXKEYIX_INVALID) &&
+ !(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA))
ndelim += ATH_AGGR_ENCRYPTDELIM;
/*
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 2fb53d06751..333b69ef2ae 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -112,6 +112,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
{ USB_DEVICE(0x04bb, 0x093f) },
/* NEC WL300NU-G */
{ USB_DEVICE(0x0409, 0x0249) },
+ /* NEC WL300NU-AG */
+ { USB_DEVICE(0x0409, 0x02b4) },
/* AVM FRITZ!WLAN USB Stick N */
{ USB_DEVICE(0x057c, 0x8401) },
/* AVM FRITZ!WLAN USB Stick N 2.4 */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 092e342c19d..942f7a3969a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -298,6 +298,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
+ {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
{RTL_USB_DEVICE(0x0Df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
/* HP - Lite-On ,8188CUS Slim Combo */
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 2c5b9b99127..692671b1166 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3483,6 +3483,8 @@ static int __init pci_setup(char *str)
pci_no_msi();
} else if (!strcmp(str, "noaer")) {
pci_no_aer();
+ } else if (!strncmp(str, "realloc", 7)) {
+ pci_realloc();
} else if (!strcmp(str, "nodomains")) {
pci_no_domains();
} else if (!strncmp(str, "cbiosize=", 9)) {
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 731e20265ac..3a39bf1f1e2 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -146,6 +146,8 @@ static inline void pci_no_msi(void) { }
static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { }
#endif
+extern void pci_realloc(void);
+
static inline int pci_no_d1d2(struct pci_dev *dev)
{
unsigned int parent_dstates = 0;
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 1e9e5a5b8c8..9995842e45b 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -47,6 +47,13 @@ struct resource_list_x {
(head)->next = NULL; \
} while (0)
+int pci_realloc_enable = 0;
+#define pci_realloc_enabled() pci_realloc_enable
+void pci_realloc(void)
+{
+ pci_realloc_enable = 1;
+}
+
/**
* add_to_list() - add a new resource tracker to the list
* @head: Head of the list
@@ -1025,6 +1032,7 @@ static int __init pci_get_max_depth(void)
return depth;
}
+
/*
* first try will not touch pci bridge res
* second and later try will clear small leaf bridge res
@@ -1068,6 +1076,13 @@ again:
/* any device complain? */
if (!head.next)
goto enable_and_dump;
+
+ /* don't realloc unless asked to do so */
+ if (!pci_realloc_enabled()) {
+ free_list(resource_list_x, &head);
+ goto enable_and_dump;
+ }
+
failed_type = 0;
for (list = head.next; list;) {
failed_type |= list->flags;
diff --git a/drivers/pcmcia/pxa2xx_vpac270.c b/drivers/pcmcia/pxa2xx_vpac270.c
index 712baab3c83..e956f659089 100644
--- a/drivers/pcmcia/pxa2xx_vpac270.c
+++ b/drivers/pcmcia/pxa2xx_vpac270.c
@@ -76,10 +76,10 @@ static int vpac270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
static void vpac270_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
{
if (skt->nr == 0)
- gpio_request_array(vpac270_pcmcia_gpios,
+ gpio_free_array(vpac270_pcmcia_gpios,
ARRAY_SIZE(vpac270_pcmcia_gpios));
else
- gpio_request_array(vpac270_cf_gpios,
+ gpio_free_array(vpac270_cf_gpios,
ARRAY_SIZE(vpac270_cf_gpios));
}
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 005417bd429..e1c4938b301 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -1156,9 +1156,9 @@ static acpi_status wmid3_set_device_status(u32 value, u16 device)
struct wmid3_gds_input_param params = {
.function_num = 0x1,
.hotkey_number = 0x01,
- .devices = ACER_WMID3_GDS_WIRELESS &
- ACER_WMID3_GDS_THREEG &
- ACER_WMID3_GDS_WIMAX &
+ .devices = ACER_WMID3_GDS_WIRELESS |
+ ACER_WMID3_GDS_THREEG |
+ ACER_WMID3_GDS_WIMAX |
ACER_WMID3_GDS_BLUETOOTH,
};
struct acpi_buffer input = {
@@ -1445,6 +1445,8 @@ static void acer_wmi_notify(u32 value, void *context)
union acpi_object *obj;
struct event_return_value return_value;
acpi_status status;
+ u16 device_state;
+ const struct key_entry *key;
status = wmi_get_event_data(value, &response);
if (status != AE_OK) {
@@ -1472,23 +1474,32 @@ static void acer_wmi_notify(u32 value, void *context)
switch (return_value.function) {
case WMID_HOTKEY_EVENT:
- if (return_value.device_state) {
- u16 device_state = return_value.device_state;
- pr_debug("device state: 0x%x\n", device_state);
- if (has_cap(ACER_CAP_WIRELESS))
- rfkill_set_sw_state(wireless_rfkill,
- !(device_state & ACER_WMID3_GDS_WIRELESS));
- if (has_cap(ACER_CAP_BLUETOOTH))
- rfkill_set_sw_state(bluetooth_rfkill,
- !(device_state & ACER_WMID3_GDS_BLUETOOTH));
- if (has_cap(ACER_CAP_THREEG))
- rfkill_set_sw_state(threeg_rfkill,
- !(device_state & ACER_WMID3_GDS_THREEG));
- }
- if (!sparse_keymap_report_event(acer_wmi_input_dev,
- return_value.key_num, 1, true))
+ device_state = return_value.device_state;
+ pr_debug("device state: 0x%x\n", device_state);
+
+ key = sparse_keymap_entry_from_scancode(acer_wmi_input_dev,
+ return_value.key_num);
+ if (!key) {
pr_warn("Unknown key number - 0x%x\n",
return_value.key_num);
+ } else {
+ switch (key->keycode) {
+ case KEY_WLAN:
+ case KEY_BLUETOOTH:
+ if (has_cap(ACER_CAP_WIRELESS))
+ rfkill_set_sw_state(wireless_rfkill,
+ !(device_state & ACER_WMID3_GDS_WIRELESS));
+ if (has_cap(ACER_CAP_THREEG))
+ rfkill_set_sw_state(threeg_rfkill,
+ !(device_state & ACER_WMID3_GDS_THREEG));
+ if (has_cap(ACER_CAP_BLUETOOTH))
+ rfkill_set_sw_state(bluetooth_rfkill,
+ !(device_state & ACER_WMID3_GDS_BLUETOOTH));
+ break;
+ }
+ sparse_keymap_report_entry(acer_wmi_input_dev, key,
+ 1, true);
+ }
break;
default:
pr_warn("Unknown function number - %d - %d\n",
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index 00460cb9587..3c7857c71a2 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -1025,6 +1025,7 @@ static int asus_wmi_backlight_init(struct asus_wmi *asus)
return power;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = max;
bd = backlight_device_register(asus->driver->name,
&asus->platform_device->dev, asus,
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 3f204fde1b0..8877b836d27 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -1030,8 +1030,10 @@ static int __devinit compal_probe(struct platform_device *pdev)
initialize_fan_control_data(data);
err = sysfs_create_group(&pdev->dev.kobj, &compal_attribute_group);
- if (err)
+ if (err) {
+ kfree(data);
return err;
+ }
data->hwmon_dev = hwmon_device_register(&pdev->dev);
if (IS_ERR(data->hwmon_dev)) {
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index d3841de6a8c..e39ab1d3ed8 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -292,12 +292,9 @@ static int dell_rfkill_set(void *data, bool blocked)
dell_send_request(buffer, 17, 11);
/* If the hardware switch controls this radio, and the hardware
- switch is disabled, don't allow changing the software state.
- If the hardware switch is reported as not supported, always
- fire the SMI to toggle the killswitch. */
+ switch is disabled, don't allow changing the software state */
if ((hwswitch_state & BIT(hwswitch_bit)) &&
- !(buffer->output[1] & BIT(16)) &&
- (buffer->output[1] & BIT(0))) {
+ !(buffer->output[1] & BIT(16))) {
ret = -EINVAL;
goto out;
}
@@ -403,23 +400,6 @@ static const struct file_operations dell_debugfs_fops = {
static void dell_update_rfkill(struct work_struct *ignored)
{
- int status;
-
- get_buffer();
- dell_send_request(buffer, 17, 11);
- status = buffer->output[1];
- release_buffer();
-
- /* if hardware rfkill is not supported, set it explicitly */
- if (!(status & BIT(0))) {
- if (wifi_rfkill)
- dell_rfkill_set((void *)1, !((status & BIT(17)) >> 17));
- if (bluetooth_rfkill)
- dell_rfkill_set((void *)2, !((status & BIT(18)) >> 18));
- if (wwan_rfkill)
- dell_rfkill_set((void *)3, !((status & BIT(19)) >> 19));
- }
-
if (wifi_rfkill)
dell_rfkill_query(wifi_rfkill, (void *)1);
if (bluetooth_rfkill)
@@ -560,11 +540,11 @@ static int dell_get_intensity(struct backlight_device *bd)
else
dell_send_request(buffer, 0, 1);
+ ret = buffer->output[1];
+
out:
release_buffer();
- if (ret)
- return ret;
- return buffer->output[1];
+ return ret;
}
static const struct backlight_ops dell_ops = {
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index f94017bcdd6..e2faa3cbb79 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -207,6 +207,7 @@ static int hp_wmi_perform_query(int query, int write, void *buffer,
};
struct acpi_buffer input = { sizeof(struct bios_args), &args };
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ u32 rc;
if (WARN_ON(insize > sizeof(args.data)))
return -EINVAL;
@@ -224,13 +225,13 @@ static int hp_wmi_perform_query(int query, int write, void *buffer,
}
bios_return = (struct bios_return *)obj->buffer.pointer;
+ rc = bios_return->return_code;
- if (bios_return->return_code) {
- if (bios_return->return_code != HPWMI_RET_UNKNOWN_CMDTYPE)
- pr_warn("query 0x%x returned error 0x%x\n",
- query, bios_return->return_code);
+ if (rc) {
+ if (rc != HPWMI_RET_UNKNOWN_CMDTYPE)
+ pr_warn("query 0x%x returned error 0x%x\n", query, rc);
kfree(obj);
- return bios_return->return_code;
+ return rc;
}
if (!outsize) {
diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
index e936364a609..7f88c7923fc 100644
--- a/drivers/platform/x86/intel_oaktrail.c
+++ b/drivers/platform/x86/intel_oaktrail.c
@@ -250,6 +250,7 @@ static int oaktrail_backlight_init(void)
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_PLATFORM;
props.max_brightness = OT_EC_BL_BRIGHTNESS_MAX;
bd = backlight_device_register(DRIVER_NAME,
&oaktrail_device->dev, NULL,
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 77f6e707a2a..26c5b117df2 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -184,6 +184,10 @@ enum tpacpi_hkey_event_t {
/* Misc bay events */
TP_HKEY_EV_OPTDRV_EJ = 0x3006, /* opt. drive tray ejected */
+ TP_HKEY_EV_HOTPLUG_DOCK = 0x4010, /* docked into hotplug dock
+ or port replicator */
+ TP_HKEY_EV_HOTPLUG_UNDOCK = 0x4011, /* undocked from hotplug
+ dock or port replicator */
/* User-interface events */
TP_HKEY_EV_LID_CLOSE = 0x5001, /* laptop lid closed */
@@ -194,6 +198,10 @@ enum tpacpi_hkey_event_t {
TP_HKEY_EV_PEN_REMOVED = 0x500c, /* tablet pen removed */
TP_HKEY_EV_BRGHT_CHANGED = 0x5010, /* backlight control event */
+ /* Key-related user-interface events */
+ TP_HKEY_EV_KEY_NUMLOCK = 0x6000, /* NumLock key pressed */
+ TP_HKEY_EV_KEY_FN = 0x6005, /* Fn key pressed? E420 */
+
/* Thermal events */
TP_HKEY_EV_ALARM_BAT_HOT = 0x6011, /* battery too hot */
TP_HKEY_EV_ALARM_BAT_XHOT = 0x6012, /* battery critically hot */
@@ -201,6 +209,10 @@ enum tpacpi_hkey_event_t {
TP_HKEY_EV_ALARM_SENSOR_XHOT = 0x6022, /* sensor critically hot */
TP_HKEY_EV_THM_TABLE_CHANGED = 0x6030, /* thermal table changed */
+ TP_HKEY_EV_UNK_6040 = 0x6040, /* Related to AC change?
+ some sort of APM hint,
+ W520 */
+
/* Misc */
TP_HKEY_EV_RFKILL_CHANGED = 0x7000, /* rfkill switch changed */
};
@@ -3513,6 +3525,34 @@ static bool hotkey_notify_wakeup(const u32 hkey,
return true;
}
+static bool hotkey_notify_dockevent(const u32 hkey,
+ bool *send_acpi_ev,
+ bool *ignore_acpi_ev)
+{
+ /* 0x4000-0x4FFF: dock-related events */
+ *send_acpi_ev = true;
+ *ignore_acpi_ev = false;
+
+ switch (hkey) {
+ case TP_HKEY_EV_UNDOCK_ACK:
+ /* ACPI undock operation completed after wakeup */
+ hotkey_autosleep_ack = 1;
+ pr_info("undocked\n");
+ hotkey_wakeup_hotunplug_complete_notify_change();
+ return true;
+
+ case TP_HKEY_EV_HOTPLUG_DOCK: /* docked to port replicator */
+ pr_info("docked into hotplug port replicator\n");
+ return true;
+ case TP_HKEY_EV_HOTPLUG_UNDOCK: /* undocked from port replicator */
+ pr_info("undocked from hotplug port replicator\n");
+ return true;
+
+ default:
+ return false;
+ }
+}
+
static bool hotkey_notify_usrevent(const u32 hkey,
bool *send_acpi_ev,
bool *ignore_acpi_ev)
@@ -3547,13 +3587,13 @@ static bool hotkey_notify_usrevent(const u32 hkey,
static void thermal_dump_all_sensors(void);
-static bool hotkey_notify_thermal(const u32 hkey,
+static bool hotkey_notify_6xxx(const u32 hkey,
bool *send_acpi_ev,
bool *ignore_acpi_ev)
{
bool known = true;
- /* 0x6000-0x6FFF: thermal alarms */
+ /* 0x6000-0x6FFF: thermal alarms/notices and keyboard events */
*send_acpi_ev = true;
*ignore_acpi_ev = false;
@@ -3582,8 +3622,17 @@ static bool hotkey_notify_thermal(const u32 hkey,
"a sensor reports something is extremely hot!\n");
/* recommended action: immediate sleep/hibernate */
break;
+
+ case TP_HKEY_EV_KEY_NUMLOCK:
+ case TP_HKEY_EV_KEY_FN:
+ /* key press events, we just ignore them as long as the EC
+ * is still reporting them in the normal keyboard stream */
+ *send_acpi_ev = false;
+ *ignore_acpi_ev = true;
+ return true;
+
default:
- pr_alert("THERMAL ALERT: unknown thermal alarm received\n");
+ pr_warn("unknown possible thermal alarm or keyboard event received\n");
known = false;
}
@@ -3652,15 +3701,9 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
}
break;
case 4:
- /* 0x4000-0x4FFF: dock-related wakeups */
- if (hkey == TP_HKEY_EV_UNDOCK_ACK) {
- hotkey_autosleep_ack = 1;
- pr_info("undocked\n");
- hotkey_wakeup_hotunplug_complete_notify_change();
- known_ev = true;
- } else {
- known_ev = false;
- }
+ /* 0x4000-0x4FFF: dock-related events */
+ known_ev = hotkey_notify_dockevent(hkey, &send_acpi_ev,
+ &ignore_acpi_ev);
break;
case 5:
/* 0x5000-0x5FFF: human interface helpers */
@@ -3668,8 +3711,9 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
&ignore_acpi_ev);
break;
case 6:
- /* 0x6000-0x6FFF: thermal alarms */
- known_ev = hotkey_notify_thermal(hkey, &send_acpi_ev,
+ /* 0x6000-0x6FFF: thermal alarms/notices and
+ * keyboard events */
+ known_ev = hotkey_notify_6xxx(hkey, &send_acpi_ev,
&ignore_acpi_ev);
break;
case 7:
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index e5f7b8fe51f..2bb8f451cc0 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -266,7 +266,7 @@ static struct regulator_ops db8500_regulator_switch_ops = {
* Regulator information
*/
static struct db8500_regulator_info
- db8500_regulator_info[DB8500_NUM_REGULATORS] = {
+db8500_regulator_info[DB8500_NUM_REGULATORS] = {
[DB8500_REGULATOR_VAPE] = {
.desc = {
.name = "db8500-vape",
@@ -492,11 +492,9 @@ static int __devinit db8500_regulator_probe(struct platform_device *pdev)
info->desc.name, err);
/* if failing, unregister all earlier regulators */
- i--;
- while (i >= 0) {
+ while (--i >= 0) {
info = &db8500_regulator_info[i];
regulator_unregister(info->rdev);
- i--;
}
return err;
}
@@ -536,13 +534,7 @@ static struct platform_driver db8500_regulator_driver = {
static int __init db8500_regulator_init(void)
{
- int ret;
-
- ret = platform_driver_register(&db8500_regulator_driver);
- if (ret < 0)
- return -ENODEV;
-
- return 0;
+ return platform_driver_register(&db8500_regulator_driver);
}
static void __exit db8500_regulator_exit(void)
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index daff7fd0e95..486ed8141fc 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -139,7 +139,7 @@ static int max8952_set_voltage(struct regulator_dev *rdev,
s8 vid = -1, i;
if (!gpio_is_valid(max8952->pdata->gpio_vid0) ||
- !gpio_is_valid(max8952->pdata->gpio_vid0)) {
+ !gpio_is_valid(max8952->pdata->gpio_vid1)) {
/* DVS not supported */
return -EPERM;
}
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 10d5a1d9768..ad6628ca94f 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -39,25 +39,28 @@ struct max8997_data {
struct regulator_dev **rdev;
int ramp_delay; /* in mV/us */
+ bool buck1_gpiodvs;
+ bool buck2_gpiodvs;
+ bool buck5_gpiodvs;
u8 buck1_vol[8];
u8 buck2_vol[8];
u8 buck5_vol[8];
+ int buck125_gpios[3];
int buck125_gpioindex;
+ bool ignore_gpiodvs_side_effect;
u8 saved_states[MAX8997_REG_MAX];
};
static inline void max8997_set_gpio(struct max8997_data *max8997)
{
- struct max8997_platform_data *pdata =
- dev_get_platdata(max8997->iodev->dev);
int set3 = (max8997->buck125_gpioindex) & 0x1;
int set2 = ((max8997->buck125_gpioindex) >> 1) & 0x1;
int set1 = ((max8997->buck125_gpioindex) >> 2) & 0x1;
- gpio_set_value(pdata->buck125_gpios[0], set1);
- gpio_set_value(pdata->buck125_gpios[1], set2);
- gpio_set_value(pdata->buck125_gpios[2], set3);
+ gpio_set_value(max8997->buck125_gpios[0], set1);
+ gpio_set_value(max8997->buck125_gpios[1], set2);
+ gpio_set_value(max8997->buck125_gpios[2], set3);
}
struct voltage_map_desc {
@@ -380,8 +383,6 @@ static int max8997_get_voltage_register(struct regulator_dev *rdev,
static int max8997_get_voltage(struct regulator_dev *rdev)
{
struct max8997_data *max8997 = rdev_get_drvdata(rdev);
- struct max8997_platform_data *pdata =
- dev_get_platdata(max8997->iodev->dev);
struct i2c_client *i2c = max8997->iodev->i2c;
int reg, shift, mask, ret;
int rid = max8997_get_rid(rdev);
@@ -391,9 +392,9 @@ static int max8997_get_voltage(struct regulator_dev *rdev)
if (ret)
return ret;
- if ((rid == MAX8997_BUCK1 && pdata->buck1_gpiodvs) ||
- (rid == MAX8997_BUCK2 && pdata->buck2_gpiodvs) ||
- (rid == MAX8997_BUCK5 && pdata->buck5_gpiodvs))
+ if ((rid == MAX8997_BUCK1 && max8997->buck1_gpiodvs) ||
+ (rid == MAX8997_BUCK2 && max8997->buck2_gpiodvs) ||
+ (rid == MAX8997_BUCK5 && max8997->buck5_gpiodvs))
reg += max8997->buck125_gpioindex;
ret = max8997_read_reg(i2c, reg, &val);
@@ -543,7 +544,8 @@ static int max8997_set_voltage_ldobuck(struct regulator_dev *rdev,
rid == MAX8997_BUCK4 || rid == MAX8997_BUCK5) {
/* If the voltage is increasing */
if (org < i)
- udelay(desc->step * (i - org) / max8997->ramp_delay);
+ udelay(DIV_ROUND_UP(desc->step * (i - org),
+ max8997->ramp_delay));
}
return ret;
@@ -561,8 +563,6 @@ static int max8997_assess_side_effect(struct regulator_dev *rdev,
u8 new_val, int *best)
{
struct max8997_data *max8997 = rdev_get_drvdata(rdev);
- struct max8997_platform_data *pdata =
- dev_get_platdata(max8997->iodev->dev);
int rid = max8997_get_rid(rdev);
u8 *buckx_val[3];
bool buckx_gpiodvs[3];
@@ -589,9 +589,9 @@ static int max8997_assess_side_effect(struct regulator_dev *rdev,
buckx_val[0] = max8997->buck1_vol;
buckx_val[1] = max8997->buck2_vol;
buckx_val[2] = max8997->buck5_vol;
- buckx_gpiodvs[0] = pdata->buck1_gpiodvs;
- buckx_gpiodvs[1] = pdata->buck2_gpiodvs;
- buckx_gpiodvs[2] = pdata->buck5_gpiodvs;
+ buckx_gpiodvs[0] = max8997->buck1_gpiodvs;
+ buckx_gpiodvs[1] = max8997->buck2_gpiodvs;
+ buckx_gpiodvs[2] = max8997->buck5_gpiodvs;
for (i = 0; i < 8; i++) {
int others;
@@ -640,8 +640,6 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
int min_uV, int max_uV, unsigned *selector)
{
struct max8997_data *max8997 = rdev_get_drvdata(rdev);
- struct max8997_platform_data *pdata =
- dev_get_platdata(max8997->iodev->dev);
int rid = max8997_get_rid(rdev);
const struct voltage_map_desc *desc;
int new_val, new_idx, damage, tmp_val, tmp_idx, tmp_dmg;
@@ -653,15 +651,15 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
switch (rid) {
case MAX8997_BUCK1:
- if (pdata->buck1_gpiodvs)
+ if (max8997->buck1_gpiodvs)
gpio_dvs_mode = true;
break;
case MAX8997_BUCK2:
- if (pdata->buck2_gpiodvs)
+ if (max8997->buck2_gpiodvs)
gpio_dvs_mode = true;
break;
case MAX8997_BUCK5:
- if (pdata->buck5_gpiodvs)
+ if (max8997->buck5_gpiodvs)
gpio_dvs_mode = true;
break;
}
@@ -695,7 +693,7 @@ static int max8997_set_voltage_buck(struct regulator_dev *rdev,
new_idx = tmp_idx;
new_val = tmp_val;
- if (pdata->ignore_gpiodvs_side_effect == false)
+ if (max8997->ignore_gpiodvs_side_effect == false)
return -EINVAL;
dev_warn(&rdev->dev, "MAX8997 GPIO-DVS Side Effect Warning: GPIO SET:"
@@ -993,6 +991,11 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
i2c = max8997->iodev->i2c;
max8997->buck125_gpioindex = pdata->buck125_default_idx;
+ max8997->buck1_gpiodvs = pdata->buck1_gpiodvs;
+ max8997->buck2_gpiodvs = pdata->buck2_gpiodvs;
+ max8997->buck5_gpiodvs = pdata->buck5_gpiodvs;
+ memcpy(max8997->buck125_gpios, pdata->buck125_gpios, sizeof(int) * 3);
+ max8997->ignore_gpiodvs_side_effect = pdata->ignore_gpiodvs_side_effect;
for (i = 0; i < 8; i++) {
max8997->buck1_vol[i] = ret =
@@ -1124,6 +1127,10 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
0x3f);
}
+ /* Misc Settings */
+ max8997->ramp_delay = 10; /* set 10mV/us, which is the default */
+ max8997_write_reg(i2c, MAX8997_REG_BUCKRAMP, (0xf << 4) | 0x9);
+
for (i = 0; i < pdata->num_regulators; i++) {
const struct voltage_map_desc *desc;
int id = pdata->regulators[i].id;
@@ -1148,10 +1155,6 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
}
}
- /* Misc Settings */
- max8997->ramp_delay = 10; /* set 10mV/us, which is the default */
- max8997_write_reg(i2c, MAX8997_REG_BUCKRAMP, (0xf << 4) | 0x9);
-
return 0;
err:
for (i = 0; i < max8997->num_regulators; i++)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 4a1f029c4fe..8d9dae89f06 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -830,6 +830,19 @@ config SCSI_GDTH
To compile this driver as a module, choose M here: the
module will be called gdth.
+config SCSI_ISCI
+ tristate "Intel(R) C600 Series Chipset SAS Controller"
+ depends on PCI && SCSI
+ depends on X86
+ # (temporary): known alpha quality driver
+ depends on EXPERIMENTAL
+ select SCSI_SAS_LIBSAS
+ ---help---
+ This driver supports the 6Gb/s SAS capabilities of the storage
+ control unit found in the Intel(R) C600 series chipset.
+
+	  The experimental tag will be removed after the driver exits alpha.
+
config SCSI_GENERIC_NCR5380
tristate "Generic NCR5380/53c400 SCSI PIO support"
depends on ISA && SCSI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 7ad0b8a79ae..3c08f5352b2 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_SCSI_AACRAID) += aacraid/
obj-$(CONFIG_SCSI_AIC7XXX_OLD) += aic7xxx_old.o
obj-$(CONFIG_SCSI_AIC94XX) += aic94xx/
obj-$(CONFIG_SCSI_PM8001) += pm8001/
+obj-$(CONFIG_SCSI_ISCI) += isci/
obj-$(CONFIG_SCSI_IPS) += ips.o
obj-$(CONFIG_SCSI_FD_MCS) += fd_mcs.o
obj-$(CONFIG_SCSI_FUTURE_DOMAIN)+= fdomain.o
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c6c0434d803..6bba23a2630 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1037,6 +1037,7 @@ static void complete_scsi_command(struct CommandList *cp)
unsigned char sense_key;
unsigned char asc; /* additional sense code */
unsigned char ascq; /* additional sense code qualifier */
+ unsigned long sense_data_size;
ei = cp->err_info;
cmd = (struct scsi_cmnd *) cp->scsi_cmd;
@@ -1051,10 +1052,14 @@ static void complete_scsi_command(struct CommandList *cp)
cmd->result |= ei->ScsiStatus;
/* copy the sense data whether we need to or not. */
- memcpy(cmd->sense_buffer, ei->SenseInfo,
- ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
- SCSI_SENSE_BUFFERSIZE :
- ei->SenseLen);
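+	/* clamp the copy length to the smallest of the midlayer sense
+	 * buffer, the adapter's sense area, and the reported sense length */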
+ if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
+ sense_data_size = SCSI_SENSE_BUFFERSIZE;
+ else
+ sense_data_size = sizeof(ei->SenseInfo);
+ if (ei->SenseLen < sense_data_size)
+ sense_data_size = ei->SenseLen;
+
+ memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
scsi_set_resid(cmd, ei->ResidualCnt);
if (ei->CommandStatus == 0) {
@@ -2580,7 +2585,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
c->SG[0].Ext = 0; /* we are not chaining*/
}
hpsa_scsi_do_simple_cmd_core(h, c);
- hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
+ if (iocommand.buf_size > 0)
+ hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
check_ioctl_unit_attention(h, c);
/* Copy the error information out */
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index b7650613b8c..bdfa223a7db 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4306,8 +4306,8 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
spin_lock_irqsave(vhost->host->host_lock, flags);
if (rc == H_CLOSED)
vio_enable_interrupts(to_vio_dev(vhost->dev));
- else if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
- (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
+ if (rc || (rc = ibmvfc_send_crq_init(vhost)) ||
+ (rc = vio_enable_interrupts(to_vio_dev(vhost->dev)))) {
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
dev_err(vhost->dev, "Error after reset (rc=%d)\n", rc);
}
diff --git a/drivers/scsi/isci/Makefile b/drivers/scsi/isci/Makefile
new file mode 100644
index 00000000000..3359e10e0d8
--- /dev/null
+++ b/drivers/scsi/isci/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_SCSI_ISCI) += isci.o
+isci-objs := init.o phy.o request.o \
+ remote_device.o port.o \
+ host.o task.o probe_roms.o \
+ remote_node_context.o \
+ remote_node_table.o \
+ unsolicited_frame_control.o \
+	     port_config.o
diff --git a/drivers/scsi/isci/firmware/Makefile b/drivers/scsi/isci/firmware/Makefile
new file mode 100644
index 00000000000..5f54461cabc
--- /dev/null
+++ b/drivers/scsi/isci/firmware/Makefile
@@ -0,0 +1,19 @@
+# Makefile for create_fw
+#
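+# Run "make" here to build create_fw, then run ./create_fw to produce
+# isci_firmware.bin (see the README in this directory for installation).
+#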
+CC=gcc
+CFLAGS=-c -Wall -O2 -g
+LDFLAGS=
+SOURCES=create_fw.c
+OBJECTS=$(SOURCES:.c=.o)
+EXECUTABLE=create_fw
+
+all: $(SOURCES) $(EXECUTABLE)
+
+$(EXECUTABLE): $(OBJECTS)
+ $(CC) $(LDFLAGS) $(OBJECTS) -o $@
+
+.c.o:
+	$(CC) $(CFLAGS) $< -o $@
+
+clean:
+ rm -f *.o $(EXECUTABLE)
diff --git a/drivers/scsi/isci/firmware/README b/drivers/scsi/isci/firmware/README
new file mode 100644
index 00000000000..8056d2bd233
--- /dev/null
+++ b/drivers/scsi/isci/firmware/README
@@ -0,0 +1,36 @@
+This defines the temporary binary blob we are to pass to the SCU
+driver to emulate the binary firmware that we will eventually be
+able to access via NVRAM on the SCU controller.
+
+The current size of the binary blob is expected to be 149 bytes or larger.
+
+Header Types:
+0x1: Phy Masks
+0x2: Phy Gens
+0x3: SAS Addrs
+0xff: End of Data
+
+ID string - u8[12]: "#SCU MAGIC#\0"
+Version - u8: 1
+SubVersion - u8: 0
+
+Header Type - u8: 0x1
+Size - u8: 8
+Phy Mask - u32[8]
+
+Header Type - u8: 0x2
+Size - u8: 8
+Phy Gen - u32[8]
+
+Header Type - u8: 0x3
+Size - u8: 8
+Sas Addr - u64[8]
+
+Header Type - u8: 0xff
+
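+As a rough sketch (the struct names here are illustrative only, not the
+driver's actual types), the layout amounts to:
+
+	struct blob_hdr {
+		__u8 id[12];		/* "#SCU MAGIC#\0" */
+		__u8 version;		/* 1 */
+		__u8 sub_version;	/* 0 */
+	} __attribute__((packed));
+
+	struct blob_entry {
+		__u8 type;	/* 0x1 phy masks, 0x2 phy gens, 0x3 SAS addrs */
+		__u8 size;	/* element count; 8 for all of the above */
+		/* followed by size u32s (types 0x1, 0x2) or u64s (type 0x3) */
+	} __attribute__((packed));
+
+A type byte of 0xff terminates the list. Summing the fields above
+(14 + 34 + 34 + 66 + 1) accounts for the 149 byte minimum size.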
+
+==============================================================================
+
+Place isci_firmware.bin in /lib/firmware
+Be sure to recreate the initramfs image to include the firmware.
+
diff --git a/drivers/scsi/isci/firmware/create_fw.c b/drivers/scsi/isci/firmware/create_fw.c
new file mode 100644
index 00000000000..c7a2887a7e9
--- /dev/null
+++ b/drivers/scsi/isci/firmware/create_fw.c
@@ -0,0 +1,99 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <string.h>
+#include <errno.h>
+#include <asm/types.h>
+#include <strings.h>
+#include <stdint.h>
+
+#include "create_fw.h"
+#include "../probe_roms.h"
+
+int write_blob(struct isci_orom *isci_orom)
+{
+ FILE *fd;
+ size_t count;
+
+ fd = fopen(blob_name, "w+");
+ if (!fd) {
+ perror("Open file for write failed");
+ return -EIO;
+ }
+
+ count = fwrite(isci_orom, sizeof(struct isci_orom), 1, fd);
+ if (count != 1) {
+ perror("Write data failed");
+ fclose(fd);
+ return -EIO;
+ }
+
+ fclose(fd);
+
+ return 0;
+}
+
+void set_binary_values(struct isci_orom *isci_orom)
+{
+ int ctrl_idx, phy_idx, port_idx;
+
+ /* setting OROM signature */
+ strncpy(isci_orom->hdr.signature, sig, strlen(sig));
+ isci_orom->hdr.version = version;
+ isci_orom->hdr.total_block_length = sizeof(struct isci_orom);
+ isci_orom->hdr.hdr_length = sizeof(struct sci_bios_oem_param_block_hdr);
+ isci_orom->hdr.num_elements = num_elements;
+
+ for (ctrl_idx = 0; ctrl_idx < 2; ctrl_idx++) {
+ isci_orom->ctrl[ctrl_idx].controller.mode_type = mode_type;
+ isci_orom->ctrl[ctrl_idx].controller.max_concurrent_dev_spin_up =
+ max_num_concurrent_dev_spin_up;
+ isci_orom->ctrl[ctrl_idx].controller.do_enable_ssc =
+ enable_ssc;
+
+ for (port_idx = 0; port_idx < 4; port_idx++)
+ isci_orom->ctrl[ctrl_idx].ports[port_idx].phy_mask =
+ phy_mask[ctrl_idx][port_idx];
+
+ for (phy_idx = 0; phy_idx < 4; phy_idx++) {
+ isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.high =
+ (__u32)(sas_addr[ctrl_idx][phy_idx] >> 32);
+ isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.low =
+ (__u32)(sas_addr[ctrl_idx][phy_idx]);
+
+ isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control0 =
+ afe_tx_amp_control0;
+ isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control1 =
+ afe_tx_amp_control1;
+ isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control2 =
+ afe_tx_amp_control2;
+ isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control3 =
+ afe_tx_amp_control3;
+ }
+ }
+}
+
+int main(void)
+{
+ int err;
+ struct isci_orom *isci_orom;
+
+	isci_orom = malloc(sizeof(struct isci_orom));
+	if (!isci_orom)
+		return -ENOMEM;
+	memset(isci_orom, 0, sizeof(struct isci_orom));
+
+ set_binary_values(isci_orom);
+
+ err = write_blob(isci_orom);
+ if (err < 0) {
+ free(isci_orom);
+ return err;
+ }
+
+ free(isci_orom);
+ return 0;
+}
diff --git a/drivers/scsi/isci/firmware/create_fw.h b/drivers/scsi/isci/firmware/create_fw.h
new file mode 100644
index 00000000000..5f298828d22
--- /dev/null
+++ b/drivers/scsi/isci/firmware/create_fw.h
@@ -0,0 +1,77 @@
+#ifndef _CREATE_FW_H_
+#define _CREATE_FW_H_
+#include "../probe_roms.h"
+
+
+/* we are configuring for 2 SCUs */
+static const int num_elements = 2;
+
+/*
+ * For all defined arrays:
+ * elements 0-3 are for SCU0, ports 0-3
+ * elements 4-7 are for SCU1, ports 0-3
+ *
+ * valid configurations for one SCU are:
+ * P0 P1 P2 P3
+ * ----------------
+ * 0xF,0x0,0x0,0x0 # 1 x4 port
+ * 0x3,0x0,0x4,0x8 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are each x1
+ * # ports
+ * 0x1,0x2,0xC,0x0 # Phys 0 and 1 are each x1 ports, phy 2 and phy 3 are a x2
+ * # port
+ * 0x3,0x0,0xC,0x0 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are a x2 port
+ * 0x1,0x2,0x4,0x8 # Each phy is a x1 port (this is the default configuration)
+ *
+ * if there is a port/phy on which you do not wish to override the default
+ * values, use the value assigned to UNINIT_PARAM (255).
+ */
+
+/* discovery mode type (port auto config mode by default) */
+
+/*
+ * if there is a port/phy on which you do not wish to override the default
+ * values, use the value "0000000000000000". A SAS address of all zeros is
+ * considered invalid and will not be used.
+ */
+#ifdef MPC
+static const int mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
+static const __u8 phy_mask[2][4] = { {1, 2, 4, 8},
+ {1, 2, 4, 8} };
+static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFFF0000001ULL,
+ 0x5FCFFFFFF0000002ULL,
+ 0x5FCFFFFFF0000003ULL,
+ 0x5FCFFFFFF0000004ULL },
+ { 0x5FCFFFFFF0000005ULL,
+ 0x5FCFFFFFF0000006ULL,
+ 0x5FCFFFFFF0000007ULL,
+ 0x5FCFFFFFF0000008ULL } };
+#else /* APC (default) */
+static const int mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
+static const __u8 phy_mask[2][4];
+static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFF00000001ULL,
+ 0x5FCFFFFF00000001ULL,
+ 0x5FCFFFFF00000001ULL,
+ 0x5FCFFFFF00000001ULL },
+ { 0x5FCFFFFF00000002ULL,
+ 0x5FCFFFFF00000002ULL,
+ 0x5FCFFFFF00000002ULL,
+ 0x5FCFFFFF00000002ULL } };
+#endif
+
+/* Maximum number of concurrent device spin up */
+static const int max_num_concurrent_dev_spin_up = 1;
+
+/* enable SSC operation */
+static const int enable_ssc;
+
+/* AFE_TX_AMP_CONTROL */
+static const unsigned int afe_tx_amp_control0 = 0x000bdd08;
+static const unsigned int afe_tx_amp_control1 = 0x000ffc00;
+static const unsigned int afe_tx_amp_control2 = 0x000b7c09;
+static const unsigned int afe_tx_amp_control3 = 0x000afc6e;
+
+static const char blob_name[] = "isci_firmware.bin";
+static const char sig[] = "ISCUOEMB";
+static const unsigned char version = 0x10;
+
+#endif
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
new file mode 100644
index 00000000000..26072f1e985
--- /dev/null
+++ b/drivers/scsi/isci/host.c
@@ -0,0 +1,2751 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/circ_buf.h>
+#include <linux/device.h>
+#include <scsi/sas.h>
+#include "host.h"
+#include "isci.h"
+#include "port.h"
+#include "probe_roms.h"
+#include "remote_device.h"
+#include "request.h"
+#include "scu_completion_codes.h"
+#include "scu_event_codes.h"
+#include "registers.h"
+#include "scu_remote_node_context.h"
+#include "scu_task_context.h"
+
+#define SCU_CONTEXT_RAM_INIT_STALL_TIME 200
+
+#define smu_max_ports(dcc_value) \
+ (\
+ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
+ )
+
+#define smu_max_task_contexts(dcc_value) \
+ (\
+ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
+ )
+
+#define smu_max_rncs(dcc_value) \
+ (\
+ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
+ )
+
+#define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100
+
+/*
+ * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL: the number of milliseconds to
+ * wait while a given phy is consuming power before allowing another set of
+ * phys to consume power. Ultimately, this will be specified by OEM parameter.
+ */
+#define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
+
+/**
+ * NORMALIZE_PUT_POINTER() -
+ *
+ * This macro will normalize the completion queue put pointer so its value can
+ * be used as an array index.
+ */
+#define NORMALIZE_PUT_POINTER(x) \
+ ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
+
+
+/**
+ * NORMALIZE_EVENT_POINTER() -
+ *
+ * This macro will normalize the completion queue event entry so its value can
+ * be used as an index.
+ */
+#define NORMALIZE_EVENT_POINTER(x) \
+ (\
+ ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
+ >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
+ )
+
+/**
+ * NORMALIZE_GET_POINTER() -
+ *
+ * This macro will normalize the completion queue get pointer so its value can
+ * be used as an index into an array
+ */
+#define NORMALIZE_GET_POINTER(x) \
+ ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
+
+/**
+ * NORMALIZE_GET_POINTER_CYCLE_BIT() -
+ *
+ * This macro will normalize the completion queue cycle pointer so it matches
+ * the completion queue cycle bit
+ */
+#define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
+ ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
+
+/**
+ * COMPLETION_QUEUE_CYCLE_BIT() -
+ *
+ * This macro will return the cycle bit of the completion queue entry
+ */
+#define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
+
+/* Init the state machine and call the state entry function (if any) */
+void sci_init_sm(struct sci_base_state_machine *sm,
+ const struct sci_base_state *state_table, u32 initial_state)
+{
+ sci_state_transition_t handler;
+
+ sm->initial_state_id = initial_state;
+ sm->previous_state_id = initial_state;
+ sm->current_state_id = initial_state;
+ sm->state_table = state_table;
+
+ handler = sm->state_table[initial_state].enter_state;
+ if (handler)
+ handler(sm);
+}
+
+/* Call the state exit fn, update the current state, call the state entry fn */
+void sci_change_state(struct sci_base_state_machine *sm, u32 next_state)
+{
+ sci_state_transition_t handler;
+
+ handler = sm->state_table[sm->current_state_id].exit_state;
+ if (handler)
+ handler(sm);
+
+ sm->previous_state_id = sm->current_state_id;
+ sm->current_state_id = next_state;
+
+ handler = sm->state_table[sm->current_state_id].enter_state;
+ if (handler)
+ handler(sm);
+}
+
+static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost)
+{
+ u32 get_value = ihost->completion_queue_get;
+ u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;
+
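+	/*
+	 * An entry is consumable only while its cycle bit matches the cycle
+	 * bit of the get pointer; at init every entry is seeded with the
+	 * opposite bit (see sci_controller_initialize_completion_queue()), so
+	 * stale entries never compare as valid.
+	 */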
+ if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) ==
+ COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]))
+ return true;
+
+ return false;
+}
+
+static bool sci_controller_isr(struct isci_host *ihost)
+{
+ if (sci_controller_completion_queue_has_entries(ihost)) {
+ return true;
+ } else {
+		/*
+		 * We have a spurious interrupt; it could be that we have
+		 * already emptied the completion queue from a previous
+		 * interrupt. */
+		writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+
+		/*
+		 * There is a race in the hardware that could cause us not to be
+		 * notified of an interrupt completion if we do not take this
+		 * step. We will mask then unmask the interrupts so that if
+		 * there is another interrupt pending after the clearing of the
+		 * interrupt source we get the next interrupt message. */
+ writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
+ writel(0, &ihost->smu_registers->interrupt_mask);
+ }
+
+ return false;
+}
+
+irqreturn_t isci_msix_isr(int vec, void *data)
+{
+ struct isci_host *ihost = data;
+
+ if (sci_controller_isr(ihost))
+ tasklet_schedule(&ihost->completion_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static bool sci_controller_error_isr(struct isci_host *ihost)
+{
+ u32 interrupt_status;
+
+ interrupt_status =
+ readl(&ihost->smu_registers->interrupt_status);
+ interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);
+
+ if (interrupt_status != 0) {
+		/*
+		 * There is an error interrupt pending, so let it through and
+		 * handle it in the callback. */
+ return true;
+ }
+
+ /*
+ * There is a race in the hardware that could cause us not to be notified
+ * of an interrupt completion if we do not take this step. We will mask
+ * then unmask the error interrupts so if there was another interrupt
+ * pending we will be notified.
+ * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
+ writel(0xff, &ihost->smu_registers->interrupt_mask);
+ writel(0, &ihost->smu_registers->interrupt_mask);
+
+ return false;
+}
+
+static void sci_controller_task_completion(struct isci_host *ihost, u32 ent)
+{
+ u32 index = SCU_GET_COMPLETION_INDEX(ent);
+ struct isci_request *ireq = ihost->reqs[index];
+
+ /* Make sure that we really want to process this IO request */
+ if (test_bit(IREQ_ACTIVE, &ireq->flags) &&
+ ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG &&
+ ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index])
+		/* Yep, this is a valid io request; pass it along to the
+		 * io request handler.
+		 */
+ sci_io_request_tc_completion(ireq, ent);
+}
+
+static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent)
+{
+ u32 index;
+ struct isci_request *ireq;
+ struct isci_remote_device *idev;
+
+ index = SCU_GET_COMPLETION_INDEX(ent);
+
+ switch (scu_get_command_request_type(ent)) {
+ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
+ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
+ ireq = ihost->reqs[index];
+ dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n",
+ __func__, ent, ireq);
+ /* @todo For a post TC operation we need to fail the IO
+ * request
+ */
+ break;
+ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
+ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
+ case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
+ idev = ihost->device_table[index];
+ dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n",
+ __func__, ent, idev);
+ /* @todo For a port RNC operation we need to fail the
+ * device
+ */
+ break;
+ default:
+ dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n",
+ __func__, ent);
+ break;
+ }
+}
+
+static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent)
+{
+ u32 index;
+ u32 frame_index;
+
+ struct scu_unsolicited_frame_header *frame_header;
+ struct isci_phy *iphy;
+ struct isci_remote_device *idev;
+
+ enum sci_status result = SCI_FAILURE;
+
+ frame_index = SCU_GET_FRAME_INDEX(ent);
+
+ frame_header = ihost->uf_control.buffers.array[frame_index].header;
+ ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE;
+
+ if (SCU_GET_FRAME_ERROR(ent)) {
+ /*
+		 * @todo If the IAF frame or SIGNATURE FIS frame has an error,
+		 * will this cause a problem? We expect the phy initialization
+		 * will fail if there is an error in the frame. */
+ sci_controller_release_frame(ihost, frame_index);
+ return;
+ }
+
+ if (frame_header->is_address_frame) {
+ index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+ iphy = &ihost->phys[index];
+ result = sci_phy_frame_handler(iphy, frame_index);
+ } else {
+
+ index = SCU_GET_COMPLETION_INDEX(ent);
+
+ if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+ /*
+ * This is a signature fis or a frame from a direct attached SATA
+			 * device that has not yet been created. In either case forward
+ * the frame to the PE and let it take care of the frame data. */
+ index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+ iphy = &ihost->phys[index];
+ result = sci_phy_frame_handler(iphy, frame_index);
+ } else {
+ if (index < ihost->remote_node_entries)
+ idev = ihost->device_table[index];
+ else
+ idev = NULL;
+
+ if (idev != NULL)
+ result = sci_remote_device_frame_handler(idev, frame_index);
+ else
+ sci_controller_release_frame(ihost, frame_index);
+ }
+ }
+
+ if (result != SCI_SUCCESS) {
+ /*
+		 * @todo Is there any reason to report some additional error
+		 * message when we get this failure notification? */
+ }
+}
+
+static void sci_controller_event_completion(struct isci_host *ihost, u32 ent)
+{
+ struct isci_remote_device *idev;
+ struct isci_request *ireq;
+ struct isci_phy *iphy;
+ u32 index;
+
+ index = SCU_GET_COMPLETION_INDEX(ent);
+
+ switch (scu_get_event_type(ent)) {
+ case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
+		/* @todo The driver did something wrong and we need to fix the condition. */
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC Controller 0x%p received SMU command error "
+ "0x%x\n",
+ __func__,
+ ihost,
+ ent);
+ break;
+
+ case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
+ case SCU_EVENT_TYPE_SMU_ERROR:
+ case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
+ /*
+		 * @todo This is a hardware failure and it's likely that we
+		 * want to reset the controller. */
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC Controller 0x%p received fatal controller "
+ "event 0x%x\n",
+ __func__,
+ ihost,
+ ent);
+ break;
+
+ case SCU_EVENT_TYPE_TRANSPORT_ERROR:
+ ireq = ihost->reqs[index];
+ sci_io_request_event_handler(ireq, ent);
+ break;
+
+ case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
+ switch (scu_get_event_specifier(ent)) {
+ case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
+ case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
+ ireq = ihost->reqs[index];
+ if (ireq != NULL)
+ sci_io_request_event_handler(ireq, ent);
+ else
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC Controller 0x%p received "
+ "event 0x%x for io request object "
+					 "that doesn't exist.\n",
+ __func__,
+ ihost,
+ ent);
+
+ break;
+
+ case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
+ idev = ihost->device_table[index];
+ if (idev != NULL)
+ sci_remote_device_event_handler(idev, ent);
+ else
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC Controller 0x%p received "
+ "event 0x%x for remote device object "
+					 "that doesn't exist.\n",
+ __func__,
+ ihost,
+ ent);
+
+ break;
+ }
+ break;
+
+ case SCU_EVENT_TYPE_BROADCAST_CHANGE:
+ /*
+ * direct the broadcast change event to the phy first and then let
+ * the phy redirect the broadcast change to the port object */
+ case SCU_EVENT_TYPE_ERR_CNT_EVENT:
+ /*
+ * direct error counter event to the phy object since that is where
+ * we get the event notification. This is a type 4 event. */
+ case SCU_EVENT_TYPE_OSSP_EVENT:
+ index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent);
+ iphy = &ihost->phys[index];
+ sci_phy_event_handler(iphy, ent);
+ break;
+
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+ case SCU_EVENT_TYPE_RNC_OPS_MISC:
+ if (index < ihost->remote_node_entries) {
+ idev = ihost->device_table[index];
+
+ if (idev != NULL)
+ sci_remote_device_event_handler(idev, ent);
+ } else
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC Controller 0x%p received event 0x%x "
+				"for remote device object 0x%x that doesn't "
+ "exist.\n",
+ __func__,
+ ihost,
+ ent,
+ index);
+
+ break;
+
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC Controller received unknown event code %x\n",
+ __func__,
+ ent);
+ break;
+ }
+}
+
+static void sci_controller_process_completions(struct isci_host *ihost)
+{
+ u32 completion_count = 0;
+ u32 ent;
+ u32 get_index;
+ u32 get_cycle;
+ u32 event_get;
+ u32 event_cycle;
+
+ dev_dbg(&ihost->pdev->dev,
+		"%s: completion queue beginning get:0x%08x\n",
+ __func__,
+ ihost->completion_queue_get);
+
+ /* Get the component parts of the completion queue */
+ get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get);
+ get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get;
+
+ event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get);
+ event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get;
+
+ while (
+ NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
+ == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])
+ ) {
+ completion_count++;
+
+ ent = ihost->completion_queue[get_index];
+
+ /* increment the get pointer and check for rollover to toggle the cycle bit */
+ get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) <<
+ (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT);
+ get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1);
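+		/*
+		 * ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) is nonzero
+		 * only when the increment wraps, and the shift lands that bit
+		 * on the cycle-bit position, so the xor above flips the cycle
+		 * exactly once per pass through the queue.
+		 */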
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: completion queue entry:0x%08x\n",
+ __func__,
+ ent);
+
+ switch (SCU_GET_COMPLETION_TYPE(ent)) {
+ case SCU_COMPLETION_TYPE_TASK:
+ sci_controller_task_completion(ihost, ent);
+ break;
+
+ case SCU_COMPLETION_TYPE_SDMA:
+ sci_controller_sdma_completion(ihost, ent);
+ break;
+
+ case SCU_COMPLETION_TYPE_UFI:
+ sci_controller_unsolicited_frame(ihost, ent);
+ break;
+
+ case SCU_COMPLETION_TYPE_EVENT:
+ case SCU_COMPLETION_TYPE_NOTIFY: {
+ event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) <<
+ (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT);
+ event_get = (event_get+1) & (SCU_MAX_EVENTS-1);
+
+ sci_controller_event_completion(ihost, ent);
+ break;
+ }
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC Controller received unknown "
+ "completion type %x\n",
+ __func__,
+ ent);
+ break;
+ }
+ }
+
+ /* Update the get register if we completed one or more entries */
+ if (completion_count > 0) {
+ ihost->completion_queue_get =
+ SMU_CQGR_GEN_BIT(ENABLE) |
+ SMU_CQGR_GEN_BIT(EVENT_ENABLE) |
+ event_cycle |
+ SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) |
+ get_cycle |
+ SMU_CQGR_GEN_VAL(POINTER, get_index);
+
+ writel(ihost->completion_queue_get,
+ &ihost->smu_registers->completion_queue_get);
+
+ }
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: completion queue ending get:0x%08x\n",
+ __func__,
+ ihost->completion_queue_get);
+
+}
+
+static void sci_controller_error_handler(struct isci_host *ihost)
+{
+ u32 interrupt_status;
+
+ interrupt_status =
+ readl(&ihost->smu_registers->interrupt_status);
+
+ if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
+ sci_controller_completion_queue_has_entries(ihost)) {
+
+ sci_controller_process_completions(ihost);
+ writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status);
+ } else {
+ dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__,
+ interrupt_status);
+
+ sci_change_state(&ihost->sm, SCIC_FAILED);
+
+ return;
+ }
+
+	/* If we don't process any completions, I am not sure that we want to do this.
+ * We are in the middle of a hardware fault and should probably be reset.
+ */
+ writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+irqreturn_t isci_intx_isr(int vec, void *data)
+{
+ irqreturn_t ret = IRQ_NONE;
+ struct isci_host *ihost = data;
+
+ if (sci_controller_isr(ihost)) {
+ writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+ tasklet_schedule(&ihost->completion_tasklet);
+ ret = IRQ_HANDLED;
+ } else if (sci_controller_error_isr(ihost)) {
+ spin_lock(&ihost->scic_lock);
+ sci_controller_error_handler(ihost);
+ spin_unlock(&ihost->scic_lock);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+irqreturn_t isci_error_isr(int vec, void *data)
+{
+ struct isci_host *ihost = data;
+
+ if (sci_controller_error_isr(ihost))
+ sci_controller_error_handler(ihost);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * isci_host_start_complete() - This function is called by the core library,
+ * through the ISCI Module, to indicate controller start status.
+ * @ihost: This parameter specifies the ISCI host object
+ * @completion_status: This parameter specifies the completion status from the
+ * core library.
+ *
+ */
+static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status)
+{
+ if (completion_status != SCI_SUCCESS)
+ dev_info(&ihost->pdev->dev,
+ "controller start timed out, continuing...\n");
+ isci_host_change_state(ihost, isci_ready);
+ clear_bit(IHOST_START_PENDING, &ihost->flags);
+ wake_up(&ihost->eventq);
+}
+
+int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
+
+ if (test_bit(IHOST_START_PENDING, &ihost->flags))
+ return 0;
+
+ /* todo: use sas_flush_discovery once it is upstream */
+ scsi_flush_work(shost);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: ihost->status = %d, time = %ld\n",
+ __func__, isci_host_get_state(ihost), time);
+
+ return 1;
+}
+
+/**
+ * sci_controller_get_suggested_start_timeout() - This method returns the
+ * suggested sci_controller_start() timeout amount. The user is free to
+ * use any timeout value, but this method provides the suggested minimum
+ * start timeout value. The returned value is based upon empirical
+ * information determined as a result of interoperability testing.
+ * @ihost: the handle to the controller object for which to return the
+ * suggested start timeout.
+ *
+ * This method returns the number of milliseconds for the suggested start
+ * operation timeout.
+ */
+static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
+{
+ /* Validate the user supplied parameters. */
+ if (!ihost)
+ return 0;
+
+ /*
+ * The suggested minimum timeout value for a controller start operation:
+ *
+ * Signature FIS Timeout
+ * + Phy Start Timeout
+ * + Number of Phy Spin Up Intervals
+ * ---------------------------------
+ * Number of milliseconds for the controller start operation.
+ *
+ * NOTE: The number of phy spin up intervals will be equivalent
+ * to the number of phys divided by the number phys allowed
+ * per interval - 1 (once OEM parameters are supported).
+ * Currently we assume only 1 phy per interval. */
+
+ return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
+ + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
+ + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+}
+
+static void sci_controller_enable_interrupts(struct isci_host *ihost)
+{
+ BUG_ON(ihost->smu_registers == NULL);
+ writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+void sci_controller_disable_interrupts(struct isci_host *ihost)
+{
+ BUG_ON(ihost->smu_registers == NULL);
+ writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
+}
+
+static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
+{
+ u32 port_task_scheduler_value;
+
+ port_task_scheduler_value =
+ readl(&ihost->scu_registers->peg0.ptsg.control);
+ port_task_scheduler_value |=
+ (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
+ SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
+ writel(port_task_scheduler_value,
+ &ihost->scu_registers->peg0.ptsg.control);
+}
+
+static void sci_controller_assign_task_entries(struct isci_host *ihost)
+{
+ u32 task_assignment;
+
+ /*
+ * Assign all the TCs to function 0
+ * TODO: Do we actually need to read this register to write it back?
+ */
+
+ task_assignment =
+ readl(&ihost->smu_registers->task_context_assignment[0]);
+
+ task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
+ (SMU_TCA_GEN_VAL(ENDING, ihost->task_context_entries - 1)) |
+ (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));
+
+ writel(task_assignment,
+ &ihost->smu_registers->task_context_assignment[0]);
+
+}
+
+static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
+{
+ u32 index;
+ u32 completion_queue_control_value;
+ u32 completion_queue_get_value;
+ u32 completion_queue_put_value;
+
+ ihost->completion_queue_get = 0;
+
+ completion_queue_control_value =
+ (SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
+ SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));
+
+ writel(completion_queue_control_value,
+ &ihost->smu_registers->completion_queue_control);
+
+
+ /* Set the completion queue get pointer and enable the queue */
+ completion_queue_get_value = (
+ (SMU_CQGR_GEN_VAL(POINTER, 0))
+ | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
+ | (SMU_CQGR_GEN_BIT(ENABLE))
+ | (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
+ );
+
+ writel(completion_queue_get_value,
+ &ihost->smu_registers->completion_queue_get);
+
+ /* Set the completion queue put pointer */
+ completion_queue_put_value = (
+ (SMU_CQPR_GEN_VAL(POINTER, 0))
+ | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
+ );
+
+ writel(completion_queue_put_value,
+ &ihost->smu_registers->completion_queue_put);
+
+ /* Initialize the cycle bit of the completion queue entries */
+ for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
+ /*
+ * If get.cycle_bit != completion_queue.cycle_bit
+		 * it's not a valid completion queue entry,
+		 * so at system start all entries are invalid. */
+ ihost->completion_queue[index] = 0x80000000;
+ }
+}
+
+static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
+{
+ u32 frame_queue_control_value;
+ u32 frame_queue_get_value;
+ u32 frame_queue_put_value;
+
+ /* Write the queue size */
+ frame_queue_control_value =
+ SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);
+
+ writel(frame_queue_control_value,
+ &ihost->scu_registers->sdma.unsolicited_frame_queue_control);
+
+ /* Setup the get pointer for the unsolicited frame queue */
+ frame_queue_get_value = (
+ SCU_UFQGP_GEN_VAL(POINTER, 0)
+ | SCU_UFQGP_GEN_BIT(ENABLE_BIT)
+ );
+
+ writel(frame_queue_get_value,
+ &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+ /* Setup the put pointer for the unsolicited frame queue */
+ frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
+ writel(frame_queue_put_value,
+ &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
+}
+
+static void sci_controller_transition_to_ready(struct isci_host *ihost, enum sci_status status)
+{
+ if (ihost->sm.current_state_id == SCIC_STARTING) {
+ /*
+ * We move into the ready state, because some of the phys/ports
+ * may be up and operational.
+ */
+ sci_change_state(&ihost->sm, SCIC_READY);
+
+ isci_host_start_complete(ihost, status);
+ }
+}
+
+static bool is_phy_starting(struct isci_phy *iphy)
+{
+ enum sci_phy_states state;
+
+ state = iphy->sm.current_state_id;
+ switch (state) {
+ case SCI_PHY_STARTING:
+ case SCI_PHY_SUB_INITIAL:
+ case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_IAF_UF:
+ case SCI_PHY_SUB_AWAIT_SAS_POWER:
+ case SCI_PHY_SUB_AWAIT_SATA_POWER:
+ case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+ case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+ case SCI_PHY_SUB_FINAL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * sci_controller_start_next_phy - start phy
+ * @ihost: controller
+ *
+ * If all the phys have been started, then attempt to transition the
+ * controller to the READY state and inform the user
+ * (sci_cb_controller_start_complete()).
+ */
+static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
+{
+ struct sci_oem_params *oem = &ihost->oem_parameters;
+ struct isci_phy *iphy;
+ enum sci_status status;
+
+ status = SCI_SUCCESS;
+
+ if (ihost->phy_startup_timer_pending)
+ return status;
+
+ if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
+ bool is_controller_start_complete = true;
+ u32 state;
+ u8 index;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ iphy = &ihost->phys[index];
+ state = iphy->sm.current_state_id;
+
+ if (!phy_get_non_dummy_port(iphy))
+ continue;
+
+ /* The controller start operation is complete iff:
+ * - all links have been given an opportunity to start
+ * - have no indication of a connected device
+ * - have an indication of a connected device and it has
+ * finished the link training process.
+ */
+ if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
+ (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
+ (iphy->is_in_link_training == true && is_phy_starting(iphy))) {
+ is_controller_start_complete = false;
+ break;
+ }
+ }
+
+ /*
+ * The controller has successfully finished the start process.
+ * Inform the SCI Core user and transition to the READY state. */
+ if (is_controller_start_complete == true) {
+ sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
+ sci_del_timer(&ihost->phy_timer);
+ ihost->phy_startup_timer_pending = false;
+ }
+ } else {
+ iphy = &ihost->phys[ihost->next_phy_to_start];
+
+ if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
+ if (phy_get_non_dummy_port(iphy) == NULL) {
+ ihost->next_phy_to_start++;
+
+				/* Caution: recursion ahead. Be forewarned.
+				 *
+				 * The PHY was never added to a PORT in MPC mode,
+				 * so start the next phy in sequence. This phy
+				 * will never go link up and will not draw power;
+				 * the OEM parameters either configured the phy
+				 * incorrectly for the PORT or it was never
+				 * assigned to a PORT.
+				 */
+ return sci_controller_start_next_phy(ihost);
+ }
+ }
+
+ status = sci_phy_start(iphy);
+
+ if (status == SCI_SUCCESS) {
+ sci_mod_timer(&ihost->phy_timer,
+ SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
+ ihost->phy_startup_timer_pending = true;
+ } else {
+ dev_warn(&ihost->pdev->dev,
+				 "%s: Controller start operation failed "
+				 "to start phy %d because of status "
+ "%d.\n",
+ __func__,
+ ihost->phys[ihost->next_phy_to_start].phy_index,
+ status);
+ }
+
+ ihost->next_phy_to_start++;
+ }
+
+ return status;
+}
+
+static void phy_startup_timeout(unsigned long data)
+{
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
+ unsigned long flags;
+ enum sci_status status;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ ihost->phy_startup_timer_pending = false;
+
+ do {
+ status = sci_controller_start_next_phy(ihost);
+ } while (status != SCI_SUCCESS);
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+static u16 isci_tci_active(struct isci_host *ihost)
+{
+ return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
+}
+
+static enum sci_status sci_controller_start(struct isci_host *ihost,
+ u32 timeout)
+{
+ enum sci_status result;
+ u16 index;
+
+ if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
+ dev_warn(&ihost->pdev->dev,
+ "SCIC Controller start operation requested in "
+ "invalid state\n");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ /* Build the TCi free pool */
+ BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
+ ihost->tci_head = 0;
+ ihost->tci_tail = 0;
+ for (index = 0; index < ihost->task_context_entries; index++)
+ isci_tci_free(ihost, index);
+
+ /* Build the RNi free pool */
+ sci_remote_node_table_initialize(&ihost->available_remote_nodes,
+ ihost->remote_node_entries);
+
+ /*
+	 * Before anything else let's make sure we will not be
+ * interrupted by the hardware.
+ */
+ sci_controller_disable_interrupts(ihost);
+
+ /* Enable the port task scheduler */
+ sci_controller_enable_port_task_scheduler(ihost);
+
+	/* Assign all the task entries to this controller's physical function */
+ sci_controller_assign_task_entries(ihost);
+
+ /* Now initialize the completion queue */
+ sci_controller_initialize_completion_queue(ihost);
+
+ /* Initialize the unsolicited frame queue for use */
+ sci_controller_initialize_unsolicited_frame_queue(ihost);
+
+ /* Start all of the ports on this controller */
+ for (index = 0; index < ihost->logical_port_entries; index++) {
+ struct isci_port *iport = &ihost->ports[index];
+
+ result = sci_port_start(iport);
+ if (result)
+ return result;
+ }
+
+ sci_controller_start_next_phy(ihost);
+
+ sci_mod_timer(&ihost->timer, timeout);
+
+ sci_change_state(&ihost->sm, SCIC_STARTING);
+
+ return SCI_SUCCESS;
+}
+
+void isci_host_scan_start(struct Scsi_Host *shost)
+{
+ struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
+ unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);
+
+ set_bit(IHOST_START_PENDING, &ihost->flags);
+
+ spin_lock_irq(&ihost->scic_lock);
+ sci_controller_start(ihost, tmo);
+ sci_controller_enable_interrupts(ihost);
+ spin_unlock_irq(&ihost->scic_lock);
+}
+
+static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
+{
+ isci_host_change_state(ihost, isci_stopped);
+ sci_controller_disable_interrupts(ihost);
+ clear_bit(IHOST_STOP_PENDING, &ihost->flags);
+ wake_up(&ihost->eventq);
+}
+
+static void sci_controller_completion_handler(struct isci_host *ihost)
+{
+ /* Empty out the completion queue */
+ if (sci_controller_completion_queue_has_entries(ihost))
+ sci_controller_process_completions(ihost);
+
+ /* Clear the interrupt and enable all interrupts again */
+ writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
+ /* Could we write the value of SMU_ISR_COMPLETION? */
+ writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
+ writel(0, &ihost->smu_registers->interrupt_mask);
+}
+
+/**
+ * isci_host_completion_routine() - This function is the delayed service
+ * routine that calls the sci core library's completion handler. It's
+ * scheduled as a tasklet from the interrupt service routine when interrupts
+ *    are in use, or set as the timeout function in polled mode.
+ * @data: This parameter specifies the ISCI host object
+ *
+ */
+static void isci_host_completion_routine(unsigned long data)
+{
+ struct isci_host *ihost = (struct isci_host *)data;
+ struct list_head completed_request_list;
+ struct list_head errored_request_list;
+ struct list_head *current_position;
+ struct list_head *next_position;
+ struct isci_request *request;
+ struct isci_request *next_request;
+ struct sas_task *task;
+
+ INIT_LIST_HEAD(&completed_request_list);
+ INIT_LIST_HEAD(&errored_request_list);
+
+ spin_lock_irq(&ihost->scic_lock);
+
+ sci_controller_completion_handler(ihost);
+
+ /* Take the lists of completed I/Os from the host. */
+
+ list_splice_init(&ihost->requests_to_complete,
+ &completed_request_list);
+
+ /* Take the list of errored I/Os from the host. */
+ list_splice_init(&ihost->requests_to_errorback,
+ &errored_request_list);
+
+ spin_unlock_irq(&ihost->scic_lock);
+
+ /* Process any completions in the lists. */
+ list_for_each_safe(current_position, next_position,
+ &completed_request_list) {
+
+ request = list_entry(current_position, struct isci_request,
+ completed_node);
+ task = isci_request_access_task(request);
+
+ /* Normal notification (task_done) */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: Normal - request/task = %p/%p\n",
+ __func__,
+ request,
+ task);
+
+ /* Return the task to libsas */
+ if (task != NULL) {
+
+ task->lldd_task = NULL;
+ if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+
+ /* If the task is already in the abort path,
+ * the task_done callback cannot be called.
+ */
+ task->task_done(task);
+ }
+ }
+
+ spin_lock_irq(&ihost->scic_lock);
+ isci_free_tag(ihost, request->io_tag);
+ spin_unlock_irq(&ihost->scic_lock);
+ }
+ list_for_each_entry_safe(request, next_request, &errored_request_list,
+ completed_node) {
+
+ task = isci_request_access_task(request);
+
+ /* Use sas_task_abort */
+ dev_warn(&ihost->pdev->dev,
+ "%s: Error - request/task = %p/%p\n",
+ __func__,
+ request,
+ task);
+
+ if (task != NULL) {
+
+ /* Put the task into the abort path if it's not there
+ * already.
+ */
+ if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
+ sas_task_abort(task);
+
+ } else {
+ /* This is a case where the request has completed with a
+ * status such that it needed further target servicing,
+ * but the sas_task reference has already been removed
+ * from the request. Since it was errored, it was not
+ * being aborted, so there is nothing to do except free
+ * it.
+ */
+
+ spin_lock_irq(&ihost->scic_lock);
+ /* Remove the request from the remote device's list
+ * of pending requests.
+ */
+ list_del_init(&request->dev_node);
+ isci_free_tag(ihost, request->io_tag);
+ spin_unlock_irq(&ihost->scic_lock);
+ }
+ }
+}
+
+/**
+ * sci_controller_stop() - This method will stop an individual controller
+ *    object. This method will invoke the associated user callback upon
+ *    completion. The completion callback is called when the following
+ *    conditions are met: the method return status is SCI_SUCCESS and the
+ *    controller has been quiesced. This method will ensure that all IO
+ *    requests are quiesced, phys are stopped, and all additional operation by
+ *    the hardware is halted.
+ * @ihost: the handle to the controller object to stop.
+ * @timeout: This parameter specifies the number of milliseconds in which the
+ *    stop operation should complete.
+ *
+ * The controller must be in the READY state. Indicate if the controller stop
+ * method succeeded or failed in some way. SCI_SUCCESS if the stop operation
+ * successfully began. SCI_FAILURE_INVALID_STATE if the controller is not in
+ * the READY state.
+ */
+static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
+{
+ if (ihost->sm.current_state_id != SCIC_READY) {
+ dev_warn(&ihost->pdev->dev,
+ "SCIC Controller stop operation requested in "
+ "invalid state\n");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_mod_timer(&ihost->timer, timeout);
+ sci_change_state(&ihost->sm, SCIC_STOPPING);
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_reset() - This method will reset the supplied core
+ * controller regardless of the state of said controller. This operation is
+ * considered destructive. In other words, all current operations are wiped
+ * out. No IO completions for outstanding devices occur. Outstanding IO
+ * requests are not aborted or completed at the actual remote device.
+ * @ihost: the handle to the controller object to reset.
+ *
+ * Indicate if the controller reset method succeeded or failed in some way.
+ * SCI_SUCCESS if the reset operation successfully started.
+ * SCI_FAILURE_INVALID_STATE if the controller is in a state from which a
+ * reset is not permitted.
+ */
+static enum sci_status sci_controller_reset(struct isci_host *ihost)
+{
+ switch (ihost->sm.current_state_id) {
+ case SCIC_RESET:
+ case SCIC_READY:
+ case SCIC_STOPPED:
+ case SCIC_FAILED:
+ /*
+ * The reset operation is not a graceful cleanup, just
+ * perform the state transition.
+ */
+ sci_change_state(&ihost->sm, SCIC_RESETTING);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "SCIC Controller reset operation requested in "
+ "invalid state\n");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+void isci_host_deinit(struct isci_host *ihost)
+{
+ int i;
+
+ isci_host_change_state(ihost, isci_stopping);
+ for (i = 0; i < SCI_MAX_PORTS; i++) {
+ struct isci_port *iport = &ihost->ports[i];
+ struct isci_remote_device *idev, *d;
+
+ list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
+ if (test_bit(IDEV_ALLOCATED, &idev->flags))
+ isci_remote_device_stop(ihost, idev);
+ }
+ }
+
+ set_bit(IHOST_STOP_PENDING, &ihost->flags);
+
+ spin_lock_irq(&ihost->scic_lock);
+ sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
+ spin_unlock_irq(&ihost->scic_lock);
+
+ wait_for_stop(ihost);
+ sci_controller_reset(ihost);
+
+ /* Cancel any/all outstanding port timers */
+ for (i = 0; i < ihost->logical_port_entries; i++) {
+ struct isci_port *iport = &ihost->ports[i];
+ del_timer_sync(&iport->timer.timer);
+ }
+
+ /* Cancel any/all outstanding phy timers */
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ struct isci_phy *iphy = &ihost->phys[i];
+ del_timer_sync(&iphy->sata_timer.timer);
+ }
+
+ del_timer_sync(&ihost->port_agent.timer.timer);
+
+ del_timer_sync(&ihost->power_control.timer.timer);
+
+ del_timer_sync(&ihost->timer.timer);
+
+ del_timer_sync(&ihost->phy_timer.timer);
+}
+
+static void __iomem *scu_base(struct isci_host *isci_host)
+{
+ struct pci_dev *pdev = isci_host->pdev;
+ int id = isci_host->id;
+
+ return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
+}
+
+static void __iomem *smu_base(struct isci_host *isci_host)
+{
+ struct pci_dev *pdev = isci_host->pdev;
+ int id = isci_host->id;
+
+ return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
+}
+
+static void isci_user_parameters_get(struct sci_user_parameters *u)
+{
+ int i;
+
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ struct sci_phy_user_params *u_phy = &u->phys[i];
+
+ u_phy->max_speed_generation = phy_gen;
+
+ /* we are not exporting these for now */
+ u_phy->align_insertion_frequency = 0x7f;
+ u_phy->in_connection_align_insertion_frequency = 0xff;
+ u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
+ }
+
+ u->stp_inactivity_timeout = stp_inactive_to;
+ u->ssp_inactivity_timeout = ssp_inactive_to;
+ u->stp_max_occupancy_timeout = stp_max_occ_to;
+ u->ssp_max_occupancy_timeout = ssp_max_occ_to;
+ u->no_outbound_task_timeout = no_outbound_task_to;
+ u->max_number_concurrent_device_spin_up = max_concurr_spinup;
+}
+
+static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ sci_change_state(&ihost->sm, SCIC_RESET);
+}
+
+static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ sci_del_timer(&ihost->timer);
+}
+
+#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
+#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
+#define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000
+#define INTERRUPT_COALESCE_NUMBER_MAX 256
+#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7
+#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28
+
+/**
+ * sci_controller_set_interrupt_coalescence() - This method allows the user to
+ * configure the interrupt coalescence.
+ * @ihost: This parameter represents the handle to the controller object
+ *    whose interrupt coalesce register is overridden.
+ * @coalesce_number: Used to control the number of entries in the Completion
+ * Queue before an interrupt is generated. If the number of entries exceed
+ *    Queue before an interrupt is generated. If the number of entries exceeds
+ * is [0, 256]. A setting of 0 results in coalescing being disabled.
+ * @coalesce_timeout: Timeout value in microseconds. The valid range of the
+ *    input is [0, 2700000]. A setting of 0 is allowed and results in no
+ * interrupt coalescing timeout.
+ *
+ * Indicate if the user successfully set the interrupt coalesce parameters.
+ * SCI_SUCCESS The user successfully updated the interrupt coalescence.
+ * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
+ */
+static enum sci_status
+sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
+ u32 coalesce_number,
+ u32 coalesce_timeout)
+{
+ u8 timeout_encode = 0;
+ u32 min = 0;
+ u32 max = 0;
+
+ /* Check if the input parameters fall in the range. */
+ if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+ /*
+ * Defined encoding for interrupt coalescing timeout:
+ * Value Min Max Units
+ * ----- --- --- -----
+ * 0 - - Disabled
+ * 1 13.3 20.0 ns
+ * 2 26.7 40.0
+ * 3 53.3 80.0
+ * 4 106.7 160.0
+ * 5 213.3 320.0
+ * 6 426.7 640.0
+ * 7 853.3 1280.0
+ * 8 1.7 2.6 us
+ * 9 3.4 5.1
+ * 10 6.8 10.2
+ * 11 13.7 20.5
+ * 12 27.3 41.0
+ * 13 54.6 81.9
+ * 14 109.2 163.8
+ * 15 218.5 327.7
+ * 16 436.9 655.4
+ * 17 873.8 1310.7
+ * 18 1.7 2.6 ms
+ * 19 3.5 5.2
+ * 20 7.0 10.5
+ * 21 14.0 21.0
+ * 22 28.0 41.9
+ * 23 55.9 83.9
+ * 24 111.8 167.8
+ * 25 223.7 335.5
+ * 26 447.4 671.1
+ * 27 894.8 1342.2
+ * 28 1.8 2.7 s
+ * Others Undefined */
+
+	/*
+	 * Use the table above to decide the encoding of the interrupt
+	 * coalescing timeout value for register writing. */
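+	/*
+	 * Worked example (values in 10 ns units): a requested timeout of
+	 * 250 us becomes 25000.  Starting from encode 7 (min = 85, max = 128)
+	 * and doubling both bounds each iteration, the first range containing
+	 * the value is encode 15 (21760 <= 25000 < 32768), i.e. the
+	 * 218.5 us - 327.7 us row in the table above.
+	 */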
+ if (coalesce_timeout == 0)
+ timeout_encode = 0;
+	else {
+		/* Convert the timeout value into units of 10 ns. */
+ coalesce_timeout = coalesce_timeout * 100;
+ min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
+ max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;
+
+ /* get the encode of timeout for register writing. */
+ for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
+ timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
+ timeout_encode++) {
+ if (min <= coalesce_timeout && max > coalesce_timeout)
+ break;
+ else if (coalesce_timeout >= max && coalesce_timeout < min * 2
+ && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
+ if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
+ break;
+				else {
+ timeout_encode++;
+ break;
+ }
+ } else {
+ max = max * 2;
+ min = min * 2;
+ }
+ }
+
+ if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
+ /* the value is out of range. */
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+ }
+
+ writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
+ SMU_ICC_GEN_VAL(TIMER, timeout_encode),
+ &ihost->smu_registers->interrupt_coalesce_control);
+
+ ihost->interrupt_coalesce_number = (u16)coalesce_number;
+ ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;
+
+ return SCI_SUCCESS;
+}
+
+static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ /* set the default interrupt coalescence number and timeout value. */
+ sci_controller_set_interrupt_coalescence(ihost, 0x10, 250);
+}
+
+static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ /* disable interrupt coalescence. */
+ sci_controller_set_interrupt_coalescence(ihost, 0, 0);
+}
+
+static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
+{
+ u32 index;
+ enum sci_status status;
+ enum sci_status phy_status;
+
+ status = SCI_SUCCESS;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ phy_status = sci_phy_stop(&ihost->phys[index]);
+
+ if (phy_status != SCI_SUCCESS &&
+ phy_status != SCI_FAILURE_INVALID_STATE) {
+ status = SCI_FAILURE;
+
+ dev_warn(&ihost->pdev->dev,
+ "%s: Controller stop operation failed to stop "
+ "phy %d because of status %d.\n",
+ __func__,
+ ihost->phys[index].phy_index, phy_status);
+ }
+ }
+
+ return status;
+}
+
+static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
+{
+ u32 index;
+ enum sci_status port_status;
+ enum sci_status status = SCI_SUCCESS;
+
+ for (index = 0; index < ihost->logical_port_entries; index++) {
+ struct isci_port *iport = &ihost->ports[index];
+
+ port_status = sci_port_stop(iport);
+
+ if ((port_status != SCI_SUCCESS) &&
+ (port_status != SCI_FAILURE_INVALID_STATE)) {
+ status = SCI_FAILURE;
+
+ dev_warn(&ihost->pdev->dev,
+ "%s: Controller stop operation failed to "
+ "stop port %d because of status %d.\n",
+ __func__,
+ iport->logical_port_index,
+ port_status);
+ }
+ }
+
+ return status;
+}
+
+static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
+{
+ u32 index;
+ enum sci_status status;
+ enum sci_status device_status;
+
+ status = SCI_SUCCESS;
+
+ for (index = 0; index < ihost->remote_node_entries; index++) {
+ if (ihost->device_table[index] != NULL) {
+			/* @todo What timeout value do we want to provide to this request? */
+ device_status = sci_remote_device_stop(ihost->device_table[index], 0);
+
+ if ((device_status != SCI_SUCCESS) &&
+ (device_status != SCI_FAILURE_INVALID_STATE)) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: Controller stop operation failed "
+ "to stop device 0x%p because of "
+ "status %d.\n",
+ __func__,
+ ihost->device_table[index], device_status);
+ }
+ }
+ }
+
+ return status;
+}
+
+static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ /* Stop all of the components for this controller */
+ sci_controller_stop_phys(ihost);
+ sci_controller_stop_ports(ihost);
+ sci_controller_stop_devices(ihost);
+}
+
+static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ sci_del_timer(&ihost->timer);
+}
+
+static void sci_controller_reset_hardware(struct isci_host *ihost)
+{
+	/* Disable interrupts so we don't take any spurious interrupts */
+ sci_controller_disable_interrupts(ihost);
+
+ /* Reset the SCU */
+ writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);
+
+	/* Delay for 1ms before clearing the CQP and UFQPR. */
+ udelay(1000);
+
+ /* The write to the CQGR clears the CQP */
+ writel(0x00000000, &ihost->smu_registers->completion_queue_get);
+
+ /* The write to the UFQGP clears the UFQPR */
+ writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+}
+
+static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
+
+ sci_controller_reset_hardware(ihost);
+ sci_change_state(&ihost->sm, SCIC_RESET);
+}
+
+static const struct sci_base_state sci_controller_state_table[] = {
+ [SCIC_INITIAL] = {
+ .enter_state = sci_controller_initial_state_enter,
+ },
+ [SCIC_RESET] = {},
+ [SCIC_INITIALIZING] = {},
+ [SCIC_INITIALIZED] = {},
+ [SCIC_STARTING] = {
+ .exit_state = sci_controller_starting_state_exit,
+ },
+ [SCIC_READY] = {
+ .enter_state = sci_controller_ready_state_enter,
+ .exit_state = sci_controller_ready_state_exit,
+ },
+ [SCIC_RESETTING] = {
+ .enter_state = sci_controller_resetting_state_enter,
+ },
+ [SCIC_STOPPING] = {
+ .enter_state = sci_controller_stopping_state_enter,
+ .exit_state = sci_controller_stopping_state_exit,
+ },
+ [SCIC_STOPPED] = {},
+ [SCIC_FAILED] = {}
+};
+
+static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
+{
+ /* these defaults are overridden by the platform / firmware */
+ u16 index;
+
+ /* Default to APC mode. */
+ ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
+
+	/* Default to one concurrent device spin up. */
+ ihost->oem_parameters.controller.max_concurrent_dev_spin_up = 1;
+
+ /* Default to no SSC operation. */
+ ihost->oem_parameters.controller.do_enable_ssc = false;
+
+ /* Initialize all of the port parameter information to narrow ports. */
+ for (index = 0; index < SCI_MAX_PORTS; index++) {
+ ihost->oem_parameters.ports[index].phy_mask = 0;
+ }
+
+ /* Initialize all of the phy parameter information. */
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ /* Default to 6G (i.e. Gen 3) for now. */
+ ihost->user_parameters.phys[index].max_speed_generation = 3;
+
+ /* the frequencies cannot be 0 */
+ ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
+ ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
+ ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
+
+		/*
+		 * Previous Vitesse-based expanders had an arbitration issue that
+		 * is worked around by having the upper 32 bits of the SAS
+		 * address hold a value greater than the Vitesse company
+		 * identifier.  Hence, usage of 0x5FCFFFFF. */
+ ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
+ ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
+ }
+
+ ihost->user_parameters.stp_inactivity_timeout = 5;
+ ihost->user_parameters.ssp_inactivity_timeout = 5;
+ ihost->user_parameters.stp_max_occupancy_timeout = 5;
+ ihost->user_parameters.ssp_max_occupancy_timeout = 20;
+ ihost->user_parameters.no_outbound_task_timeout = 20;
+}
+
+static void controller_timeout(unsigned long data)
+{
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
+ struct sci_base_state_machine *sm = &ihost->sm;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ if (sm->current_state_id == SCIC_STARTING)
+ sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
+ else if (sm->current_state_id == SCIC_STOPPING) {
+ sci_change_state(sm, SCIC_FAILED);
+ isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
+	} else /* @todo Now what do we want to do in this case? */
+ dev_err(&ihost->pdev->dev,
+ "%s: Controller timer fired when controller was not "
+ "in a state being timed.\n",
+ __func__);
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+static enum sci_status sci_controller_construct(struct isci_host *ihost,
+ void __iomem *scu_base,
+ void __iomem *smu_base)
+{
+ u8 i;
+
+ sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);
+
+ ihost->scu_registers = scu_base;
+ ihost->smu_registers = smu_base;
+
+ sci_port_configuration_agent_construct(&ihost->port_agent);
+
+ /* Construct the ports for this controller */
+ for (i = 0; i < SCI_MAX_PORTS; i++)
+ sci_port_construct(&ihost->ports[i], i, ihost);
+ sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);
+
+ /* Construct the phys for this controller */
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ /* Add all the PHYs to the dummy port */
+ sci_phy_construct(&ihost->phys[i],
+ &ihost->ports[SCI_MAX_PORTS], i);
+ }
+
+ ihost->invalid_phy_mask = 0;
+
+ sci_init_timer(&ihost->timer, controller_timeout);
+
+ /* Initialize the User and OEM parameters to default values. */
+ sci_controller_set_default_config_parameters(ihost);
+
+ return sci_controller_reset(ihost);
+}
+
+int sci_oem_parameters_validate(struct sci_oem_params *oem)
+{
+ int i;
+
+ for (i = 0; i < SCI_MAX_PORTS; i++)
+ if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
+ return -EINVAL;
+
+ for (i = 0; i < SCI_MAX_PHYS; i++)
+ if (oem->phys[i].sas_address.high == 0 &&
+ oem->phys[i].sas_address.low == 0)
+ return -EINVAL;
+
+ if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
+ for (i = 0; i < SCI_MAX_PHYS; i++)
+ if (oem->ports[i].phy_mask != 0)
+ return -EINVAL;
+ } else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
+ u8 phy_mask = 0;
+
+ for (i = 0; i < SCI_MAX_PHYS; i++)
+ phy_mask |= oem->ports[i].phy_mask;
+
+ if (phy_mask == 0)
+ return -EINVAL;
+ } else
+ return -EINVAL;
+
+ if (oem->controller.max_concurrent_dev_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
+{
+ u32 state = ihost->sm.current_state_id;
+
+ if (state == SCIC_RESET ||
+ state == SCIC_INITIALIZING ||
+ state == SCIC_INITIALIZED) {
+
+ if (sci_oem_parameters_validate(&ihost->oem_parameters))
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE_INVALID_STATE;
+}
+
+static void power_control_timeout(unsigned long data)
+{
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
+ struct isci_phy *iphy;
+ unsigned long flags;
+ u8 i;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ ihost->power_control.phys_granted_power = 0;
+
+ if (ihost->power_control.phys_waiting == 0) {
+ ihost->power_control.timer_started = false;
+ goto done;
+ }
+
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+
+ if (ihost->power_control.phys_waiting == 0)
+ break;
+
+ iphy = ihost->power_control.requesters[i];
+ if (iphy == NULL)
+ continue;
+
+ if (ihost->power_control.phys_granted_power >=
+ ihost->oem_parameters.controller.max_concurrent_dev_spin_up)
+ break;
+
+ ihost->power_control.requesters[i] = NULL;
+ ihost->power_control.phys_waiting--;
+ ihost->power_control.phys_granted_power++;
+ sci_phy_consume_power_handler(iphy);
+ }
+
+ /*
+ * It doesn't matter if the power list is empty, we need to start the
+ * timer in case another phy becomes ready.
+ */
+ sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+ ihost->power_control.timer_started = true;
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+void sci_controller_power_control_queue_insert(struct isci_host *ihost,
+ struct isci_phy *iphy)
+{
+ BUG_ON(iphy == NULL);
+
+ if (ihost->power_control.phys_granted_power <
+ ihost->oem_parameters.controller.max_concurrent_dev_spin_up) {
+ ihost->power_control.phys_granted_power++;
+ sci_phy_consume_power_handler(iphy);
+
+		/*
+		 * stop and start the power_control timer. When the timer fires,
+		 * the phys_granted_power count will be set back to 0.
+		 */
+ if (ihost->power_control.timer_started)
+ sci_del_timer(&ihost->power_control.timer);
+
+ sci_mod_timer(&ihost->power_control.timer,
+ SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
+ ihost->power_control.timer_started = true;
+
+ } else {
+ /* Add the phy in the waiting list */
+ ihost->power_control.requesters[iphy->phy_index] = iphy;
+ ihost->power_control.phys_waiting++;
+ }
+}
+
+void sci_controller_power_control_queue_remove(struct isci_host *ihost,
+ struct isci_phy *iphy)
+{
+ BUG_ON(iphy == NULL);
+
+ if (ihost->power_control.requesters[iphy->phy_index])
+ ihost->power_control.phys_waiting--;
+
+ ihost->power_control.requesters[iphy->phy_index] = NULL;
+}
+
+#define AFE_REGISTER_WRITE_DELAY 10
+
+/* Initialize the AFE for this phy index. We need to read the AFE setup from
+ * the OEM parameters
+ */
+static void sci_controller_afe_initialization(struct isci_host *ihost)
+{
+ const struct sci_oem_params *oem = &ihost->oem_parameters;
+ struct pci_dev *pdev = ihost->pdev;
+ u32 afe_status;
+ u32 phy_id;
+
+ /* Clear DFX Status registers */
+ writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ if (is_b0(pdev)) {
+ /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
+ * Timer, PM Stagger Timer */
+ writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ }
+
+ /* Configure bias currents to normal */
+ if (is_a2(pdev))
+ writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control);
+ else if (is_b0(pdev) || is_c0(pdev))
+ writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control);
+
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* Enable PLL */
+ if (is_b0(pdev) || is_c0(pdev))
+ writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0);
+ else
+ writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0);
+
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* Wait for the PLL to lock */
+ do {
+ afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ } while ((afe_status & 0x00001000) == 0);
+
+ if (is_a2(pdev)) {
+ /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
+ writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ }
+
+ for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
+ const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
+
+ if (is_b0(pdev)) {
+ /* Configure transmitter SSC parameters */
+ writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ } else if (is_c0(pdev)) {
+ /* Configure transmitter SSC parameters */
+ writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /*
+			 * All defaults, except the Receive Word Alignment/Comma Detect
+ * Enable....(0xe800) */
+ writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ } else {
+ /*
+			 * All defaults, except the Receive Word Alignment/Comma Detect
+ * Enable....(0xe800) */
+ writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ }
+
+ /*
+ * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
+ * & increase TX int & ext bias 20%....(0xe85c) */
+ if (is_a2(pdev))
+ writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ else if (is_b0(pdev)) {
+ /* Power down TX and RX (PWRDNTX and PWRDNRX) */
+ writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /*
+ * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
+ * & increase TX int & ext bias 20%....(0xe85c) */
+ writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ } else {
+ writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /*
+ * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
+ * & increase TX int & ext bias 20%....(0xe85c) */
+ writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ }
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ if (is_a2(pdev)) {
+ /* Enable TX equalization (0xe824) */
+ writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ }
+
+ /*
+ * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
+ * RDD=0x0(RX Detect Enabled) ....(0xe800) */
+ writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* Leave DFE/FFE on */
+ if (is_a2(pdev))
+ writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+ else if (is_b0(pdev)) {
+ writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ /* Enable TX equalization (0xe824) */
+ writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+ } else {
+ writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* Enable TX equalization (0xe824) */
+ writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+ }
+
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(oem_phy->afe_tx_amp_control0,
+ &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(oem_phy->afe_tx_amp_control1,
+ &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(oem_phy->afe_tx_amp_control2,
+ &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(oem_phy->afe_tx_amp_control3,
+ &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ }
+
+ /* Transfer control to the PEs */
+ writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+}
+
+static void sci_controller_initialize_power_control(struct isci_host *ihost)
+{
+ sci_init_timer(&ihost->power_control.timer, power_control_timeout);
+
+ memset(ihost->power_control.requesters, 0,
+ sizeof(ihost->power_control.requesters));
+
+ ihost->power_control.phys_waiting = 0;
+ ihost->power_control.phys_granted_power = 0;
+}
+
+static enum sci_status sci_controller_initialize(struct isci_host *ihost)
+{
+ struct sci_base_state_machine *sm = &ihost->sm;
+ enum sci_status result = SCI_FAILURE;
+ unsigned long i, state, val;
+
+ if (ihost->sm.current_state_id != SCIC_RESET) {
+ dev_warn(&ihost->pdev->dev,
+ "SCIC Controller initialize operation requested "
+ "in invalid state\n");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_change_state(sm, SCIC_INITIALIZING);
+
+ sci_init_timer(&ihost->phy_timer, phy_startup_timeout);
+
+ ihost->next_phy_to_start = 0;
+ ihost->phy_startup_timer_pending = false;
+
+ sci_controller_initialize_power_control(ihost);
+
+	/*
+	 * There is nothing to do here for B0 since we do not have to
+	 * program the AFE registers.
+	 * @todo The AFE settings are supposed to be correct for the B0 but
+	 * presently they seem to be wrong. */
+ sci_controller_afe_initialization(ihost);
+
+ /* Take the hardware out of reset */
+ writel(0, &ihost->smu_registers->soft_reset_control);
+
+	/*
+	 * @todo Provide a meaningful error code for hardware failure
+	 * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
+ for (i = 100; i >= 1; i--) {
+ u32 status;
+
+ /* Loop until the hardware reports success */
+ udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
+ status = readl(&ihost->smu_registers->control_status);
+
+ if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED)
+ break;
+ }
+ if (i == 0)
+ goto out;
+
+	/*
+	 * Determine the actual device capacities that the
+	 * hardware will support */
+ val = readl(&ihost->smu_registers->device_context_capacity);
+
+ /* Record the smaller of the two capacity values */
+ ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS);
+ ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS);
+ ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES);
+
+ /*
+ * Make all PEs that are unassigned match up with the
+ * logical ports
+ */
+ for (i = 0; i < ihost->logical_port_entries; i++) {
+ struct scu_port_task_scheduler_group_registers __iomem
+ *ptsg = &ihost->scu_registers->peg0.ptsg;
+
+ writel(i, &ptsg->protocol_engine[i]);
+ }
+
+ /* Initialize hardware PCI Relaxed ordering in DMA engines */
+ val = readl(&ihost->scu_registers->sdma.pdma_configuration);
+ val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
+ writel(val, &ihost->scu_registers->sdma.pdma_configuration);
+
+ val = readl(&ihost->scu_registers->sdma.cdma_configuration);
+ val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
+ writel(val, &ihost->scu_registers->sdma.cdma_configuration);
+
+ /*
+ * Initialize the PHYs before the PORTs because the PHY registers
+ * are accessed during the port initialization.
+ */
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ result = sci_phy_initialize(&ihost->phys[i],
+ &ihost->scu_registers->peg0.pe[i].tl,
+ &ihost->scu_registers->peg0.pe[i].ll);
+ if (result != SCI_SUCCESS)
+ goto out;
+ }
+
+ for (i = 0; i < ihost->logical_port_entries; i++) {
+ struct isci_port *iport = &ihost->ports[i];
+
+ iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i];
+ iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0];
+ iport->viit_registers = &ihost->scu_registers->peg0.viit[i];
+ }
+
+ result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent);
+
+ out:
+ /* Advance the controller state machine */
+ if (result == SCI_SUCCESS)
+ state = SCIC_INITIALIZED;
+ else
+ state = SCIC_FAILED;
+ sci_change_state(sm, state);
+
+ return result;
+}
+
+static enum sci_status sci_user_parameters_set(struct isci_host *ihost,
+ struct sci_user_parameters *sci_parms)
+{
+ u32 state = ihost->sm.current_state_id;
+
+ if (state == SCIC_RESET ||
+ state == SCIC_INITIALIZING ||
+ state == SCIC_INITIALIZED) {
+ u16 index;
+
+ /*
+ * Validate the user parameters. If they are not legal, then
+ * return a failure.
+ */
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ struct sci_phy_user_params *user_phy;
+
+ user_phy = &sci_parms->phys[index];
+
+			if (user_phy->max_speed_generation >
+			    SCIC_SDS_PARM_MAX_SPEED ||
+			    user_phy->max_speed_generation <=
+			    SCIC_SDS_PARM_NO_SPEED)
+				return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+			if ((user_phy->in_connection_align_insertion_frequency < 3) ||
+			    (user_phy->align_insertion_frequency == 0) ||
+			    (user_phy->notify_enable_spin_up_insertion_frequency == 0))
+				return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+ }
+
+ if ((sci_parms->stp_inactivity_timeout == 0) ||
+ (sci_parms->ssp_inactivity_timeout == 0) ||
+ (sci_parms->stp_max_occupancy_timeout == 0) ||
+ (sci_parms->ssp_max_occupancy_timeout == 0) ||
+ (sci_parms->no_outbound_task_timeout == 0))
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+
+ memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms));
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE_INVALID_STATE;
+}
+
+static int sci_controller_mem_init(struct isci_host *ihost)
+{
+ struct device *dev = &ihost->pdev->dev;
+ dma_addr_t dma;
+ size_t size;
+ int err;
+
+ size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32);
+ ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
+ if (!ihost->completion_queue)
+ return -ENOMEM;
+
+ writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower);
+ writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper);
+
+ size = ihost->remote_node_entries * sizeof(union scu_remote_node_context);
+ ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma,
+ GFP_KERNEL);
+ if (!ihost->remote_node_context_table)
+ return -ENOMEM;
+
+ writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower);
+ writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper);
+
+	size = ihost->task_context_entries * sizeof(struct scu_task_context);
+ ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL);
+ if (!ihost->task_context_table)
+ return -ENOMEM;
+
+ ihost->task_context_dma = dma;
+ writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower);
+ writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper);
+
+ err = sci_unsolicited_frame_control_construct(ihost);
+ if (err)
+ return err;
+
+ /*
+ * Inform the silicon as to the location of the UF headers and
+ * address table.
+ */
+ writel(lower_32_bits(ihost->uf_control.headers.physical_address),
+ &ihost->scu_registers->sdma.uf_header_base_address_lower);
+ writel(upper_32_bits(ihost->uf_control.headers.physical_address),
+ &ihost->scu_registers->sdma.uf_header_base_address_upper);
+
+ writel(lower_32_bits(ihost->uf_control.address_table.physical_address),
+ &ihost->scu_registers->sdma.uf_address_table_lower);
+ writel(upper_32_bits(ihost->uf_control.address_table.physical_address),
+ &ihost->scu_registers->sdma.uf_address_table_upper);
+
+ return 0;
+}
+
+int isci_host_init(struct isci_host *ihost)
+{
+ int err = 0, i;
+ enum sci_status status;
+ struct sci_user_parameters sci_user_params;
+ struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
+
+ spin_lock_init(&ihost->state_lock);
+ spin_lock_init(&ihost->scic_lock);
+ init_waitqueue_head(&ihost->eventq);
+
+ isci_host_change_state(ihost, isci_starting);
+
+ status = sci_controller_construct(ihost, scu_base(ihost),
+ smu_base(ihost));
+
+ if (status != SCI_SUCCESS) {
+ dev_err(&ihost->pdev->dev,
+ "%s: sci_controller_construct failed - status = %x\n",
+ __func__,
+ status);
+ return -ENODEV;
+ }
+
+ ihost->sas_ha.dev = &ihost->pdev->dev;
+ ihost->sas_ha.lldd_ha = ihost;
+
+ /*
+ * grab initial values stored in the controller object for OEM and USER
+ * parameters
+ */
+ isci_user_parameters_get(&sci_user_params);
+ status = sci_user_parameters_set(ihost, &sci_user_params);
+ if (status != SCI_SUCCESS) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: sci_user_parameters_set failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ /* grab any OEM parameters specified in orom */
+ if (pci_info->orom) {
+ status = isci_parse_oem_parameters(&ihost->oem_parameters,
+ pci_info->orom,
+ ihost->id);
+ if (status != SCI_SUCCESS) {
+ dev_warn(&ihost->pdev->dev,
+ "parsing firmware oem parameters failed\n");
+ return -EINVAL;
+ }
+ }
+
+ status = sci_oem_parameters_set(ihost);
+ if (status != SCI_SUCCESS) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: sci_oem_parameters_set failed\n",
+ __func__);
+ return -ENODEV;
+ }
+
+ tasklet_init(&ihost->completion_tasklet,
+ isci_host_completion_routine, (unsigned long)ihost);
+
+ INIT_LIST_HEAD(&ihost->requests_to_complete);
+ INIT_LIST_HEAD(&ihost->requests_to_errorback);
+
+ spin_lock_irq(&ihost->scic_lock);
+ status = sci_controller_initialize(ihost);
+ spin_unlock_irq(&ihost->scic_lock);
+ if (status != SCI_SUCCESS) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: sci_controller_initialize failed -"
+ " status = 0x%x\n",
+ __func__, status);
+ return -ENODEV;
+ }
+
+ err = sci_controller_mem_init(ihost);
+ if (err)
+ return err;
+
+ for (i = 0; i < SCI_MAX_PORTS; i++)
+ isci_port_init(&ihost->ports[i], ihost, i);
+
+ for (i = 0; i < SCI_MAX_PHYS; i++)
+ isci_phy_init(&ihost->phys[i], ihost, i);
+
+ for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
+ struct isci_remote_device *idev = &ihost->devices[i];
+
+ INIT_LIST_HEAD(&idev->reqs_in_process);
+ INIT_LIST_HEAD(&idev->node);
+ }
+
+ for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
+ struct isci_request *ireq;
+ dma_addr_t dma;
+
+ ireq = dmam_alloc_coherent(&ihost->pdev->dev,
+ sizeof(struct isci_request), &dma,
+ GFP_KERNEL);
+ if (!ireq)
+ return -ENOMEM;
+
+ ireq->tc = &ihost->task_context_table[i];
+ ireq->owning_controller = ihost;
+ spin_lock_init(&ireq->state_lock);
+ ireq->request_daddr = dma;
+ ireq->isci_host = ihost;
+ ihost->reqs[i] = ireq;
+ }
+
+ return 0;
+}
+
+void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ switch (ihost->sm.current_state_id) {
+ case SCIC_STARTING:
+ sci_del_timer(&ihost->phy_timer);
+ ihost->phy_startup_timer_pending = false;
+ ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
+ iport, iphy);
+ sci_controller_start_next_phy(ihost);
+ break;
+ case SCIC_READY:
+ ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
+ iport, iphy);
+ break;
+ default:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCIC Controller linkup event from phy %d in "
+ "unexpected state %d\n", __func__, iphy->phy_index,
+ ihost->sm.current_state_id);
+ }
+}
+
+void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ switch (ihost->sm.current_state_id) {
+ case SCIC_STARTING:
+ case SCIC_READY:
+ ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
+ iport, iphy);
+ break;
+ default:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCIC Controller linkdown event from phy %d in "
+ "unexpected state %d\n",
+ __func__,
+ iphy->phy_index,
+ ihost->sm.current_state_id);
+ }
+}
+
+static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
+{
+ u32 index;
+
+ for (index = 0; index < ihost->remote_node_entries; index++) {
+ if ((ihost->device_table[index] != NULL) &&
+ (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
+ return true;
+ }
+
+ return false;
+}
+
+void sci_controller_remote_device_stopped(struct isci_host *ihost,
+ struct isci_remote_device *idev)
+{
+ if (ihost->sm.current_state_id != SCIC_STOPPING) {
+ dev_dbg(&ihost->pdev->dev,
+ "SCIC Controller 0x%p remote device stopped event "
+ "from device 0x%p in unexpected state %d\n",
+ ihost, idev,
+ ihost->sm.current_state_id);
+ return;
+ }
+
+ if (!sci_controller_has_remote_devices_stopping(ihost))
+ sci_change_state(&ihost->sm, SCIC_STOPPED);
+}
+
+void sci_controller_post_request(struct isci_host *ihost, u32 request)
+{
+ dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
+ __func__, ihost->id, request);
+
+ writel(request, &ihost->smu_registers->post_context_port);
+}
+
+struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
+{
+ u16 task_index;
+ u16 task_sequence;
+
+ task_index = ISCI_TAG_TCI(io_tag);
+
+ if (task_index < ihost->task_context_entries) {
+ struct isci_request *ireq = ihost->reqs[task_index];
+
+ if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
+ task_sequence = ISCI_TAG_SEQ(io_tag);
+
+ if (task_sequence == ihost->io_request_sequence[task_index])
+ return ireq;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * sci_controller_allocate_remote_node_context() - This method allocates a
+ *    remote node index and reserves the remote node context space for use.
+ *    This method can fail if there are no more remote node indices available.
+ * @ihost: This is the controller object which contains the set of
+ *    free remote node ids
+ * @idev: This is the device object which is requesting a remote node
+ *    id
+ * @node_id: This is the remote node id that is assigned to the device if one
+ *    is available
+ *
+ * enum sci_status SCI_FAILURE_INSUFFICIENT_RESOURCES if there are no remote
+ * node indices available.
+ */
+enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 *node_id)
+{
+ u16 node_index;
+ u32 remote_node_count = sci_remote_device_node_count(idev);
+
+ node_index = sci_remote_node_table_allocate_remote_node(
+ &ihost->available_remote_nodes, remote_node_count
+ );
+
+ if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+ ihost->device_table[node_index] = idev;
+
+ *node_id = node_index;
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE_INSUFFICIENT_RESOURCES;
+}
+
+void sci_controller_free_remote_node_context(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 node_id)
+{
+ u32 remote_node_count = sci_remote_device_node_count(idev);
+
+ if (ihost->device_table[node_id] == idev) {
+ ihost->device_table[node_id] = NULL;
+
+ sci_remote_node_table_release_remote_node_index(
+ &ihost->available_remote_nodes, remote_node_count, node_id
+ );
+ }
+}
+
+void sci_controller_copy_sata_response(void *response_buffer,
+ void *frame_header,
+ void *frame_buffer)
+{
+ /* XXX type safety? */
+ memcpy(response_buffer, frame_header, sizeof(u32));
+
+ memcpy(response_buffer + sizeof(u32),
+ frame_buffer,
+ sizeof(struct dev_to_host_fis) - sizeof(u32));
+}
+
+void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
+{
+ if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
+ writel(ihost->uf_control.get,
+ &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
+}
+
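+/* Task context indices (TCIs) are managed as a circular pool: tci_head and
+ * tci_tail are free-running counters masked by SCI_MAX_IO_REQUESTS - 1, so
+ * SCI_MAX_IO_REQUESTS must be a power of two (see also CIRC_SPACE() below).
+ */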
+void isci_tci_free(struct isci_host *ihost, u16 tci)
+{
+ u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);
+
+ ihost->tci_pool[tail] = tci;
+ ihost->tci_tail = tail + 1;
+}
+
+static u16 isci_tci_alloc(struct isci_host *ihost)
+{
+ u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
+ u16 tci = ihost->tci_pool[head];
+
+ ihost->tci_head = head + 1;
+ return tci;
+}
+
+static u16 isci_tci_space(struct isci_host *ihost)
+{
+ return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
+}
+
+u16 isci_alloc_tag(struct isci_host *ihost)
+{
+ if (isci_tci_space(ihost)) {
+ u16 tci = isci_tci_alloc(ihost);
+ u8 seq = ihost->io_request_sequence[tci];
+
+ return ISCI_TAG(seq, tci);
+ }
+
+ return SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
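+/* Freeing a tag advances the per-TCI sequence number, so any stale copy of
+ * the old tag (same TCI, previous sequence) will no longer match in
+ * sci_request_by_tag().
+ */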
+enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
+{
+ u16 tci = ISCI_TAG_TCI(io_tag);
+ u16 seq = ISCI_TAG_SEQ(io_tag);
+
+ /* prevent tail from passing head */
+ if (isci_tci_active(ihost) == 0)
+ return SCI_FAILURE_INVALID_IO_TAG;
+
+ if (seq == ihost->io_request_sequence[tci]) {
+ ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);
+
+ isci_tci_free(ihost, tci);
+
+ return SCI_SUCCESS;
+ }
+ return SCI_FAILURE_INVALID_IO_TAG;
+}
+
+enum sci_status sci_controller_start_io(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_status status;
+
+ if (ihost->sm.current_state_id != SCIC_READY) {
+ dev_warn(&ihost->pdev->dev, "invalid state to start I/O");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ status = sci_remote_device_start_io(ihost, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ set_bit(IREQ_ACTIVE, &ireq->flags);
+ sci_controller_post_request(ihost, ireq->post_context);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ /* terminate an ongoing (i.e. started) core IO request. This does not
+ * abort the IO request at the target, but rather removes the IO
+ * request from the host controller.
+ */
+ enum sci_status status;
+
+ if (ihost->sm.current_state_id != SCIC_READY) {
+ dev_warn(&ihost->pdev->dev,
+ "invalid state to terminate request\n");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ status = sci_io_request_terminate(ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+	/*
+	 * Utilize the original post context command and OR in the
+	 * POST_TC_ABORT request sub-type.
+	 */
+ sci_controller_post_request(ihost,
+ ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_complete_io() - This method will perform core specific
+ * completion operations for an IO request. After this method is invoked,
+ * the user should consider the IO request as invalid until it is properly
+ * reused (i.e. re-constructed).
+ * @ihost: The handle to the controller object for which to complete the
+ * IO request.
+ * @idev: The handle to the remote device object for which to complete
+ * the IO request.
+ * @ireq: the handle to the io request object to complete.
+ */
+enum sci_status sci_controller_complete_io(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_status status;
+ u16 index;
+
+ switch (ihost->sm.current_state_id) {
+ case SCIC_STOPPING:
+ /* XXX: Implement this function */
+ return SCI_FAILURE;
+ case SCIC_READY:
+ status = sci_remote_device_complete_io(ihost, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ index = ISCI_TAG_TCI(ireq->io_tag);
+ clear_bit(IREQ_ACTIVE, &ireq->flags);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(&ihost->pdev->dev, "invalid state to complete I/O");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_controller_continue_io(struct isci_request *ireq)
+{
+ struct isci_host *ihost = ireq->owning_controller;
+
+ if (ihost->sm.current_state_id != SCIC_READY) {
+ dev_warn(&ihost->pdev->dev, "invalid state to continue I/O");
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ set_bit(IREQ_ACTIVE, &ireq->flags);
+ sci_controller_post_request(ihost, ireq->post_context);
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_controller_start_task() - This method is called by the SCIC user to
+ *    send/start a framework task management request.
+ * @ihost: the handle to the controller object for which to start the task
+ *    management request.
+ * @idev: the handle to the remote device object for which to start
+ *    the task management request.
+ * @ireq: the handle to the task request object to start.
+ */
+enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_status status;
+
+ if (ihost->sm.current_state_id != SCIC_READY) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC Controller starting task from invalid "
+ "state\n",
+ __func__);
+ return SCI_TASK_FAILURE_INVALID_STATE;
+ }
+
+ status = sci_remote_device_start_task(ihost, idev, ireq);
+ switch (status) {
+ case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
+ set_bit(IREQ_ACTIVE, &ireq->flags);
+
+		/*
+		 * We will let the framework know this task request started
+		 * successfully, although the core is still working on starting
+		 * the request (to post the TC when the RNC is resumed).
+		 */
+ return SCI_SUCCESS;
+ case SCI_SUCCESS:
+ set_bit(IREQ_ACTIVE, &ireq->flags);
+ sci_controller_post_request(ihost, ireq->post_context);
+ break;
+ default:
+ break;
+ }
+
+ return status;
+}
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
new file mode 100644
index 00000000000..062101a39f7
--- /dev/null
+++ b/drivers/scsi/isci/host.h
@@ -0,0 +1,542 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _SCI_HOST_H_
+#define _SCI_HOST_H_
+
+#include "remote_device.h"
+#include "phy.h"
+#include "isci.h"
+#include "remote_node_table.h"
+#include "registers.h"
+#include "unsolicited_frame_control.h"
+#include "probe_roms.h"
+
+struct isci_request;
+struct scu_task_context;
+
+/**
+ * struct sci_power_control -
+ *
+ * This structure defines the fields for managing power control for direct
+ * attached disk devices.
+ */
+struct sci_power_control {
+ /**
+ * This field is set when the power control timer is running and cleared when
+ * it is not.
+ */
+ bool timer_started;
+
+ /**
+	 * Timer to control when the direct attached disks can consume power.
+ */
+ struct sci_timer timer;
+
+ /**
+ * This field is used to keep track of how many phys are put into the
+ * requesters field.
+ */
+ u8 phys_waiting;
+
+ /**
+	 * This field is used to keep track of how many phys have been granted power
+ */
+ u8 phys_granted_power;
+
+ /**
+	 * This field is an array of phys that we are waiting on. The phys are
+	 * directly mapped into requesters via struct sci_phy.phy_index
+ */
+ struct isci_phy *requesters[SCI_MAX_PHYS];
+
+};
+
+struct sci_port_configuration_agent;
+typedef void (*port_config_fn)(struct isci_host *,
+ struct sci_port_configuration_agent *,
+ struct isci_port *, struct isci_phy *);
+
+struct sci_port_configuration_agent {
+ u16 phy_configured_mask;
+ u16 phy_ready_mask;
+ struct {
+ u8 min_index;
+ u8 max_index;
+ } phy_valid_port_range[SCI_MAX_PHYS];
+ bool timer_pending;
+ port_config_fn link_up_handler;
+ port_config_fn link_down_handler;
+ struct sci_timer timer;
+};
+
+/**
+ * isci_host - primary host/controller object
+ * @timer: timeout start/stop operations
+ * @device_table: rni (hw remote node index) to remote device lookup table
+ * @available_remote_nodes: rni allocator
+ * @power_control: manage device spin up
+ * @io_request_sequence: generation number for tci's (task contexts)
+ * @task_context_table: hw task context table
+ * @remote_node_context_table: hw remote node context table
+ * @completion_queue: hw-producer driver-consumer communication ring
+ * @completion_queue_get: tracks the driver 'head' of the ring to notify hw
+ * @logical_port_entries: min({driver|silicon}-supported-port-count)
+ * @remote_node_entries: min({driver|silicon}-supported-node-count)
+ * @task_context_entries: min({driver|silicon}-supported-task-count)
+ * @phy_timer: phy startup timer
+ * @invalid_phy_mask: if an invalid_link_up notification is reported a bit for
+ * the phy index is set so further notifications are not
+ * made. Once the phy reports link up and is made part of a
+ * port then this bit is cleared.
+ *
+ */
+struct isci_host {
+ struct sci_base_state_machine sm;
+ /* XXX can we time this externally */
+ struct sci_timer timer;
+ /* XXX drop reference module params directly */
+ struct sci_user_parameters user_parameters;
+ /* XXX no need to be a union */
+ struct sci_oem_params oem_parameters;
+ struct sci_port_configuration_agent port_agent;
+ struct isci_remote_device *device_table[SCI_MAX_REMOTE_DEVICES];
+ struct sci_remote_node_table available_remote_nodes;
+ struct sci_power_control power_control;
+ u8 io_request_sequence[SCI_MAX_IO_REQUESTS];
+ struct scu_task_context *task_context_table;
+ dma_addr_t task_context_dma;
+ union scu_remote_node_context *remote_node_context_table;
+ u32 *completion_queue;
+ u32 completion_queue_get;
+ u32 logical_port_entries;
+ u32 remote_node_entries;
+ u32 task_context_entries;
+ struct sci_unsolicited_frame_control uf_control;
+
+ /* phy startup */
+ struct sci_timer phy_timer;
+ /* XXX kill */
+ bool phy_startup_timer_pending;
+ u32 next_phy_to_start;
+ /* XXX convert to unsigned long and use bitops */
+ u8 invalid_phy_mask;
+
+ /* TODO attempt dynamic interrupt coalescing scheme */
+ u16 interrupt_coalesce_number;
+ u32 interrupt_coalesce_timeout;
+ struct smu_registers __iomem *smu_registers;
+ struct scu_registers __iomem *scu_registers;
+
+ u16 tci_head;
+ u16 tci_tail;
+ u16 tci_pool[SCI_MAX_IO_REQUESTS];
+
+ int id; /* unique within a given pci device */
+ struct isci_phy phys[SCI_MAX_PHYS];
+ struct isci_port ports[SCI_MAX_PORTS + 1]; /* includes dummy port */
+ struct sas_ha_struct sas_ha;
+
+ spinlock_t state_lock;
+ struct pci_dev *pdev;
+ enum isci_status status;
+ #define IHOST_START_PENDING 0
+ #define IHOST_STOP_PENDING 1
+ unsigned long flags;
+ wait_queue_head_t eventq;
+ struct Scsi_Host *shost;
+ struct tasklet_struct completion_tasklet;
+ struct list_head requests_to_complete;
+ struct list_head requests_to_errorback;
+ spinlock_t scic_lock;
+ struct isci_request *reqs[SCI_MAX_IO_REQUESTS];
+ struct isci_remote_device devices[SCI_MAX_REMOTE_DEVICES];
+};
+
+/**
+ * enum sci_controller_states - This enumeration depicts all the states
+ * for the common controller state machine.
+ */
+enum sci_controller_states {
+ /**
+ * Simply the initial state for the base controller state machine.
+ */
+ SCIC_INITIAL = 0,
+
+ /**
+ * This state indicates that the controller is reset. The memory for
+	 * the controller is in its initial state, but the controller requires
+ * initialization.
+ * This state is entered from the INITIAL state.
+ * This state is entered from the RESETTING state.
+ */
+ SCIC_RESET,
+
+ /**
+ * This state is typically an action state that indicates the controller
+ * is in the process of initialization. In this state no new IO operations
+ * are permitted.
+ * This state is entered from the RESET state.
+ */
+ SCIC_INITIALIZING,
+
+ /**
+ * This state indicates that the controller has been successfully
+ * initialized. In this state no new IO operations are permitted.
+ * This state is entered from the INITIALIZING state.
+ */
+ SCIC_INITIALIZED,
+
+ /**
+	 * This state indicates that the controller is in the process of becoming
+ * ready (i.e. starting). In this state no new IO operations are permitted.
+ * This state is entered from the INITIALIZED state.
+ */
+ SCIC_STARTING,
+
+ /**
+ * This state indicates the controller is now ready. Thus, the user
+ * is able to perform IO operations on the controller.
+ * This state is entered from the STARTING state.
+ */
+ SCIC_READY,
+
+ /**
+ * This state is typically an action state that indicates the controller
+ * is in the process of resetting. Thus, the user is unable to perform
+ * IO operations on the controller. A reset is considered destructive in
+ * most cases.
+ * This state is entered from the READY state.
+ * This state is entered from the FAILED state.
+ * This state is entered from the STOPPED state.
+ */
+ SCIC_RESETTING,
+
+ /**
+ * This state indicates that the controller is in the process of stopping.
+ * In this state no new IO operations are permitted, but existing IO
+ * operations are allowed to complete.
+ * This state is entered from the READY state.
+ */
+ SCIC_STOPPING,
+
+ /**
+ * This state indicates that the controller has successfully been stopped.
+ * In this state no new IO operations are permitted.
+ * This state is entered from the STOPPING state.
+ */
+ SCIC_STOPPED,
+
+ /**
+ * This state indicates that the controller could not successfully be
+ * initialized. In this state no new IO operations are permitted.
+ * This state is entered from the INITIALIZING state.
+ * This state is entered from the STARTING state.
+ * This state is entered from the STOPPING state.
+ * This state is entered from the RESETTING state.
+ */
+ SCIC_FAILED,
+};
+
+/**
+ * struct isci_pci_info - This class represents the pci function containing the
+ * controllers. Depending on PCI SKU, there could be up to 2 controllers in
+ * the PCI function.
+ */
+#define SCI_MAX_MSIX_INT (SCI_NUM_MSI_X_INT*SCI_MAX_CONTROLLERS)
+
+struct isci_pci_info {
+ struct msix_entry msix_entries[SCI_MAX_MSIX_INT];
+ struct isci_host *hosts[SCI_MAX_CONTROLLERS];
+ struct isci_orom *orom;
+};
+
+static inline struct isci_pci_info *to_pci_info(struct pci_dev *pdev)
+{
+ return pci_get_drvdata(pdev);
+}
+
+#define for_each_isci_host(id, ihost, pdev) \
+ for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
+ id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
+ ihost = to_pci_info(pdev)->hosts[++id])
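+
+/* Hypothetical usage sketch: visit every controller of a PCI function, e.g.
+ *
+ *	int i;
+ *	struct isci_host *ihost;
+ *
+ *	for_each_isci_host(i, ihost, pdev)
+ *		isci_host_deinit(ihost);
+ */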
+
+static inline enum isci_status isci_host_get_state(struct isci_host *isci_host)
+{
+ return isci_host->status;
+}
+
+static inline void isci_host_change_state(struct isci_host *isci_host,
+ enum isci_status status)
+{
+ unsigned long flags;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_host = %p, state = 0x%x",
+ __func__,
+ isci_host,
+ status);
+ spin_lock_irqsave(&isci_host->state_lock, flags);
+ isci_host->status = status;
+ spin_unlock_irqrestore(&isci_host->state_lock, flags);
+}
+
+static inline void wait_for_start(struct isci_host *ihost)
+{
+ wait_event(ihost->eventq, !test_bit(IHOST_START_PENDING, &ihost->flags));
+}
+
+static inline void wait_for_stop(struct isci_host *ihost)
+{
+ wait_event(ihost->eventq, !test_bit(IHOST_STOP_PENDING, &ihost->flags));
+}
+
+static inline void wait_for_device_start(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ wait_event(ihost->eventq, !test_bit(IDEV_START_PENDING, &idev->flags));
+}
+
+static inline void wait_for_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ wait_event(ihost->eventq, !test_bit(IDEV_STOP_PENDING, &idev->flags));
+}
+
+static inline struct isci_host *dev_to_ihost(struct domain_device *dev)
+{
+ return dev->port->ha->lldd_ha;
+}
+
+/* we always use protocol engine group zero */
+#define ISCI_PEG 0
+
+/* see sci_controller_io_tag_allocate|free for how seq and tci are built */
+#define ISCI_TAG(seq, tci) (((u16) (seq)) << 12 | tci)
+
+/* these are returned by the hardware, so sanitize them */
+#define ISCI_TAG_SEQ(tag) (((tag) >> 12) & (SCI_MAX_SEQ-1))
+#define ISCI_TAG_TCI(tag) ((tag) & (SCI_MAX_IO_REQUESTS-1))
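+
+/* Example, assuming a 4-bit sequence number (SCI_MAX_SEQ == 16) and a
+ * power-of-two SCI_MAX_IO_REQUESTS: ISCI_TAG(2, 5) yields 0x2005, from which
+ * ISCI_TAG_SEQ() recovers 2 and ISCI_TAG_TCI() recovers 5.
+ */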
+
+/* expander attached sata devices require 3 rnc slots */
+static inline int sci_remote_device_node_count(struct isci_remote_device *idev)
+{
+ struct domain_device *dev = idev->domain_dev;
+
+ if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
+ !idev->is_direct_attached)
+ return SCU_STP_REMOTE_NODE_COUNT;
+ return SCU_SSP_REMOTE_NODE_COUNT;
+}
+
+/**
+ * sci_controller_clear_invalid_phy() -
+ *
+ * This macro will clear the bit in the invalid phy mask for this controller
+ * object. This is used to control messages reported for invalid link up
+ * notifications.
+ */
+#define sci_controller_clear_invalid_phy(controller, phy) \
+ ((controller)->invalid_phy_mask &= ~(1 << (phy)->phy_index))
+
+static inline struct device *sciphy_to_dev(struct isci_phy *iphy)
+{
+ if (!iphy || !iphy->isci_port || !iphy->isci_port->isci_host)
+ return NULL;
+
+ return &iphy->isci_port->isci_host->pdev->dev;
+}
+
+static inline struct device *sciport_to_dev(struct isci_port *iport)
+{
+ if (!iport || !iport->isci_host)
+ return NULL;
+
+ return &iport->isci_host->pdev->dev;
+}
+
+static inline struct device *scirdev_to_dev(struct isci_remote_device *idev)
+{
+ if (!idev || !idev->isci_port || !idev->isci_port->isci_host)
+ return NULL;
+
+ return &idev->isci_port->isci_host->pdev->dev;
+}
+
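+/* Silicon revision checks, as encoded in the PCI revision id: A2 parts
+ * report a revision below 4, B0 parts report exactly 4, and C0 (and later)
+ * parts report 5 or greater.
+ */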
+static inline bool is_a2(struct pci_dev *pdev)
+{
+	return pdev->revision < 4;
+}
+
+static inline bool is_b0(struct pci_dev *pdev)
+{
+	return pdev->revision == 4;
+}
+
+static inline bool is_c0(struct pci_dev *pdev)
+{
+	return pdev->revision >= 5;
+}
+
+void sci_controller_post_request(struct isci_host *ihost,
+ u32 request);
+void sci_controller_release_frame(struct isci_host *ihost,
+ u32 frame_index);
+void sci_controller_copy_sata_response(void *response_buffer,
+ void *frame_header,
+ void *frame_buffer);
+enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 *node_id);
+void sci_controller_free_remote_node_context(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 node_id);
+
+struct isci_request *sci_request_by_tag(struct isci_host *ihost,
+ u16 io_tag);
+
+void sci_controller_power_control_queue_insert(
+ struct isci_host *ihost,
+ struct isci_phy *iphy);
+
+void sci_controller_power_control_queue_remove(
+ struct isci_host *ihost,
+ struct isci_phy *iphy);
+
+void sci_controller_link_up(
+ struct isci_host *ihost,
+ struct isci_port *iport,
+ struct isci_phy *iphy);
+
+void sci_controller_link_down(
+ struct isci_host *ihost,
+ struct isci_port *iport,
+ struct isci_phy *iphy);
+
+void sci_controller_remote_device_stopped(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev);
+
+void sci_controller_copy_task_context(
+ struct isci_host *ihost,
+ struct isci_request *ireq);
+
+void sci_controller_register_setup(struct isci_host *ihost);
+
+enum sci_status sci_controller_continue_io(struct isci_request *ireq);
+int isci_host_scan_finished(struct Scsi_Host *, unsigned long);
+void isci_host_scan_start(struct Scsi_Host *);
+u16 isci_alloc_tag(struct isci_host *ihost);
+enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag);
+void isci_tci_free(struct isci_host *ihost, u16 tci);
+
+int isci_host_init(struct isci_host *);
+
+void isci_host_init_controller_names(
+ struct isci_host *isci_host,
+ unsigned int controller_idx);
+
+void isci_host_deinit(
+ struct isci_host *);
+
+void isci_host_port_link_up(
+ struct isci_host *,
+ struct isci_port *,
+ struct isci_phy *);
+int isci_host_dev_found(struct domain_device *);
+
+void isci_host_remote_device_start_complete(
+ struct isci_host *,
+ struct isci_remote_device *,
+ enum sci_status);
+
+void sci_controller_disable_interrupts(
+ struct isci_host *ihost);
+
+enum sci_status sci_controller_start_io(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_task_status sci_controller_start_task(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_controller_terminate_request(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_controller_complete_io(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+void sci_port_configuration_agent_construct(
+ struct sci_port_configuration_agent *port_agent);
+
+enum sci_status sci_port_configuration_agent_initialize(
+ struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent);
+#endif
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
new file mode 100644
index 00000000000..61e0d09e2b5
--- /dev/null
+++ b/drivers/scsi/isci/init.c
@@ -0,0 +1,565 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/firmware.h>
+#include <linux/efi.h>
+#include <asm/string.h>
+#include "isci.h"
+#include "task.h"
+#include "probe_roms.h"
+
+static struct scsi_transport_template *isci_transport_template;
+
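+/* PCI IDs for the SAS functions of the Intel(R) C600 series chipset */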
+static DEFINE_PCI_DEVICE_TABLE(isci_id_table) = {
+ { PCI_VDEVICE(INTEL, 0x1D61),},
+ { PCI_VDEVICE(INTEL, 0x1D63),},
+ { PCI_VDEVICE(INTEL, 0x1D65),},
+ { PCI_VDEVICE(INTEL, 0x1D67),},
+ { PCI_VDEVICE(INTEL, 0x1D69),},
+ { PCI_VDEVICE(INTEL, 0x1D6B),},
+ { PCI_VDEVICE(INTEL, 0x1D60),},
+ { PCI_VDEVICE(INTEL, 0x1D62),},
+ { PCI_VDEVICE(INTEL, 0x1D64),},
+ { PCI_VDEVICE(INTEL, 0x1D66),},
+ { PCI_VDEVICE(INTEL, 0x1D68),},
+ { PCI_VDEVICE(INTEL, 0x1D6A),},
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, isci_id_table);
+
+/* linux isci specific settings */
+
+unsigned char no_outbound_task_to = 20;
+module_param(no_outbound_task_to, byte, 0);
+MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
+
+u16 ssp_max_occ_to = 20;
+module_param(ssp_max_occ_to, ushort, 0);
+MODULE_PARM_DESC(ssp_max_occ_to, "SSP Max occupancy timeout (100us incr)");
+
+u16 stp_max_occ_to = 5;
+module_param(stp_max_occ_to, ushort, 0);
+MODULE_PARM_DESC(stp_max_occ_to, "STP Max occupancy timeout (100us incr)");
+
+u16 ssp_inactive_to = 5;
+module_param(ssp_inactive_to, ushort, 0);
+MODULE_PARM_DESC(ssp_inactive_to, "SSP inactivity timeout (100us incr)");
+
+u16 stp_inactive_to = 5;
+module_param(stp_inactive_to, ushort, 0);
+MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
+
+unsigned char phy_gen = 3;
+module_param(phy_gen, byte, 0);
+MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
+
+unsigned char max_concurr_spinup = 1;
+module_param(max_concurr_spinup, byte, 0);
+MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
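+
+/*
+ * All of the above can be overridden when the module is loaded, e.g.
+ * (illustrative values): modprobe isci phy_gen=2 max_concurr_spinup=4
+ */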
+
+static struct scsi_host_template isci_sht = {
+
+ .module = THIS_MODULE,
+ .name = DRV_NAME,
+ .proc_name = DRV_NAME,
+ .queuecommand = sas_queuecommand,
+ .target_alloc = sas_target_alloc,
+ .slave_configure = sas_slave_configure,
+ .slave_destroy = sas_slave_destroy,
+ .scan_finished = isci_host_scan_finished,
+ .scan_start = isci_host_scan_start,
+ .change_queue_depth = sas_change_queue_depth,
+ .change_queue_type = sas_change_queue_type,
+ .bios_param = sas_bios_param,
+ .can_queue = ISCI_CAN_QUEUE_VAL,
+ .cmd_per_lun = 1,
+ .this_id = -1,
+ .sg_tablesize = SG_ALL,
+ .max_sectors = SCSI_DEFAULT_MAX_SECTORS,
+ .use_clustering = ENABLE_CLUSTERING,
+ .eh_device_reset_handler = sas_eh_device_reset_handler,
+ .eh_bus_reset_handler = isci_bus_reset_handler,
+ .slave_alloc = sas_slave_alloc,
+ .target_destroy = sas_target_destroy,
+ .ioctl = sas_ioctl,
+};
+
+static struct sas_domain_function_template isci_transport_ops = {
+
+ /* The class calls these to notify the LLDD of an event. */
+ .lldd_port_formed = isci_port_formed,
+ .lldd_port_deformed = isci_port_deformed,
+
+ /* The class calls these when a device is found or gone. */
+ .lldd_dev_found = isci_remote_device_found,
+ .lldd_dev_gone = isci_remote_device_gone,
+
+ .lldd_execute_task = isci_task_execute_task,
+ /* Task Management Functions. Must be called from process context. */
+ .lldd_abort_task = isci_task_abort_task,
+ .lldd_abort_task_set = isci_task_abort_task_set,
+ .lldd_clear_aca = isci_task_clear_aca,
+ .lldd_clear_task_set = isci_task_clear_task_set,
+ .lldd_I_T_nexus_reset = isci_task_I_T_nexus_reset,
+ .lldd_lu_reset = isci_task_lu_reset,
+ .lldd_query_task = isci_task_query_task,
+
+ /* Port and Adapter management */
+ .lldd_clear_nexus_port = isci_task_clear_nexus_port,
+ .lldd_clear_nexus_ha = isci_task_clear_nexus_ha,
+
+ /* Phy management */
+ .lldd_control_phy = isci_phy_control,
+};
+
+
+/******************************************************************************
+* P R O T E C T E D M E T H O D S
+******************************************************************************/
+
+
+
+/**
+ * isci_register_sas_ha() - This method initializes various lldd
+ * specific members of the sas_ha struct and calls the libsas
+ * sas_register_ha() function.
+ * @isci_host: This parameter specifies the lldd specific wrapper for the
+ * libsas sas_ha struct.
+ *
+ * This method returns an error code indicating success or failure. The
+ * caller should check for a possible memory allocation error (-ENOMEM);
+ * otherwise, zero indicates success.
+ */
+static int isci_register_sas_ha(struct isci_host *isci_host)
+{
+ int i;
+ struct sas_ha_struct *sas_ha = &(isci_host->sas_ha);
+ struct asd_sas_phy **sas_phys;
+ struct asd_sas_port **sas_ports;
+
+ sas_phys = devm_kzalloc(&isci_host->pdev->dev,
+ SCI_MAX_PHYS * sizeof(void *),
+ GFP_KERNEL);
+ if (!sas_phys)
+ return -ENOMEM;
+
+ sas_ports = devm_kzalloc(&isci_host->pdev->dev,
+ SCI_MAX_PORTS * sizeof(void *),
+ GFP_KERNEL);
+ if (!sas_ports)
+ return -ENOMEM;
+
+	/*---------------- Libsas Initialization Stuff ----------------------
+ * Set various fields in the sas_ha struct:
+ */
+
+ sas_ha->sas_ha_name = DRV_NAME;
+ sas_ha->lldd_module = THIS_MODULE;
+ sas_ha->sas_addr = &isci_host->phys[0].sas_addr[0];
+
+ /* set the array of phy and port structs. */
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ sas_phys[i] = &isci_host->phys[i].sas_phy;
+ sas_ports[i] = &isci_host->ports[i].sas_port;
+ }
+
+ sas_ha->sas_phy = sas_phys;
+ sas_ha->sas_port = sas_ports;
+ sas_ha->num_phys = SCI_MAX_PHYS;
+
+ sas_ha->lldd_queue_size = ISCI_CAN_QUEUE_VAL;
+ sas_ha->lldd_max_execute_num = 1;
+ sas_ha->strict_wide_ports = 1;
+
+ sas_register_ha(sas_ha);
+
+ return 0;
+}
+
+static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
+ struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
+ struct isci_host *ihost = container_of(sas_ha, typeof(*ihost), sas_ha);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ihost->id);
+}
+
+static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
+
+static void isci_unregister(struct isci_host *isci_host)
+{
+ struct Scsi_Host *shost;
+
+ if (!isci_host)
+ return;
+
+ shost = isci_host->shost;
+ device_remove_file(&shost->shost_dev, &dev_attr_isci_id);
+
+ sas_unregister_ha(&isci_host->sas_ha);
+
+ sas_remove_host(isci_host->shost);
+ scsi_remove_host(isci_host->shost);
+ scsi_host_put(isci_host->shost);
+}
+
+static int __devinit isci_pci_init(struct pci_dev *pdev)
+{
+ int err, bar_num, bar_mask = 0;
+ void __iomem * const *iomap;
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "failed enable PCI device %s!\n",
+ pci_name(pdev));
+ return err;
+ }
+
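+	/* The SMU/SCU memory spaces sit at the even BAR indices (0, 2),
+	 * presumably because each is a 64-bit BAR occupying two slots.
+	 */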
+ for (bar_num = 0; bar_num < SCI_PCI_BAR_COUNT; bar_num++)
+ bar_mask |= 1 << (bar_num * 2);
+
+ err = pcim_iomap_regions(pdev, bar_mask, DRV_NAME);
+ if (err)
+ return err;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return -ENOMEM;
+
+ pci_set_master(pdev);
+
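+	/* Prefer 64-bit DMA masks and fall back to 32-bit if the
+	 * platform cannot satisfy the larger mask.
+	 */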
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (err) {
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int num_controllers(struct pci_dev *pdev)
+{
+	/* BAR size alone can tell us if we are running with a dual controller
+	 * part; no need to trust revision ids that might be under broken
+	 * firmware control
+ */
+ resource_size_t scu_bar_size = pci_resource_len(pdev, SCI_SCU_BAR*2);
+ resource_size_t smu_bar_size = pci_resource_len(pdev, SCI_SMU_BAR*2);
+
+ if (scu_bar_size >= SCI_SCU_BAR_SIZE*SCI_MAX_CONTROLLERS &&
+ smu_bar_size >= SCI_SMU_BAR_SIZE*SCI_MAX_CONTROLLERS)
+ return SCI_MAX_CONTROLLERS;
+ else
+ return 1;
+}
+
+static int isci_setup_interrupts(struct pci_dev *pdev)
+{
+ int err, i, num_msix;
+ struct isci_host *ihost;
+ struct isci_pci_info *pci_info = to_pci_info(pdev);
+
+ /*
+ * Determine the number of vectors associated with this
+ * PCI function.
+ */
+ num_msix = num_controllers(pdev) * SCI_NUM_MSI_X_INT;
+
+ for (i = 0; i < num_msix; i++)
+ pci_info->msix_entries[i].entry = i;
+
+ err = pci_enable_msix(pdev, pci_info->msix_entries, num_msix);
+ if (err)
+ goto intx;
+
+ for (i = 0; i < num_msix; i++) {
+ int id = i / SCI_NUM_MSI_X_INT;
+ struct msix_entry *msix = &pci_info->msix_entries[i];
+ irq_handler_t isr;
+
+ ihost = pci_info->hosts[id];
+ /* odd numbered vectors are error interrupts */
+ if (i & 1)
+ isr = isci_error_isr;
+ else
+ isr = isci_msix_isr;
+
+ err = devm_request_irq(&pdev->dev, msix->vector, isr, 0,
+ DRV_NAME"-msix", ihost);
+ if (!err)
+ continue;
+
+ dev_info(&pdev->dev, "msix setup failed falling back to intx\n");
+ while (i--) {
+ id = i / SCI_NUM_MSI_X_INT;
+ ihost = pci_info->hosts[id];
+ msix = &pci_info->msix_entries[i];
+ devm_free_irq(&pdev->dev, msix->vector, ihost);
+ }
+ pci_disable_msix(pdev);
+ goto intx;
+ }
+ return 0;
+
+ intx:
+ for_each_isci_host(i, ihost, pdev) {
+ err = devm_request_irq(&pdev->dev, pdev->irq, isci_intx_isr,
+ IRQF_SHARED, DRV_NAME"-intx", ihost);
+ if (err)
+ break;
+ }
+ return err;
+}
+
+static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
+{
+ struct isci_host *isci_host;
+ struct Scsi_Host *shost;
+ int err;
+
+ isci_host = devm_kzalloc(&pdev->dev, sizeof(*isci_host), GFP_KERNEL);
+ if (!isci_host)
+ return NULL;
+
+ isci_host->pdev = pdev;
+ isci_host->id = id;
+
+ shost = scsi_host_alloc(&isci_sht, sizeof(void *));
+ if (!shost)
+ return NULL;
+ isci_host->shost = shost;
+
+ err = isci_host_init(isci_host);
+ if (err)
+ goto err_shost;
+
+ SHOST_TO_SAS_HA(shost) = &isci_host->sas_ha;
+ isci_host->sas_ha.core.shost = shost;
+ shost->transportt = isci_transport_template;
+
+ shost->max_id = ~0;
+ shost->max_lun = ~0;
+ shost->max_cmd_len = MAX_COMMAND_SIZE;
+
+ err = scsi_add_host(shost, &pdev->dev);
+ if (err)
+ goto err_shost;
+
+ err = isci_register_sas_ha(isci_host);
+ if (err)
+ goto err_shost_remove;
+
+ err = device_create_file(&shost->shost_dev, &dev_attr_isci_id);
+ if (err)
+ goto err_unregister_ha;
+
+ return isci_host;
+
+ err_unregister_ha:
+ sas_unregister_ha(&(isci_host->sas_ha));
+ err_shost_remove:
+ scsi_remove_host(shost);
+ err_shost:
+ scsi_host_put(shost);
+
+ return NULL;
+}
+
+static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct isci_pci_info *pci_info;
+ int err, i;
+ struct isci_host *isci_host;
+ const struct firmware *fw = NULL;
+ struct isci_orom *orom = NULL;
+ char *source = "(platform)";
+
+ dev_info(&pdev->dev, "driver configured for rev: %d silicon\n",
+ pdev->revision);
+
+ pci_info = devm_kzalloc(&pdev->dev, sizeof(*pci_info), GFP_KERNEL);
+ if (!pci_info)
+ return -ENOMEM;
+ pci_set_drvdata(pdev, pci_info);
+
+ if (efi_enabled)
+ orom = isci_get_efi_var(pdev);
+
+ if (!orom)
+ orom = isci_request_oprom(pdev);
+
+ for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
+ if (sci_oem_parameters_validate(&orom->ctrl[i])) {
+ dev_warn(&pdev->dev,
+ "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
+ devm_kfree(&pdev->dev, orom);
+ orom = NULL;
+ break;
+ }
+ }
+
+ if (!orom) {
+ source = "(firmware)";
+ orom = isci_request_firmware(pdev, fw);
+ if (!orom) {
+ /* TODO convert this to WARN_TAINT_ONCE once the
+ * orom/efi parameter support is widely available
+ */
+ dev_warn(&pdev->dev,
+ "Loading user firmware failed, using default "
+ "values\n");
+ dev_warn(&pdev->dev,
+ "Default OEM configuration being used: 4 "
+ "narrow ports, and default SAS Addresses\n");
+ }
+ }
+
+ if (orom)
+ dev_info(&pdev->dev,
+ "OEM SAS parameters (version: %u.%u) loaded %s\n",
+ (orom->hdr.version & 0xf0) >> 4,
+ (orom->hdr.version & 0xf), source);
+
+ pci_info->orom = orom;
+
+ err = isci_pci_init(pdev);
+ if (err)
+ return err;
+
+ for (i = 0; i < num_controllers(pdev); i++) {
+ struct isci_host *h = isci_host_alloc(pdev, i);
+
+ if (!h) {
+ err = -ENOMEM;
+ goto err_host_alloc;
+ }
+ pci_info->hosts[i] = h;
+ }
+
+ err = isci_setup_interrupts(pdev);
+ if (err)
+ goto err_host_alloc;
+
+ for_each_isci_host(i, isci_host, pdev)
+ scsi_scan_host(isci_host->shost);
+
+ return 0;
+
+ err_host_alloc:
+ for_each_isci_host(i, isci_host, pdev)
+ isci_unregister(isci_host);
+ return err;
+}
+
+static void __devexit isci_pci_remove(struct pci_dev *pdev)
+{
+ struct isci_host *ihost;
+ int i;
+
+ for_each_isci_host(i, ihost, pdev) {
+ isci_unregister(ihost);
+ isci_host_deinit(ihost);
+ sci_controller_disable_interrupts(ihost);
+ }
+}
+
+static struct pci_driver isci_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = isci_id_table,
+ .probe = isci_pci_probe,
+ .remove = __devexit_p(isci_pci_remove),
+};
+
+static __init int isci_init(void)
+{
+ int err;
+
+ pr_info("%s: Intel(R) C600 SAS Controller Driver\n", DRV_NAME);
+
+ isci_transport_template = sas_domain_attach_transport(&isci_transport_ops);
+ if (!isci_transport_template)
+ return -ENOMEM;
+
+ err = pci_register_driver(&isci_pci_driver);
+ if (err)
+ sas_release_transport(isci_transport_template);
+
+ return err;
+}
+
+static __exit void isci_exit(void)
+{
+ pci_unregister_driver(&isci_pci_driver);
+ sas_release_transport(isci_transport_template);
+}
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_FIRMWARE(ISCI_FW_NAME);
+module_init(isci_init);
+module_exit(isci_exit);
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
new file mode 100644
index 00000000000..d1de63312e7
--- /dev/null
+++ b/drivers/scsi/isci/isci.h
@@ -0,0 +1,538 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __ISCI_H__
+#define __ISCI_H__
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+
+#define DRV_NAME "isci"
+#define SCI_PCI_BAR_COUNT 2
+#define SCI_NUM_MSI_X_INT 2
+#define SCI_SMU_BAR 0
+#define SCI_SMU_BAR_SIZE (16*1024)
+#define SCI_SCU_BAR 1
+#define SCI_SCU_BAR_SIZE (4*1024*1024)
+#define SCI_IO_SPACE_BAR0 2
+#define SCI_IO_SPACE_BAR1 3
+#define ISCI_CAN_QUEUE_VAL 250 /* < SCI_MAX_IO_REQUESTS ? */
+#define SCIC_CONTROLLER_STOP_TIMEOUT 5000
+
+#define SCI_CONTROLLER_INVALID_IO_TAG 0xFFFF
+
+#define SCI_MAX_PHYS (4UL)
+#define SCI_MAX_PORTS SCI_MAX_PHYS
+#define SCI_MAX_SMP_PHYS (384) /* not silicon constrained */
+#define SCI_MAX_REMOTE_DEVICES (256UL)
+#define SCI_MAX_IO_REQUESTS (256UL)
+#define SCI_MAX_SEQ (16)
+#define SCI_MAX_MSIX_MESSAGES (2)
+#define SCI_MAX_SCATTER_GATHER_ELEMENTS 130 /* not silicon constrained */
+#define SCI_MAX_CONTROLLERS 2
+#define SCI_MAX_DOMAINS SCI_MAX_PORTS
+
+#define SCU_MAX_CRITICAL_NOTIFICATIONS (384)
+#define SCU_MAX_EVENTS_SHIFT (7)
+#define SCU_MAX_EVENTS (1 << SCU_MAX_EVENTS_SHIFT)
+#define SCU_MAX_UNSOLICITED_FRAMES (128)
+#define SCU_MAX_COMPLETION_QUEUE_SCRATCH (128)
+#define SCU_MAX_COMPLETION_QUEUE_ENTRIES (SCU_MAX_CRITICAL_NOTIFICATIONS \
+ + SCU_MAX_EVENTS \
+ + SCU_MAX_UNSOLICITED_FRAMES \
+ + SCI_MAX_IO_REQUESTS \
+ + SCU_MAX_COMPLETION_QUEUE_SCRATCH)
+#define SCU_MAX_COMPLETION_QUEUE_SHIFT (ilog2(SCU_MAX_COMPLETION_QUEUE_ENTRIES))
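+/* With the values above: 384 + 128 + 128 + 256 + 128 = 1024 completion
+ * queue entries, i.e. a queue shift of 10.
+ */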
+
+#define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES (4096)
+#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE (1024)
+#define SCU_INVALID_FRAME_INDEX (0xFFFF)
+
+#define SCU_IO_REQUEST_MAX_SGE_SIZE (0x00FFFFFF)
+#define SCU_IO_REQUEST_MAX_TRANSFER_LENGTH (0x00FFFFFF)
+
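+/*
+ * BUILD_BUG_ON() may only be used at function scope, so the invariants on
+ * the constants above are validated inside this helper; it emits no code.
+ */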
+static inline void check_sizes(void)
+{
+ BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_EVENTS);
+ BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES <= 8);
+ BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_UNSOLICITED_FRAMES);
+ BUILD_BUG_ON_NOT_POWER_OF_2(SCU_MAX_COMPLETION_QUEUE_ENTRIES);
+ BUILD_BUG_ON(SCU_MAX_UNSOLICITED_FRAMES > SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES);
+ BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_IO_REQUESTS);
+ BUILD_BUG_ON_NOT_POWER_OF_2(SCI_MAX_SEQ);
+}
+
+/**
+ * enum sci_status - This is the general return status enumeration for non-IO,
+ * non-task management related SCI interface methods.
+ */
+enum sci_status {
+ /**
+ * This member indicates successful completion.
+ */
+ SCI_SUCCESS = 0,
+
+ /**
+ * This value indicates that the calling method completed successfully,
+	 * but that the IO may have completed before having its start method
+ * invoked. This occurs during SAT translation for requests that do
+ * not require an IO to the target or for any other requests that may
+ * be completed without having to submit IO.
+ */
+ SCI_SUCCESS_IO_COMPLETE_BEFORE_START,
+
+ /**
+	 * This value indicates that the SCU hardware returned an early response
+	 * because the IO request specified more data than is returned by the
+	 * target device (mode pages, inquiry data, etc.). The completion routine
+ * will handle this case to get the actual number of bytes transferred.
+ */
+ SCI_SUCCESS_IO_DONE_EARLY,
+
+ /**
+ * This member indicates that the object for which a state change is
+ * being requested is already in said state.
+ */
+ SCI_WARNING_ALREADY_IN_STATE,
+
+ /**
+	 * This member indicates that the interrupt coalescence timer may cause
+	 * SAS specification compliance issues (i.e. SMP target mode response
+ * frames must be returned within 1.9 milliseconds).
+ */
+ SCI_WARNING_TIMER_CONFLICT,
+
+ /**
+	 * This field indicates that a sequence of actions is not yet complete.
+	 * Mostly, this status is used when multiple ATA commands are needed in
+	 * a SATI translation.
+ */
+ SCI_WARNING_SEQUENCE_INCOMPLETE,
+
+ /**
+ * This member indicates that there was a general failure.
+ */
+ SCI_FAILURE,
+
+ /**
+ * This member indicates that the SCI implementation is unable to complete
+	 * an operation due to a critical flaw that prevents any further operation
+ * (i.e. an invalid pointer).
+ */
+ SCI_FATAL_ERROR,
+
+ /**
+	 * This member indicates the calling function failed, because the
+	 * controller is in a state that prevents successful completion.
+ */
+ SCI_FAILURE_INVALID_STATE,
+
+ /**
+	 * This member indicates the calling function failed, because there are
+ * insufficient resources/memory to complete the request.
+ */
+ SCI_FAILURE_INSUFFICIENT_RESOURCES,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * controller object required for the operation can't be located.
+ */
+ SCI_FAILURE_CONTROLLER_NOT_FOUND,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * discovered controller type is not supported by the library.
+ */
+ SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * requested initialization data version isn't supported.
+ */
+ SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * requested configuration of SAS Phys into SAS Ports is not supported.
+ */
+ SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * requested protocol is not supported by the remote device, port,
+ * or controller.
+ */
+ SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * requested information type is not supported by the SCI implementation.
+ */
+ SCI_FAILURE_UNSUPPORTED_INFORMATION_TYPE,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * device already exists.
+ */
+ SCI_FAILURE_DEVICE_EXISTS,
+
+ /**
+ * This member indicates the calling function failed, because adding
+ * a phy to the object is not possible.
+ */
+ SCI_FAILURE_ADDING_PHY_UNSUPPORTED,
+
+ /**
+ * This member indicates the calling function failed, because the
+ * requested information type is not supported by the SCI implementation.
+ */
+ SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD,
+
+ /**
+ * This member indicates the calling function failed, because the SCI
+ * implementation does not support the supplied time limit.
+ */
+ SCI_FAILURE_UNSUPPORTED_TIME_LIMIT,
+
+ /**
+ * This member indicates the calling method failed, because the SCI
+ * implementation does not contain the specified Phy.
+ */
+ SCI_FAILURE_INVALID_PHY,
+
+ /**
+ * This member indicates the calling method failed, because the SCI
+ * implementation does not contain the specified Port.
+ */
+ SCI_FAILURE_INVALID_PORT,
+
+ /**
+	 * This member indicates the calling method was partly successful.
+	 * The port was reset, but not all phys in the port are operational.
+ */
+ SCI_FAILURE_RESET_PORT_PARTIAL_SUCCESS,
+
+ /**
+	 * This member indicates that the calling method failed.
+	 * The port reset did not complete because none of the phys are
+	 * operational.
+ */
+ SCI_FAILURE_RESET_PORT_FAILURE,
+
+ /**
+ * This member indicates the calling method failed, because the SCI
+ * implementation does not contain the specified remote device.
+ */
+ SCI_FAILURE_INVALID_REMOTE_DEVICE,
+
+ /**
+ * This member indicates the calling method failed, because the remote
+ * device is in a bad state and requires a reset.
+ */
+ SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+
+ /**
+ * This member indicates the calling method failed, because the SCI
+ * implementation does not contain or support the specified IO tag.
+ */
+ SCI_FAILURE_INVALID_IO_TAG,
+
+ /**
+ * This member indicates that the operation failed and the user should
+ * check the response data associated with the IO.
+ */
+ SCI_FAILURE_IO_RESPONSE_VALID,
+
+ /**
+ * This member indicates that the operation failed, the failure is
+ * controller implementation specific, and the response data associated
+ * with the request is not valid. You can query for the controller
+ * specific error information via sci_controller_get_request_status()
+ */
+ SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+
+ /**
+	 * This member indicates that the operation failed because the
+ * user requested this IO to be terminated.
+ */
+ SCI_FAILURE_IO_TERMINATED,
+
+ /**
+ * This member indicates that the operation failed and the associated
+ * request requires a SCSI abort task to be sent to the target.
+ */
+ SCI_FAILURE_IO_REQUIRES_SCSI_ABORT,
+
+ /**
+ * This member indicates that the operation failed because the supplied
+ * device could not be located.
+ */
+ SCI_FAILURE_DEVICE_NOT_FOUND,
+
+ /**
+ * This member indicates that the operation failed because the
+	 * object's association is required and is not correctly set.
+ */
+ SCI_FAILURE_INVALID_ASSOCIATION,
+
+ /**
+ * This member indicates that the operation failed, because a timeout
+ * occurred.
+ */
+ SCI_FAILURE_TIMEOUT,
+
+ /**
+ * This member indicates that the operation failed, because the user
+ * specified a value that is either invalid or not supported.
+ */
+ SCI_FAILURE_INVALID_PARAMETER_VALUE,
+
+ /**
+ * This value indicates that the operation failed, because the number
+ * of messages (MSI-X) is not supported.
+ */
+ SCI_FAILURE_UNSUPPORTED_MESSAGE_COUNT,
+
+ /**
+ * This value indicates that the method failed due to a lack of
+ * available NCQ tags.
+ */
+ SCI_FAILURE_NO_NCQ_TAG_AVAILABLE,
+
+ /**
+ * This value indicates that a protocol violation has occurred on the
+ * link.
+ */
+ SCI_FAILURE_PROTOCOL_VIOLATION,
+
+ /**
+ * This value indicates a failure condition that retry may help to clear.
+ */
+ SCI_FAILURE_RETRY_REQUIRED,
+
+ /**
+	 * This field indicates that the retry limit was reached when a retry
+	 * was attempted.
+ */
+ SCI_FAILURE_RETRY_LIMIT_REACHED,
+
+ /**
+ * This member indicates the calling method was partly successful.
+	 * Mostly, this status is used when a LUN_RESET issued to an
+	 * expander-attached STP device in the READY NCQ substate needs to have
+	 * the RNC suspended/resumed before posting the TC.
+ */
+ SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS,
+
+ /**
+	 * This field indicates an illegal phy connection based on the routing
+	 * attributes of the two expander phys attached to each other.
+ */
+ SCI_FAILURE_ILLEGAL_ROUTING_ATTRIBUTE_CONFIGURATION,
+
+ /**
+	 * This field indicates that a CONFIG ROUTE INFO command received a
+	 * response with the function result INDEX DOES NOT EXIST, which usually
+	 * means the maximum route index was exceeded.
+ */
+ SCI_FAILURE_EXCEED_MAX_ROUTE_INDEX,
+
+ /**
+ * This value indicates that an unsupported PCI device ID has been
+ * specified. This indicates that attempts to invoke
+ * sci_library_allocate_controller() will fail.
+ */
+ SCI_FAILURE_UNSUPPORTED_PCI_DEVICE_ID
+
+};
+
+/**
+ * enum sci_io_status - This enumeration depicts all of the possible IO
+ * completion status values. Each value in this enumeration maps directly
+ * to a value in the enum sci_status enumeration. Please refer to that
+ * enumeration for detailed comments concerning what the status represents.
+ *
+ * TODO: Add the API to retrieve the SCU status from the core. Check that
+ * the following statuses are properly handled:
+ * - SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL
+ * - SCI_IO_FAILURE_INVALID_IO_TAG
+ */
+enum sci_io_status {
+ SCI_IO_SUCCESS = SCI_SUCCESS,
+ SCI_IO_FAILURE = SCI_FAILURE,
+ SCI_IO_SUCCESS_COMPLETE_BEFORE_START = SCI_SUCCESS_IO_COMPLETE_BEFORE_START,
+ SCI_IO_SUCCESS_IO_DONE_EARLY = SCI_SUCCESS_IO_DONE_EARLY,
+ SCI_IO_FAILURE_INVALID_STATE = SCI_FAILURE_INVALID_STATE,
+ SCI_IO_FAILURE_INSUFFICIENT_RESOURCES = SCI_FAILURE_INSUFFICIENT_RESOURCES,
+ SCI_IO_FAILURE_UNSUPPORTED_PROTOCOL = SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+ SCI_IO_FAILURE_RESPONSE_VALID = SCI_FAILURE_IO_RESPONSE_VALID,
+ SCI_IO_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+ SCI_IO_FAILURE_TERMINATED = SCI_FAILURE_IO_TERMINATED,
+ SCI_IO_FAILURE_REQUIRES_SCSI_ABORT = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT,
+ SCI_IO_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE,
+ SCI_IO_FAILURE_NO_NCQ_TAG_AVAILABLE = SCI_FAILURE_NO_NCQ_TAG_AVAILABLE,
+ SCI_IO_FAILURE_PROTOCOL_VIOLATION = SCI_FAILURE_PROTOCOL_VIOLATION,
+
+ SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+
+ SCI_IO_FAILURE_RETRY_REQUIRED = SCI_FAILURE_RETRY_REQUIRED,
+ SCI_IO_FAILURE_RETRY_LIMIT_REACHED = SCI_FAILURE_RETRY_LIMIT_REACHED,
+ SCI_IO_FAILURE_INVALID_REMOTE_DEVICE = SCI_FAILURE_INVALID_REMOTE_DEVICE
+};
+
+/**
+ * enum sci_task_status - This enumeration depicts all of the possible task
+ * completion status values. Each value in this enumeration maps directly
+ * to a value in the enum sci_status enumeration. Please refer to that
+ * enumeration for detailed comments concerning what the status represents.
+ *
+ * TODO: Check that the following statuses are properly handled:
+ */
+enum sci_task_status {
+ SCI_TASK_SUCCESS = SCI_SUCCESS,
+ SCI_TASK_FAILURE = SCI_FAILURE,
+ SCI_TASK_FAILURE_INVALID_STATE = SCI_FAILURE_INVALID_STATE,
+ SCI_TASK_FAILURE_INSUFFICIENT_RESOURCES = SCI_FAILURE_INSUFFICIENT_RESOURCES,
+ SCI_TASK_FAILURE_UNSUPPORTED_PROTOCOL = SCI_FAILURE_UNSUPPORTED_PROTOCOL,
+ SCI_TASK_FAILURE_INVALID_TAG = SCI_FAILURE_INVALID_IO_TAG,
+ SCI_TASK_FAILURE_RESPONSE_VALID = SCI_FAILURE_IO_RESPONSE_VALID,
+ SCI_TASK_FAILURE_CONTROLLER_SPECIFIC_ERR = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR,
+ SCI_TASK_FAILURE_TERMINATED = SCI_FAILURE_IO_TERMINATED,
+ SCI_TASK_FAILURE_INVALID_PARAMETER_VALUE = SCI_FAILURE_INVALID_PARAMETER_VALUE,
+
+ SCI_TASK_FAILURE_REMOTE_DEVICE_RESET_REQUIRED = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED,
+ SCI_TASK_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS = SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS
+
+};
+
+/**
+ * sci_swab32_cpy - convert between scsi and scu-hardware byte format
+ * @dest: receive the 4-byte endian swapped version of src
+ * @src: word aligned source buffer
+ * @word_cnt: number of 32-bit words to copy
+ *
+ * scu hardware handles SSP/SMP control, response, and unidentified
+ * frames in "big endian dword" order. Regardless of host endianness,
+ * this is always a swab32()-per-dword conversion of the standard
+ * definition, i.e. single-byte fields swapped and multi-byte fields in
+ * little-endian order.
+ */
+static inline void sci_swab32_cpy(void *_dest, void *_src, ssize_t word_cnt)
+{
+ u32 *dest = _dest, *src = _src;
+
+ while (--word_cnt >= 0)
+ dest[word_cnt] = swab32(src[word_cnt]);
+}
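+
+/*
+ * Illustrative use (hypothetical names), copying a dword-aligned frame
+ * into a response buffer:
+ *
+ *	sci_swab32_cpy(resp, frame_buffer, frame_len / sizeof(u32));
+ */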
+
+extern unsigned char no_outbound_task_to;
+extern u16 ssp_max_occ_to;
+extern u16 stp_max_occ_to;
+extern u16 ssp_inactive_to;
+extern u16 stp_inactive_to;
+extern unsigned char phy_gen;
+extern unsigned char max_concurr_spinup;
+
+irqreturn_t isci_msix_isr(int vec, void *data);
+irqreturn_t isci_intx_isr(int vec, void *data);
+irqreturn_t isci_error_isr(int vec, void *data);
+
+/*
+ * Each timer is associated with a cancellation flag that is set when
+ * del_timer() is called and checked in the timer callback function. This
+ * is needed since del_timer_sync() cannot be called with sci_lock held.
+ * For deinit however, del_timer_sync() is used without holding the lock.
+ */
+struct sci_timer {
+ struct timer_list timer;
+ bool cancel;
+};
+
+static inline
+void sci_init_timer(struct sci_timer *tmr, void (*fn)(unsigned long))
+{
+ tmr->timer.function = fn;
+ tmr->timer.data = (unsigned long) tmr;
+ tmr->cancel = 0;
+ init_timer(&tmr->timer);
+}
+
+static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec)
+{
+ tmr->cancel = 0;
+ mod_timer(&tmr->timer, jiffies + msecs_to_jiffies(msec));
+}
+
+static inline void sci_del_timer(struct sci_timer *tmr)
+{
+ tmr->cancel = 1;
+ del_timer(&tmr->timer);
+}
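+
+/*
+ * Timer callback sketch (illustrative; see phy_sata_timeout() in phy.c):
+ * take the controller lock, then bail out if sci_del_timer() raced with
+ * the timer firing:
+ *
+ *	spin_lock_irqsave(&ihost->scic_lock, flags);
+ *	if (tmr->cancel)
+ *		goto done;
+ */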
+
+struct sci_base_state_machine {
+ const struct sci_base_state *state_table;
+ u32 initial_state_id;
+ u32 current_state_id;
+ u32 previous_state_id;
+};
+
+typedef void (*sci_state_transition_t)(struct sci_base_state_machine *sm);
+
+struct sci_base_state {
+ sci_state_transition_t enter_state; /* Called on state entry */
+ sci_state_transition_t exit_state; /* Called on state exit */
+};
+
+extern void sci_init_sm(struct sci_base_state_machine *sm,
+ const struct sci_base_state *state_table,
+ u32 initial_state);
+extern void sci_change_state(struct sci_base_state_machine *sm, u32 next_state);
+#endif /* __ISCI_H__ */
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
new file mode 100644
index 00000000000..79313a7a235
--- /dev/null
+++ b/drivers/scsi/isci/phy.c
@@ -0,0 +1,1312 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "isci.h"
+#include "host.h"
+#include "phy.h"
+#include "scu_event_codes.h"
+#include "probe_roms.h"
+
+/* Maximum arbitration wait time in microseconds */
+#define SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME (700)
+
+enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy)
+{
+ return iphy->max_negotiated_speed;
+}
+
+static enum sci_status
+sci_phy_transport_layer_initialization(struct isci_phy *iphy,
+ struct scu_transport_layer_registers __iomem *reg)
+{
+ u32 tl_control;
+
+ iphy->transport_layer_registers = reg;
+
+ writel(SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX,
+ &iphy->transport_layer_registers->stp_rni);
+
+ /*
+ * Hardware team recommends that we enable the STP prefetch for all
+ * transports
+ */
+ tl_control = readl(&iphy->transport_layer_registers->control);
+ tl_control |= SCU_TLCR_GEN_BIT(STP_WRITE_DATA_PREFETCH);
+ writel(tl_control, &iphy->transport_layer_registers->control);
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+sci_phy_link_layer_initialization(struct isci_phy *iphy,
+ struct scu_link_layer_registers __iomem *reg)
+{
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+ int phy_idx = iphy->phy_index;
+ struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx];
+ struct sci_phy_oem_params *phy_oem =
+ &ihost->oem_parameters.phys[phy_idx];
+ u32 phy_configuration;
+ struct sci_phy_cap phy_cap;
+ u32 parity_check = 0;
+ u32 parity_count = 0;
+ u32 llctl, link_rate;
+ u32 clksm_value = 0;
+
+ iphy->link_layer_registers = reg;
+
+ /* Set our IDENTIFY frame data */
+ #define SCI_END_DEVICE 0x01
+
+ writel(SCU_SAS_TIID_GEN_BIT(SMP_INITIATOR) |
+ SCU_SAS_TIID_GEN_BIT(SSP_INITIATOR) |
+ SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) |
+ SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) |
+ SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE),
+ &iphy->link_layer_registers->transmit_identification);
+
+ /* Write the device SAS Address */
+ writel(0xFEDCBA98,
+ &iphy->link_layer_registers->sas_device_name_high);
+ writel(phy_idx, &iphy->link_layer_registers->sas_device_name_low);
+
+ /* Write the source SAS Address */
+ writel(phy_oem->sas_address.high,
+ &iphy->link_layer_registers->source_sas_address_high);
+ writel(phy_oem->sas_address.low,
+ &iphy->link_layer_registers->source_sas_address_low);
+
+ /* Clear and Set the PHY Identifier */
+ writel(0, &iphy->link_layer_registers->identify_frame_phy_id);
+ writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx),
+ &iphy->link_layer_registers->identify_frame_phy_id);
+
+ /* Change the initial state of the phy configuration register */
+ phy_configuration =
+ readl(&iphy->link_layer_registers->phy_configuration);
+
+ /* Hold OOB state machine in reset */
+ phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+ writel(phy_configuration,
+ &iphy->link_layer_registers->phy_configuration);
+
+ /* Configure the SNW capabilities */
+ phy_cap.all = 0;
+ phy_cap.start = 1;
+ phy_cap.gen3_no_ssc = 1;
+ phy_cap.gen2_no_ssc = 1;
+ phy_cap.gen1_no_ssc = 1;
+ if (ihost->oem_parameters.controller.do_enable_ssc == true) {
+ phy_cap.gen3_ssc = 1;
+ phy_cap.gen2_ssc = 1;
+ phy_cap.gen1_ssc = 1;
+ }
+
+ /*
+ * The SAS specification indicates that the phy_capabilities that
+ * are transmitted shall have an even parity. Calculate the parity. */
+ parity_check = phy_cap.all;
+ while (parity_check != 0) {
+ if (parity_check & 0x1)
+ parity_count++;
+ parity_check >>= 1;
+ }
+
+ /*
+ * If parity indicates there are an odd number of bits set, then
+ * set the parity bit to 1 in the phy capabilities. */
+ if ((parity_count % 2) != 0)
+ phy_cap.parity = 1;
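+	/* (Equivalent to: phy_cap.parity = hweight32(phy_cap.all) & 1) */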
+
+ writel(phy_cap.all, &iphy->link_layer_registers->phy_capabilities);
+
+ /* Set the enable spinup period but disable the ability to send
+ * notify enable spinup
+ */
+ writel(SCU_ENSPINUP_GEN_VAL(COUNT,
+ phy_user->notify_enable_spin_up_insertion_frequency),
+ &iphy->link_layer_registers->notify_enable_spinup_control);
+
+	/* Write the ALIGN Insertion Frequency for the connected phy,
+	 * independent of the connected state
+ */
+ clksm_value = SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(CONNECTED,
+ phy_user->in_connection_align_insertion_frequency);
+
+ clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL,
+ phy_user->align_insertion_frequency);
+
+ writel(clksm_value, &iphy->link_layer_registers->clock_skew_management);
+
+ /* @todo Provide a way to write this register correctly */
+ writel(0x02108421,
+ &iphy->link_layer_registers->afe_lookup_table_control);
+
+ llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
+ (u8)ihost->user_parameters.no_outbound_task_timeout);
+
+ switch (phy_user->max_speed_generation) {
+ case SCIC_SDS_PARM_GEN3_SPEED:
+ link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3;
+ break;
+ case SCIC_SDS_PARM_GEN2_SPEED:
+ link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2;
+ break;
+ default:
+ link_rate = SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1;
+ break;
+ }
+ llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
+ writel(llctl, &iphy->link_layer_registers->link_layer_control);
+
+ if (is_a2(ihost->pdev)) {
+		/* Program the max ARB time for the PHY to 700us so we interoperate with
+ * the PMC expander which shuts down PHYs if the expander PHY generates too
+ * many breaks. This time value will guarantee that the initiator PHY will
+ * generate the break.
+ */
+ writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME,
+ &iphy->link_layer_registers->maximum_arbitration_wait_timer_timeout);
+ }
+
+ /* Disable link layer hang detection, rely on the OS timeout for I/O timeouts. */
+ writel(0, &iphy->link_layer_registers->link_layer_hang_detection_timeout);
+
+ /* We can exit the initial state to the stopped state */
+ sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+
+ return SCI_SUCCESS;
+}
+
+static void phy_sata_timeout(unsigned long data)
+{
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer);
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: SCIC SDS Phy 0x%p did not receive signature fis before "
+ "timeout.\n",
+ __func__,
+ iphy);
+
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/**
+ * phy_get_non_dummy_port() - This method returns the port currently
+ *    containing this phy. If the phy is currently contained by the dummy
+ *    port, then the phy is considered to not be part of a port.
+ * @iphy: This parameter specifies the phy for which to retrieve the
+ *    containing port.
+ *
+ * This method returns a handle to the port that contains the supplied phy.
+ * NULL is returned if the phy is not part of a real port (i.e. it is
+ * contained in the dummy port); all other values are a handle/pointer to
+ * the port containing the phy.
+ */
+struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy)
+{
+ struct isci_port *iport = iphy->owning_port;
+
+ if (iport->physical_port_index == SCIC_SDS_DUMMY_PORT)
+ return NULL;
+
+ return iphy->owning_port;
+}
+
+/**
+ * sci_phy_set_port() - This method will assign a port to the phy object.
+ * @iphy: This parameter specifies the phy for which to assign a port.
+ * @iport: This parameter specifies the port object to assign to the phy.
+ */
+void sci_phy_set_port(
+ struct isci_phy *iphy,
+ struct isci_port *iport)
+{
+ iphy->owning_port = iport;
+
+ if (iphy->bcn_received_while_port_unassigned) {
+ iphy->bcn_received_while_port_unassigned = false;
+ sci_port_broadcast_change_received(iphy->owning_port, iphy);
+ }
+}
+
+enum sci_status sci_phy_initialize(struct isci_phy *iphy,
+ struct scu_transport_layer_registers __iomem *tl,
+ struct scu_link_layer_registers __iomem *ll)
+{
+	/* Perform the initialization of the TL hardware */
+	sci_phy_transport_layer_initialization(iphy, tl);
+
+	/* Perform the initialization of the PE hardware */
+	sci_phy_link_layer_initialization(iphy, ll);
+
+ /* There is nothing that needs to be done in this state just
+ * transition to the stopped state
+ */
+ sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_phy_setup_transport() - This method assigns the direct attached
+ *    device ID for this phy.
+ * @iphy: The phy for which the direct attached device id is to be assigned.
+ * @device_id: The direct attached device ID to assign to the phy. This will
+ *    either be the RNi for the device or an invalid RNi if there is no
+ *    current device assigned to the phy.
+ */
+void sci_phy_setup_transport(struct isci_phy *iphy, u32 device_id)
+{
+ u32 tl_control;
+
+ writel(device_id, &iphy->transport_layer_registers->stp_rni);
+
+ /*
+ * The read should guarantee that the first write gets posted
+ * before the next write
+ */
+ tl_control = readl(&iphy->transport_layer_registers->control);
+ tl_control |= SCU_TLCR_GEN_BIT(CLEAR_TCI_NCQ_MAPPING_TABLE);
+ writel(tl_control, &iphy->transport_layer_registers->control);
+}
+
+static void sci_phy_suspend(struct isci_phy *iphy)
+{
+ u32 scu_sas_pcfg_value;
+
+ scu_sas_pcfg_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+
+ sci_phy_setup_transport(iphy, SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
+}
+
+void sci_phy_resume(struct isci_phy *iphy)
+{
+ u32 scu_sas_pcfg_value;
+
+ scu_sas_pcfg_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE);
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+}
+
+void sci_phy_get_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
+{
+ sas->high = readl(&iphy->link_layer_registers->source_sas_address_high);
+ sas->low = readl(&iphy->link_layer_registers->source_sas_address_low);
+}
+
+void sci_phy_get_attached_sas_address(struct isci_phy *iphy, struct sci_sas_address *sas)
+{
+ struct sas_identify_frame *iaf;
+
+ iaf = &iphy->frame_rcvd.iaf;
+ memcpy(sas, iaf->sas_addr, SAS_ADDR_SIZE);
+}
+
+void sci_phy_get_protocols(struct isci_phy *iphy, struct sci_phy_proto *proto)
+{
+ proto->all = readl(&iphy->link_layer_registers->transmit_identification);
+}
+
+enum sci_status sci_phy_start(struct isci_phy *iphy)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+
+ if (state != SCI_PHY_STOPPED) {
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_phy_stop(struct isci_phy *iphy)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+
+ switch (state) {
+ case SCI_PHY_SUB_INITIAL:
+ case SCI_PHY_SUB_AWAIT_OSSP_EN:
+ case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_SAS_POWER:
+ case SCI_PHY_SUB_AWAIT_SATA_POWER:
+ case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+ case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+ case SCI_PHY_SUB_FINAL:
+ case SCI_PHY_READY:
+ break;
+ default:
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_phy_reset(struct isci_phy *iphy)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+
+ if (state != SCI_PHY_READY) {
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_change_state(&iphy->sm, SCI_PHY_RESETTING);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_phy_consume_power_handler(struct isci_phy *iphy)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+
+ switch (state) {
+ case SCI_PHY_SUB_AWAIT_SAS_POWER: {
+ u32 enable_spinup;
+
+ enable_spinup = readl(&iphy->link_layer_registers->notify_enable_spinup_control);
+ enable_spinup |= SCU_ENSPINUP_GEN_BIT(ENABLE);
+ writel(enable_spinup, &iphy->link_layer_registers->notify_enable_spinup_control);
+
+ /* Change state to the final state this substate machine has run to completion */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL);
+
+ return SCI_SUCCESS;
+ }
+ case SCI_PHY_SUB_AWAIT_SATA_POWER: {
+ u32 scu_sas_pcfg_value;
+
+ /* Release the spinup hold state and reset the OOB state machine */
+ scu_sas_pcfg_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ scu_sas_pcfg_value &=
+ ~(SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD) | SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE));
+ scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+
+ /* Now restart the OOB operation */
+ scu_sas_pcfg_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+ scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+
+ /* Change state to the final state this substate machine has run to completion */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_PHY_EN);
+
+ return SCI_SUCCESS;
+ }
+ default:
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+static void sci_phy_start_sas_link_training(struct isci_phy *iphy)
+{
+ /* continue the link training for the phy as if it were a SAS PHY
+ * instead of a SATA PHY. This is done because the completion queue had a SAS
+ * PHY DETECTED event when the state machine was expecting a SATA PHY event.
+ */
+ u32 phy_control;
+
+ phy_control = readl(&iphy->link_layer_registers->phy_configuration);
+ phy_control |= SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD);
+ writel(phy_control,
+ &iphy->link_layer_registers->phy_configuration);
+
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SAS_SPEED_EN);
+
+ iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SAS;
+}
+
+static void sci_phy_start_sata_link_training(struct isci_phy *iphy)
+{
+	/* This method continues the link training for the phy as if it were
+	 * a SATA PHY instead of a SAS PHY. This is done because the completion
+	 * queue had a SATA SPINUP HOLD event when the state machine was
+	 * expecting a SAS PHY event.
+ */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_POWER);
+
+ iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
+}
+
+/**
+ * sci_phy_complete_link_training - perform processing common to
+ * all protocols upon completion of link training.
+ * @sci_phy: This parameter specifies the phy object for which link training
+ * has completed.
+ * @max_link_rate: This parameter specifies the maximum link rate to be
+ * associated with this phy.
+ * @next_state: This parameter specifies the next state for the phy's starting
+ * sub-state machine.
+ *
+ */
+static void sci_phy_complete_link_training(struct isci_phy *iphy,
+ enum sas_linkrate max_link_rate,
+ u32 next_state)
+{
+ iphy->max_negotiated_speed = max_link_rate;
+
+ sci_change_state(&iphy->sm, next_state);
+}
+
+enum sci_status sci_phy_event_handler(struct isci_phy *iphy, u32 event_code)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+
+ switch (state) {
+ case SCI_PHY_SUB_AWAIT_OSSP_EN:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ sci_phy_start_sas_link_training(iphy);
+ iphy->is_in_link_training = true;
+ break;
+ case SCU_EVENT_SATA_SPINUP_HOLD:
+ sci_phy_start_sata_link_training(iphy);
+ iphy->is_in_link_training = true;
+ break;
+ default:
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected event_code %x\n",
+ __func__,
+ event_code);
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ /*
+ * Why is this being reported again by the controller?
+			 * We would re-enter this state, so just stay here */
+ break;
+ case SCU_EVENT_SAS_15:
+ case SCU_EVENT_SAS_15_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
+ SCI_PHY_SUB_AWAIT_IAF_UF);
+ break;
+ case SCU_EVENT_SAS_30:
+ case SCU_EVENT_SAS_30_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
+ SCI_PHY_SUB_AWAIT_IAF_UF);
+ break;
+ case SCU_EVENT_SAS_60:
+ case SCU_EVENT_SAS_60_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
+ SCI_PHY_SUB_AWAIT_IAF_UF);
+ break;
+ case SCU_EVENT_SATA_SPINUP_HOLD:
+ /*
+			 * We were doing SAS PHY link training and received a SATA PHY
+			 * event; continue OOB/SN as if this were a SATA PHY */
+ sci_phy_start_sata_link_training(iphy);
+ break;
+ case SCU_EVENT_LINK_FAILURE:
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected event_code %x\n",
+ __func__, event_code);
+
+			return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_IAF_UF:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ /* Backup the state machine */
+ sci_phy_start_sas_link_training(iphy);
+ break;
+ case SCU_EVENT_SATA_SPINUP_HOLD:
+			/* We were doing SAS PHY link training and received a
+			 * SATA PHY event; continue OOB/SN as if this were a
+			 * SATA PHY
+ */
+ sci_phy_start_sata_link_training(iphy);
+ break;
+ case SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT:
+ case SCU_EVENT_LINK_FAILURE:
+ case SCU_EVENT_HARD_RESET_RECEIVED:
+ /* Start the oob/sn state machine over again */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected event_code %x\n",
+ __func__, event_code);
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SAS_POWER:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_LINK_FAILURE:
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received unexpected "
+ "event_code %x\n",
+ __func__,
+ event_code);
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SATA_POWER:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_LINK_FAILURE:
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ case SCU_EVENT_SATA_SPINUP_HOLD:
+ /* These events are received every 10ms and are
+ * expected while in this state
+ */
+ break;
+
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ /* There has been a change in the phy type before OOB/SN for
+ * the SATA phy finished; start down the SAS link training path.
+ */
+ sci_phy_start_sas_link_training(iphy);
+ break;
+
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected event_code %x\n",
+ __func__, event_code);
+
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_LINK_FAILURE:
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ case SCU_EVENT_SATA_SPINUP_HOLD:
+ /* These events might be received since we don't know how many may be in
+ * the completion queue while waiting for power
+ */
+ break;
+ case SCU_EVENT_SATA_PHY_DETECTED:
+ iphy->protocol = SCIC_SDS_PHY_PROTOCOL_SATA;
+
+ /* We have received the SATA PHY notification change state */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
+ break;
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ /* There has been a change in the phy type before OOB/SN for
+ * the SATA phy finished; start down the SAS link training path.
+ */
+ sci_phy_start_sas_link_training(iphy);
+ break;
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected event_code %x\n",
+ __func__,
+ event_code);
+
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_SATA_PHY_DETECTED:
+ /*
+ * The hardware reports multiple SATA PHY detected events;
+ * ignore the extras. */
+ break;
+ case SCU_EVENT_SATA_15:
+ case SCU_EVENT_SATA_15_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_1_5_GBPS,
+ SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+ break;
+ case SCU_EVENT_SATA_30:
+ case SCU_EVENT_SATA_30_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_3_0_GBPS,
+ SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+ break;
+ case SCU_EVENT_SATA_60:
+ case SCU_EVENT_SATA_60_SSC:
+ sci_phy_complete_link_training(iphy, SAS_LINK_RATE_6_0_GBPS,
+ SCI_PHY_SUB_AWAIT_SIG_FIS_UF);
+ break;
+ case SCU_EVENT_LINK_FAILURE:
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ case SCU_EVENT_SAS_PHY_DETECTED:
+ /*
+ * There has been a change in the phy type before OOB/SN for the
+ * SATA phy finished; start down the SAS link training path. */
+ sci_phy_start_sas_link_training(iphy);
+ break;
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected event_code %x\n",
+ __func__, event_code);
+
+ return SCI_FAILURE;
+ }
+
+ return SCI_SUCCESS;
+ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_SATA_PHY_DETECTED:
+ /* Back up the state machine */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_SATA_SPEED_EN);
+ break;
+
+ case SCU_EVENT_LINK_FAILURE:
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected event_code %x\n",
+ __func__,
+ event_code);
+
+ return SCI_FAILURE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_READY:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_LINK_FAILURE:
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ case SCU_EVENT_BROADCAST_CHANGE:
+ /* Broadcast change received. Notify the port. */
+ if (phy_get_non_dummy_port(iphy) != NULL)
+ sci_port_broadcast_change_received(iphy->owning_port, iphy);
+ else
+ iphy->bcn_received_while_port_unassigned = true;
+ break;
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%sP SCIC PHY 0x%p ready state machine received "
+ "unexpected event_code %x\n",
+ __func__, iphy, event_code);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+ return SCI_SUCCESS;
+ case SCI_PHY_RESETTING:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_HARD_RESET_TRANSMITTED:
+ /* Link failure change state back to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ break;
+ default:
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: SCIC PHY 0x%p resetting state machine received "
+ "unexpected event_code %x\n",
+ __func__, iphy, event_code);
+
+ return SCI_FAILURE_INVALID_STATE;
+ }
+ return SCI_SUCCESS;
+ default:
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_phy_frame_handler(struct isci_phy *iphy, u32 frame_index)
+{
+ enum sci_phy_states state = iphy->sm.current_state_id;
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+ enum sci_status result;
+ unsigned long flags;
+
+ switch (state) {
+ case SCI_PHY_SUB_AWAIT_IAF_UF: {
+ u32 *frame_words;
+ struct sas_identify_frame iaf;
+
+ result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_words);
+
+ if (result != SCI_SUCCESS)
+ return result;
+
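+ /* Descriptive note: the identify address frame arrives
+ * big-endian on the wire; sci_swab32_cpy presumably swaps
+ * each 32-bit word into CPU order before the fields below
+ * (frame_type, smp_tport) are interpreted.
+ */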
+ sci_swab32_cpy(&iaf, frame_words, sizeof(iaf) / sizeof(u32));
+ if (iaf.frame_type == 0) {
+ u32 state;
+
+ spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
+ memcpy(&iphy->frame_rcvd.iaf, &iaf, sizeof(iaf));
+ spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
+ if (iaf.smp_tport) {
+ /* We got the IAF for an expander PHY go to the final
+ * state since there are no power requirements for
+ * expander phys.
+ */
+ state = SCI_PHY_SUB_FINAL;
+ } else {
+ /* We got the IAF we can now go to the await spinup
+ * semaphore state
+ */
+ state = SCI_PHY_SUB_AWAIT_SAS_POWER;
+ }
+ sci_change_state(&iphy->sm, state);
+ result = SCI_SUCCESS;
+ } else
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected frame id %x\n",
+ __func__, frame_index);
+
+ sci_controller_release_frame(ihost, frame_index);
+ return result;
+ }
+ case SCI_PHY_SUB_AWAIT_SIG_FIS_UF: {
+ struct dev_to_host_fis *frame_header;
+ u32 *fis_frame_data;
+
+ result = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if (result != SCI_SUCCESS)
+ return result;
+
+ if ((frame_header->fis_type == FIS_REGD2H) &&
+ !(frame_header->status & ATA_BUSY)) {
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&fis_frame_data);
+
+ spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
+ sci_controller_copy_sata_response(&iphy->frame_rcvd.fis,
+ frame_header,
+ fis_frame_data);
+ spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
+
+ /* Got the signature FIS; we can now go to the final substate */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_FINAL);
+
+ result = SCI_SUCCESS;
+ } else
+ dev_warn(sciphy_to_dev(iphy),
+ "%s: PHY starting substate machine received "
+ "unexpected frame id %x\n",
+ __func__, frame_index);
+
+ /* Regardless of the result we are done with this frame */
+ sci_controller_release_frame(ihost, frame_index);
+
+ return result;
+ }
+ default:
+ dev_dbg(sciphy_to_dev(iphy),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+}
+
+static void sci_phy_starting_initial_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ /* This is just a temporary state; go off to the starting state */
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_AWAIT_OSSP_EN);
+}
+
+static void sci_phy_starting_await_sas_power_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+ sci_controller_power_control_queue_insert(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sas_power_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+ sci_controller_power_control_queue_remove(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sata_power_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+ sci_controller_power_control_queue_insert(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sata_power_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_host *ihost = iphy->owning_port->owning_controller;
+
+ sci_controller_power_control_queue_remove(ihost, iphy);
+}
+
+static void sci_phy_starting_await_sata_phy_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
+}
+
+static void sci_phy_starting_await_sata_phy_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_del_timer(&iphy->sata_timer);
+}
+
+static void sci_phy_starting_await_sata_speed_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
+}
+
+static void sci_phy_starting_await_sata_speed_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_del_timer(&iphy->sata_timer);
+}
+
+static void sci_phy_starting_await_sig_fis_uf_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ if (sci_port_link_detected(iphy->owning_port, iphy)) {
+
+ /*
+ * Clear the PE suspend condition so we can actually
+ * receive SIG FIS
+ * The hardware will not respond to the XRDY until the PE
+ * suspend condition is cleared.
+ */
+ sci_phy_resume(iphy);
+
+ sci_mod_timer(&iphy->sata_timer,
+ SCIC_SDS_SIGNATURE_FIS_TIMEOUT);
+ } else
+ iphy->is_in_link_training = false;
+}
+
+static void sci_phy_starting_await_sig_fis_uf_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_del_timer(&iphy->sata_timer);
+}
+
+static void sci_phy_starting_final_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ /* State machine has run to completion so exit out and change
+ * the base state machine to the ready state
+ */
+ sci_change_state(&iphy->sm, SCI_PHY_READY);
+}
+
+/**
+ * scu_link_layer_stop_protocol_engine() - stop the phy's protocol engine
+ * @iphy: This is the struct isci_phy object to stop.
+ *
+ * This method will stop the struct isci_phy object. This does not reset the
+ * protocol engine; it just suspends it and places it in a state where it will
+ * not cause the end device to power up.
+ */
+static void scu_link_layer_stop_protocol_engine(
+ struct isci_phy *iphy)
+{
+ u32 scu_sas_pcfg_value;
+ u32 enable_spinup_value;
+
+ /* Suspend the protocol engine and place it in a sata spinup hold state */
+ scu_sas_pcfg_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ scu_sas_pcfg_value |=
+ (SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+ SCU_SAS_PCFG_GEN_BIT(SUSPEND_PROTOCOL_ENGINE) |
+ SCU_SAS_PCFG_GEN_BIT(SATA_SPINUP_HOLD));
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+
+ /* Disable the notify enable spinup primitives */
+ enable_spinup_value = readl(&iphy->link_layer_registers->notify_enable_spinup_control);
+ enable_spinup_value &= ~SCU_ENSPINUP_GEN_BIT(ENABLE);
+ writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control);
+}
+
+/**
+ * scu_link_layer_start_oob() - start the OOB/SN state machine
+ * @iphy: the phy on which to start OOB/SN
+ *
+ * This method will start the OOB/SN state machine for this struct isci_phy object.
+ */
+static void scu_link_layer_start_oob(
+ struct isci_phy *iphy)
+{
+ u32 scu_sas_pcfg_value;
+
+ scu_sas_pcfg_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+ scu_sas_pcfg_value &=
+ ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+ SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
+ writel(scu_sas_pcfg_value,
+ &iphy->link_layer_registers->phy_configuration);
+}
+
+/**
+ * scu_link_layer_tx_hard_reset() - transmit a hard reset request
+ * @iphy: the phy on which to transmit the hard reset
+ *
+ * This method will transmit a hard reset request on the specified phy. The SCU
+ * hardware requires that we reset the OOB state machine and set the hard reset
+ * bit in the phy configuration register. We then must start OOB over with the
+ * hard reset bit set.
+ */
+static void scu_link_layer_tx_hard_reset(
+ struct isci_phy *iphy)
+{
+ u32 phy_configuration_value;
+
+ /*
+ * SAS Phys must wait for the HARD_RESET_TX event notification to transition
+ * to the starting state. */
+ phy_configuration_value =
+ readl(&iphy->link_layer_registers->phy_configuration);
+ phy_configuration_value |=
+ (SCU_SAS_PCFG_GEN_BIT(HARD_RESET) |
+ SCU_SAS_PCFG_GEN_BIT(OOB_RESET));
+ writel(phy_configuration_value,
+ &iphy->link_layer_registers->phy_configuration);
+
+ /* Now take the OOB state machine out of reset */
+ phy_configuration_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+ phy_configuration_value &= ~SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
+ writel(phy_configuration_value,
+ &iphy->link_layer_registers->phy_configuration);
+}
+
+static void sci_phy_stopped_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_port *iport = iphy->owning_port;
+ struct isci_host *ihost = iport->owning_controller;
+
+ /*
+ * @todo We need to get to the controller to place this PE in a
+ * reset state
+ */
+ sci_del_timer(&iphy->sata_timer);
+
+ scu_link_layer_stop_protocol_engine(iphy);
+
+ if (iphy->sm.previous_state_id != SCI_PHY_INITIAL)
+ sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
+}
+
+static void sci_phy_starting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_port *iport = iphy->owning_port;
+ struct isci_host *ihost = iport->owning_controller;
+
+ scu_link_layer_stop_protocol_engine(iphy);
+ scu_link_layer_start_oob(iphy);
+
+ /* We don't know what kind of phy we are going to be just yet */
+ iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
+ iphy->bcn_received_while_port_unassigned = false;
+
+ if (iphy->sm.previous_state_id == SCI_PHY_READY)
+ sci_controller_link_down(ihost, phy_get_non_dummy_port(iphy), iphy);
+
+ sci_change_state(&iphy->sm, SCI_PHY_SUB_INITIAL);
+}
+
+static void sci_phy_ready_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+ struct isci_port *iport = iphy->owning_port;
+ struct isci_host *ihost = iport->owning_controller;
+
+ sci_controller_link_up(ihost, phy_get_non_dummy_port(iphy), iphy);
+}
+
+static void sci_phy_ready_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ sci_phy_suspend(iphy);
+}
+
+static void sci_phy_resetting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_phy *iphy = container_of(sm, typeof(*iphy), sm);
+
+ /* The phy is being reset, therefore deactivate it from the port. In
+ * the resetting state we don't notify the user regarding link up and
+ * link down notifications
+ */
+ sci_port_deactivate_phy(iphy->owning_port, iphy, false);
+
+ if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+ scu_link_layer_tx_hard_reset(iphy);
+ } else {
+ /* The SCU does not need to have a discrete reset state so
+ * just go back to the starting state.
+ */
+ sci_change_state(&iphy->sm, SCI_PHY_STARTING);
+ }
+}
+
+static const struct sci_base_state sci_phy_state_table[] = {
+ [SCI_PHY_INITIAL] = { },
+ [SCI_PHY_STOPPED] = {
+ .enter_state = sci_phy_stopped_state_enter,
+ },
+ [SCI_PHY_STARTING] = {
+ .enter_state = sci_phy_starting_state_enter,
+ },
+ [SCI_PHY_SUB_INITIAL] = {
+ .enter_state = sci_phy_starting_initial_substate_enter,
+ },
+ [SCI_PHY_SUB_AWAIT_OSSP_EN] = { },
+ [SCI_PHY_SUB_AWAIT_SAS_SPEED_EN] = { },
+ [SCI_PHY_SUB_AWAIT_IAF_UF] = { },
+ [SCI_PHY_SUB_AWAIT_SAS_POWER] = {
+ .enter_state = sci_phy_starting_await_sas_power_substate_enter,
+ .exit_state = sci_phy_starting_await_sas_power_substate_exit,
+ },
+ [SCI_PHY_SUB_AWAIT_SATA_POWER] = {
+ .enter_state = sci_phy_starting_await_sata_power_substate_enter,
+ .exit_state = sci_phy_starting_await_sata_power_substate_exit
+ },
+ [SCI_PHY_SUB_AWAIT_SATA_PHY_EN] = {
+ .enter_state = sci_phy_starting_await_sata_phy_substate_enter,
+ .exit_state = sci_phy_starting_await_sata_phy_substate_exit
+ },
+ [SCI_PHY_SUB_AWAIT_SATA_SPEED_EN] = {
+ .enter_state = sci_phy_starting_await_sata_speed_substate_enter,
+ .exit_state = sci_phy_starting_await_sata_speed_substate_exit
+ },
+ [SCI_PHY_SUB_AWAIT_SIG_FIS_UF] = {
+ .enter_state = sci_phy_starting_await_sig_fis_uf_substate_enter,
+ .exit_state = sci_phy_starting_await_sig_fis_uf_substate_exit
+ },
+ [SCI_PHY_SUB_FINAL] = {
+ .enter_state = sci_phy_starting_final_substate_enter,
+ },
+ [SCI_PHY_READY] = {
+ .enter_state = sci_phy_ready_state_enter,
+ .exit_state = sci_phy_ready_state_exit,
+ },
+ [SCI_PHY_RESETTING] = {
+ .enter_state = sci_phy_resetting_state_enter,
+ },
+ [SCI_PHY_FINAL] = { },
+};
+
+void sci_phy_construct(struct isci_phy *iphy,
+ struct isci_port *iport, u8 phy_index)
+{
+ sci_init_sm(&iphy->sm, sci_phy_state_table, SCI_PHY_INITIAL);
+
+ /* Copy the rest of the input data to our locals */
+ iphy->owning_port = iport;
+ iphy->phy_index = phy_index;
+ iphy->bcn_received_while_port_unassigned = false;
+ iphy->protocol = SCIC_SDS_PHY_PROTOCOL_UNKNOWN;
+ iphy->link_layer_registers = NULL;
+ iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
+
+ /* Create the SIGNATURE FIS Timeout timer for this phy */
+ sci_init_timer(&iphy->sata_timer, phy_sata_timeout);
+}
+
+void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index)
+{
+ struct sci_oem_params *oem = &ihost->oem_parameters;
+ u64 sci_sas_addr;
+ __be64 sas_addr;
+
+ sci_sas_addr = oem->phys[index].sas_address.high;
+ sci_sas_addr <<= 32;
+ sci_sas_addr |= oem->phys[index].sas_address.low;
+ sas_addr = cpu_to_be64(sci_sas_addr);
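+ /* libsas expects the SAS address as a big-endian byte array,
+ * hence the cpu_to_be64() conversion of the OEM's {high, low}
+ * pair before it is copied out below. */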
+ memcpy(iphy->sas_addr, &sas_addr, sizeof(sas_addr));
+
+ iphy->isci_port = NULL;
+ iphy->sas_phy.enabled = 0;
+ iphy->sas_phy.id = index;
+ iphy->sas_phy.sas_addr = &iphy->sas_addr[0];
+ iphy->sas_phy.frame_rcvd = (u8 *)&iphy->frame_rcvd;
+ iphy->sas_phy.ha = &ihost->sas_ha;
+ iphy->sas_phy.lldd_phy = iphy;
+ iphy->sas_phy.enabled = 1;
+ iphy->sas_phy.class = SAS;
+ iphy->sas_phy.iproto = SAS_PROTOCOL_ALL;
+ iphy->sas_phy.tproto = 0;
+ iphy->sas_phy.type = PHY_TYPE_PHYSICAL;
+ iphy->sas_phy.role = PHY_ROLE_INITIATOR;
+ iphy->sas_phy.oob_mode = OOB_NOT_CONNECTED;
+ iphy->sas_phy.linkrate = SAS_LINK_RATE_UNKNOWN;
+ memset(&iphy->frame_rcvd, 0, sizeof(iphy->frame_rcvd));
+}
+
+
+/**
+ * isci_phy_control() - This function is one of the SAS Domain Template
+ * functions. This is a phy management function.
+ * @sas_phy: This parameter specifies the sas phy being controlled.
+ * @func: This parameter specifies the phy control function being invoked.
+ * @buf: This parameter is specific to the phy function being invoked.
+ *
+ * Returns a status; zero indicates success.
+ */
+int isci_phy_control(struct asd_sas_phy *sas_phy,
+ enum phy_func func,
+ void *buf)
+{
+ int ret = 0;
+ struct isci_phy *iphy = sas_phy->lldd_phy;
+ struct isci_port *iport = iphy->isci_port;
+ struct isci_host *ihost = sas_phy->ha->lldd_ha;
+ unsigned long flags;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: phy %p; func %d; buf %p; isci phy %p, port %p\n",
+ __func__, sas_phy, func, buf, iphy, iport);
+
+ switch (func) {
+ case PHY_FUNC_DISABLE:
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ sci_phy_stop(iphy);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ break;
+
+ case PHY_FUNC_LINK_RESET:
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ sci_phy_stop(iphy);
+ sci_phy_start(iphy);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ break;
+
+ case PHY_FUNC_HARD_RESET:
+ if (!iport)
+ return -ENODEV;
+
+ /* Perform the port reset. */
+ ret = isci_port_perform_hard_reset(ihost, iport, iphy);
+
+ break;
+
+ default:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: phy %p; func %d NOT IMPLEMENTED!\n",
+ __func__, sas_phy, func);
+ ret = -ENOSYS;
+ break;
+ }
+ return ret;
+}
diff --git a/drivers/scsi/isci/phy.h b/drivers/scsi/isci/phy.h
new file mode 100644
index 00000000000..67699c8e321
--- /dev/null
+++ b/drivers/scsi/isci/phy.h
@@ -0,0 +1,504 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ISCI_PHY_H_
+#define _ISCI_PHY_H_
+
+#include <scsi/sas.h>
+#include <scsi/libsas.h>
+#include "isci.h"
+#include "sas.h"
+
+/* This is the timeout value for the SATA phy to wait for a SIGNATURE FIS
+ * before restarting the starting state machine. Technically, the old parallel
+ * ATA specification required up to 30 seconds for a device to issue its
+ * signature FIS as a result of a soft reset. Now we see that devices respond
+ * generally within 15 seconds, but we'll use 25 for now.
+ */
+#define SCIC_SDS_SIGNATURE_FIS_TIMEOUT 25000
+
+/* This is the timeout for the SATA OOB/SN because the hardware does not
+ * recognize a hot plug after OOB signal but before the SN signals. We need to
+ * make sure that, if we have not received the speed event notification from
+ * the hardware within this timeout, we restart the hardware OOB state
+ * machine.
+ */
+#define SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT 250
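+
+/* Usage note: both timeouts arm the phy's SATA timer, as in phy.c's
+ * substate enter routines:
+ *
+ * sci_mod_timer(&iphy->sata_timer, SCIC_SDS_SATA_LINK_TRAINING_TIMEOUT);
+ *
+ * The values read as milliseconds, assuming sci_mod_timer() takes a
+ * millisecond timeout (a sketch of the convention, not a new API).
+ */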
+
+enum sci_phy_protocol {
+ SCIC_SDS_PHY_PROTOCOL_UNKNOWN,
+ SCIC_SDS_PHY_PROTOCOL_SAS,
+ SCIC_SDS_PHY_PROTOCOL_SATA,
+ SCIC_SDS_MAX_PHY_PROTOCOLS
+};
+
+/**
+ * isci_phy - hba local phy infrastructure
+ * @sm: phy state machine
+ * @protocol: attached device protocol
+ * @phy_index: physical index relative to the controller (0-3)
+ * @bcn_received_while_port_unassigned: bcn to report after port association
+ * @sata_timer: timeout SATA signature FIS arrival
+ */
+struct isci_phy {
+ struct sci_base_state_machine sm;
+ struct isci_port *owning_port;
+ enum sas_linkrate max_negotiated_speed;
+ enum sci_phy_protocol protocol;
+ u8 phy_index;
+ bool bcn_received_while_port_unassigned;
+ bool is_in_link_training;
+ struct sci_timer sata_timer;
+ struct scu_transport_layer_registers __iomem *transport_layer_registers;
+ struct scu_link_layer_registers __iomem *link_layer_registers;
+ struct asd_sas_phy sas_phy;
+ struct isci_port *isci_port;
+ u8 sas_addr[SAS_ADDR_SIZE];
+ union {
+ struct sas_identify_frame iaf;
+ struct dev_to_host_fis fis;
+ } frame_rcvd;
+};
+
+static inline struct isci_phy *to_iphy(struct asd_sas_phy *sas_phy)
+{
+ struct isci_phy *iphy = container_of(sas_phy, typeof(*iphy), sas_phy);
+
+ return iphy;
+}
+
+struct sci_phy_cap {
+ union {
+ struct {
+ /*
+ * The SAS specification indicates the start bit shall
+ * always be set to 1. This implementation will have
+ * the start bit set to 0 if the PHY CAPABILITIES were
+ * either not received or speed negotiation failed.
+ */
+ u8 start:1;
+ u8 tx_ssc_type:1;
+ u8 res1:2;
+ u8 req_logical_linkrate:4;
+
+ u32 gen1_no_ssc:1;
+ u32 gen1_ssc:1;
+ u32 gen2_no_ssc:1;
+ u32 gen2_ssc:1;
+ u32 gen3_no_ssc:1;
+ u32 gen3_ssc:1;
+ u32 res2:17;
+ u32 parity:1;
+ };
+ u32 all;
+ };
+} __packed;
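+
+/* The union lets a capability dword be loaded wholesale and then picked
+ * apart via the bitfields. A minimal sketch, with a hypothetical raw value
+ * and assuming the dword is already in CPU byte order:
+ *
+ * struct sci_phy_cap cap = { .all = raw_cap_dword };
+ * if (cap.gen2_ssc)
+ * ; // attached phy advertises G2 (3.0 Gbps) with SSC
+ */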
+
+/* this data structure reflects the link layer transmit identification reg */
+struct sci_phy_proto {
+ union {
+ struct {
+ u16 _r_a:1;
+ u16 smp_iport:1;
+ u16 stp_iport:1;
+ u16 ssp_iport:1;
+ u16 _r_b:4;
+ u16 _r_c:1;
+ u16 smp_tport:1;
+ u16 stp_tport:1;
+ u16 ssp_tport:1;
+ u16 _r_d:4;
+ };
+ u16 all;
+ };
+} __packed;
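+
+/* Usage note: sci_port_get_protocols() in port.c (later in this patch)
+ * zeroes one sci_phy_proto and lets each member phy fold in its copy of
+ * this register (presumably OR-ing the bits), so a port reports the union
+ * of its phys' initiator/target protocol bits.
+ */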
+
+
+/**
+ * struct sci_phy_properties - This structure defines the properties common to
+ * all phys that can be retrieved.
+ *
+ *
+ */
+struct sci_phy_properties {
+ /**
+ * This field specifies the port that currently contains the
+ * supplied phy. This field may be set to NULL
+ * if the phy is not currently contained in a port.
+ */
+ struct isci_port *iport;
+
+ /**
+ * This field specifies the link rate at which the phy is
+ * currently operating.
+ */
+ enum sas_linkrate negotiated_link_rate;
+
+ /**
+ * This field specifies the index of the phy in relation to other
+ * phys within the controller. This index is zero relative.
+ */
+ u8 index;
+};
+
+/**
+ * struct sci_sas_phy_properties - This structure defines the properties,
+ * specific to a SAS phy, that can be retrieved.
+ *
+ *
+ */
+struct sci_sas_phy_properties {
+ /**
+ * This field delineates the Identify Address Frame received
+ * from the remote end point.
+ */
+ struct sas_identify_frame rcvd_iaf;
+
+ /**
+ * This field delineates the Phy capabilities structure received
+ * from the remote end point.
+ */
+ struct sci_phy_cap rcvd_cap;
+
+};
+
+/**
+ * struct sci_sata_phy_properties - This structure defines the properties,
+ * specific to a SATA phy, that can be retrieved.
+ *
+ *
+ */
+struct sci_sata_phy_properties {
+ /**
+ * This field delineates the signature FIS received from the
+ * attached target.
+ */
+ struct dev_to_host_fis signature_fis;
+
+ /**
+ * This field specifies to the user if a port selector is connected
+ * on the specified phy.
+ */
+ bool is_port_selector_present;
+
+};
+
+/**
+ * enum sci_phy_counter_id - This enumeration depicts the various pieces of
+ * optional information that can be retrieved for a specific phy.
+ *
+ *
+ */
+enum sci_phy_counter_id {
+ /**
+ * This PHY information field tracks the number of frames received.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_FRAME,
+
+ /**
+ * This PHY information field tracks the number of frames transmitted.
+ */
+ SCIC_PHY_COUNTER_TRANSMITTED_FRAME,
+
+ /**
+ * This PHY information field tracks the number of DWORDs received.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_FRAME_WORD,
+
+ /**
+ * This PHY information field tracks the number of DWORDs transmitted.
+ */
+ SCIC_PHY_COUNTER_TRANSMITTED_FRAME_DWORD,
+
+ /**
+ * This PHY information field tracks the number of times DWORD
+ * synchronization was lost.
+ */
+ SCIC_PHY_COUNTER_LOSS_OF_SYNC_ERROR,
+
+ /**
+ * This PHY information field tracks the number of received DWORDs with
+ * running disparity errors.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_DISPARITY_ERROR,
+
+ /**
+ * This PHY information field tracks the number of received frames with a
+ * CRC error (not including short or truncated frames).
+ */
+ SCIC_PHY_COUNTER_RECEIVED_FRAME_CRC_ERROR,
+
+ /**
+ * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT)
+ * primitives received.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_DONE_ACK_NAK_TIMEOUT,
+
+ /**
+ * This PHY information field tracks the number of DONE (ACK/NAK TIMEOUT)
+ * primitives transmitted.
+ */
+ SCIC_PHY_COUNTER_TRANSMITTED_DONE_ACK_NAK_TIMEOUT,
+
+ /**
+ * This PHY information field tracks the number of times the inactivity
+ * timer for connections on the phy has been utilized.
+ */
+ SCIC_PHY_COUNTER_INACTIVITY_TIMER_EXPIRED,
+
+ /**
+ * This PHY information field tracks the number of DONE (CREDIT TIMEOUT)
+ * primitives received.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_DONE_CREDIT_TIMEOUT,
+
+ /**
+ * This PHY information field tracks the number of DONE (CREDIT TIMEOUT)
+ * primitives transmitted.
+ */
+ SCIC_PHY_COUNTER_TRANSMITTED_DONE_CREDIT_TIMEOUT,
+
+ /**
+ * This PHY information field tracks the number of CREDIT BLOCKED
+ * primitives received.
+ * @note Depending on remote device implementation, credit blocks
+ * may occur regularly.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_CREDIT_BLOCKED,
+
+ /**
+ * This PHY information field contains the number of short frames
+ * received. A short frame is simply a frame smaller than what is
+ * allowed by either the SAS or SATA specification.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_SHORT_FRAME,
+
+ /**
+ * This PHY information field contains the number of frames received after
+ * credit has been exhausted.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_FRAME_WITHOUT_CREDIT,
+
+ /**
+ * This PHY information field contains the number of frames received after
+ * a DONE has been received.
+ */
+ SCIC_PHY_COUNTER_RECEIVED_FRAME_AFTER_DONE,
+
+ /**
+ * This PHY information field contains the number of times the phy
+ * failed to achieve DWORD synchronization during speed negotiation.
+ */
+ SCIC_PHY_COUNTER_SN_DWORD_SYNC_ERROR
+};
+
+enum sci_phy_states {
+ /**
+ * Simply the initial state for the base domain state machine.
+ */
+ SCI_PHY_INITIAL,
+
+ /**
+ * This state indicates that the phy has successfully been stopped.
+ * In this state no new IO operations are permitted on this phy.
+ * This state is entered from the INITIAL state.
+ * This state is entered from the STARTING state.
+ * This state is entered from the READY state.
+ * This state is entered from the RESETTING state.
+ */
+ SCI_PHY_STOPPED,
+
+ /**
+ * This state indicates that the phy is in the process of becoming
+ * ready. In this state no new IO operations are permitted on this phy.
+ * This state is entered from the STOPPED state.
+ * This state is entered from the READY state.
+ * This state is entered from the RESETTING state.
+ */
+ SCI_PHY_STARTING,
+
+ /**
+ * Initial state
+ */
+ SCI_PHY_SUB_INITIAL,
+
+ /**
+ * Wait state for the hardware OSSP event type notification
+ */
+ SCI_PHY_SUB_AWAIT_OSSP_EN,
+
+ /**
+ * Wait state for the PHY speed notification
+ */
+ SCI_PHY_SUB_AWAIT_SAS_SPEED_EN,
+
+ /**
+ * Wait state for the IAF Unsolicited frame notification
+ */
+ SCI_PHY_SUB_AWAIT_IAF_UF,
+
+ /**
+ * Wait state for the request to consume power
+ */
+ SCI_PHY_SUB_AWAIT_SAS_POWER,
+
+ /**
+ * Wait state for request to consume power
+ */
+ SCI_PHY_SUB_AWAIT_SATA_POWER,
+
+ /**
+ * Wait state for the SATA PHY notification
+ */
+ SCI_PHY_SUB_AWAIT_SATA_PHY_EN,
+
+ /**
+ * Wait for the SATA PHY speed notification
+ */
+ SCI_PHY_SUB_AWAIT_SATA_SPEED_EN,
+
+ /**
+ * Wait state for the SIGNATURE FIS unsolicited frame notification
+ */
+ SCI_PHY_SUB_AWAIT_SIG_FIS_UF,
+
+ /**
+ * Exit state for this state machine
+ */
+ SCI_PHY_SUB_FINAL,
+
+ /**
+ * This state indicates that the phy is now ready. Thus, the user
+ * is able to perform IO operations utilizing this phy as long as it
+ * is currently part of a valid port.
+ * This state is entered from the STARTING state.
+ */
+ SCI_PHY_READY,
+
+ /**
+ * This state indicates that the phy is in the process of being reset.
+ * In this state no new IO operations are permitted on this phy.
+ * This state is entered from the READY state.
+ */
+ SCI_PHY_RESETTING,
+
+ /**
+ * Simply the final state for the base phy state machine.
+ */
+ SCI_PHY_FINAL,
+};
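+
+/* A sketch of the typical starting substate progressions, inferred from the
+ * event and frame handlers in phy.c (not an exhaustive transition list):
+ *
+ * SAS: SUB_INITIAL -> SUB_AWAIT_OSSP_EN -> SUB_AWAIT_SAS_SPEED_EN ->
+ * SUB_AWAIT_IAF_UF -> SUB_AWAIT_SAS_POWER -> SUB_FINAL
+ * (expanders skip SUB_AWAIT_SAS_POWER)
+ * SATA: SUB_INITIAL -> SUB_AWAIT_OSSP_EN -> SUB_AWAIT_SATA_POWER ->
+ * SUB_AWAIT_SATA_PHY_EN -> SUB_AWAIT_SATA_SPEED_EN ->
+ * SUB_AWAIT_SIG_FIS_UF -> SUB_FINAL
+ *
+ * Link failure in any wait substate drops back to SCI_PHY_STARTING.
+ */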
+
+void sci_phy_construct(
+ struct isci_phy *iphy,
+ struct isci_port *iport,
+ u8 phy_index);
+
+struct isci_port *phy_get_non_dummy_port(struct isci_phy *iphy);
+
+void sci_phy_set_port(
+ struct isci_phy *iphy,
+ struct isci_port *iport);
+
+enum sci_status sci_phy_initialize(
+ struct isci_phy *iphy,
+ struct scu_transport_layer_registers __iomem *transport_layer_registers,
+ struct scu_link_layer_registers __iomem *link_layer_registers);
+
+enum sci_status sci_phy_start(
+ struct isci_phy *iphy);
+
+enum sci_status sci_phy_stop(
+ struct isci_phy *iphy);
+
+enum sci_status sci_phy_reset(
+ struct isci_phy *iphy);
+
+void sci_phy_resume(
+ struct isci_phy *iphy);
+
+void sci_phy_setup_transport(
+ struct isci_phy *iphy,
+ u32 device_id);
+
+enum sci_status sci_phy_event_handler(
+ struct isci_phy *iphy,
+ u32 event_code);
+
+enum sci_status sci_phy_frame_handler(
+ struct isci_phy *iphy,
+ u32 frame_index);
+
+enum sci_status sci_phy_consume_power_handler(
+ struct isci_phy *iphy);
+
+void sci_phy_get_sas_address(
+ struct isci_phy *iphy,
+ struct sci_sas_address *sas_address);
+
+void sci_phy_get_attached_sas_address(
+ struct isci_phy *iphy,
+ struct sci_sas_address *sas_address);
+
+struct sci_phy_proto;
+void sci_phy_get_protocols(
+ struct isci_phy *iphy,
+ struct sci_phy_proto *protocols);
+enum sas_linkrate sci_phy_linkrate(struct isci_phy *iphy);
+
+struct isci_host;
+void isci_phy_init(struct isci_phy *iphy, struct isci_host *ihost, int index);
+int isci_phy_control(struct asd_sas_phy *phy, enum phy_func func, void *buf);
+
+#endif /* !defined(_ISCI_PHY_H_) */
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
new file mode 100644
index 00000000000..8f6f9b77e41
--- /dev/null
+++ b/drivers/scsi/isci/port.c
@@ -0,0 +1,1757 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "isci.h"
+#include "port.h"
+#include "request.h"
+
+#define SCIC_SDS_PORT_HARD_RESET_TIMEOUT (1000)
+#define SCU_DUMMY_INDEX (0xFFFF)
+
+static void isci_port_change_state(struct isci_port *iport, enum isci_status status)
+{
+ unsigned long flags;
+
+ dev_dbg(&iport->isci_host->pdev->dev,
+ "%s: iport = %p, state = 0x%x\n",
+ __func__, iport, status);
+
+ /* XXX pointless lock */
+ spin_lock_irqsave(&iport->state_lock, flags);
+ iport->status = status;
+ spin_unlock_irqrestore(&iport->state_lock, flags);
+}
+
+static void sci_port_get_protocols(struct isci_port *iport, struct sci_phy_proto *proto)
+{
+ u8 index;
+
+ proto->all = 0;
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ struct isci_phy *iphy = iport->phy_table[index];
+
+ if (!iphy)
+ continue;
+ sci_phy_get_protocols(iphy, proto);
+ }
+}
+
+static u32 sci_port_get_phys(struct isci_port *iport)
+{
+ u32 index;
+ u32 mask;
+
+ mask = 0;
+ for (index = 0; index < SCI_MAX_PHYS; index++)
+ if (iport->phy_table[index])
+ mask |= (1 << index);
+
+ return mask;
+}
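+
+/* For example, a wide port built from phys 0-3 yields a mask of 0xF, while
+ * a narrow port on phy 2 alone yields 0x4. */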
+
+/**
+ * sci_port_get_properties() - This method simply returns the properties
+ * regarding the port, such as: physical index, protocols, sas address, etc.
+ * @iport: this parameter specifies the port for which to retrieve the
+ * properties.
+ * @prop: This parameter specifies the properties structure into which to
+ * copy the requested information.
+ *
+ * Indicate if the user specified a valid port. SCI_SUCCESS This value is
+ * returned if the specified port was valid. SCI_FAILURE_INVALID_PORT This
+ * value is returned if the specified port is not valid. When this value is
+ * returned, no data is copied to the properties output parameter.
+ */
+static enum sci_status sci_port_get_properties(struct isci_port *iport,
+ struct sci_port_properties *prop)
+{
+ if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
+ return SCI_FAILURE_INVALID_PORT;
+
+ prop->index = iport->logical_port_index;
+ prop->phy_mask = sci_port_get_phys(iport);
+ sci_port_get_sas_address(iport, &prop->local.sas_address);
+ sci_port_get_protocols(iport, &prop->local.protocols);
+ sci_port_get_attached_sas_address(iport, &prop->remote.sas_address);
+
+ return SCI_SUCCESS;
+}
+
+static void sci_port_bcn_enable(struct isci_port *iport)
+{
+ struct isci_phy *iphy;
+ u32 val;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
+ iphy = iport->phy_table[i];
+ if (!iphy)
+ continue;
+ val = readl(&iphy->link_layer_registers->link_layer_control);
+ /* The latched BCN bit is write-1-to-clear; writing back the
+ * value just read clears it and re-enables notifications. */
+ writel(val, &iphy->link_layer_registers->link_layer_control);
+ }
+}
+
+/* called under sci_lock to stabilize phy:port associations */
+void isci_port_bcn_enable(struct isci_host *ihost, struct isci_port *iport)
+{
+ int i;
+
+ clear_bit(IPORT_BCN_BLOCKED, &iport->flags);
+ wake_up(&ihost->eventq);
+
+ if (!test_and_clear_bit(IPORT_BCN_PENDING, &iport->flags))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(iport->phy_table); i++) {
+ struct isci_phy *iphy = iport->phy_table[i];
+
+ if (!iphy)
+ continue;
+
+ ihost->sas_ha.notify_port_event(&iphy->sas_phy,
+ PORTE_BROADCAST_RCVD);
+ break;
+ }
+}
+
+static void isci_port_bc_change_received(struct isci_host *ihost,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ if (iport && test_bit(IPORT_BCN_BLOCKED, &iport->flags)) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: disabled BCN; isci_phy = %p, sas_phy = %p\n",
+ __func__, iphy, &iphy->sas_phy);
+ set_bit(IPORT_BCN_PENDING, &iport->flags);
+ atomic_inc(&iport->event);
+ wake_up(&ihost->eventq);
+ } else {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_phy = %p, sas_phy = %p\n",
+ __func__, iphy, &iphy->sas_phy);
+
+ ihost->sas_ha.notify_port_event(&iphy->sas_phy,
+ PORTE_BROADCAST_RCVD);
+ }
+ sci_port_bcn_enable(iport);
+}
+
+static void isci_port_link_up(struct isci_host *isci_host,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ unsigned long flags;
+ struct sci_port_properties properties;
+ unsigned long success = true;
+
+ BUG_ON(iphy->isci_port != NULL);
+
+ iphy->isci_port = iport;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_port = %p\n",
+ __func__, iport);
+
+ spin_lock_irqsave(&iphy->sas_phy.frame_rcvd_lock, flags);
+
+ isci_port_change_state(iphy->isci_port, isci_starting);
+
+ sci_port_get_properties(iport, &properties);
+
+ if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) {
+ u64 attached_sas_address;
+
+ iphy->sas_phy.oob_mode = SATA_OOB_MODE;
+ iphy->sas_phy.frame_rcvd_size = sizeof(struct dev_to_host_fis);
+
+ /*
+ * For direct-attached SATA devices, the SCI core will
+ * automagically assign a SAS address to the end device
+ * for the purpose of creating a port. This SAS address
+ * will not be the same as assigned to the PHY and needs
+ * to be obtained from struct sci_port_properties properties.
+ */
+ attached_sas_address = properties.remote.sas_address.high;
+ attached_sas_address <<= 32;
+ attached_sas_address |= properties.remote.sas_address.low;
+ swab64s(&attached_sas_address);
+
+ memcpy(&iphy->sas_phy.attached_sas_addr,
+ &attached_sas_address, sizeof(attached_sas_address));
+ } else if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+ iphy->sas_phy.oob_mode = SAS_OOB_MODE;
+ iphy->sas_phy.frame_rcvd_size = sizeof(struct sas_identify_frame);
+
+ /* Copy the attached SAS address from the IAF */
+ memcpy(iphy->sas_phy.attached_sas_addr,
+ iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE);
+ } else {
+ dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__);
+ success = false;
+ }
+
+ iphy->sas_phy.phy->negotiated_linkrate = sci_phy_linkrate(iphy);
+
+ spin_unlock_irqrestore(&iphy->sas_phy.frame_rcvd_lock, flags);
+
+ /* Notify libsas that we have an address frame, if indeed
+ * we've found an SSP, SMP, or STP target */
+ if (success)
+ isci_host->sas_ha.notify_port_event(&iphy->sas_phy,
+ PORTE_BYTES_DMAED);
+}
+
+
+/**
+ * isci_port_link_down() - This function is called by the sci core when a link
+ * becomes inactive.
+ * @isci_host: This parameter specifies the isci host object.
+ * @isci_phy: This parameter specifies the isci phy whose link went down.
+ * @isci_port: This parameter specifies the isci port with the affected link.
+ *
+ */
+static void isci_port_link_down(struct isci_host *isci_host,
+ struct isci_phy *isci_phy,
+ struct isci_port *isci_port)
+{
+ struct isci_remote_device *isci_device;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_port = %p\n", __func__, isci_port);
+
+ if (isci_port) {
+
+ /* check to see if this is the last phy on this port. */
+ if (isci_phy->sas_phy.port &&
+ isci_phy->sas_phy.port->num_phys == 1) {
+ atomic_inc(&isci_port->event);
+ isci_port_bcn_enable(isci_host, isci_port);
+
+ /* change the state for all devices on this port. The
+ * next task sent to this device will be returned as
+ * SAS_TASK_UNDELIVERED, and the scsi mid layer will
+ * remove the target
+ */
+ list_for_each_entry(isci_device,
+ &isci_port->remote_dev_list,
+ node) {
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_device = %p\n",
+ __func__, isci_device);
+ set_bit(IDEV_GONE, &isci_device->flags);
+ }
+ }
+ isci_port_change_state(isci_port, isci_stopping);
+ }
+
+ /* Notify libsas of the broken link, this will trigger calls to our
+ * isci_port_deformed and isci_dev_gone functions.
+ */
+ sas_phy_disconnected(&isci_phy->sas_phy);
+ isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL);
+
+ isci_phy->isci_port = NULL;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_port = %p - Done\n", __func__, isci_port);
+}
+
+
+/**
+ * isci_port_ready() - This function is called by the sci core when a link
+ * becomes ready.
+ * @isci_host: This parameter specifies the isci host object.
+ * @isci_port: This parameter specifies the sci port with the active link.
+ *
+ */
+static void isci_port_ready(struct isci_host *isci_host, struct isci_port *isci_port)
+{
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_port = %p\n", __func__, isci_port);
+
+ complete_all(&isci_port->start_complete);
+ isci_port_change_state(isci_port, isci_ready);
+ return;
+}
+
+/**
+ * isci_port_not_ready() - This function is called by the sci core when a link
+ * is not ready. All remote devices on this link will be removed if they are
+ * in the stopping state.
+ * @isci_host: This parameter specifies the isci host object.
+ * @isci_port: This parameter specifies the sci port with the active link.
+ *
+ */
+static void isci_port_not_ready(struct isci_host *isci_host, struct isci_port *isci_port)
+{
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_port = %p\n", __func__, isci_port);
+}
+
+static void isci_port_stop_complete(struct isci_host *ihost,
+ struct isci_port *iport,
+ enum sci_status completion_status)
+{
+ dev_dbg(&ihost->pdev->dev, "Port stop complete\n");
+}
+
+/**
+ * isci_port_hard_reset_complete() - This function is called by the sci core
+ * when the hard reset complete notification has been received.
+ * @isci_port: This parameter specifies the sci port that was hard reset.
+ * @completion_status: This parameter specifies the core status for the reset
+ * process.
+ *
+ */
+static void isci_port_hard_reset_complete(struct isci_port *isci_port,
+ enum sci_status completion_status)
+{
+ dev_dbg(&isci_port->isci_host->pdev->dev,
+ "%s: isci_port = %p, completion_status=%x\n",
+ __func__, isci_port, completion_status);
+
+ /* Save the status of the hard reset from the port. */
+ isci_port->hard_reset_status = completion_status;
+
+ complete_all(&isci_port->hard_reset_complete);
+}
+
+/* This method will return a true value if the specified phy can be assigned
+ * to this port. The following is a list of phys for each port that are
+ * allowed:
+ * - Port 0 - 3 2 1 0
+ * - Port 1 - 1
+ * - Port 2 - 3 2
+ * - Port 3 - 3
+ * This method doesn't preclude all configurations. It merely ensures that a
+ * phy is part of the allowable set of phy identifiers for that port. For
+ * example, one could assign phy 3 to port 0 and no other phys. Please refer
+ * to sci_port_is_phy_mask_valid() for information regarding whether the
+ * phy_mask for a port can be supported. Returns true if this is a valid phy
+ * assignment for the port; false if it is not.
+ */
+bool sci_port_is_valid_phy_assignment(struct isci_port *iport, u32 phy_index)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ struct sci_user_parameters *user = &ihost->user_parameters;
+
+ /* Initialize to invalid value. */
+ u32 existing_phy_index = SCI_MAX_PHYS;
+ u32 index;
+
+ if ((iport->physical_port_index == 1) && (phy_index != 1))
+ return false;
+
+ if (iport->physical_port_index == 3 && phy_index != 3)
+ return false;
+
+ if (iport->physical_port_index == 2 &&
+ (phy_index == 0 || phy_index == 1))
+ return false;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++)
+ if (iport->phy_table[index] && index != phy_index)
+ existing_phy_index = index;
+
+ /* Ensure that all of the phys in the port are capable of
+ * operating at the same maximum link rate.
+ */
+ if (existing_phy_index < SCI_MAX_PHYS &&
+ user->phys[phy_index].max_speed_generation !=
+ user->phys[existing_phy_index].max_speed_generation)
+ return false;
+
+ return true;
+}
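+
+/* Worked example of the rules above: phy 1 may join port 0 or port 1, but
+ * putting phy 0 on port 1 fails the (physical_port_index == 1 &&
+ * phy_index != 1) check, and mixing phys whose max_speed_generation
+ * settings differ in one port is likewise rejected. */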
+
+/**
+ * sci_port_is_phy_mask_valid() - can this phy mask be supported?
+ * @iport: This is the port object for which to determine if the phy mask
+ * can be supported.
+ * @phy_mask: the proposed mask of phys for the port.
+ *
+ * This method will return a true value if the port's phy mask can be supported
+ * by the SCU. The following is a list of valid PHY mask configurations for
+ * each port:
+ * - Port 0 - [[3 2] 1] 0
+ * - Port 1 - [1]
+ * - Port 2 - [[3] 2]
+ * - Port 3 - [3]
+ *
+ * Returns true if the mask is a valid phy assignment for the port; false if
+ * it is not.
+ */
+static bool sci_port_is_phy_mask_valid(
+ struct isci_port *iport,
+ u32 phy_mask)
+{
+ if (iport->physical_port_index == 0) {
+ if (((phy_mask & 0x0F) == 0x0F)
+ || ((phy_mask & 0x03) == 0x03)
+ || ((phy_mask & 0x01) == 0x01)
+ || (phy_mask == 0))
+ return true;
+ } else if (iport->physical_port_index == 1) {
+ if (((phy_mask & 0x02) == 0x02)
+ || (phy_mask == 0))
+ return true;
+ } else if (iport->physical_port_index == 2) {
+ if (((phy_mask & 0x0C) == 0x0C)
+ || ((phy_mask & 0x04) == 0x04)
+ || (phy_mask == 0))
+ return true;
+ } else if (iport->physical_port_index == 3) {
+ if (((phy_mask & 0x08) == 0x08)
+ || (phy_mask == 0))
+ return true;
+ }
+
+ return false;
+}
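+
+/* The checks above admit, per port: port 0 - 0x0F, 0x03, 0x01 or 0;
+ * port 1 - 0x02 or 0; port 2 - 0x0C, 0x04 or 0; port 3 - 0x08 or 0.
+ * Note the tests use masked equality, so e.g. (phy_mask & 0x03) == 0x03
+ * also admits any mask whose low two bits are both set. */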
+
+/*
+ * This method retrieves a currently active (i.e. connected) phy contained in
+ * the port. Currently, the lowest order phy that is connected is returned.
+ * This method returns a pointer to a struct isci_phy object. NULL is
+ * returned if there are no currently active (i.e. connected to a remote end
+ * point) phys contained in the port. All other values specify a struct
+ * isci_phy object that is active in the port.
+ */
+static struct isci_phy *sci_port_get_a_connected_phy(struct isci_port *iport)
+{
+ u32 index;
+ struct isci_phy *iphy;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ /* Ensure that the phy is both part of the port and currently
+ * connected to the remote end-point.
+ */
+ iphy = iport->phy_table[index];
+ if (iphy && sci_port_active_phy(iport, iphy))
+ return iphy;
+ }
+
+ return NULL;
+}
+
+static enum sci_status sci_port_set_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+ /* Check to see if we can add this phy to a port
+ * that means that the phy is not part of a port and that the port does
+ * not already have a phy assigned to the phy index.
+ */
+ if (!iport->phy_table[iphy->phy_index] &&
+ !phy_get_non_dummy_port(iphy) &&
+ sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
+ /* Phy is being added in the stopped state so we are in MPC mode
+ * make logical port index = physical port index
+ */
+ iport->logical_port_index = iport->physical_port_index;
+ iport->phy_table[iphy->phy_index] = iphy;
+ sci_phy_set_port(iphy, iport);
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE;
+}
+
+static enum sci_status sci_port_clear_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+ /* Make sure that this phy is part of this port */
+ if (iport->phy_table[iphy->phy_index] == iphy &&
+ phy_get_non_dummy_port(iphy) == iport) {
+ struct isci_host *ihost = iport->owning_controller;
+
+ /* Yep it is assigned to this port so remove it */
+ sci_phy_set_port(iphy, &ihost->ports[SCI_MAX_PORTS]);
+ iport->phy_table[iphy->phy_index] = NULL;
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE;
+}
+
+void sci_port_get_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
+{
+ u32 index;
+
+ sas->high = 0;
+ sas->low = 0;
+ for (index = 0; index < SCI_MAX_PHYS; index++)
+ if (iport->phy_table[index])
+ sci_phy_get_sas_address(iport->phy_table[index], sas);
+}
+
+void sci_port_get_attached_sas_address(struct isci_port *iport, struct sci_sas_address *sas)
+{
+ struct isci_phy *iphy;
+
+ /*
+ * Ensure that the phy is both part of the port and currently
+ * connected to the remote end-point.
+ */
+ iphy = sci_port_get_a_connected_phy(iport);
+ if (iphy) {
+ if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA) {
+ sci_phy_get_attached_sas_address(iphy, sas);
+ } else {
+ sci_phy_get_sas_address(iphy, sas);
+ sas->low += iphy->phy_index;
+ }
+ } else {
+ sas->high = 0;
+ sas->low = 0;
+ }
+}
+
+/**
+ * sci_port_construct_dummy_rnc() - create dummy rnc for si workaround
+ *
+ * @sci_port: logical port on which we need to create the remote node context
+ * @rni: remote node index for this remote node context.
+ *
+ * This routine will construct a dummy remote node context data structure
+ * This structure will be posted to the hardware to work around a scheduler
+ * error in the hardware.
+ */
+static void sci_port_construct_dummy_rnc(struct isci_port *iport, u16 rni)
+{
+ union scu_remote_node_context *rnc;
+
+ rnc = &iport->owning_controller->remote_node_context_table[rni];
+
+ memset(rnc, 0, sizeof(union scu_remote_node_context));
+
+ rnc->ssp.remote_sas_address_hi = 0;
+ rnc->ssp.remote_sas_address_lo = 0;
+
+ rnc->ssp.remote_node_index = rni;
+ rnc->ssp.remote_node_port_width = 1;
+ rnc->ssp.logical_port_index = iport->physical_port_index;
+
+ rnc->ssp.nexus_loss_timer_enable = false;
+ rnc->ssp.check_bit = false;
+ rnc->ssp.is_valid = true;
+ rnc->ssp.is_remote_node_context = true;
+ rnc->ssp.function_number = 0;
+ rnc->ssp.arbitration_wait_time = 0;
+}
+
+/*
+ * construct a dummy task context data structure. This
+ * structure will be posted to the hardware to work around a scheduler error
+ * in the hardware.
+ */
+static void sci_port_construct_dummy_task(struct isci_port *iport, u16 tag)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ struct scu_task_context *task_context;
+
+ task_context = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
+ memset(task_context, 0, sizeof(struct scu_task_context));
+
+ task_context->initiator_request = 1;
+ task_context->connection_rate = 1;
+ task_context->logical_port_index = iport->physical_port_index;
+ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
+ task_context->task_index = ISCI_TAG_TCI(tag);
+ task_context->valid = SCU_TASK_CONTEXT_VALID;
+ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+ task_context->remote_node_index = iport->reserved_rni;
+ task_context->do_not_dma_ssp_good_response = 1;
+ task_context->task_phase = 0x01;
+}
+
+static void sci_port_destroy_dummy_resources(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+
+ if (iport->reserved_tag != SCI_CONTROLLER_INVALID_IO_TAG)
+ isci_free_tag(ihost, iport->reserved_tag);
+
+ if (iport->reserved_rni != SCU_DUMMY_INDEX)
+ sci_remote_node_table_release_remote_node_index(&ihost->available_remote_nodes,
+ 1, iport->reserved_rni);
+
+ iport->reserved_rni = SCU_DUMMY_INDEX;
+ iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
+void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
+{
+ u8 index;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ if (iport->active_phy_mask & (1 << index))
+ sci_phy_setup_transport(iport->phy_table[index], device_id);
+ }
+}
+
+static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy,
+ bool do_notify_user)
+{
+ struct isci_host *ihost = iport->owning_controller;
+
+ if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA)
+ sci_phy_resume(iphy);
+
+ iport->active_phy_mask |= 1 << iphy->phy_index;
+
+ sci_controller_clear_invalid_phy(ihost, iphy);
+
+ if (do_notify_user == true)
+ isci_port_link_up(ihost, iport, iphy);
+}
+
+void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
+ bool do_notify_user)
+{
+ struct isci_host *ihost = iport->owning_controller;
+
+ iport->active_phy_mask &= ~(1 << iphy->phy_index);
+
+ iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
+
+ /* Re-assign the phy back to the LP as if it were a narrow port */
+ writel(iphy->phy_index,
+ &iport->port_pe_configuration_register[iphy->phy_index]);
+
+ if (do_notify_user == true)
+ isci_port_link_down(ihost, iphy, iport);
+}
+
+static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *iphy)
+{
+ struct isci_host *ihost = iport->owning_controller;
+
+ /*
+ * Check to see if we have already reported this link as bad and,
+ * if not, go ahead and tell the SCI_USER that we have discovered
+ * an invalid link.
+ */
+ if ((ihost->invalid_phy_mask & (1 << iphy->phy_index)) == 0) {
+ ihost->invalid_phy_mask |= 1 << iphy->phy_index;
+ dev_warn(&ihost->pdev->dev, "Invalid link up!\n");
+ }
+}
+
+static bool is_port_ready_state(enum sci_port_states state)
+{
+ switch (state) {
+ case SCI_PORT_READY:
+ case SCI_PORT_SUB_WAITING:
+ case SCI_PORT_SUB_OPERATIONAL:
+ case SCI_PORT_SUB_CONFIGURING:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* flag dummy rnc handling when exiting a ready state */
+static void port_state_machine_change(struct isci_port *iport,
+ enum sci_port_states state)
+{
+ struct sci_base_state_machine *sm = &iport->sm;
+ enum sci_port_states old_state = sm->current_state_id;
+
+ if (is_port_ready_state(old_state) && !is_port_ready_state(state))
+ iport->ready_exit = true;
+
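+ /* ready_exit is consumed by the state exit handlers invoked from
+ * sci_change_state() and then cleared for the next transition
+ */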
+ sci_change_state(sm, state);
+ iport->ready_exit = false;
+}
+
+/**
+ * sci_port_general_link_up_handler - phy can be assigned to port?
+ * @iport: the port that has a phy that has gone link up.
+ * @iphy: This is the struct isci_phy object that has gone link up.
+ * @do_notify_user: This parameter specifies whether to inform the user (via
+ *    sci_port_link_up()) as to the fact that a new phy has become ready.
+ *
+ * Determine if this phy can be assigned to this port.  If the phy is not a
+ * valid PHY for this port then the function will notify the user.  A PHY
+ * can only be part of a port if its attached SAS address is the same as
+ * that of all other PHYs in the same port.
+ */
+static void sci_port_general_link_up_handler(struct isci_port *iport,
+ struct isci_phy *iphy,
+ bool do_notify_user)
+{
+ struct sci_sas_address port_sas_address;
+ struct sci_sas_address phy_sas_address;
+
+ sci_port_get_attached_sas_address(iport, &port_sas_address);
+ sci_phy_get_attached_sas_address(iphy, &phy_sas_address);
+
+ /* If the SAS address of the new phy matches the SAS address of
+ * other phys in the port OR this is the first phy in the port,
+ * then activate the phy and allow it to be used for operations
+ * in this port.
+ */
+ if ((phy_sas_address.high == port_sas_address.high &&
+ phy_sas_address.low == port_sas_address.low) ||
+ iport->active_phy_mask == 0) {
+ struct sci_base_state_machine *sm = &iport->sm;
+
+ sci_port_activate_phy(iport, iphy, do_notify_user);
+ if (sm->current_state_id == SCI_PORT_RESETTING)
+ port_state_machine_change(iport, SCI_PORT_READY);
+ } else
+ sci_port_invalid_link_up(iport, iphy);
+}
+
+
+/**
+ * This method returns false if the port has exactly one phy object
+ * assigned to it.  If there are no phys or more than one phy then the
+ * method returns true.
+ * @iport: The port for which the wide port condition is to be checked.
+ *
+ * Returns true if this is a wide port and false if it is a narrow port.
+ */
+static bool sci_port_is_wide(struct isci_port *iport)
+{
+ u32 index;
+ u32 phy_count = 0;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ if (iport->phy_table[index] != NULL) {
+ phy_count++;
+ }
+ }
+
+ return phy_count != 1;
+}
+
+/**
+ * This method is called by the PHY object when the link is detected.
+ *    If the port wants the PHY to continue on to the link up state then
+ *    the port layer must return true.  If the port object returns false
+ *    the phy object must halt its attempt to go link up.
+ * @iport: The port associated with the phy object.
+ * @iphy: The phy object that is trying to go link up.
+ *
+ * Returns true if the phy object can continue to the ready state and
+ * false if it can not.  This notification is in place for wide ports
+ * and direct attached phys.  Since there are no wide ported SATA
+ * devices this could become an invalid port configuration.
+ */
+bool sci_port_link_detected(
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
+ (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) &&
+ sci_port_is_wide(iport)) {
+ sci_port_invalid_link_up(iport, iphy);
+
+ return false;
+ }
+
+ return true;
+}
+
+static void port_timeout(unsigned long data)
+{
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct isci_port *iport = container_of(tmr, typeof(*iport), timer);
+ struct isci_host *ihost = iport->owning_controller;
+ unsigned long flags;
+ u32 current_state;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ current_state = iport->sm.current_state_id;
+
+ if (current_state == SCI_PORT_RESETTING) {
+ /* if the port is still in the resetting state then the timeout
+ * fired before the reset completed.
+ */
+ port_state_machine_change(iport, SCI_PORT_FAILED);
+ } else if (current_state == SCI_PORT_STOPPED) {
+ /* if the port is stopped then the start request failed.  In
+ * this case stay in the stopped state.
+ */
+ dev_err(sciport_to_dev(iport),
+ "%s: SCIC Port 0x%p failed to stop before timeout.\n",
+ __func__,
+ iport);
+ } else if (current_state == SCI_PORT_STOPPING) {
+ /* if the port is still stopping then the stop has not completed */
+ isci_port_stop_complete(iport->owning_controller,
+ iport,
+ SCI_FAILURE_TIMEOUT);
+ } else {
+ /* The port is in the ready state and we have a timer
+ * reporting a timeout; this should not happen.
+ */
+ dev_err(sciport_to_dev(iport),
+ "%s: SCIC Port 0x%p is processing a timeout operation "
+ "in state %d.\n", __func__, iport, current_state);
+ }
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/* --------------------------------------------------------------------------- */
+
+/**
+ * sci_port_update_viit_entry() - update the hardware's VIIT entry for this
+ *    port
+ * @iport: the port whose VIIT entry is to be written
+ */
+static void sci_port_update_viit_entry(struct isci_port *iport)
+{
+ struct sci_sas_address sas_address;
+
+ sci_port_get_sas_address(iport, &sas_address);
+
+ writel(sas_address.high,
+ &iport->viit_registers->initiator_sas_address_hi);
+ writel(sas_address.low,
+ &iport->viit_registers->initiator_sas_address_lo);
+
+ /* This value gets cleared just in case it's not already cleared */
+ writel(0, &iport->viit_registers->reserved);
+
+ /* We are required to update the status register last */
+ writel(SCU_VIIT_ENTRY_ID_VIIT |
+ SCU_VIIT_IPPT_INITIATOR |
+ ((1 << iport->physical_port_index) << SCU_VIIT_ENTRY_LPVIE_SHIFT) |
+ SCU_VIIT_STATUS_ALL_VALID,
+ &iport->viit_registers->status);
+}
+
+enum sas_linkrate sci_port_get_max_allowed_speed(struct isci_port *iport)
+{
+ u16 index;
+ struct isci_phy *iphy;
+ enum sas_linkrate max_allowed_speed = SAS_LINK_RATE_6_0_GBPS;
+
+ /*
+ * Loop through all of the phys in this port and find the phy with the
+ * lowest maximum link rate. */
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ iphy = iport->phy_table[index];
+ if (iphy && sci_port_active_phy(iport, iphy) &&
+ iphy->max_negotiated_speed < max_allowed_speed)
+ max_allowed_speed = iphy->max_negotiated_speed;
+ }
+
+ return max_allowed_speed;
+}
+
+static void sci_port_suspend_port_task_scheduler(struct isci_port *iport)
+{
+ u32 pts_control_value;
+
+ pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+ pts_control_value |= SCU_PTSxCR_GEN_BIT(SUSPEND);
+ writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+/**
+ * sci_port_post_dummy_request() - post dummy/workaround request
+ * @sci_port: port to post task
+ *
+ * Prevent the hardware scheduler from posting new requests to the front
+ * of the scheduler queue causing a starvation problem for currently
+ * ongoing requests.
+ *
+ */
+static void sci_port_post_dummy_request(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ u16 tag = iport->reserved_tag;
+ struct scu_task_context *tc;
+ u32 command;
+
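+ /* reuse the reserved task context; clear any abort flag left over
+ * from an earlier dummy-request abort so this post starts fresh
+ */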
+ tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
+ tc->abort = 0;
+
+ command = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
+ ISCI_TAG_TCI(tag);
+
+ sci_controller_post_request(ihost, command);
+}
+
+/**
+ * This routine will abort the dummy request.  This will allow the hardware
+ * to power down parts of the silicon to save power.
+ *
+ * @iport: The port on which the task must be aborted.
+ */
+static void sci_port_abort_dummy_request(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ u16 tag = iport->reserved_tag;
+ struct scu_task_context *tc;
+ u32 command;
+
+ tc = &ihost->task_context_table[ISCI_TAG_TCI(tag)];
+ tc->abort = 1;
+
+ command = SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT |
+ iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT |
+ ISCI_TAG_TCI(tag);
+
+ sci_controller_post_request(ihost, command);
+}
+
+/**
+ * @iport: This is the struct isci_port object to resume.
+ *
+ * This method will resume the port task scheduler for this port object.
+ */
+static void
+sci_port_resume_port_task_scheduler(struct isci_port *iport)
+{
+ u32 pts_control_value;
+
+ pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+ pts_control_value &= ~SCU_PTSxCR_GEN_BIT(SUSPEND);
+ writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ sci_port_suspend_port_task_scheduler(iport);
+
+ iport->not_ready_reason = SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS;
+
+ if (iport->active_phy_mask != 0) {
+ /* At least one of the phys on the port is ready */
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_OPERATIONAL);
+ }
+}
+
+static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
+{
+ u32 index;
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+ struct isci_host *ihost = iport->owning_controller;
+
+ isci_port_ready(ihost, iport);
+
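+ /* point each member phy's PE configuration entry at this port */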
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ if (iport->phy_table[index]) {
+ writel(iport->physical_port_index,
+ &iport->port_pe_configuration_register[
+ iport->phy_table[index]->phy_index]);
+ }
+ }
+
+ sci_port_update_viit_entry(iport);
+
+ sci_port_resume_port_task_scheduler(iport);
+
+ /*
+ * Post the dummy task for the port so the hardware can schedule
+ * I/O correctly
+ */
+ sci_port_post_dummy_request(iport);
+}
+
+static void sci_port_invalidate_dummy_remote_node(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ u8 phys_index = iport->physical_port_index;
+ union scu_remote_node_context *rnc;
+ u16 rni = iport->reserved_rni;
+ u32 command;
+
+ rnc = &ihost->remote_node_context_table[rni];
+
+ rnc->ssp.is_valid = false;
+
+ /* ensure the preceding tc abort request has reached the
+ * controller and give it ample time to act before posting the rnc
+ * invalidate
+ */
+ readl(&ihost->smu_registers->interrupt_status); /* flush */
+ udelay(10);
+
+ command = SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE |
+ phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
+
+ sci_controller_post_request(ihost, command);
+}
+
+/**
+ * @sm: the state machine embedded in the struct isci_port object.
+ *
+ * This method performs the actions required by the struct isci_port on
+ * exiting SCI_PORT_SUB_OPERATIONAL: it aborts the dummy request, reports
+ * the port not ready and, if the port is leaving the set of ready states,
+ * invalidates the dummy remote node.
+ */
+static void sci_port_ready_substate_operational_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+ struct isci_host *ihost = iport->owning_controller;
+
+ /*
+ * Kill the dummy task for this port if it has not yet posted;
+ * the hardware will treat this as a NOP and just return abort
+ * complete.
+ */
+ sci_port_abort_dummy_request(iport);
+
+ isci_port_not_ready(ihost, iport);
+
+ if (iport->ready_exit)
+ sci_port_invalidate_dummy_remote_node(iport);
+}
+
+static void sci_port_ready_substate_configuring_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+ struct isci_host *ihost = iport->owning_controller;
+
+ if (iport->active_phy_mask == 0) {
+ isci_port_not_ready(ihost, iport);
+
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_WAITING);
+ } else if (iport->started_request_count == 0)
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_OPERATIONAL);
+}
+
+static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ sci_port_suspend_port_task_scheduler(iport);
+ if (iport->ready_exit)
+ sci_port_invalidate_dummy_remote_node(iport);
+}
+
+enum sci_status sci_port_start(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ enum sci_status status = SCI_SUCCESS;
+ enum sci_port_states state;
+ u32 phy_mask;
+
+ state = iport->sm.current_state_id;
+ if (state != SCI_PORT_STOPPED) {
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ if (iport->assigned_device_count > 0) {
+ /* TODO This is a start failure operation because
+ * there are still devices assigned to this port.
+ * There must be no devices assigned to a port on a
+ * start operation.
+ */
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
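+ /* reserve the remote node index and io tag that back the dummy
+ * request workaround for this port
+ */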
+ if (iport->reserved_rni == SCU_DUMMY_INDEX) {
+ u16 rni = sci_remote_node_table_allocate_remote_node(
+ &ihost->available_remote_nodes, 1);
+
+ if (rni != SCU_DUMMY_INDEX)
+ sci_port_construct_dummy_rnc(iport, rni);
+ else
+ status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
+ iport->reserved_rni = rni;
+ }
+
+ if (iport->reserved_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
+ u16 tag;
+
+ tag = isci_alloc_tag(ihost);
+ if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
+ status = SCI_FAILURE_INSUFFICIENT_RESOURCES;
+ else
+ sci_port_construct_dummy_task(iport, tag);
+ iport->reserved_tag = tag;
+ }
+
+ if (status == SCI_SUCCESS) {
+ phy_mask = sci_port_get_phys(iport);
+
+ /*
+ * There are one or more phys assigned to this port. Make sure
+ * the port's phy mask is in fact legal and supported by the
+ * silicon.
+ */
+ if (sci_port_is_phy_mask_valid(iport, phy_mask) == true) {
+ port_state_machine_change(iport,
+ SCI_PORT_READY);
+
+ return SCI_SUCCESS;
+ }
+ status = SCI_FAILURE;
+ }
+
+ if (status != SCI_SUCCESS)
+ sci_port_destroy_dummy_resources(iport);
+
+ return status;
+}
+
+enum sci_status sci_port_stop(struct isci_port *iport)
+{
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_STOPPED:
+ return SCI_SUCCESS;
+ case SCI_PORT_SUB_WAITING:
+ case SCI_PORT_SUB_OPERATIONAL:
+ case SCI_PORT_SUB_CONFIGURING:
+ case SCI_PORT_RESETTING:
+ port_state_machine_change(iport,
+ SCI_PORT_STOPPING);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+static enum sci_status sci_port_hard_reset(struct isci_port *iport, u32 timeout)
+{
+ enum sci_status status = SCI_FAILURE_INVALID_PHY;
+ struct isci_phy *iphy = NULL;
+ enum sci_port_states state;
+ u32 phy_index;
+
+ state = iport->sm.current_state_id;
+ if (state != SCI_PORT_SUB_OPERATIONAL) {
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ /* Select a phy on which we can send the hard reset request. */
+ for (phy_index = 0; phy_index < SCI_MAX_PHYS && !iphy; phy_index++) {
+ iphy = iport->phy_table[phy_index];
+ if (iphy && !sci_port_active_phy(iport, iphy)) {
+ /*
+ * We found a phy but it is not ready; select a
+ * different phy
+ */
+ iphy = NULL;
+ }
+ }
+
+ /* If we have a phy then go ahead and start the reset procedure */
+ if (!iphy)
+ return status;
+ status = sci_phy_reset(iphy);
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ sci_mod_timer(&iport->timer, timeout);
+ iport->not_ready_reason = SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED;
+
+ port_state_machine_change(iport, SCI_PORT_RESETTING);
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_port_add_phy() -
+ * @iport: This parameter specifies the port in which the phy will be added.
+ * @iphy: This parameter is the phy which is to be added to the port.
+ *
+ * This method will add a PHY to the selected port.  It returns an enum
+ * sci_status: SCI_SUCCESS if the phy has been added to the port; any other
+ * status indicates a failure to add the phy to the port.
+ */
+enum sci_status sci_port_add_phy(struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ enum sci_status status;
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_STOPPED: {
+ struct sci_sas_address port_sas_address;
+
+ /* Read the port assigned SAS Address if there is one */
+ sci_port_get_sas_address(iport, &port_sas_address);
+
+ if (port_sas_address.high != 0 && port_sas_address.low != 0) {
+ struct sci_sas_address phy_sas_address;
+
+ /* Make sure that the PHY SAS Address matches the SAS Address
+ * for this port
+ */
+ sci_phy_get_sas_address(iphy, &phy_sas_address);
+
+ if (port_sas_address.high != phy_sas_address.high ||
+ port_sas_address.low != phy_sas_address.low)
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+ return sci_port_set_phy(iport, iphy);
+ }
+ case SCI_PORT_SUB_WAITING:
+ case SCI_PORT_SUB_OPERATIONAL:
+ status = sci_port_set_phy(iport, iphy);
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ sci_port_general_link_up_handler(iport, iphy, true);
+ iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
+ port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
+
+ return status;
+ case SCI_PORT_SUB_CONFIGURING:
+ status = sci_port_set_phy(iport, iphy);
+
+ if (status != SCI_SUCCESS)
+ return status;
+ sci_port_general_link_up_handler(iport, iphy, true);
+
+ /* Re-enter the configuring state since this may be the last phy in
+ * the port.
+ */
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_CONFIGURING);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+/**
+ * sci_port_remove_phy() -
+ * @iport: This parameter specifies the port from which the phy will be
+ *    removed.
+ * @iphy: This parameter is the phy which is to be removed from the port.
+ *
+ * This method will remove the PHY from the selected PORT.  It returns an
+ * enum sci_status: SCI_SUCCESS if the phy has been removed from the port;
+ * any other status indicates a failure to remove the phy from the port.
+ */
+enum sci_status sci_port_remove_phy(struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ enum sci_status status;
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+
+ switch (state) {
+ case SCI_PORT_STOPPED:
+ return sci_port_clear_phy(iport, iphy);
+ case SCI_PORT_SUB_OPERATIONAL:
+ status = sci_port_clear_phy(iport, iphy);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ sci_port_deactivate_phy(iport, iphy, true);
+ iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_CONFIGURING);
+ return SCI_SUCCESS;
+ case SCI_PORT_SUB_CONFIGURING:
+ status = sci_port_clear_phy(iport, iphy);
+
+ if (status != SCI_SUCCESS)
+ return status;
+ sci_port_deactivate_phy(iport, iphy, true);
+
+ /* Re-enter the configuring state since this may be the last phy in
+ * the port
+ */
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_CONFIGURING);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_port_link_up(struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_SUB_WAITING:
+ /* Since this is the first phy going link up for the port we
+ * can just enable it and continue
+ */
+ sci_port_activate_phy(iport, iphy, true);
+
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_OPERATIONAL);
+ return SCI_SUCCESS;
+ case SCI_PORT_SUB_OPERATIONAL:
+ sci_port_general_link_up_handler(iport, iphy, true);
+ return SCI_SUCCESS;
+ case SCI_PORT_RESETTING:
+ /* TODO We should make sure that the phy that has gone
+ * link up is the same one on which we sent the reset.  It is
+ * possible that the phy on which we sent the reset is not the
+ * one that has gone link up and we want to make sure that
+ * phy being reset comes back.  Consider the case where a
+ * reset is sent but before the hardware processes the reset it
+ * gets a link up on the port because of a hot plug event.
+ * Because of the reset request this phy will go link down
+ * almost immediately.
+ */
+
+ /* In the resetting state we don't notify the user regarding
+ * link up and link down notifications.
+ */
+ sci_port_general_link_up_handler(iport, iphy, false);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_port_link_down(struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_SUB_OPERATIONAL:
+ sci_port_deactivate_phy(iport, iphy, true);
+
+ /* If there are no active phys left in the port, then
+ * transition the port to the WAITING state until such time
+ * as a phy goes link up
+ */
+ if (iport->active_phy_mask == 0)
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_WAITING);
+ return SCI_SUCCESS;
+ case SCI_PORT_RESETTING:
+ /* In the resetting state we don't notify the user regarding
+ * link up and link down notifications. */
+ sci_port_deactivate_phy(iport, iphy, false);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_port_start_io(struct isci_port *iport,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_SUB_WAITING:
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_PORT_SUB_OPERATIONAL:
+ iport->started_request_count++;
+ return SCI_SUCCESS;
+ default:
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_port_complete_io(struct isci_port *iport,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_port_states state;
+
+ state = iport->sm.current_state_id;
+ switch (state) {
+ case SCI_PORT_STOPPED:
+ dev_warn(sciport_to_dev(iport),
+ "%s: in wrong state: %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_PORT_STOPPING:
+ sci_port_decrement_request_count(iport);
+
+ if (iport->started_request_count == 0)
+ port_state_machine_change(iport,
+ SCI_PORT_STOPPED);
+ break;
+ case SCI_PORT_READY:
+ case SCI_PORT_RESETTING:
+ case SCI_PORT_FAILED:
+ case SCI_PORT_SUB_WAITING:
+ case SCI_PORT_SUB_OPERATIONAL:
+ sci_port_decrement_request_count(iport);
+ break;
+ case SCI_PORT_SUB_CONFIGURING:
+ sci_port_decrement_request_count(iport);
+ if (iport->started_request_count == 0) {
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_OPERATIONAL);
+ }
+ break;
+ }
+ return SCI_SUCCESS;
+}
+
+static void sci_port_enable_port_task_scheduler(struct isci_port *iport)
+{
+ u32 pts_control_value;
+
+ /* enable the port task scheduler in a suspended state */
+ pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+ pts_control_value |= SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND);
+ writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+static void sci_port_disable_port_task_scheduler(struct isci_port *iport)
+{
+ u32 pts_control_value;
+
+ pts_control_value = readl(&iport->port_task_scheduler_registers->control);
+ pts_control_value &=
+ ~(SCU_PTSxCR_GEN_BIT(ENABLE) | SCU_PTSxCR_GEN_BIT(SUSPEND));
+ writel(pts_control_value, &iport->port_task_scheduler_registers->control);
+}
+
+static void sci_port_post_dummy_remote_node(struct isci_port *iport)
+{
+ struct isci_host *ihost = iport->owning_controller;
+ u8 phys_index = iport->physical_port_index;
+ union scu_remote_node_context *rnc;
+ u16 rni = iport->reserved_rni;
+ u32 command;
+
+ rnc = &ihost->remote_node_context_table[rni];
+ rnc->ssp.is_valid = true;
+
+ command = SCU_CONTEXT_COMMAND_POST_RNC_32 |
+ phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
+
+ sci_controller_post_request(ihost, command);
+
+ /* ensure hardware has seen the post rnc command and give it
+ * ample time to act before sending the suspend
+ */
+ readl(&ihost->smu_registers->interrupt_status); /* flush */
+ udelay(10);
+
+ command = SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX |
+ phys_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT | rni;
+
+ sci_controller_post_request(ihost, command);
+}
+
+static void sci_port_stopped_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ if (iport->sm.previous_state_id == SCI_PORT_STOPPING) {
+ /*
+ * If we enter this state because of a request to stop
+ * the port then we want to disable the hardware's port
+ * task scheduler. */
+ sci_port_disable_port_task_scheduler(iport);
+ }
+}
+
+static void sci_port_stopped_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ /* Enable and suspend the port task scheduler */
+ sci_port_enable_port_task_scheduler(iport);
+}
+
+static void sci_port_ready_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+ struct isci_host *ihost = iport->owning_controller;
+ u32 prev_state;
+
+ prev_state = iport->sm.previous_state_id;
+ if (prev_state == SCI_PORT_RESETTING)
+ isci_port_hard_reset_complete(iport, SCI_SUCCESS);
+ else
+ isci_port_not_ready(ihost, iport);
+
+ /* Post and suspend the dummy remote node context for this port. */
+ sci_port_post_dummy_remote_node(iport);
+
+ /* Start the ready substate machine */
+ port_state_machine_change(iport,
+ SCI_PORT_SUB_WAITING);
+}
+
+static void sci_port_resetting_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ sci_del_timer(&iport->timer);
+}
+
+static void sci_port_stopping_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ sci_del_timer(&iport->timer);
+
+ sci_port_destroy_dummy_resources(iport);
+}
+
+static void sci_port_failed_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+
+ isci_port_hard_reset_complete(iport, SCI_FAILURE_TIMEOUT);
+}
+
+/* --------------------------------------------------------------------------- */
+
+static const struct sci_base_state sci_port_state_table[] = {
+ [SCI_PORT_STOPPED] = {
+ .enter_state = sci_port_stopped_state_enter,
+ .exit_state = sci_port_stopped_state_exit
+ },
+ [SCI_PORT_STOPPING] = {
+ .exit_state = sci_port_stopping_state_exit
+ },
+ [SCI_PORT_READY] = {
+ .enter_state = sci_port_ready_state_enter,
+ },
+ [SCI_PORT_SUB_WAITING] = {
+ .enter_state = sci_port_ready_substate_waiting_enter,
+ },
+ [SCI_PORT_SUB_OPERATIONAL] = {
+ .enter_state = sci_port_ready_substate_operational_enter,
+ .exit_state = sci_port_ready_substate_operational_exit
+ },
+ [SCI_PORT_SUB_CONFIGURING] = {
+ .enter_state = sci_port_ready_substate_configuring_enter,
+ .exit_state = sci_port_ready_substate_configuring_exit
+ },
+ [SCI_PORT_RESETTING] = {
+ .exit_state = sci_port_resetting_state_exit
+ },
+ [SCI_PORT_FAILED] = {
+ .enter_state = sci_port_failed_state_enter,
+ }
+};
+
+void sci_port_construct(struct isci_port *iport, u8 index,
+ struct isci_host *ihost)
+{
+ sci_init_sm(&iport->sm, sci_port_state_table, SCI_PORT_STOPPED);
+
+ iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
+ iport->physical_port_index = index;
+ iport->active_phy_mask = 0;
+ iport->ready_exit = false;
+
+ iport->owning_controller = ihost;
+
+ iport->started_request_count = 0;
+ iport->assigned_device_count = 0;
+
+ iport->reserved_rni = SCU_DUMMY_INDEX;
+ iport->reserved_tag = SCI_CONTROLLER_INVALID_IO_TAG;
+
+ sci_init_timer(&iport->timer, port_timeout);
+
+ iport->port_task_scheduler_registers = NULL;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++)
+ iport->phy_table[index] = NULL;
+}
+
+void isci_port_init(struct isci_port *iport, struct isci_host *ihost, int index)
+{
+ INIT_LIST_HEAD(&iport->remote_dev_list);
+ INIT_LIST_HEAD(&iport->domain_dev_list);
+ spin_lock_init(&iport->state_lock);
+ init_completion(&iport->start_complete);
+ iport->isci_host = ihost;
+ isci_port_change_state(iport, isci_freed);
+ atomic_set(&iport->event, 0);
+}
+
+/**
+ * isci_port_get_state() - This function gets the status of the port object.
+ * @isci_port: This parameter points to the isci_port object
+ *
+ * status of the object as an isci_status enum.
+ */
+enum isci_status isci_port_get_state(
+ struct isci_port *isci_port)
+{
+ return isci_port->status;
+}
+
+void sci_port_broadcast_change_received(struct isci_port *iport, struct isci_phy *iphy)
+{
+ struct isci_host *ihost = iport->owning_controller;
+
+ /* notify the user. */
+ isci_port_bc_change_received(ihost, iport, iphy);
+}
+
+int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ unsigned long flags;
+ enum sci_status status;
+ int idx, ret = TMF_RESP_FUNC_COMPLETE;
+
+ dev_dbg(&ihost->pdev->dev, "%s: iport = %p\n",
+ __func__, iport);
+
+ init_completion(&iport->hard_reset_complete);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ #define ISCI_PORT_RESET_TIMEOUT SCIC_SDS_SIGNATURE_FIS_TIMEOUT
+ status = sci_port_hard_reset(iport, ISCI_PORT_RESET_TIMEOUT);
+
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (status == SCI_SUCCESS) {
+ wait_for_completion(&iport->hard_reset_complete);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: iport = %p; hard reset completion\n",
+ __func__, iport);
+
+ if (iport->hard_reset_status != SCI_SUCCESS)
+ ret = TMF_RESP_FUNC_FAILED;
+ } else {
+ ret = TMF_RESP_FUNC_FAILED;
+
+ dev_err(&ihost->pdev->dev,
+ "%s: iport = %p; sci_port_hard_reset call"
+ " failed 0x%x\n",
+ __func__, iport, status);
+
+ }
+
+ /* If the hard reset for the port has failed, consider this
+ * the same as link failures on all phys in the port.
+ */
+ if (ret != TMF_RESP_FUNC_COMPLETE) {
+
+ dev_err(&ihost->pdev->dev,
+ "%s: iport = %p; hard reset failed "
+ "(0x%x) - driving explicit link fail for all phys\n",
+ __func__, iport, iport->hard_reset_status);
+
+ /* Down all phys in the port. */
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ for (idx = 0; idx < SCI_MAX_PHYS; ++idx) {
+ struct isci_phy *iphy = iport->phy_table[idx];
+
+ if (!iphy)
+ continue;
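+ /* stop/start the phy to drive the explicit link fail */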
+ sci_phy_stop(iphy);
+ sci_phy_start(iphy);
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ }
+ return ret;
+}
+
+/**
+ * isci_port_deformed() - This function is called by libsas when a port becomes
+ * inactive.
+ * @phy: This parameter specifies the libsas phy with the inactive port.
+ *
+ */
+void isci_port_deformed(struct asd_sas_phy *phy)
+{
+ pr_debug("%s: sas_phy = %p\n", __func__, phy);
+}
+
+/**
+ * isci_port_formed() - This function is called by libsas when a port becomes
+ * active.
+ * @phy: This parameter specifies the libsas phy with the active port.
+ *
+ */
+void isci_port_formed(struct asd_sas_phy *phy)
+{
+ pr_debug("%s: sas_phy = %p, sas_port = %p\n", __func__, phy, phy->port);
+}
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
new file mode 100644
index 00000000000..b50ecd4e8f9
--- /dev/null
+++ b/drivers/scsi/isci/port.h
@@ -0,0 +1,306 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ISCI_PORT_H_
+#define _ISCI_PORT_H_
+
+#include <scsi/libsas.h>
+#include "isci.h"
+#include "sas.h"
+#include "phy.h"
+
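+/* sentinel logical_port_index for a port that is not (yet) a configured port */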
+#define SCIC_SDS_DUMMY_PORT 0xFF
+
+struct isci_phy;
+struct isci_host;
+
+enum isci_status {
+ isci_freed = 0x00,
+ isci_starting = 0x01,
+ isci_ready = 0x02,
+ isci_ready_for_io = 0x03,
+ isci_stopping = 0x04,
+ isci_stopped = 0x05,
+};
+
+/**
+ * struct isci_port - isci direct attached sas port object
+ * @event: counts bcns and port stop events (for bcn filtering)
+ * @ready_exit: several states constitute 'ready'. When exiting ready we
+ * need to take extra port-teardown actions that are
+ * skipped when exiting to another 'ready' state.
+ * @logical_port_index: software port index
+ * @physical_port_index: hardware port index
+ * @active_phy_mask: identifies phy members
+ * @reserved_tag: io tag reserved for the port task scheduler workaround
+ * @reserved_rni: remote node index reserved for the port task scheduler
+ *               workaround
+ * @started_request_count: reference count for outstanding commands
+ * @not_ready_reason: set during state transitions and notified
+ * @timer: timeout start/stop operations
+ */
+struct isci_port {
+ enum isci_status status;
+ #define IPORT_BCN_BLOCKED 0
+ #define IPORT_BCN_PENDING 1
+ unsigned long flags;
+ atomic_t event;
+ struct isci_host *isci_host;
+ struct asd_sas_port sas_port;
+ struct list_head remote_dev_list;
+ spinlock_t state_lock;
+ struct list_head domain_dev_list;
+ struct completion start_complete;
+ struct completion hard_reset_complete;
+ enum sci_status hard_reset_status;
+ struct sci_base_state_machine sm;
+ bool ready_exit;
+ u8 logical_port_index;
+ u8 physical_port_index;
+ u8 active_phy_mask;
+ u16 reserved_rni;
+ u16 reserved_tag;
+ u32 started_request_count;
+ u32 assigned_device_count;
+ u32 not_ready_reason;
+ struct isci_phy *phy_table[SCI_MAX_PHYS];
+ struct isci_host *owning_controller;
+ struct sci_timer timer;
+ struct scu_port_task_scheduler_registers __iomem *port_task_scheduler_registers;
+ /* XXX rework: only one register, no need to replicate per-port */
+ u32 __iomem *port_pe_configuration_register;
+ struct scu_viit_entry __iomem *viit_registers;
+};
+
+enum sci_port_not_ready_reason_code {
+ SCIC_PORT_NOT_READY_NO_ACTIVE_PHYS,
+ SCIC_PORT_NOT_READY_HARD_RESET_REQUESTED,
+ SCIC_PORT_NOT_READY_INVALID_PORT_CONFIGURATION,
+ SCIC_PORT_NOT_READY_RECONFIGURING,
+
+ SCIC_PORT_NOT_READY_REASON_CODE_MAX
+};
+
+struct sci_port_end_point_properties {
+ struct sci_sas_address sas_address;
+ struct sci_phy_proto protocols;
+};
+
+struct sci_port_properties {
+ u32 index;
+ struct sci_port_end_point_properties local;
+ struct sci_port_end_point_properties remote;
+ u32 phy_mask;
+};
+
+/**
+ * enum sci_port_states - This enumeration depicts all the states for the
+ * common port state machine.
+ *
+ *
+ */
+enum sci_port_states {
+ /**
+ * This state indicates that the port has successfully been stopped.
+ * In this state no new IO operations are permitted.
+ * This state is entered from the STOPPING state.
+ */
+ SCI_PORT_STOPPED,
+
+ /**
+ * This state indicates that the port is in the process of stopping.
+ * In this state no new IO operations are permitted, but existing IO
+ * operations are allowed to complete.
+ * This state is entered from the READY state.
+ */
+ SCI_PORT_STOPPING,
+
+ /**
+ * This state indicates the port is now ready. Thus, the user is
+ * able to perform IO operations on this port.
+ * This state is entered from the STARTING state.
+ */
+ SCI_PORT_READY,
+
+ /**
+ * The substate where the port is started and ready but has no
+ * active phys.
+ */
+ SCI_PORT_SUB_WAITING,
+
+ /**
+ * The substate where the port is started and ready and there is
+ * at least one phy operational.
+ */
+ SCI_PORT_SUB_OPERATIONAL,
+
+ /**
+ * The substate where the port is started and there was an
+ * add/remove phy event. This state is only used in Automatic
+ * Port Configuration Mode (APC)
+ */
+ SCI_PORT_SUB_CONFIGURING,
+
+ /**
+ * This state indicates the port is in the process of performing a hard
+ * reset. Thus, the user is unable to perform IO operations on this
+ * port.
+ * This state is entered from the READY state.
+ */
+ SCI_PORT_RESETTING,
+
+ /**
+ * This state indicates the port has failed a reset request. This state
+ * is entered when a port reset request times out.
+ * This state is entered from the RESETTING state.
+ */
+ SCI_PORT_FAILED,
+};
+
+static inline void sci_port_decrement_request_count(struct isci_port *iport)
+{
+ if (WARN_ONCE(iport->started_request_count == 0,
+ "%s: tried to decrement started_request_count past 0!?",
+ __func__))
+ /* pass */;
+ else
+ iport->started_request_count--;
+}
+
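+/* test whether an iphy is currently an active member of an iport */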
+#define sci_port_active_phy(port, phy) \
+ (((port)->active_phy_mask & (1 << (phy)->phy_index)) != 0)
+
+void sci_port_construct(
+ struct isci_port *iport,
+ u8 port_index,
+ struct isci_host *ihost);
+
+enum sci_status sci_port_start(struct isci_port *iport);
+enum sci_status sci_port_stop(struct isci_port *iport);
+
+enum sci_status sci_port_add_phy(
+ struct isci_port *iport,
+ struct isci_phy *iphy);
+
+enum sci_status sci_port_remove_phy(
+ struct isci_port *iport,
+ struct isci_phy *iphy);
+
+void sci_port_setup_transports(
+ struct isci_port *iport,
+ u32 device_id);
+
+void isci_port_bcn_enable(struct isci_host *, struct isci_port *);
+
+void sci_port_deactivate_phy(
+ struct isci_port *iport,
+ struct isci_phy *iphy,
+ bool do_notify_user);
+
+bool sci_port_link_detected(
+ struct isci_port *iport,
+ struct isci_phy *iphy);
+
+enum sci_status sci_port_link_up(struct isci_port *iport,
+ struct isci_phy *iphy);
+enum sci_status sci_port_link_down(struct isci_port *iport,
+ struct isci_phy *iphy);
+
+struct isci_request;
+struct isci_remote_device;
+enum sci_status sci_port_start_io(
+ struct isci_port *iport,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_port_complete_io(
+ struct isci_port *iport,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sas_linkrate sci_port_get_max_allowed_speed(
+ struct isci_port *iport);
+
+void sci_port_broadcast_change_received(
+ struct isci_port *iport,
+ struct isci_phy *iphy);
+
+bool sci_port_is_valid_phy_assignment(
+ struct isci_port *iport,
+ u32 phy_index);
+
+void sci_port_get_sas_address(
+ struct isci_port *iport,
+ struct sci_sas_address *sas_address);
+
+void sci_port_get_attached_sas_address(
+ struct isci_port *iport,
+ struct sci_sas_address *sas_address);
+
+enum isci_status isci_port_get_state(
+ struct isci_port *isci_port);
+
+void isci_port_formed(struct asd_sas_phy *);
+void isci_port_deformed(struct asd_sas_phy *);
+
+void isci_port_init(
+ struct isci_port *port,
+ struct isci_host *host,
+ int index);
+
+int isci_port_perform_hard_reset(struct isci_host *ihost, struct isci_port *iport,
+ struct isci_phy *iphy);
+#endif /* !defined(_ISCI_PORT_H_) */
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
new file mode 100644
index 00000000000..486b113c634
--- /dev/null
+++ b/drivers/scsi/isci/port_config.c
@@ -0,0 +1,754 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "host.h"
+
+#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10)
+#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10)
+#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (100)
+
+enum SCIC_SDS_APC_ACTIVITY {
+ SCIC_SDS_APC_SKIP_PHY,
+ SCIC_SDS_APC_ADD_PHY,
+ SCIC_SDS_APC_START_TIMER,
+
+ SCIC_SDS_APC_ACTIVITY_MAX
+};
+
+/*
+ * ******************************************************************************
+ * General port configuration agent routines
+ * ****************************************************************************** */
+
+/**
+ *
+ * @address_one: A SAS Address to be compared.
+ * @address_two: A SAS Address to be compared.
+ *
+ * Compare the two SAS addresses: return > 0 if address one is greater
+ * than address two, < 0 if address one is less than address two, and 0
+ * if the two addresses are identical.
+ */
+static s32 sci_sas_address_compare(
+ struct sci_sas_address address_one,
+ struct sci_sas_address address_two)
+{
+ if (address_one.high > address_two.high) {
+ return 1;
+ } else if (address_one.high < address_two.high) {
+ return -1;
+ } else if (address_one.low > address_two.low) {
+ return 1;
+ } else if (address_one.low < address_two.low) {
+ return -1;
+ }
+
+ /* The two SAS addresses must be identical */
+ return 0;
+}
+
+/**
+ *
+ * @ihost: The controller object used for the port search.
+ * @iphy: The phy object to match.
+ *
+ * This routine will find a matching port for the phy, i.e. a port whose
+ * local and attached SAS addresses both match those of the phy.  It
+ * returns the address of the matching port, or NULL if there is no
+ * matching port for the phy.
+ */
+static struct isci_port *sci_port_configuration_agent_find_port(
+ struct isci_host *ihost,
+ struct isci_phy *iphy)
+{
+ u8 i;
+ struct sci_sas_address port_sas_address;
+ struct sci_sas_address port_attached_device_address;
+ struct sci_sas_address phy_sas_address;
+ struct sci_sas_address phy_attached_device_address;
+
+ /*
+ * Since this phy can be a member of a wide port check to see if one
+ * or more phys match the sent and received SAS addresses of this
+ * phy, in which case it should participate in the same port.
+ */
+ sci_phy_get_sas_address(iphy, &phy_sas_address);
+ sci_phy_get_attached_sas_address(iphy, &phy_attached_device_address);
+
+ for (i = 0; i < ihost->logical_port_entries; i++) {
+ struct isci_port *iport = &ihost->ports[i];
+
+ sci_port_get_sas_address(iport, &port_sas_address);
+ sci_port_get_attached_sas_address(iport, &port_attached_device_address);
+
+ if (sci_sas_address_compare(port_sas_address, phy_sas_address) == 0 &&
+ sci_sas_address_compare(port_attached_device_address, phy_attached_device_address) == 0)
+ return iport;
+ }
+
+ return NULL;
+}
+
+/**
+ *
+ * @ihost: This is the controller object that contains the port agent
+ * @port_agent: This is the port configuration agent for the controller.
+ *
+ * This routine validates that the port configuration is correct for the
+ * SCU hardware.  The SCU hardware allows the following port
+ * configurations:
+ *   LP0 -> (PE0), (PE0, PE1), (PE0, PE1, PE2, PE3)
+ *   LP1 -> (PE1)
+ *   LP2 -> (PE2), (PE2, PE3)
+ *   LP3 -> (PE3)
+ * Returns SCI_SUCCESS if the port configuration is valid for this port
+ * configuration agent and SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION if
+ * it is not.
+ */
+static enum sci_status sci_port_configuration_agent_validate_ports(
+ struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent)
+{
+ struct sci_sas_address first_address;
+ struct sci_sas_address second_address;
+
+ /*
+ * Sanity check the max ranges for all the phys the max index
+ * is always equal to the port range index */
+ if (port_agent->phy_valid_port_range[0].max_index != 0 ||
+ port_agent->phy_valid_port_range[1].max_index != 1 ||
+ port_agent->phy_valid_port_range[2].max_index != 2 ||
+ port_agent->phy_valid_port_range[3].max_index != 3)
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+
+ /*
+ * This is a request to configure a single x4 port or at least attempt
+ * to make all the phys into a single port */
+ if (port_agent->phy_valid_port_range[0].min_index == 0 &&
+ port_agent->phy_valid_port_range[1].min_index == 0 &&
+ port_agent->phy_valid_port_range[2].min_index == 0 &&
+ port_agent->phy_valid_port_range[3].min_index == 0)
+ return SCI_SUCCESS;
+
+ /*
+ * This is a degenerate case where phy 1 and phy 2 are assigned
+ * to the same port this is explicitly disallowed by the hardware
+ * unless they are part of the same x4 port and this condition was
+ * already checked above. */
+ if (port_agent->phy_valid_port_range[2].min_index == 1) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ /*
+ * PE0 and PE3 can never have the same SAS Address unless they
+ * are part of the same x4 wide port and we have already checked
+ * for this condition. */
+ sci_phy_get_sas_address(&ihost->phys[0], &first_address);
+ sci_phy_get_sas_address(&ihost->phys[3], &second_address);
+
+ if (sci_sas_address_compare(first_address, second_address) == 0) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ /*
+ * PE0 and PE1 are configured into a 2x1 ports make sure that the
+ * SAS Address for PE0 and PE2 are different since they can not be
+ * part of the same port. */
+ if (port_agent->phy_valid_port_range[0].min_index == 0 &&
+ port_agent->phy_valid_port_range[1].min_index == 1) {
+ sci_phy_get_sas_address(&ihost->phys[0], &first_address);
+ sci_phy_get_sas_address(&ihost->phys[2], &second_address);
+
+ if (sci_sas_address_compare(first_address, second_address) == 0) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+ }
+
+ /*
+ * PE2 and PE3 are configured into a 2x1 ports make sure that the
+ * SAS Address for PE1 and PE3 are different since they can not be
+ * part of the same port. */
+ if (port_agent->phy_valid_port_range[2].min_index == 2 &&
+ port_agent->phy_valid_port_range[3].min_index == 3) {
+ sci_phy_get_sas_address(&ihost->phys[1], &first_address);
+ sci_phy_get_sas_address(&ihost->phys[3], &second_address);
+
+ if (sci_sas_address_compare(first_address, second_address) == 0) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+ }
+
+ return SCI_SUCCESS;
+}
+
+/*
+ * ******************************************************************************
+ * Manual port configuration agent routines
+ * ****************************************************************************** */
+
+/* verify all of the phys in the same port are using the same SAS address */
+static enum sci_status
+sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent)
+{
+ u32 phy_mask;
+ u32 assigned_phy_mask;
+ struct sci_sas_address sas_address;
+ struct sci_sas_address phy_assigned_address;
+ u8 port_index;
+ u8 phy_index;
+
+ assigned_phy_mask = 0;
+ sas_address.high = 0;
+ sas_address.low = 0;
+
+ for (port_index = 0; port_index < SCI_MAX_PORTS; port_index++) {
+ phy_mask = ihost->oem_parameters.ports[port_index].phy_mask;
+
+ if (!phy_mask)
+ continue;
+ /*
+ * Make sure that at least one of the phys was not already
+ * assigned to a different port. */
+ if ((phy_mask & ~assigned_phy_mask) == 0) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ /* Find the starting phy index for this round through the loop */
+ for (phy_index = 0; phy_index < SCI_MAX_PHYS; phy_index++) {
+ if ((phy_mask & (1 << phy_index)) == 0)
+ continue;
+ sci_phy_get_sas_address(&ihost->phys[phy_index],
+ &sas_address);
+
+ /*
+ * The phy_index can be used as the starting point for the
+ * port range since the hardware starts all logical ports
+ * the same as the PE index. */
+ port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+ port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+
+ if (phy_index != port_index) {
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ break;
+ }
+
+ /*
+ * See how many additional phys are being added to this logical port.
+ * Note: We have not moved the current phy_index so we will actually
+ * compare the starting phy with itself.
+ * This is expected and required to add the phy to the port. */
+ for (; phy_index < SCI_MAX_PHYS; phy_index++) {
+ if ((phy_mask & (1 << phy_index)) == 0)
+ continue;
+ sci_phy_get_sas_address(&ihost->phys[phy_index],
+ &phy_assigned_address);
+
+ if (sci_sas_address_compare(sas_address, phy_assigned_address) != 0) {
+ /*
+ * The phy mask specified that this phy is part of the same port
+ * as the starting phy and it is not so fail this configuration */
+ return SCI_FAILURE_UNSUPPORTED_PORT_CONFIGURATION;
+ }
+
+ port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+ port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+
+ sci_port_add_phy(&ihost->ports[port_index],
+ &ihost->phys[phy_index]);
+
+ assigned_phy_mask |= (1 << phy_index);
+ }
+ }
+
+ return sci_port_configuration_agent_validate_ports(ihost, port_agent);
+}
+
+static void mpc_agent_timeout(unsigned long data)
+{
+ u8 index;
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct sci_port_configuration_agent *port_agent;
+ struct isci_host *ihost;
+ unsigned long flags;
+ u16 configure_phy_mask;
+
+ port_agent = container_of(tmr, typeof(*port_agent), timer);
+ ihost = container_of(port_agent, typeof(*ihost), port_agent);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ port_agent->timer_pending = false;
+
+ /* Find the mask of phys that are reported ready but not yet configured into a port */
+ configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ struct isci_phy *iphy = &ihost->phys[index];
+
+ if (configure_phy_mask & (1 << index)) {
+ port_agent->link_up_handler(ihost, port_agent,
+ phy_get_non_dummy_port(iphy),
+ iphy);
+ }
+ }
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+static void sci_mpc_agent_link_up(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ /* If the port is NULL then the phy was not assigned to a port.
+ * This is because the phy was not given the same SAS Address as
+ * the other PHYs in the port.
+ */
+ if (!iport)
+ return;
+
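+ /* record the phy as ready; if the port actually activated it, also
+ * mark it configured so the mpc reconfiguration timer can skip it
+ */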
+ port_agent->phy_ready_mask |= (1 << iphy->phy_index);
+ sci_port_link_up(iport, iphy);
+ if ((iport->active_phy_mask & (1 << iphy->phy_index)))
+ port_agent->phy_configured_mask |= (1 << iphy->phy_index);
+}
+
+/**
+ *
+ * @ihost: This is the controller object that receives the link down
+ *    notification.
+ * @iport: This is the port object associated with the phy.  If there is
+ *    no associated port this is NULL.  The port is an invalid handle
+ *    only if the phy was never part of this port.  This happens when
+ *    the phy is not broadcasting the same SAS address as the other phys
+ *    in the assigned port.
+ * @iphy: This is the phy object which has gone link down.
+ *
+ * This function handles the manual port configuration link down
+ * notifications.  Since all ports and phys are associated at
+ * initialization time we just turn around and notify the port object of
+ * the link down event.  If this PHY is not associated with a port no
+ * action is taken.  Is it possible to get a link down notification from
+ * a phy that has no associated port?
+ */
+static void sci_mpc_agent_link_down(
+ struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ if (iport != NULL) {
+ /*
+ * If we can form a new port from the remainder of the phys
+ * then we want to start the timer to allow the SCI User to
+ * cleanup old devices and rediscover the port before
+ * rebuilding the port with the phys that remain in the ready
+ * state.
+ */
+ port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
+ port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
+
+ /*
+ * Check to see if there are more phys waiting to be
+ * configured into a port. If there are allow the SCI User
+ * to tear down this port, if necessary, and then reconstruct
+ * the port after the timeout.
+ */
+ if ((port_agent->phy_configured_mask == 0x0000) &&
+ (port_agent->phy_ready_mask != 0x0000) &&
+ !port_agent->timer_pending) {
+ port_agent->timer_pending = true;
+
+ sci_mod_timer(&port_agent->timer,
+ SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT);
+ }
+
+ sci_port_link_down(iport, iphy);
+ }
+}
+
+/* verify phys are assigned a valid SAS address for automatic port
+ * configuration mode.
+ */
+static enum sci_status
+sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent)
+{
+ u8 phy_index;
+ u8 port_index;
+ struct sci_sas_address sas_address;
+ struct sci_sas_address phy_assigned_address;
+
+ phy_index = 0;
+
+ while (phy_index < SCI_MAX_PHYS) {
+ port_index = phy_index;
+
+ /* Get the assigned SAS address for the first phy in this potential port. */
+ sci_phy_get_sas_address(&ihost->phys[phy_index],
+ &sas_address);
+
+ while (++phy_index < SCI_MAX_PHYS) {
+ sci_phy_get_sas_address(&ihost->phys[phy_index],
+ &phy_assigned_address);
+
+ /* Verify that the SAS addresses are the same for every phy in the group */
+ if (sci_sas_address_compare(sas_address, phy_assigned_address) == 0) {
+ port_agent->phy_valid_port_range[phy_index].min_index = port_index;
+ port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+ } else {
+ port_agent->phy_valid_port_range[phy_index].min_index = phy_index;
+ port_agent->phy_valid_port_range[phy_index].max_index = phy_index;
+ break;
+ }
+ }
+ }
+
+ return sci_port_configuration_agent_validate_ports(ihost, port_agent);
+}
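+
+/* Illustrative trace of the validation above (hypothetical addresses):
+ * if phys 0-1 share SAS address A and phys 2-3 share address B, the
+ * valid port ranges become phy1 -> [0,1], phy2 -> [2,2], phy3 -> [2,3],
+ * while phy0 keeps the [0,0] range set at construction time. */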
+
+static void sci_apc_agent_configure_ports(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_phy *iphy,
+ bool start_timer)
+{
+ u8 port_index;
+ enum sci_status status;
+ struct isci_port *iport;
+ enum SCIC_SDS_APC_ACTIVITY apc_activity = SCIC_SDS_APC_SKIP_PHY;
+
+ iport = sci_port_configuration_agent_find_port(ihost, iphy);
+
+ if (iport) {
+ if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index))
+ apc_activity = SCIC_SDS_APC_ADD_PHY;
+ else
+ apc_activity = SCIC_SDS_APC_SKIP_PHY;
+ } else {
+ /*
+ * There is no matching port for this phy so let's search through the
+ * ports and see if we can add the phy to its own port or maybe start
+ * the timer and wait to see if a wider port can be made.
+ *
+ * Note the break when we reach the condition of the port id == phy id */
+ for (port_index = port_agent->phy_valid_port_range[iphy->phy_index].min_index;
+ port_index <= port_agent->phy_valid_port_range[iphy->phy_index].max_index;
+ port_index++) {
+
+ iport = &ihost->ports[port_index];
+
+ /* First we must make sure that this PHY can be added to this Port. */
+ if (sci_port_is_valid_phy_assignment(iport, iphy->phy_index)) {
+ /*
+ * The port contains a phy with a greater phy id than the current
+ * phy that has gone link up. This phy cannot be part of any
+ * port so skip it and move on. */
+ if (iport->active_phy_mask > (1 << iphy->phy_index)) {
+ apc_activity = SCIC_SDS_APC_SKIP_PHY;
+ break;
+ }
+
+ /*
+ * We have reached the end of our Port list and have not found
+ * any reason why we should not either add the PHY to the port
+ * or wait for more phys to become active. */
+ if (iport->physical_port_index == iphy->phy_index) {
+ /*
+ * The port either has no active phys or only active phys with
+ * a lower phy id than this phy (any higher one was caught by
+ * the check above), so the phy may be added here. */
+ if (apc_activity != SCIC_SDS_APC_START_TIMER) {
+ apc_activity = SCIC_SDS_APC_ADD_PHY;
+ }
+
+ break;
+ }
+
+ /*
+ * The current port has no active phys and this phy could be part
+ * of this port. Since we do not yet know whether a wider port can
+ * be formed, set up to start the timer and see if there is a
+ * better configuration. */
+ if (iport->active_phy_mask == 0) {
+ apc_activity = SCIC_SDS_APC_START_TIMER;
+ }
+ } else if (iport->active_phy_mask != 0) {
+ /*
+ * The port has an active phy and the current phy cannot
+ * participate in this port so skip the phy and see if
+ * there is a better configuration. */
+ apc_activity = SCIC_SDS_APC_SKIP_PHY;
+ }
+ }
+ }
+
+ /*
+ * Check to see if the start timer operation should instead map to an
+ * add phy operation. This happens when we have been waiting to
+ * add a phy to a port but could not because the automatic port
+ * configuration engine had a choice of possible ports for the phy.
+ * Since we have gone through a timeout we are going to restrict the
+ * choice to the smallest possible port. */
+ if (!start_timer && apc_activity == SCIC_SDS_APC_START_TIMER)
+ apc_activity = SCIC_SDS_APC_ADD_PHY;
+
+ switch (apc_activity) {
+ case SCIC_SDS_APC_ADD_PHY:
+ status = sci_port_add_phy(iport, iphy);
+
+ if (status == SCI_SUCCESS) {
+ port_agent->phy_configured_mask |= (1 << iphy->phy_index);
+ }
+ break;
+
+ case SCIC_SDS_APC_START_TIMER:
+ /*
+ * This can occur for either a link down event, or a link
+ * up event where we cannot yet tell the port to which a
+ * phy belongs.
+ */
+ if (port_agent->timer_pending)
+ sci_del_timer(&port_agent->timer);
+
+ port_agent->timer_pending = true;
+ sci_mod_timer(&port_agent->timer,
+ SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
+ break;
+
+ case SCIC_SDS_APC_SKIP_PHY:
+ default:
+ /* do nothing; the phy cannot be made part of a port at this time */
+ break;
+ }
+}
+
+/**
+ * sci_apc_agent_link_up - handle apc link up events
+ * @ihost: This is the controller object that receives the link up
+ *    notification.
+ * @port_agent: This is the port configuration agent for the controller.
+ * @iport: This is the port object associated with the phy. If there is no
+ *    associated port this is NULL.
+ * @iphy: This is the phy object which has gone link up.
+ *
+ * This method handles the automatic port configuration for link up
+ * notifications.
+ */
+static void sci_apc_agent_link_up(struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ u8 phy_index = iphy->phy_index;
+
+ if (!iport) {
+ /* the phy is not yet part of a port */
+ port_agent->phy_ready_mask |= 1 << phy_index;
+ sci_apc_agent_configure_ports(ihost, port_agent, iphy, true);
+ } else {
+ /* the phy is already part of the port */
+ u32 port_state = iport->sm.current_state_id;
+
+ /* if the port's state is resetting then the link up is from
+ * a port hard reset; in that case we need to tell the port
+ * that link up has been received
+ */
+ BUG_ON(port_state != SCI_PORT_RESETTING);
+ port_agent->phy_ready_mask |= 1 << phy_index;
+ sci_port_link_up(iport, iphy);
+ }
+}
+
+/**
+ * sci_apc_agent_link_down - handle apc link down events
+ * @ihost: This is the controller object that receives the link down
+ *    notification.
+ * @port_agent: This is the port configuration agent for the controller.
+ * @iport: This is the port object associated with the phy. If there is no
+ *    associated port this is NULL.
+ * @iphy: This is the phy object which has gone link down.
+ *
+ * This method handles the automatic port configuration link down
+ * notifications. If the phy is not associated with a port no action is
+ * taken.
+ */
+static void sci_apc_agent_link_down(
+ struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent,
+ struct isci_port *iport,
+ struct isci_phy *iphy)
+{
+ port_agent->phy_ready_mask &= ~(1 << iphy->phy_index);
+
+ if (!iport)
+ return;
+ if (port_agent->phy_configured_mask & (1 << iphy->phy_index)) {
+ enum sci_status status;
+
+ status = sci_port_remove_phy(iport, iphy);
+
+ if (status == SCI_SUCCESS)
+ port_agent->phy_configured_mask &= ~(1 << iphy->phy_index);
+ }
+}
+
+/* configure the phys into ports when the timer fires */
+static void apc_agent_timeout(unsigned long data)
+{
+ u32 index;
+ struct sci_timer *tmr = (struct sci_timer *)data;
+ struct sci_port_configuration_agent *port_agent;
+ struct isci_host *ihost;
+ unsigned long flags;
+ u16 configure_phy_mask;
+
+ port_agent = container_of(tmr, typeof(*port_agent), timer);
+ ihost = container_of(port_agent, typeof(*ihost), port_agent);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmr->cancel)
+ goto done;
+
+ port_agent->timer_pending = false;
+
+ configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
+
+ if (!configure_phy_mask)
+ goto done;
+
+ for (index = 0; index < SCI_MAX_PHYS; index++) {
+ if ((configure_phy_mask & (1 << index)) == 0)
+ continue;
+
+ sci_apc_agent_configure_ports(ihost, port_agent,
+ &ihost->phys[index], false);
+ }
+
+done:
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/*
+ * ******************************************************************************
+ * Public port configuration agent routines
+ * ****************************************************************************** */
+
+/**
+ * sci_port_configuration_agent_construct - construct a port agent
+ * @port_agent: This is the port configuration agent to construct.
+ *
+ * This method will construct the port configuration agent for operation. This
+ * call is universal for both manual port configuration and automatic port
+ * configuration modes.
+ */
+void sci_port_configuration_agent_construct(
+ struct sci_port_configuration_agent *port_agent)
+{
+ u32 index;
+
+ port_agent->phy_configured_mask = 0x00;
+ port_agent->phy_ready_mask = 0x00;
+
+ port_agent->link_up_handler = NULL;
+ port_agent->link_down_handler = NULL;
+
+ port_agent->timer_pending = false;
+
+ for (index = 0; index < SCI_MAX_PORTS; index++) {
+ port_agent->phy_valid_port_range[index].min_index = 0;
+ port_agent->phy_valid_port_range[index].max_index = 0;
+ }
+}
+
+enum sci_status sci_port_configuration_agent_initialize(
+ struct isci_host *ihost,
+ struct sci_port_configuration_agent *port_agent)
+{
+ enum sci_status status;
+ enum sci_port_configuration_mode mode;
+
+ mode = ihost->oem_parameters.controller.mode_type;
+
+ if (mode == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
+ status = sci_mpc_agent_validate_phy_configuration(
+ ihost, port_agent);
+
+ port_agent->link_up_handler = sci_mpc_agent_link_up;
+ port_agent->link_down_handler = sci_mpc_agent_link_down;
+
+ sci_init_timer(&port_agent->timer, mpc_agent_timeout);
+ } else {
+ status = sci_apc_agent_validate_phy_configuration(
+ ihost, port_agent);
+
+ port_agent->link_up_handler = sci_apc_agent_link_up;
+ port_agent->link_down_handler = sci_apc_agent_link_down;
+
+ sci_init_timer(&port_agent->timer, apc_agent_timeout);
+ }
+
+ return status;
+}
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
new file mode 100644
index 00000000000..b5f4341de24
--- /dev/null
+++ b/drivers/scsi/isci/probe_roms.c
@@ -0,0 +1,243 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ */
+
+/* probe_roms - scan for oem parameters */
+
+#include <linux/kernel.h>
+#include <linux/firmware.h>
+#include <linux/uaccess.h>
+#include <linux/efi.h>
+#include <asm/probe_roms.h>
+
+#include "isci.h"
+#include "task.h"
+#include "probe_roms.h"
+
+static efi_char16_t isci_efivar_name[] = {
+ 'R', 's', 't', 'S', 'c', 'u', 'O', 0 /* UCS-2 "RstScuO", NUL-terminated */
+};
+
+struct isci_orom *isci_request_oprom(struct pci_dev *pdev)
+{
+ void __iomem *oprom = pci_map_biosrom(pdev);
+ struct isci_orom *rom = NULL;
+ size_t len, i;
+ int j;
+ char oem_sig[4];
+ struct isci_oem_hdr oem_hdr;
+ u8 *tmp, sum;
+
+ if (!oprom)
+ return NULL;
+
+ len = pci_biosrom_size(pdev);
+ rom = devm_kzalloc(&pdev->dev, sizeof(*rom), GFP_KERNEL);
+ if (!rom) {
+ dev_warn(&pdev->dev,
+ "Unable to allocate memory for orom\n");
+ return NULL;
+ }
+
+ for (i = 0; i < len && rom; i += ISCI_OEM_SIG_SIZE) {
+ memcpy_fromio(oem_sig, oprom + i, ISCI_OEM_SIG_SIZE);
+
+ /* we think we found the OEM table */
+ if (memcmp(oem_sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) == 0) {
+ size_t copy_len;
+
+ memcpy_fromio(&oem_hdr, oprom + i, sizeof(oem_hdr));
+
+ copy_len = min(oem_hdr.len - sizeof(oem_hdr),
+ sizeof(*rom));
+
+ memcpy_fromio(rom,
+ oprom + i + sizeof(oem_hdr),
+ copy_len);
+
+ /* calculate checksum */
+ tmp = (u8 *)&oem_hdr;
+ for (j = 0, sum = 0; j < sizeof(oem_hdr); j++, tmp++)
+ sum += *tmp;
+
+ tmp = (u8 *)rom;
+ for (j = 0; j < sizeof(*rom); j++, tmp++)
+ sum += *tmp;
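+
+ /* A valid OEM block sums to zero byte-wise: the header's
+ * checksum field is chosen so that header plus table wrap
+ * around to 0 under 8-bit addition. */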
+
+ if (sum != 0) {
+ dev_warn(&pdev->dev,
+ "OEM table checksum failed\n");
+ continue;
+ }
+
+ /* keep going if that's not the oem param table */
+ if (memcmp(rom->hdr.signature,
+ ISCI_ROM_SIG,
+ ISCI_ROM_SIG_SIZE) != 0)
+ continue;
+
+ dev_info(&pdev->dev,
+ "OEM parameter table found in OROM\n");
+ break;
+ }
+ }
+
+ if (i >= len) {
+ dev_err(&pdev->dev, "oprom parse error\n");
+ devm_kfree(&pdev->dev, rom);
+ rom = NULL;
+ }
+ pci_unmap_biosrom(oprom);
+
+ return rom;
+}
+
+enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
+ struct isci_orom *orom, int scu_index)
+{
+ /* check for valid inputs */
+ if (scu_index < 0 || scu_index >= SCI_MAX_CONTROLLERS ||
+ scu_index >= orom->hdr.num_elements || !oem)
+ return -EINVAL;
+
+ *oem = orom->ctrl[scu_index];
+ return 0;
+}
+
+struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw)
+{
+ struct isci_orom *orom = NULL, *data;
+ int i, j;
+
+ if (request_firmware(&fw, ISCI_FW_NAME, &pdev->dev) != 0)
+ return NULL;
+
+ if (fw->size < sizeof(*orom))
+ goto out;
+
+ data = (struct isci_orom *)fw->data;
+
+ if (strncmp(ISCI_ROM_SIG, data->hdr.signature,
+ strlen(ISCI_ROM_SIG)) != 0)
+ goto out;
+
+ orom = devm_kzalloc(&pdev->dev, fw->size, GFP_KERNEL);
+ if (!orom)
+ goto out;
+
+ memcpy(orom, fw->data, fw->size);
+
+ if (is_c0(pdev))
+ goto out;
+
+ /*
+ * deprecated: override default amp_control for pre-production
+ * silicon revisions
+ */
+ for (i = 0; i < ARRAY_SIZE(orom->ctrl); i++)
+ for (j = 0; j < ARRAY_SIZE(orom->ctrl[i].phys); j++) {
+ orom->ctrl[i].phys[j].afe_tx_amp_control0 = 0xe7c03;
+ orom->ctrl[i].phys[j].afe_tx_amp_control1 = 0xe7c03;
+ orom->ctrl[i].phys[j].afe_tx_amp_control2 = 0xe7c03;
+ orom->ctrl[i].phys[j].afe_tx_amp_control3 = 0xe7c03;
+ }
+ out:
+ release_firmware(fw);
+
+ return orom;
+}
+
+static struct efi *get_efi(void)
+{
+#ifdef CONFIG_EFI
+ return &efi;
+#else
+ return NULL;
+#endif
+}
+
+struct isci_orom *isci_get_efi_var(struct pci_dev *pdev)
+{
+ efi_status_t status;
+ struct isci_orom *rom;
+ struct isci_oem_hdr *oem_hdr;
+ u8 *tmp, sum;
+ int j;
+ unsigned long data_len;
+ u8 *efi_data;
+ u32 efi_attrib = 0;
+
+ data_len = 1024;
+ efi_data = devm_kzalloc(&pdev->dev, data_len, GFP_KERNEL);
+ if (!efi_data) {
+ dev_warn(&pdev->dev,
+ "Unable to allocate memory for EFI data\n");
+ return NULL;
+ }
+
+ rom = (struct isci_orom *)(efi_data + sizeof(struct isci_oem_hdr));
+
+ if (get_efi())
+ status = get_efi()->get_variable(isci_efivar_name,
+ &ISCI_EFI_VENDOR_GUID,
+ &efi_attrib,
+ &data_len,
+ efi_data);
+ else
+ status = EFI_NOT_FOUND;
+
+ if (status != EFI_SUCCESS) {
+ dev_warn(&pdev->dev,
+ "Unable to obtain EFI var data for OEM parms\n");
+ return NULL;
+ }
+
+ oem_hdr = (struct isci_oem_hdr *)efi_data;
+
+ if (memcmp(oem_hdr->sig, ISCI_OEM_SIG, ISCI_OEM_SIG_SIZE) != 0) {
+ dev_warn(&pdev->dev,
+ "Invalid OEM header signature\n");
+ return NULL;
+ }
+
+ /* calculate checksum */
+ tmp = (u8 *)efi_data;
+ for (j = 0, sum = 0; j < (sizeof(*oem_hdr) + sizeof(*rom)); j++, tmp++)
+ sum += *tmp;
+
+ if (sum != 0) {
+ dev_warn(&pdev->dev,
+ "OEM table checksum failed\n");
+ return NULL;
+ }
+
+ if (memcmp(rom->hdr.signature,
+ ISCI_ROM_SIG,
+ ISCI_ROM_SIG_SIZE) != 0) {
+ dev_warn(&pdev->dev,
+ "Invalid OEM table signature\n");
+ return NULL;
+ }
+
+ return rom;
+}
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
new file mode 100644
index 00000000000..dc007e692f4
--- /dev/null
+++ b/drivers/scsi/isci/probe_roms.h
@@ -0,0 +1,249 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ISCI_PROBE_ROMS_H_
+#define _ISCI_PROBE_ROMS_H_
+
+#ifdef __KERNEL__
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include <linux/efi.h>
+#include "isci.h"
+
+#define SCIC_SDS_PARM_NO_SPEED 0
+
+/* generation 1 (i.e. 1.5 Gb/s) */
+#define SCIC_SDS_PARM_GEN1_SPEED 1
+
+/* generation 2 (i.e. 3.0 Gb/s) */
+#define SCIC_SDS_PARM_GEN2_SPEED 2
+
+/* generation 3 (i.e. 6.0 Gb/s) */
+#define SCIC_SDS_PARM_GEN3_SPEED 3
+#define SCIC_SDS_PARM_MAX_SPEED SCIC_SDS_PARM_GEN3_SPEED
+
+/* parameters that can be set by module parameters */
+struct sci_user_parameters {
+ struct sci_phy_user_params {
+ /**
+ * This field specifies the NOTIFY (ENABLE SPIN UP) primitive
+ * insertion frequency for this phy index.
+ */
+ u32 notify_enable_spin_up_insertion_frequency;
+
+ /**
+ * This field specifies the number of transmitted DWORDs within which
+ * to transmit a single ALIGN primitive. This value applies regardless
+ * of what type of device is attached or connection state. A value of
+ * 0 indicates that no ALIGN primitives will be inserted.
+ */
+ u16 align_insertion_frequency;
+
+ /**
+ * This field specifies the number of transmitted DWORDs within which
+ * to transmit 2 ALIGN primitives. This applies for SAS connections
+ * only. A minimum value of 3 is required for this field.
+ */
+ u16 in_connection_align_insertion_frequency;
+
+ /**
+ * This field indicates the maximum speed generation to be utilized
+ * by phys in the supplied port.
+ * - A value of 1 indicates generation 1 (i.e. 1.5 Gb/s).
+ * - A value of 2 indicates generation 2 (i.e. 3.0 Gb/s).
+ * - A value of 3 indicates generation 3 (i.e. 6.0 Gb/s).
+ */
+ u8 max_speed_generation;
+
+ } phys[SCI_MAX_PHYS];
+
+ /**
+ * This field specifies the maximum number of direct attached devices
+ * that can have power supplied to them simultaneously.
+ */
+ u8 max_number_concurrent_device_spin_up;
+
+ /**
+ * This field specifies the number of seconds to allow a phy to consume
+ * power before yielding to another phy.
+ *
+ */
+ u8 phy_spin_up_delay_interval;
+
+ /**
+ * These timer values specify how long a link will remain open with no
+ * activity, in increments of a microsecond; the increments become
+ * 100 microseconds if the uppermost bit is set.
+ *
+ */
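+ /* For example (illustrative encoding): a raw value of 0x8005 means
+ * 5 * 100 microseconds = 500 microseconds, while 0x0005 means
+ * 5 microseconds. */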
+ u16 stp_inactivity_timeout;
+ u16 ssp_inactivity_timeout;
+
+ /**
+ * These timer values specify how long a link will remain open in increments
+ * of 100 microseconds.
+ *
+ */
+ u16 stp_max_occupancy_timeout;
+ u16 ssp_max_occupancy_timeout;
+
+ /**
+ * This timer value specifies how long a link will remain open with no
+ * outbound traffic in increments of a microsecond.
+ *
+ */
+ u8 no_outbound_task_timeout;
+
+};
+
+#define SCIC_SDS_PARM_PHY_MASK_MIN 0x0
+#define SCIC_SDS_PARM_PHY_MASK_MAX 0xF
+#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4
+
+struct sci_oem_params;
+int sci_oem_parameters_validate(struct sci_oem_params *oem);
+
+struct isci_orom;
+struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
+enum sci_status isci_parse_oem_parameters(struct sci_oem_params *oem,
+ struct isci_orom *orom, int scu_index);
+struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmware *fw);
+struct isci_orom *isci_get_efi_var(struct pci_dev *pdev);
+
+struct isci_oem_hdr {
+ u8 sig[4];
+ u8 rev_major;
+ u8 rev_minor;
+ u16 len;
+ u8 checksum;
+ u8 reserved1;
+ u16 reserved2;
+} __attribute__ ((packed));
+
+#else
+#define SCI_MAX_PORTS 4
+#define SCI_MAX_PHYS 4
+#define SCI_MAX_CONTROLLERS 2
+#endif
+
+#define ISCI_FW_NAME "isci/isci_firmware.bin"
+
+#define ROMSIGNATURE 0xaa55
+
+#define ISCI_OEM_SIG "$OEM"
+#define ISCI_OEM_SIG_SIZE 4
+#define ISCI_ROM_SIG "ISCUOEMB"
+#define ISCI_ROM_SIG_SIZE 8
+
+#define ISCI_EFI_VENDOR_GUID \
+ EFI_GUID(0x193dfefa, 0xa445, 0x4302, 0x99, 0xd8, 0xef, 0x3a, 0xad, \
+ 0x1a, 0x04, 0xc6)
+#define ISCI_EFI_VAR_NAME "RstScuO"
+
+/* Allowed PORT configuration modes:
+ *
+ * APC - Automatic PORT configuration mode is defined by the OEM
+ * configuration parameters providing no PHY_MASK parameters for any PORT,
+ * i.e. there are no phys assigned to any of the ports at start.
+ *
+ * MPC - Manual PORT configuration mode is defined by the OEM configuration
+ * parameters providing a PHY_MASK value for any PORT. It is assumed that
+ * any PORT with no PHY_MASK is an invalid port and not all PHYs must be
+ * assigned. A PORT_PHY mask that assigns just a single PHY to a port and
+ * no other PHYs being assigned is sufficient to declare manual PORT
+ * configuration.
+ */
+enum sci_port_configuration_mode {
+ SCIC_PORT_MANUAL_CONFIGURATION_MODE = 0,
+ SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE = 1
+};
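+
+/* Illustrative example (hypothetical OEM values): ports[0].phy_mask = 0x3
+ * and ports[1].phy_mask = 0xc describe MPC mode with two 2-phy wide ports
+ * (phys 0-1 and phys 2-3), while a phy_mask of 0 on all ports selects APC
+ * mode. */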
+
+struct sci_bios_oem_param_block_hdr {
+ uint8_t signature[ISCI_ROM_SIG_SIZE];
+ uint16_t total_block_length;
+ uint8_t hdr_length;
+ uint8_t version;
+ uint8_t preboot_source;
+ uint8_t num_elements;
+ uint16_t element_length;
+ uint8_t reserved[8];
+} __attribute__ ((packed));
+
+struct sci_oem_params {
+ struct {
+ uint8_t mode_type;
+ uint8_t max_concurrent_dev_spin_up;
+ uint8_t do_enable_ssc;
+ uint8_t reserved;
+ } controller;
+
+ struct {
+ uint8_t phy_mask;
+ } ports[SCI_MAX_PORTS];
+
+ struct sci_phy_oem_params {
+ struct {
+ uint32_t high;
+ uint32_t low;
+ } sas_address;
+
+ uint32_t afe_tx_amp_control0;
+ uint32_t afe_tx_amp_control1;
+ uint32_t afe_tx_amp_control2;
+ uint32_t afe_tx_amp_control3;
+ } phys[SCI_MAX_PHYS];
+} __attribute__ ((packed));
+
+struct isci_orom {
+ struct sci_bios_oem_param_block_hdr hdr;
+ struct sci_oem_params ctrl[SCI_MAX_CONTROLLERS];
+} __attribute__ ((packed));
+
+#endif
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
new file mode 100644
index 00000000000..9b266c7428e
--- /dev/null
+++ b/drivers/scsi/isci/registers.h
@@ -0,0 +1,1934 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCU_REGISTERS_H_
+#define _SCU_REGISTERS_H_
+
+/**
+ * This file contains the constants and structures for the SCU memory mapped
+ * registers.
+ */
+
+#define SCU_VIIT_ENTRY_ID_MASK (0xC0000000)
+#define SCU_VIIT_ENTRY_ID_SHIFT (30)
+
+#define SCU_VIIT_ENTRY_FUNCTION_MASK (0x0FF00000)
+#define SCU_VIIT_ENTRY_FUNCTION_SHIFT (20)
+
+#define SCU_VIIT_ENTRY_IPPTMODE_MASK (0x0001F800)
+#define SCU_VIIT_ENTRY_IPPTMODE_SHIFT (12)
+
+#define SCU_VIIT_ENTRY_LPVIE_MASK (0x00000F00)
+#define SCU_VIIT_ENTRY_LPVIE_SHIFT (8)
+
+#define SCU_VIIT_ENTRY_STATUS_MASK (0x000000FF)
+#define SCU_VIIT_ENTRY_STATUS_SHIFT (0)
+
+#define SCU_VIIT_ENTRY_ID_INVALID (0 << SCU_VIIT_ENTRY_ID_SHIFT)
+#define SCU_VIIT_ENTRY_ID_VIIT (1 << SCU_VIIT_ENTRY_ID_SHIFT)
+#define SCU_VIIT_ENTRY_ID_IIT (2 << SCU_VIIT_ENTRY_ID_SHIFT)
+#define SCU_VIIT_ENTRY_ID_VIRT_EXP (3 << SCU_VIIT_ENTRY_ID_SHIFT)
+
+#define SCU_VIIT_IPPT_SSP_INITIATOR (0x01 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+#define SCU_VIIT_IPPT_SMP_INITIATOR (0x02 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+#define SCU_VIIT_IPPT_STP_INITIATOR (0x04 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+#define SCU_VIIT_IPPT_INITIATOR \
+ (\
+ SCU_VIIT_IPPT_SSP_INITIATOR \
+ | SCU_VIIT_IPPT_SMP_INITIATOR \
+ | SCU_VIIT_IPPT_STP_INITIATOR \
+ )
+
+#define SCU_VIIT_STATUS_RNC_VALID (0x01 << SCU_VIIT_ENTRY_STATUS_SHIFT)
+#define SCU_VIIT_STATUS_ADDRESS_VALID (0x02 << SCU_VIIT_ENTRY_STATUS_SHIFT)
+#define SCU_VIIT_STATUS_RNI_VALID (0x04 << SCU_VIIT_ENTRY_STATUS_SHIFT)
+#define SCU_VIIT_STATUS_ALL_VALID \
+ (\
+ SCU_VIIT_STATUS_RNC_VALID \
+ | SCU_VIIT_STATUS_ADDRESS_VALID \
+ | SCU_VIIT_STATUS_RNI_VALID \
+ )
+
+#define SCU_VIIT_IPPT_SMP_TARGET (0x10 << SCU_VIIT_ENTRY_IPPTMODE_SHIFT)
+
+/**
+ * struct scu_viit_entry - This is the SCU Virtual Initiator Table Entry
+ *
+ *
+ */
+struct scu_viit_entry {
+ /**
+ * This must be encoded according to the type of initiator that is
+ * being constructed for this port.
+ */
+ u32 status;
+
+ /**
+ * Virtual initiator high SAS Address
+ */
+ u32 initiator_sas_address_hi;
+
+ /**
+ * Virtual initiator low SAS Address
+ */
+ u32 initiator_sas_address_lo;
+
+ /**
+ * This must be 0
+ */
+ u32 reserved;
+
+};
+
+
+/* IIT Status Defines */
+#define SCU_IIT_ENTRY_ID_MASK (0xC0000000)
+#define SCU_IIT_ENTRY_ID_SHIFT (30)
+
+#define SCU_IIT_ENTRY_STATUS_UPDATE_MASK (0x20000000)
+#define SCU_IIT_ENTRY_STATUS_UPDATE_SHIFT (29)
+
+#define SCU_IIT_ENTRY_LPI_MASK (0x00000F00)
+#define SCU_IIT_ENTRY_LPI_SHIFT (8)
+
+#define SCU_IIT_ENTRY_STATUS_MASK (0x000000FF)
+#define SCU_IIT_ENTRY_STATUS_SHIFT (0)
+
+/* IIT Remote Initiator Defines */
+#define SCU_IIT_ENTRY_REMOTE_TAG_MASK (0x0000FFFF)
+#define SCU_IIT_ENTRY_REMOTE_TAG_SHIFT (0)
+
+#define SCU_IIT_ENTRY_REMOTE_RNC_MASK (0x0FFF0000)
+#define SCU_IIT_ENTRY_REMOTE_RNC_SHIFT (16)
+
+#define SCU_IIT_ENTRY_ID_INVALID (0 << SCU_IIT_ENTRY_ID_SHIFT)
+#define SCU_IIT_ENTRY_ID_VIIT (1 << SCU_IIT_ENTRY_ID_SHIFT)
+#define SCU_IIT_ENTRY_ID_IIT (2 << SCU_IIT_ENTRY_ID_SHIFT)
+#define SCU_IIT_ENTRY_ID_VIRT_EXP (3 << SCU_IIT_ENTRY_ID_SHIFT)
+
+/**
+ * struct scu_iit_entry - This will be implemented later when we support
+ * virtual functions
+ *
+ *
+ */
+struct scu_iit_entry {
+ u32 status;
+ u32 remote_initiator_sas_address_hi;
+ u32 remote_initiator_sas_address_lo;
+ u32 remote_initiator;
+
+};
+
+/* Generate a value for an SCU register */
+#define SCU_GEN_VALUE(name, value) \
+ (((value) << name ## _SHIFT) & (name ## _MASK))
+
+/*
+ * Generate a bit value for an SCU register
+ * Make sure that the register MASK is just a single bit */
+#define SCU_GEN_BIT(name) \
+ SCU_GEN_VALUE(name, ((u32)1))
+
+#define SCU_SET_BIT(name, reg_value) \
+ ((reg_value) | SCU_GEN_BIT(name))
+
+#define SCU_CLEAR_BIT(name, reg_value) \
+ ((reg_value) & ~(SCU_GEN_BIT(name)))
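+
+/* Illustrative expansions (using values defined later in this file):
+ * SCU_GEN_VALUE(SMU_INTERRUPT_COALESCING_CONTROL_NUMBER, 4)
+ * == ((4 << 8) & 0x0000FF00) == 0x00000400, and
+ * SCU_GEN_BIT(SMU_INTERRUPT_STATUS_COMPLETION) == 0x80000000. */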
+
+/*
+ * *****************************************************************************
+ * Unions for bitfield definitions of SCU Registers
+ * SMU Post Context Port
+ * ***************************************************************************** */
+#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_SHIFT (0)
+#define SMU_POST_CONTEXT_PORT_CONTEXT_INDEX_MASK (0x00000FFF)
+#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_SHIFT (12)
+#define SMU_POST_CONTEXT_PORT_LOGICAL_PORT_INDEX_MASK (0x0000F000)
+#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_SHIFT (16)
+#define SMU_POST_CONTEXT_PORT_PROTOCOL_ENGINE_MASK (0x00030000)
+#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_SHIFT (18)
+#define SMU_POST_CONTEXT_PORT_COMMAND_CONTEXT_MASK (0x00FC0000)
+#define SMU_POST_CONTEXT_PORT_RESERVED_MASK (0xFF000000)
+
+#define SMU_PCP_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_POST_CONTEXT_PORT_ ## name, value)
+
+/* ***************************************************************************** */
+#define SMU_INTERRUPT_STATUS_COMPLETION_SHIFT (31)
+#define SMU_INTERRUPT_STATUS_COMPLETION_MASK (0x80000000)
+#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_SHIFT (1)
+#define SMU_INTERRUPT_STATUS_QUEUE_SUSPEND_MASK (0x00000002)
+#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_SHIFT (0)
+#define SMU_INTERRUPT_STATUS_QUEUE_ERROR_MASK (0x00000001)
+#define SMU_INTERRUPT_STATUS_RESERVED_MASK (0x7FFFFFFC)
+
+#define SMU_ISR_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_INTERRUPT_STATUS_ ## name)
+
+#define SMU_ISR_QUEUE_ERROR SMU_ISR_GEN_BIT(QUEUE_ERROR)
+#define SMU_ISR_QUEUE_SUSPEND SMU_ISR_GEN_BIT(QUEUE_SUSPEND)
+#define SMU_ISR_COMPLETION SMU_ISR_GEN_BIT(COMPLETION)
+
+/* ***************************************************************************** */
+#define SMU_INTERRUPT_MASK_COMPLETION_SHIFT (31)
+#define SMU_INTERRUPT_MASK_COMPLETION_MASK (0x80000000)
+#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_SHIFT (1)
+#define SMU_INTERRUPT_MASK_QUEUE_SUSPEND_MASK (0x00000002)
+#define SMU_INTERRUPT_MASK_QUEUE_ERROR_SHIFT (0)
+#define SMU_INTERRUPT_MASK_QUEUE_ERROR_MASK (0x00000001)
+#define SMU_INTERRUPT_MASK_RESERVED_MASK (0x7FFFFFFC)
+
+#define SMU_IMR_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_INTERRUPT_MASK_ ## name)
+
+#define SMU_IMR_QUEUE_ERROR SMU_IMR_GEN_BIT(QUEUE_ERROR)
+#define SMU_IMR_QUEUE_SUSPEND SMU_IMR_GEN_BIT(QUEUE_SUSPEND)
+#define SMU_IMR_COMPLETION SMU_IMR_GEN_BIT(COMPLETION)
+
+/* ***************************************************************************** */
+#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_SHIFT (0)
+#define SMU_INTERRUPT_COALESCING_CONTROL_TIMER_MASK (0x0000001F)
+#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_SHIFT (8)
+#define SMU_INTERRUPT_COALESCING_CONTROL_NUMBER_MASK (0x0000FF00)
+#define SMU_INTERRUPT_COALESCING_CONTROL_RESERVED_MASK (0xFFFF00E0)
+
+#define SMU_ICC_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_INTERRUPT_COALESCING_CONTROL_ ## name, value)
+
+/* ***************************************************************************** */
+#define SMU_TASK_CONTEXT_RANGE_START_SHIFT (0)
+#define SMU_TASK_CONTEXT_RANGE_START_MASK (0x00000FFF)
+#define SMU_TASK_CONTEXT_RANGE_ENDING_SHIFT (16)
+#define SMU_TASK_CONTEXT_RANGE_ENDING_MASK (0x0FFF0000)
+#define SMU_TASK_CONTEXT_RANGE_ENABLE_SHIFT (31)
+#define SMU_TASK_CONTEXT_RANGE_ENABLE_MASK (0x80000000)
+#define SMU_TASK_CONTEXT_RANGE_RESERVED_MASK (0x7000F000)
+
+#define SMU_TCR_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_TASK_CONTEXT_RANGE_ ## name, value)
+
+#define SMU_TCR_GEN_BIT(name, value) \
+ SCU_GEN_BIT(SMU_TASK_CONTEXT_RANGE_ ## name)
+
+/* ***************************************************************************** */
+
+#define SMU_COMPLETION_QUEUE_PUT_POINTER_SHIFT (0)
+#define SMU_COMPLETION_QUEUE_PUT_POINTER_MASK (0x00003FFF)
+#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_SHIFT (15)
+#define SMU_COMPLETION_QUEUE_PUT_CYCLE_BIT_MASK (0x00008000)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_SHIFT (16)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_POINTER_MASK (0x03FF0000)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_SHIFT (26)
+#define SMU_COMPLETION_QUEUE_PUT_EVENT_CYCLE_BIT_MASK (0x04000000)
+#define SMU_COMPLETION_QUEUE_PUT_RESERVED_MASK (0xF8004000)
+
+#define SMU_CQPR_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_PUT_ ## name, value)
+
+#define SMU_CQPR_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_COMPLETION_QUEUE_PUT_ ## name)
+
+/* ***************************************************************************** */
+
+#define SMU_COMPLETION_QUEUE_GET_POINTER_SHIFT (0)
+#define SMU_COMPLETION_QUEUE_GET_POINTER_MASK (0x00003FFF)
+#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT (15)
+#define SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_MASK (0x00008000)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT (16)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK (0x03FF0000)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT (26)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_MASK (0x04000000)
+#define SMU_COMPLETION_QUEUE_GET_ENABLE_SHIFT (30)
+#define SMU_COMPLETION_QUEUE_GET_ENABLE_MASK (0x40000000)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_SHIFT (31)
+#define SMU_COMPLETION_QUEUE_GET_EVENT_ENABLE_MASK (0x80000000)
+#define SMU_COMPLETION_QUEUE_GET_RESERVED_MASK (0x38004000)
+
+#define SMU_CQGR_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_GET_ ## name, value)
+
+#define SMU_CQGR_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_COMPLETION_QUEUE_GET_ ## name)
+
+#define SMU_CQGR_CYCLE_BIT \
+ SMU_CQGR_GEN_BIT(CYCLE_BIT)
+
+#define SMU_CQGR_EVENT_CYCLE_BIT \
+ SMU_CQGR_GEN_BIT(EVENT_CYCLE_BIT)
+
+#define SMU_CQGR_GET_POINTER_SET(value) \
+ SMU_CQGR_GEN_VAL(POINTER, value)
+
+
+/* ***************************************************************************** */
+#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_SHIFT (0)
+#define SMU_COMPLETION_QUEUE_CONTROL_QUEUE_LIMIT_MASK (0x00003FFF)
+#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_SHIFT (16)
+#define SMU_COMPLETION_QUEUE_CONTROL_EVENT_LIMIT_MASK (0x03FF0000)
+#define SMU_COMPLETION_QUEUE_CONTROL_RESERVED_MASK (0xFC00C000)
+
+#define SMU_CQC_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_COMPLETION_QUEUE_CONTROL_ ## name, value)
+
+#define SMU_CQC_QUEUE_LIMIT_SET(value) \
+ SMU_CQC_GEN_VAL(QUEUE_LIMIT, value)
+
+#define SMU_CQC_EVENT_LIMIT_SET(value) \
+ SMU_CQC_GEN_VAL(EVENT_LIMIT, value)
+
+
+/* ***************************************************************************** */
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT (0)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK (0x00000FFF)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT (12)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK (0x00007000)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT (15)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK (0x07FF8000)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_SHIFT (27)
+#define SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK (0x08000000)
+#define SMU_DEVICE_CONTEXT_CAPACITY_RESERVED_MASK (0xF0000000)
+
+#define SMU_DCC_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_DEVICE_CONTEXT_CAPACITY_ ## name, value)
+
+#define SMU_DCC_GET_MAX_PEG(value) \
+ (\
+ ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_PEG_SHIFT \
+ )
+
+#define SMU_DCC_GET_MAX_LP(value) \
+ (\
+ ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT \
+ )
+
+#define SMU_DCC_GET_MAX_TC(value) \
+ (\
+ ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT \
+ )
+
+#define SMU_DCC_GET_MAX_RNC(value) \
+ (\
+ ((value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
+ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT \
+ )
+
+/* -------------------------------------------------------------------------- */
+
+#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_SHIFT (0)
+#define SMU_CONTROL_STATUS_TASK_CONTEXT_RANGE_ENABLE_MASK (0x00000001)
+#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_SHIFT (1)
+#define SMU_CONTROL_STATUS_COMPLETION_BYTE_SWAP_ENABLE_MASK (0x00000002)
+#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_SHIFT (16)
+#define SMU_CONTROL_STATUS_CONTEXT_RAM_INIT_COMPLETED_MASK (0x00010000)
+#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_SHIFT (17)
+#define SMU_CONTROL_STATUS_SCHEDULER_RAM_INIT_COMPLETED_MASK (0x00020000)
+#define SMU_CONTROL_STATUS_RESERVED_MASK (0xFFFCFFFC)
+
+#define SMU_SMUCSR_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_CONTROL_STATUS_ ## name)
+
+#define SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED \
+ (SMU_SMUCSR_GEN_BIT(SCHEDULER_RAM_INIT_COMPLETED))
+
+#define SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED \
+ (SMU_SMUCSR_GEN_BIT(CONTEXT_RAM_INIT_COMPLETED))
+
+#define SCU_RAM_INIT_COMPLETED \
+ (\
+ SMU_SMUCSR_CONTEXT_RAM_INIT_COMPLETED \
+ | SMU_SMUCSR_SCHEDULER_RAM_INIT_COMPLETED \
+ )
+
+/* -------------------------------------------------------------------------- */
+
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_SHIFT (0)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE0_MASK (0x00000001)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_SHIFT (1)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE1_MASK (0x00000002)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_SHIFT (2)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE2_MASK (0x00000004)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_SHIFT (3)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_PE3_MASK (0x00000008)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_SHIFT (8)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE0_MASK (0x00000100)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_SHIFT (9)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE1_MASK (0x00000200)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_SHIFT (10)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_MASK (0x00000400)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_SHIFT (11)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_PE3_MASK (0x00000800)
+
+#define SMU_RESET_PROTOCOL_ENGINE(peg, pe) \
+ ((1 << (pe)) << ((peg) * 8))
+
+#define SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \
+ (\
+ SMU_RESET_PROTOCOL_ENGINE(peg, 0) \
+ | SMU_RESET_PROTOCOL_ENGINE(peg, 1) \
+ | SMU_RESET_PROTOCOL_ENGINE(peg, 2) \
+ | SMU_RESET_PROTOCOL_ENGINE(peg, 3) \
+ )
+
+#define SMU_RESET_ALL_PROTOCOL_ENGINES() \
+ (\
+ SMU_RESET_PEG_PROTOCOL_ENGINES(0) \
+ | SMU_RESET_PEG_PROTOCOL_ENGINES(1) \
+ )
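+
+/* Sanity-check example: SMU_RESET_PROTOCOL_ENGINE(1, 2) == (1 << 2) << 8
+ * == 0x00000400, which matches SMU_SOFTRESET_CONTROL_RESET_PEG1_PE2_MASK
+ * above, and SMU_RESET_PEG_PROTOCOL_ENGINES(0) == 0x0000000F. */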
+
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_SHIFT (16)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP0_MASK (0x00010000)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_SHIFT (17)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG0_LP2_MASK (0x00020000)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_SHIFT (18)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP0_MASK (0x00040000)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_SHIFT (19)
+#define SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_MASK (0x00080000)
+
+#define SMU_RESET_WIDE_PORT_QUEUE(peg, wide_port) \
+ ((1 << ((wide_port) / 2)) << ((peg) * 2) << 16)
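+
+/* Sanity-check example: SMU_RESET_WIDE_PORT_QUEUE(1, 2)
+ * == ((1 << 1) << 2) << 16 == 0x00080000, which matches
+ * SMU_SOFTRESET_CONTROL_RESET_WIDE_PORT_PEG1_LP2_MASK above. */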
+
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_SHIFT (20)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG0_MASK (0x00100000)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_SHIFT (21)
+#define SMU_SOFTRESET_CONTROL_RESET_PEG1_MASK (0x00200000)
+#define SMU_SOFTRESET_CONTROL_RESET_SCU_SHIFT (22)
+#define SMU_SOFTRESET_CONTROL_RESET_SCU_MASK (0x00400000)
+
+/*
+ * It seems to make sense that if you are going to reset the protocol
+ * engine group that you would also reset all of the protocol engines */
+#define SMU_RESET_PROTOCOL_ENGINE_GROUP(peg) \
+ (\
+ (1 << ((peg) + 20)) \
+ | SMU_RESET_WIDE_PORT_QUEUE(peg, 0) \
+ | SMU_RESET_WIDE_PORT_QUEUE(peg, 1) \
+ | SMU_RESET_PEG_PROTOCOL_ENGINES(peg) \
+ )
+
+#define SMU_RESET_ALL_PROTOCOL_ENGINE_GROUPS() \
+ (\
+ SMU_RESET_PROTOCOL_ENGINE_GROUP(0) \
+ | SMU_RESET_PROTOCOL_ENGINE_GROUP(1) \
+ )
+
+#define SMU_RESET_SCU() (0xFFFFFFFF)
+
+
+
+/* ***************************************************************************** */
+#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_SHIFT (0)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_STARTING_MASK (0x00000FFF)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_SHIFT (16)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_ENDING_MASK (0x0FFF0000)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_SHIFT (31)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_RANGE_CHECK_ENABLE_MASK (0x80000000)
+#define SMU_TASK_CONTEXT_ASSIGNMENT_RESERVED_MASK (0x7000F000)
+
+#define SMU_TCA_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name, value)
+
+#define SMU_TCA_GEN_BIT(name) \
+ SCU_GEN_BIT(SMU_TASK_CONTEXT_ASSIGNMENT_ ## name)
+
+/* ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_SHIFT (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_QUEUE_SIZE_MASK (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_RESERVED_MASK (0xFFFFF000)
+
+#define SCU_UFQC_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_CONTROL_ ## name, value)
+
+#define SCU_UFQC_QUEUE_SIZE_SET(value) \
+ SCU_UFQC_GEN_VAL(QUEUE_SIZE, value)
+
+/* ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_SHIFT (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_POINTER_MASK (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_SHIFT (12)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_CYCLE_BIT_MASK (0x00001000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_RESERVED_MASK (0xFFFFE000)
+
+#define SCU_UFQPP_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name, value)
+
+#define SCU_UFQPP_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_PUT_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SDMA Registers
+ * ***************************************************************************** */
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_SHIFT (0)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_POINTER_MASK (0x00000FFF)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_SHIFT (12)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_CYCLE_BIT_MASK (0x00001000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_SHIFT (31)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ENABLE_BIT_MASK (0x80000000)
+#define SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_RESERVED_MASK (0x7FFFE000)
+
+#define SCU_UFQGP_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name, value)
+
+#define SCU_UFQGP_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SDMA_UNSOLICITED_FRAME_QUEUE_GET_ ## name)
+
+#define SCU_UFQGP_CYCLE_BIT(value) \
+ SCU_UFQGP_GEN_VAL(CYCLE_BIT, value)
+
+#define SCU_UFQGP_GET_POINTER(value) \
+ SCU_UFQGP_GEN_VAL(POINTER, value)
+
+#define SCU_UFQGP_ENABLE(value) \
+ (SCU_UFQGP_GEN_BIT(ENABLE_BIT) | (value))
+
+#define SCU_UFQGP_DISABLE(value) \
+ (~SCU_UFQGP_GEN_BIT(ENABLE_BIT) & (value))
+
+#define SCU_UFQGP_VALUE(bit, value) \
+ (SCU_UFQGP_CYCLE_BIT(bit) | SCU_UFQGP_GET_POINTER(value))
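+
+/* Illustrative composition: SCU_UFQGP_VALUE(1, 5)
+ * == 0x00001000 | 0x00000005 == 0x00001005,
+ * i.e. cycle bit set and get pointer 5. */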
+
+/* ***************************************************************************** */
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SHIFT (0)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_MASK (0x0000FFFF)
+#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT (16)
+#define SCU_PDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK (0x00010000)
+#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_SHIFT (17)
+#define SCU_PDMA_CONFIGURATION_PCI_NO_SNOOP_ENABLE_MASK (0x00020000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_SHIFT (18)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_BYTE_SWAP_MASK (0x00040000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_SHIFT (19)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_SGL_FETCH_MASK (0x00080000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_SHIFT (20)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_RX_HEADER_RAM_WRITE_MASK (0x00100000)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_SHIFT (21)
+#define SCU_PDMA_CONFIGURATION_BIG_ENDIAN_CONTROL_XPI_UF_ADDRESS_FETCH_MASK (0x00200000)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_SHIFT (22)
+#define SCU_PDMA_CONFIGURATION_ADDRESS_MODIFIER_SELECT_MASK (0x00400000)
+#define SCU_PDMA_CONFIGURATION_RESERVED_MASK (0xFF800000)
+
+#define SCU_PDMACR_GEN_VALUE(name, value) \
+ SCU_GEN_VALUE(SCU_PDMA_CONFIGURATION_ ## name, value)
+
+#define SCU_PDMACR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_PDMA_CONFIGURATION_ ## name)
+
+#define SCU_PDMACR_BE_GEN_BIT(name) \
+ SCU_PDMACR_GEN_BIT(BIG_ENDIAN_CONTROL_ ## name)
+
+/* ***************************************************************************** */
+#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_SHIFT (8)
+#define SCU_CDMA_CONFIGURATION_PCI_RELAXED_ORDERING_ENABLE_MASK (0x00000100)
+
+#define SCU_CDMACR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_CDMA_CONFIGURATION_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SCU Link Layer Registers
+ * ***************************************************************************** */
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_SHIFT (0)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_TIMEOUT_MASK (0x000000FF)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_SHIFT (8)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_LOCK_TIME_MASK (0x0000FF00)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_SHIFT (16)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_RATE_CHANGE_DELAY_MASK (0x00FF0000)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_SHIFT (24)
+#define SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_DWORD_SYNC_TIMEOUT_MASK (0xFF000000)
+#define SCU_LINK_LAYER_SPEED_NECGOIATION_TIMER_VALUES_REQUIRED_MASK (0x00000000)
+#define SCU_LINK_LAYER_SPEED_NECGOIATION_TIMER_VALUES_DEFAULT_MASK (0x7D00676F)
+#define SCU_LINK_LAYER_SPEED_NECGOIATION_TIMER_VALUES_RESERVED_MASK (0x00FF0000)
+
+#define SCU_SAS_SPDTOV_GEN_VALUE(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_SPEED_NEGOTIATION_TIMER_VALUES_ ## name, value)
+
+
+#define SCU_LINK_STATUS_DWORD_SYNC_AQUIRED_SHIFT (2)
+#define SCU_LINK_STATUS_DWORD_SYNC_AQUIRED_MASK (0x00000004)
+#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_SHIFT (4)
+#define SCU_LINK_STATUS_TRANSMIT_PORT_SELECTION_DONE_MASK (0x00000010)
+#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_SHIFT (5)
+#define SCU_LINK_STATUS_RECEIVER_CREDIT_EXHAUSTED_MASK (0x00000020)
+#define SCU_LINK_STATUS_RESERVED_MASK (0xFFFFFFCD)
+
+#define SCU_SAS_LLSTA_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_LINK_STATUS_ ## name)
+
+
+/* TODO: Where is the SATA_PSELTOV register? */
+
+/*
+ * *****************************************************************************
+ * * SCU SAS Maximum Arbitration Wait Time Timeout Register
+ * ***************************************************************************** */
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_SHIFT (0)
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_VALUE_MASK (0x00007FFF)
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_SHIFT (15)
+#define SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_SCALE_MASK (0x00008000)
+
+#define SCU_SAS_MAWTTOV_GEN_VALUE(name, value) \
+ SCU_GEN_VALUE(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name, value)
+
+#define SCU_SAS_MAWTTOV_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SAS_MAX_ARBITRATION_WAIT_TIME_TIMEOUT_ ## name)
+
+
+/*
+ * TODO: Where is the SAS_LNKTOV register?
+ * TODO: Where is the SAS_PHYTOV register? */
+
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_SHIFT (1)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_TARGET_MASK (0x00000002)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_SHIFT (2)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_TARGET_MASK (0x00000004)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_SHIFT (3)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_TARGET_MASK (0x00000008)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_SHIFT (8)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DA_SATA_HOST_MASK (0x00000100)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_SHIFT (9)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SMP_INITIATOR_MASK (0x00000200)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_SHIFT (10)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_STP_INITIATOR_MASK (0x00000400)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_SHIFT (11)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_SSP_INITIATOR_MASK (0x00000800)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_SHIFT (16)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_REASON_CODE_MASK (0x000F0000)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_SHIFT (24)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_ADDRESS_FRAME_TYPE_MASK (0x0F000000)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_SHIFT (28)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_DEVICE_TYPE_MASK (0x70000000)
+#define SCU_SAS_TRANSMIT_IDENTIFICATION_RESERVED_MASK (0x80F0F1F1)
+
+#define SCU_SAS_TIID_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name, value)
+
+#define SCU_SAS_TIID_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SAS_TRANSMIT_IDENTIFICATION_ ## name)
+
+/* SAS Identify Frame PHY Identifier Register */
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_SHIFT (16)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_BREAK_REPLY_CAPABLE_MASK (0x00010000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_SHIFT (17)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_REQUESTED_INSIDE_ZPSDS_MASK (0x00020000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_SHIFT (18)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_INSIDE_ZPSDS_PERSISTENT_MASK (0x00040000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_SHIFT (24)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ID_MASK (0xFF000000)
+#define SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_RESERVED_MASK (0x00F800FF)
+
+#define SCU_SAS_TIPID_GEN_VALUE(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name, value)
+
+#define SCU_SAS_TIPID_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_LINK_LAYER_IDENTIFY_FRAME_PHY_IDENTIFIER_ ## name)
+
+
+#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_SHIFT (4)
+#define SCU_SAS_PHY_CONFIGURATION_TX_PARITY_CHECK_MASK (0x00000010)
+#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_SHIFT (6)
+#define SCU_SAS_PHY_CONFIGURATION_TX_BAD_CRC_MASK (0x00000040)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_SHIFT (7)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_SCRAMBLER_MASK (0x00000080)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_SHIFT (8)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_DESCRAMBLER_MASK (0x00000100)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_SHIFT (9)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_CREDIT_INSERTION_MASK (0x00000200)
+#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_SHIFT (11)
+#define SCU_SAS_PHY_CONFIGURATION_SUSPEND_PROTOCOL_ENGINE_MASK (0x00000800)
+#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_SHIFT (12)
+#define SCU_SAS_PHY_CONFIGURATION_SATA_SPINUP_HOLD_MASK (0x00001000)
+#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_SHIFT (13)
+#define SCU_SAS_PHY_CONFIGURATION_TRANSMIT_PORT_SELECTION_SIGNAL_MASK (0x00002000)
+#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_SHIFT (14)
+#define SCU_SAS_PHY_CONFIGURATION_HARD_RESET_MASK (0x00004000)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_SHIFT (15)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ENABLE_MASK (0x00008000)
+#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_SHIFT (23)
+#define SCU_SAS_PHY_CONFIGURATION_ENABLE_FRAME_TX_INSERT_ALIGN_MASK (0x00800000)
+#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_SHIFT (27)
+#define SCU_SAS_PHY_CONFIGURATION_FORWARD_IDENTIFY_FRAME_MASK (0x08000000)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_SHIFT (28)
+#define SCU_SAS_PHY_CONFIGURATION_DISABLE_BYTE_TRANSPOSE_STP_FRAME_MASK (0x10000000)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_SHIFT (29)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_RESET_MASK (0x20000000)
+#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_SHIFT (30)
+#define SCU_SAS_PHY_CONFIGURATION_THREE_IAF_ENABLE_MASK (0x40000000)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_SHIFT (31)
+#define SCU_SAS_PHY_CONFIGURATION_OOB_ALIGN0_ENABLE_MASK (0x80000000)
+#define SCU_SAS_PHY_CONFIGURATION_REQUIRED_MASK (0x0100000F)
+#define SCU_SAS_PHY_CONFIGURATION_DEFAULT_MASK (0x4180100F)
+#define SCU_SAS_PHY_CONFIGURATION_RESERVED_MASK (0x00000000)
+
+#define SCU_SAS_PCFG_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SAS_PHY_CONFIGURATION_ ## name)
+
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_SHIFT (0)
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_GENERAL_MASK (0x000007FF)
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_SHIFT (16)
+#define SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_CONNECTED_MASK (0x00ff0000)
+
+#define SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_ALIGN_INSERTION_FREQUENCY_##name, value)
+
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_SHIFT (0)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_COUNT_MASK (0x0003FFFF)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_SHIFT (31)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ENABLE_MASK (0x80000000)
+#define SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_RESERVED_MASK (0x7FFC0000)
+
+#define SCU_ENSPINUP_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name, value)
+
+#define SCU_ENSPINUP_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_LINK_LAYER_ENABLE_SPINUP_CONTROL_ ## name)
+
+
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_SHIFT (1)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_TXSSCTYPE_MASK (0x00000002)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_SHIFT (4)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_RLLRATE_MASK (0x000000F0)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_SHIFT (8)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO15GBPS_MASK (0x00000100)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_SHIFT (9)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW15GBPS_MASK (0x00000201)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_SHIFT (10)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO30GBPS_MASK (0x00000401)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_SHIFT (11)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW30GBPS_MASK (0x00000801)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_SHIFT (12)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SWO60GBPS_MASK (0x00001001)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_SHIFT (13)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_SW60GBPS_MASK (0x00002001)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_SHIFT (31)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_EVEN_PARITY_MASK (0x80000000)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_DEFAULT_MASK (0x00003F01)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_REQUIRED_MASK (0x00000001)
+#define SCU_LINK_LAYER_PHY_CAPABILITIES_RESERVED_MASK (0x7FFFC00D)
+
+#define SCU_SAS_PHYCAP_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name, value)
+
+#define SCU_SAS_PHYCAP_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_LINK_LAYER_PHY_CAPABILITIES_ ## name)
+
+
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_SHIFT (0)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_VIRTUAL_EXPANDER_PHY_ZONE_GROUP_MASK (0x000000FF)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_SHIFT (31)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_INSIDE_SOURCE_ZONE_GROUP_MASK (0x80000000)
+#define SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_RESERVED_MASK (0x7FFFFF00)
+
+#define SCU_PSZGCR_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name, value)
+
+#define SCU_PSZGCR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_LINK_LAYER_PHY_SOURCE_ZONE_GROUP_CONTROL_ ## name)
+
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_SHIFT (1)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_LOCKED_MASK (0x00000002)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_SHIFT (2)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE0_UPDATING_MASK (0x00000004)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_SHIFT (4)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_LOCKED_MASK (0x00000010)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_SHIFT (5)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZONE1_UPDATING_MASK (0x00000020)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_SHIFT (16)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE0_MASK (0x00030000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_SHIFT (19)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE0_MASK (0x00080000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_SHIFT (20)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE1_MASK (0x00300000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_SHIFT (23)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE1_MASK (0x00800000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_SHIFT (24)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE2_MASK (0x03000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_SHIFT (27)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE2_MASK (0x08000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_SHIFT (28)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ZPT_ASSOCIATION_PE3_MASK (0x30000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_SHIFT (31)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_AIP_ENABLE_PE3_MASK (0x80000000)
+#define SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_RESERVED_MASK (0x4444FFC9)
+
+#define SCU_PEG_SCUVZECR_GEN_VAL(name, val) \
+ SCU_GEN_VALUE(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name, val)
+
+#define SCU_PEG_SCUVZECR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_PROTOCOL_ENGINE_GROUP_VIRTUAL_ZONING_EXPANDER_CONTROL_ ## name)
+
+
+/*
+ * *****************************************************************************
+ * * Port Task Scheduler registers shift and mask values
+ * ***************************************************************************** */
+#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_SHIFT (0)
+#define SCU_PTSG_CONTROL_IT_NEXUS_TIMEOUT_MASK (0x0000FFFF)
+#define SCU_PTSG_CONTROL_TASK_TIMEOUT_SHIFT (16)
+#define SCU_PTSG_CONTROL_TASK_TIMEOUT_MASK (0x00FF0000)
+#define SCU_PTSG_CONTROL_PTSG_ENABLE_SHIFT (24)
+#define SCU_PTSG_CONTROL_PTSG_ENABLE_MASK (0x01000000)
+#define SCU_PTSG_CONTROL_ETM_ENABLE_SHIFT (25)
+#define SCU_PTSG_CONTROL_ETM_ENABLE_MASK (0x02000000)
+#define SCU_PTSG_CONTROL_DEFAULT_MASK (0x00020002)
+#define SCU_PTSG_CONTROL_REQUIRED_MASK (0x00000000)
+#define SCU_PTSG_CONTROL_RESERVED_MASK (0xFC000000)
+
+#define SCU_PTSGCR_GEN_VAL(name, val) \
+ SCU_GEN_VALUE(SCU_PTSG_CONTROL_ ## name, val)
+
+#define SCU_PTSGCR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_PTSG_CONTROL_ ## name)
+
+
+/* ***************************************************************************** */
+#define SCU_PTSG_REAL_TIME_CLOCK_SHIFT (0)
+#define SCU_PTSG_REAL_TIME_CLOCK_MASK (0x0000FFFF)
+#define SCU_PTSG_REAL_TIME_CLOCK_RESERVED_MASK (0xFFFF0000)
+
+#define SCU_RTCR_GEN_VAL(name, val) \
+ SCU_GEN_VALUE(SCU_PTSG_ ## name, val)
+
+
+#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_SHIFT (0)
+#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_PRESCALER_VALUE_MASK (0x00FFFFFF)
+#define SCU_PTSG_REAL_TIME_CLOCK_CONTROL_RESERVED_MASK (0xFF000000)
+
+#define SCU_RTCCR_GEN_VAL(name, val) \
+ SCU_GEN_VALUE(SCU_PTSG_REAL_TIME_CLOCK_CONTROL_ ## name, val)
+
+
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_SHIFT (0)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_SUSPEND_MASK (0x00000001)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_SHIFT (1)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ENABLE_MASK (0x00000002)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_RESERVED_MASK (0xFFFFFFFC)
+
+#define SCU_PTSxCR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_CONTROL_ ## name)
+
+
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_SHIFT (0)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_NEXT_RN_VALID_MASK (0x00000001)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_SHIFT (1)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ACTIVE_RNSC_LIST_VALID_MASK (0x00000002)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_SHIFT (2)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_PTS_SUSPENDED_MASK (0x00000004)
+#define SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_RESERVED_MASK (0xFFFFFFF8)
+
+#define SCU_PTSxSR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ ## name)
+
+
+/*
+ * *****************************************************************************
+ * * SGPIO Register shift and mask values
+ * ***************************************************************************** */
+#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_SHIFT (0)
+#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_MASK (0x00000001)
+#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_SHIFT (1)
+#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_MASK (0x00000002)
+#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_SHIFT (2)
+#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_MASK (0x00000004)
+#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_SHIFT (15)
+#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_MASK (0x00008000)
+#define SCU_SGPIO_CONTROL_SGPIO_RESERVED_MASK (0xFFFF7FF8)
+
+#define SCU_SGICRx_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SGPIO_CONTROL_SGPIO_ ## name)
+
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_SHIFT (0)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_MASK (0x0000000F)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_SHIFT (4)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_MASK (0x000000F0)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_SHIFT (8)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_MASK (0x00000F00)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_SHIFT (12)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_MASK (0x0000F000)
+#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_RESERVED_MASK (0xFFFF0000)
+
+#define SCU_SGPBRx_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_ ## name, value)
+
+#define SCU_SGPIO_START_DRIVE_LOWER_R0_SHIFT (0)
+#define SCU_SGPIO_START_DRIVE_LOWER_R0_MASK (0x00000003)
+#define SCU_SGPIO_START_DRIVE_LOWER_R1_SHIFT (4)
+#define SCU_SGPIO_START_DRIVE_LOWER_R1_MASK (0x00000030)
+#define SCU_SGPIO_START_DRIVE_LOWER_R2_SHIFT (8)
+#define SCU_SGPIO_START_DRIVE_LOWER_R2_MASK (0x00000300)
+#define SCU_SGPIO_START_DRIVE_LOWER_R3_SHIFT (12)
+#define SCU_SGPIO_START_DRIVE_LOWER_R3_MASK (0x00003000)
+#define SCU_SGPIO_START_DRIVE_LOWER_RESERVED_MASK (0xFFFF8888)
+
+#define SCU_SGSDLRx_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_LOWER_ ## name, value)
+
+#define SCU_SGPIO_START_DRIVE_UPPER_R0_SHIFT (0)
+#define SCU_SGPIO_START_DRIVE_UPPER_R0_MASK (0x00000003)
+#define SCU_SGPIO_START_DRIVE_UPPER_R1_SHIFT (4)
+#define SCU_SGPIO_START_DRIVE_UPPER_R1_MASK (0x00000030)
+#define SCU_SGPIO_START_DRIVE_UPPER_R2_SHIFT (8)
+#define SCU_SGPIO_START_DRIVE_UPPER_R2_MASK (0x00000300)
+#define SCU_SGPIO_START_DRIVE_UPPER_R3_SHIFT (12)
+#define SCU_SGPIO_START_DRIVE_UPPER_R3_MASK (0x00003000)
+#define SCU_SGPIO_START_DRIVE_UPPER_RESERVED_MASK (0xFFFF8888)
+
+#define SCU_SGSDURx_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_UPPER_ ## name, value)
+
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_SHIFT (0)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_MASK (0x00000003)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_SHIFT (4)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_MASK (0x00000030)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_SHIFT (8)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_MASK (0x00000300)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_SHIFT (12)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_MASK (0x00003000)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_RESERVED_MASK (0xFFFF8888)
+
+#define SCU_SGSIDLRx_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_ ## name, value)
+
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_SHIFT (0)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_MASK (0x00000003)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_SHIFT (4)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_MASK (0x00000030)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_SHIFT (8)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_MASK (0x00000300)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_SHIFT (12)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_MASK (0x00003000)
+#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_RESERVED_MASK (0xFFFF8888)
+
+#define SCU_SGSIDURx_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_ ## name, value)
+
+#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_SHIFT (0)
+#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_MASK (0x0000000F)
+#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_RESERVED_MASK (0xFFFFFFF0)
+
+#define SCU_SGVSCR_GEN_VAL(value) \
+ SCU_GEN_VALUE(SCU_SGPIO_VENDOR_SPECIFIC_CODE, value)
+
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA0_SHIFT (0)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA0_MASK (0x00000003)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA0_SHIFT (2)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA0_MASK (0x00000004)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA0_SHIFT (3)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA0_MASK (0x00000008)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA1_SHIFT (4)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA1_MASK (0x00000030)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA1_SHIFT (6)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA1_MASK (0x00000040)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA1_SHIFT (7)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA1_MASK (0x00000080)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA2_SHIFT (8)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA2_MASK (0x00000300)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA2_SHIFT (10)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA2_MASK (0x00000400)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA2_SHIFT (11)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA2_MASK (0x00000800)
+#define SCU_SGPIO_OUPUT_DATA_SELECT_RESERVED_MASK (0xFFFFF000)
+
+#define SCU_SGODSR_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SGPIO_OUPUT_DATA_SELECT_ ## name, value)
+
+#define SCU_SGODSR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SGPIO_OUPUT_DATA_SELECT_ ## name)
+
+/*
+ * *****************************************************************************
+ * * SMU Registers
+ * ***************************************************************************** */
+
+/*
+ * ----------------------------------------------------------------------------
+ * SMU Registers
+ * These registers are based off of BAR0
+ *
+ * To calculate the offset for other functions use
+ * BAR0 + FN# * SystemPageSize * 2
+ *
+ * The TCA is only accessible from FN#0 (Physical Function), and each
+ * function's entry is programmed at (BAR0 + SCU_SMU_TCA_OFFSET + (FN# * 0x04)), i.e.
+ * TCA0 for FN#0 is at BAR0 + 0x0400
+ * TCA1 for FN#1 is at BAR0 + 0x0404
+ * etc.
+ * ----------------------------------------------------------------------------
+ * Accessible to all FN#s */
+#define SCU_SMU_PCP_OFFSET 0x0000
+#define SCU_SMU_AMR_OFFSET 0x0004
+#define SCU_SMU_ISR_OFFSET 0x0010
+#define SCU_SMU_IMR_OFFSET 0x0014
+#define SCU_SMU_ICC_OFFSET 0x0018
+#define SCU_SMU_HTTLBAR_OFFSET 0x0020
+#define SCU_SMU_HTTUBAR_OFFSET 0x0024
+#define SCU_SMU_TCR_OFFSET 0x0028
+#define SCU_SMU_CQLBAR_OFFSET 0x0030
+#define SCU_SMU_CQUBAR_OFFSET 0x0034
+#define SCU_SMU_CQPR_OFFSET 0x0040
+#define SCU_SMU_CQGR_OFFSET 0x0044
+#define SCU_SMU_CQC_OFFSET 0x0048
+/* Accessible to FN#0 only */
+#define SCU_SMU_RNCLBAR_OFFSET 0x0080
+#define SCU_SMU_RNCUBAR_OFFSET 0x0084
+#define SCU_SMU_DCC_OFFSET 0x0090
+#define SCU_SMU_DFC_OFFSET 0x0094
+#define SCU_SMU_SMUCSR_OFFSET 0x0098
+#define SCU_SMU_SCUSRCR_OFFSET 0x009C
+#define SCU_SMU_SMAW_OFFSET 0x00A0
+#define SCU_SMU_SMDW_OFFSET 0x00A4
+/* Accessible to FN#0 only */
+#define SCU_SMU_TCA_OFFSET 0x0400
+/* Accessible to all FN#s */
+#define SCU_SMU_MT_MLAR0_OFFSET 0x2000
+#define SCU_SMU_MT_MUAR0_OFFSET 0x2004
+#define SCU_SMU_MT_MDR0_OFFSET 0x2008
+#define SCU_SMU_MT_VCR0_OFFSET 0x200C
+#define SCU_SMU_MT_MLAR1_OFFSET 0x2010
+#define SCU_SMU_MT_MUAR1_OFFSET 0x2014
+#define SCU_SMU_MT_MDR1_OFFSET 0x2018
+#define SCU_SMU_MT_VCR1_OFFSET 0x201C
+#define SCU_SMU_MPBA_OFFSET 0x3000
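+
+/*
+ * Illustrative sketch only, not part of the hardware definition: per the
+ * comment above, the register block for another function lives at
+ * BAR0 + FN# * SystemPageSize * 2, and each function's TCA entry is one
+ * dword in the array starting at SCU_SMU_TCA_OFFSET. The helper below is
+ * hypothetical and merely restates that arithmetic.
+ */
+#if 0
+static inline u32 scu_smu_tca_offset(u32 fn)
+{
+ /* TCA entries are packed at 4-byte strides: TCA0 @ 0x0400, TCA1 @ 0x0404, ... */
+ return SCU_SMU_TCA_OFFSET + (fn * 0x04);
+}
+#endif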
+
+/**
+ * struct smu_registers - These are the SMU registers
+ *
+ *
+ */
+struct smu_registers {
+/* 0x0000 PCP */
+ u32 post_context_port;
+/* 0x0004 AMR */
+ u32 address_modifier;
+ u32 reserved_08;
+ u32 reserved_0C;
+/* 0x0010 ISR */
+ u32 interrupt_status;
+/* 0x0014 IMR */
+ u32 interrupt_mask;
+/* 0x0018 ICC */
+ u32 interrupt_coalesce_control;
+ u32 reserved_1C;
+/* 0x0020 HTTLBAR */
+ u32 host_task_table_lower;
+/* 0x0024 HTTUBAR */
+ u32 host_task_table_upper;
+/* 0x0028 TCR */
+ u32 task_context_range;
+ u32 reserved_2C;
+/* 0x0030 CQLBAR */
+ u32 completion_queue_lower;
+/* 0x0034 CQUBAR */
+ u32 completion_queue_upper;
+ u32 reserved_38;
+ u32 reserved_3C;
+/* 0x0040 CQPR */
+ u32 completion_queue_put;
+/* 0x0044 CQGR */
+ u32 completion_queue_get;
+/* 0x0048 CQC */
+ u32 completion_queue_control;
+ u32 reserved_4C;
+ u32 reserved_5x[4];
+ u32 reserved_6x[4];
+ u32 reserved_7x[4];
+/*
+ * Accessible to FN#0 only
+ * 0x0080 RNCLBAR */
+ u32 remote_node_context_lower;
+/* 0x0084 RNCUBAR */
+ u32 remote_node_context_upper;
+ u32 reserved_88;
+ u32 reserved_8C;
+/* 0x0090 DCC */
+ u32 device_context_capacity;
+/* 0x0094 DFC */
+ u32 device_function_capacity;
+/* 0x0098 SMUCSR */
+ u32 control_status;
+/* 0x009C SCUSRCR */
+ u32 soft_reset_control;
+/* 0x00A0 SMAW */
+ u32 mmr_address_window;
+/* 0x00A4 SMDW */
+ u32 mmr_data_window;
+ u32 reserved_A8;
+ u32 reserved_AC;
+/* A whole bunch of reserved space */
+ u32 reserved_Bx[4];
+ u32 reserved_Cx[4];
+ u32 reserved_Dx[4];
+ u32 reserved_Ex[4];
+ u32 reserved_Fx[4];
+ u32 reserved_1xx[64];
+ u32 reserved_2xx[64];
+ u32 reserved_3xx[64];
+/*
+ * Accessible to FN#0 only
+ * 0x0400 TCA */
+ u32 task_context_assignment[256];
+/* MSI-X registers not included */
+};
+
+/*
+ * *****************************************************************************
+ * SDMA Registers
+ * ***************************************************************************** */
+#define SCU_SDMA_BASE 0x6000
+#define SCU_SDMA_PUFATLHAR_OFFSET 0x0000
+#define SCU_SDMA_PUFATUHAR_OFFSET 0x0004
+#define SCU_SDMA_UFLHBAR_OFFSET 0x0008
+#define SCU_SDMA_UFUHBAR_OFFSET 0x000C
+#define SCU_SDMA_UFQC_OFFSET 0x0010
+#define SCU_SDMA_UFQPP_OFFSET 0x0014
+#define SCU_SDMA_UFQGP_OFFSET 0x0018
+#define SCU_SDMA_PDMACR_OFFSET 0x001C
+#define SCU_SDMA_CDMACR_OFFSET 0x0080
+
+/**
+ * struct scu_sdma_registers - These are the SCU SDMA Registers
+ *
+ *
+ */
+struct scu_sdma_registers {
+/* 0x0000 PUFATLHAR */
+ u32 uf_address_table_lower;
+/* 0x0004 PUFATUHAR */
+ u32 uf_address_table_upper;
+/* 0x0008 UFLHBAR */
+ u32 uf_header_base_address_lower;
+/* 0x000C UFUHBAR */
+ u32 uf_header_base_address_upper;
+/* 0x0010 UFQC */
+ u32 unsolicited_frame_queue_control;
+/* 0x0014 UFQPP */
+ u32 unsolicited_frame_put_pointer;
+/* 0x0018 UFQGP */
+ u32 unsolicited_frame_get_pointer;
+/* 0x001C PDMACR */
+ u32 pdma_configuration;
+/* Reserved until offset 0x80 */
+ u32 reserved_0020_007C[0x18];
+/* 0x0080 CDMACR */
+ u32 cdma_configuration;
+/* Remainder SDMA register space */
+ u32 reserved_0084_0400[0xDF];
+
+};
+
+/*
+ * *****************************************************************************
+ * * SCU Link Registers
+ * ***************************************************************************** */
+#define SCU_PEG0_OFFSET 0x0000
+#define SCU_PEG1_OFFSET 0x8000
+
+#define SCU_TL0_OFFSET 0x0000
+#define SCU_TL1_OFFSET 0x0400
+#define SCU_TL2_OFFSET 0x0800
+#define SCU_TL3_OFFSET 0x0C00
+
+#define SCU_LL_OFFSET 0x0080
+#define SCU_LL0_OFFSET (SCU_TL0_OFFSET + SCU_LL_OFFSET)
+#define SCU_LL1_OFFSET (SCU_TL1_OFFSET + SCU_LL_OFFSET)
+#define SCU_LL2_OFFSET (SCU_TL2_OFFSET + SCU_LL_OFFSET)
+#define SCU_LL3_OFFSET (SCU_TL3_OFFSET + SCU_LL_OFFSET)
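+
+/*
+ * Sketch only: an absolute link layer register offset is composed as
+ * PEG base + transport layer base + SCU_LL_OFFSET + register offset.
+ * The helper below is hypothetical and simply restates that sum.
+ */
+#if 0
+static inline u32 scu_link_layer_reg(u32 peg_base, u32 tl_base, u32 reg)
+{
+ /* e.g. scu_link_layer_reg(SCU_PEG1_OFFSET, SCU_TL2_OFFSET, reg) */
+ return peg_base + tl_base + SCU_LL_OFFSET + reg;
+}
+#endif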
+
+/* Transport Layer Offsets (PEG + TL) */
+#define SCU_TLCR_OFFSET 0x0000
+#define SCU_TLADTR_OFFSET 0x0004
+#define SCU_TLTTMR_OFFSET 0x0008
+#define SCU_TLEECR0_OFFSET 0x000C
+#define SCU_STPTLDARNI_OFFSET 0x0010
+
+
+#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_SHIFT (0)
+#define SCU_TLCR_HASH_SAS_CHECKING_ENABLE_MASK (0x00000001)
+#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_SHIFT (1)
+#define SCU_TLCR_CLEAR_TCI_NCQ_MAPPING_TABLE_MASK (0x00000002)
+#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_SHIFT (3)
+#define SCU_TLCR_STP_WRITE_DATA_PREFETCH_MASK (0x00000008)
+#define SCU_TLCR_CMD_NAK_STATUS_CODE_SHIFT (4)
+#define SCU_TLCR_CMD_NAK_STATUS_CODE_MASK (0x00000010)
+#define SCU_TLCR_RESERVED_MASK (0xFFFFFFEB)
+
+#define SCU_TLCR_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_TLCR_ ## name)
+
+/**
+ * struct scu_transport_layer_registers - These are the SCU Transport Layer
+ * registers
+ *
+ *
+ */
+struct scu_transport_layer_registers {
+ /* 0x0000 TLCR */
+ u32 control;
+ /* 0x0004 TLADTR */
+ u32 arbitration_delay_timer;
+ /* 0x0008 TLTTMR */
+ u32 timer_test_mode;
+ /* 0x000C reserved */
+ u32 reserved_0C;
+ /* 0x0010 STPTLDARNI */
+ u32 stp_rni;
+ /* 0x0014 TLFEWPORCTRL */
+ u32 tlfe_wpo_read_control;
+ /* 0x0018 TLFEWPORDATA */
+ u32 tlfe_wpo_read_data;
+ /* 0x001C RXTLSSCSR1 */
+ u32 rxtl_single_step_control_status_1;
+ /* 0x0020 RXTLSSCSR2 */
+ u32 rxtl_single_step_control_status_2;
+ /* 0x0024 AWTRDDCR */
+ u32 tlfe_awt_retry_delay_debug_control;
+ /* Remainder of TL memory space */
+ u32 reserved_0028_007F[0x16];
+
+};
+
+/* Protocol Engine Group Registers */
+#define SCU_SCUVZECRx_OFFSET 0x1080
+
+/* Link Layer Offsets (PEG + TL + LL) */
+#define SCU_SAS_SPDTOV_OFFSET 0x0000
+#define SCU_SAS_LLSTA_OFFSET 0x0004
+#define SCU_SATA_PSELTOV_OFFSET 0x0008
+#define SCU_SAS_TIMETOV_OFFSET 0x0010
+#define SCU_SAS_LOSTOT_OFFSET 0x0014
+#define SCU_SAS_LNKTOV_OFFSET 0x0018
+#define SCU_SAS_PHYTOV_OFFSET 0x001C
+#define SCU_SAS_AFERCNT_OFFSET 0x0020
+#define SCU_SAS_WERCNT_OFFSET 0x0024
+#define SCU_SAS_TIID_OFFSET 0x0028
+#define SCU_SAS_TIDNH_OFFSET 0x002C
+#define SCU_SAS_TIDNL_OFFSET 0x0030
+#define SCU_SAS_TISSAH_OFFSET 0x0034
+#define SCU_SAS_TISSAL_OFFSET 0x0038
+#define SCU_SAS_TIPID_OFFSET 0x003C
+#define SCU_SAS_TIRES2_OFFSET 0x0040
+#define SCU_SAS_ADRSTA_OFFSET 0x0044
+#define SCU_SAS_MAWTTOV_OFFSET 0x0048
+#define SCU_SAS_FRPLDFIL_OFFSET 0x0054
+#define SCU_SAS_RFCNT_OFFSET 0x0060
+#define SCU_SAS_TFCNT_OFFSET 0x0064
+#define SCU_SAS_RFDCNT_OFFSET 0x0068
+#define SCU_SAS_TFDCNT_OFFSET 0x006C
+#define SCU_SAS_LERCNT_OFFSET 0x0070
+#define SCU_SAS_RDISERRCNT_OFFSET 0x0074
+#define SCU_SAS_CRERCNT_OFFSET 0x0078
+#define SCU_STPCTL_OFFSET 0x007C
+#define SCU_SAS_PCFG_OFFSET 0x0080
+#define SCU_SAS_CLKSM_OFFSET 0x0084
+#define SCU_SAS_TXCOMWAKE_OFFSET 0x0088
+#define SCU_SAS_TXCOMINIT_OFFSET 0x008C
+#define SCU_SAS_TXCOMSAS_OFFSET 0x0090
+#define SCU_SAS_COMINIT_OFFSET 0x0094
+#define SCU_SAS_COMWAKE_OFFSET 0x0098
+#define SCU_SAS_COMSAS_OFFSET 0x009C
+#define SCU_SAS_SFERCNT_OFFSET 0x00A0
+#define SCU_SAS_CDFERCNT_OFFSET 0x00A4
+#define SCU_SAS_DNFERCNT_OFFSET 0x00A8
+#define SCU_SAS_PRSTERCNT_OFFSET 0x00AC
+#define SCU_SAS_CNTCTL_OFFSET 0x00B0
+#define SCU_SAS_SSPTOV_OFFSET 0x00B4
+#define SCU_FTCTL_OFFSET 0x00B8
+#define SCU_FRCTL_OFFSET 0x00BC
+#define SCU_FTWMRK_OFFSET 0x00C0
+#define SCU_ENSPINUP_OFFSET 0x00C4
+#define SCU_SAS_TRNTOV_OFFSET 0x00C8
+#define SCU_SAS_PHYCAP_OFFSET 0x00CC
+#define SCU_SAS_PHYCTL_OFFSET 0x00D0
+#define SCU_SAS_LLCTL_OFFSET 0x00D8
+#define SCU_AFE_XCVRCR_OFFSET 0x00DC
+#define SCU_AFE_LUTCR_OFFSET 0x00E0
+
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_SHIFT (0)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_MASK (0x00000003)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN1 (0)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2 (1)
+#define SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN3 (2)
+#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_SHIFT (2)
+#define SCU_SAS_LINK_LAYER_CONTROL_BROADCAST_PRIMITIVE_MASK (0x000003FC)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_SHIFT (16)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_ACTIVE_TASK_DISABLE_MASK (0x00010000)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_SHIFT (17)
+#define SCU_SAS_LINK_LAYER_CONTROL_CLOSE_NO_OUTBOUND_TASK_DISABLE_MASK (0x00020000)
+#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_SHIFT (24)
+#define SCU_SAS_LINK_LAYER_CONTROL_NO_OUTBOUND_TASK_TIMEOUT_MASK (0xFF000000)
+#define SCU_SAS_LINK_LAYER_CONTROL_RESERVED (0x00FCFC00)
+
+#define SCU_SAS_LLCTL_GEN_VAL(name, value) \
+ SCU_GEN_VALUE(SCU_SAS_LINK_LAYER_CONTROL_ ## name, value)
+
+#define SCU_SAS_LLCTL_GEN_BIT(name) \
+ SCU_GEN_BIT(SCU_SAS_LINK_LAYER_CONTROL_ ## name)
+
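+/*
+ * Usage sketch (assumes the SCU_GEN_VALUE()/SCU_GEN_BIT() helpers defined
+ * earlier in this file): building a link layer control value that selects a
+ * gen2 maximum rate and disables CLOSE on no active task might look like:
+ */
+#if 0
+ u32 llctl = SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE,
+   SCU_SAS_LINK_LAYER_CONTROL_MAX_LINK_RATE_GEN2) |
+  SCU_SAS_LLCTL_GEN_BIT(CLOSE_NO_ACTIVE_TASK_DISABLE);
+#endif
+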
+
+/* #define SCU_FRXHECR_DCNT_OFFSET 0x00B0 */
+#define SCU_PSZGCR_OFFSET 0x00E4
+#define SCU_SAS_RECPHYCAP_OFFSET 0x00E8
+/* #define SCU_TX_LUTSEL_OFFSET 0x00B8 */
+
+#define SCU_SAS_PTxC_OFFSET 0x00D4 /* Same offset as SAS_TCTSTM */
+
+/**
+ * struct scu_link_layer_registers - SCU Link Layer Registers
+ *
+ *
+ */
+struct scu_link_layer_registers {
+/* 0x0000 SAS_SPDTOV */
+ u32 speed_negotiation_timers;
+/* 0x0004 SAS_LLSTA */
+ u32 link_layer_status;
+/* 0x0008 SATA_PSELTOV */
+ u32 port_selector_timeout;
+ u32 reserved0C;
+/* 0x0010 SAS_TIMETOV */
+ u32 timeout_unit_value;
+/* 0x0014 SAS_RCDTOV */
+ u32 rcd_timeout;
+/* 0x0018 SAS_LNKTOV */
+ u32 link_timer_timeouts;
+/* 0x001C SAS_PHYTOV */
+ u32 sas_phy_timeouts;
+/* 0x0020 SAS_AFERCNT */
+ u32 received_address_frame_error_counter;
+/* 0x0024 SAS_WERCNT */
+ u32 invalid_dword_counter;
+/* 0x0028 SAS_TIID */
+ u32 transmit_identification;
+/* 0x002C SAS_TIDNH */
+ u32 sas_device_name_high;
+/* 0x0030 SAS_TIDNL */
+ u32 sas_device_name_low;
+/* 0x0034 SAS_TISSAH */
+ u32 source_sas_address_high;
+/* 0x0038 SAS_TISSAL */
+ u32 source_sas_address_low;
+/* 0x003C SAS_TIPID */
+ u32 identify_frame_phy_id;
+/* 0x0040 SAS_TIRES2 */
+ u32 identify_frame_reserved;
+/* 0x0044 SAS_ADRSTA */
+ u32 received_address_frame;
+/* 0x0048 SAS_MAWTTOV */
+ u32 maximum_arbitration_wait_timer_timeout;
+/* 0x004C SAS_PTxC */
+ u32 transmit_primitive;
+/* 0x0050 SAS_RORES */
+ u32 error_counter_event_notification_control;
+/* 0x0054 SAS_FRPLDFIL */
+ u32 frxq_payload_fill_threshold;
+/* 0x0058 SAS_LLHANG_TOT */
+ u32 link_layer_hang_detection_timeout;
+ u32 reserved_5C;
+/* 0x0060 SAS_RFCNT */
+ u32 received_frame_count;
+/* 0x0064 SAS_TFCNT */
+ u32 transmit_frame_count;
+/* 0x0068 SAS_RFDCNT */
+ u32 received_dword_count;
+/* 0x006C SAS_TFDCNT */
+ u32 transmit_dword_count;
+/* 0x0070 SAS_LERCNT */
+ u32 loss_of_sync_error_count;
+/* 0x0074 SAS_RDISERRCNT */
+ u32 running_disparity_error_count;
+/* 0x0078 SAS_CRERCNT */
+ u32 received_frame_crc_error_count;
+/* 0x007C STPCTL */
+ u32 stp_control;
+/* 0x0080 SAS_PCFG */
+ u32 phy_configuration;
+/* 0x0084 SAS_CLKSM */
+ u32 clock_skew_management;
+/* 0x0088 SAS_TXCOMWAKE */
+ u32 transmit_comwake_signal;
+/* 0x008C SAS_TXCOMINIT */
+ u32 transmit_cominit_signal;
+/* 0x0090 SAS_TXCOMSAS */
+ u32 transmit_comsas_signal;
+/* 0x0094 SAS_COMINIT */
+ u32 cominit_control;
+/* 0x0098 SAS_COMWAKE */
+ u32 comwake_control;
+/* 0x009C SAS_COMSAS */
+ u32 comsas_control;
+/* 0x00A0 SAS_SFERCNT */
+ u32 received_short_frame_count;
+/* 0x00A4 SAS_CDFERCNT */
+ u32 received_frame_without_credit_count;
+/* 0x00A8 SAS_DNFERCNT */
+ u32 received_frame_after_done_count;
+/* 0x00AC SAS_PRSTERCNT */
+ u32 phy_reset_problem_count;
+/* 0x00B0 SAS_CNTCTL */
+ u32 counter_control;
+/* 0x00B4 SAS_SSPTOV */
+ u32 ssp_timer_timeout_values;
+/* 0x00B8 FTCTL */
+ u32 ftx_control;
+/* 0x00BC FRCTL */
+ u32 frx_control;
+/* 0x00C0 FTWMRK */
+ u32 ftx_watermark;
+/* 0x00C4 ENSPINUP */
+ u32 notify_enable_spinup_control;
+/* 0x00C8 SAS_TRNTOV */
+ u32 sas_training_sequence_timer_values;
+/* 0x00CC SAS_PHYCAP */
+ u32 phy_capabilities;
+/* 0x00D0 SAS_PHYCTL */
+ u32 phy_control;
+ u32 reserved_d4;
+/* 0x00D8 LLCTL */
+ u32 link_layer_control;
+/* 0x00DC AFE_XCVRCR */
+ u32 afe_xcvr_control;
+/* 0x00E0 AFE_LUTCR */
+ u32 afe_lookup_table_control;
+/* 0x00E4 PSZGCR */
+ u32 phy_source_zone_group_control;
+/* 0x00E8 SAS_RECPHYCAP */
+ u32 receive_phycap;
+ u32 reserved_ec;
+/* 0x00F0 SNAFERXRSTCTL */
+ u32 speed_negotiation_afe_rx_reset_control;
+/* 0x00F4 SAS_SSIPMCTL */
+ u32 power_management_control;
+/* 0x00F8 SAS_PSPREQ_PRIM */
+ u32 sas_pm_partial_request_primitive;
+/* 0x00FC SAS_PSSREQ_PRIM */
+ u32 sas_pm_slumber_request_primitive;
+/* 0x0100 SAS_PPSACK_PRIM */
+ u32 sas_pm_ack_primitive_register;
+/* 0x0104 SAS_PSNAK_PRIM */
+ u32 sas_pm_nak_primitive_register;
+/* 0x0108 SAS_SSIPMTOV */
+ u32 sas_primitive_timeout;
+ u32 reserved_10c;
+/* 0x0110 - 0x011C PLAPRDCTRLxREG */
+ u32 pla_product_control[4];
+/* 0x0120 PLAPRDSUMREG */
+ u32 pla_product_sum;
+/* 0x0124 PLACONTROLREG */
+ u32 pla_control;
+/* Remainder of memory space 896 bytes */
+ u32 reserved_0128_037f[0x96];
+
+};
+
+/*
+ * 0x00D4 // Same offset as SAS_TCTSTM SAS_PTxC
+ * u32 primitive_transmit_control; */
+
+/*
+ * ----------------------------------------------------------------------------
+ * SGPIO
+ * ---------------------------------------------------------------------------- */
+#define SCU_SGPIO_OFFSET 0x1400
+
+/* #define SCU_SGPIO_OFFSET 0x6000 // later moves to 0x1400 see HSD 652625 */
+#define SCU_SGPIO_SGICR_OFFSET 0x0000
+#define SCU_SGPIO_SGPBR_OFFSET 0x0004
+#define SCU_SGPIO_SGSDLR_OFFSET 0x0008
+#define SCU_SGPIO_SGSDUR_OFFSET 0x000C
+#define SCU_SGPIO_SGSIDLR_OFFSET 0x0010
+#define SCU_SGPIO_SGSIDUR_OFFSET 0x0014
+#define SCU_SGPIO_SGVSCR_OFFSET 0x0018
+/* SGODSR entries occupy offsets 0x0020 to 0x003C */
+#define SCU_SGPIO_SGODSR_OFFSET 0x0020
+
+/**
+ * struct scu_sgpio_registers - SCU SGPIO Registers
+ *
+ *
+ */
+struct scu_sgpio_registers {
+/* 0x0000 SGPIO_SGICR */
+ u32 interface_control;
+/* 0x0004 SGPIO_SGPBR */
+ u32 blink_rate;
+/* 0x0008 SGPIO_SGSDLR */
+ u32 start_drive_lower;
+/* 0x000C SGPIO_SGSDUR */
+ u32 start_drive_upper;
+/* 0x0010 SGPIO_SGSIDLR */
+ u32 serial_input_lower;
+/* 0x0014 SGPIO_SGSIDUR */
+ u32 serial_input_upper;
+/* 0x0018 SGPIO_SGVSCR */
+ u32 vendor_specific_code;
+/* 0x0020 SGPIO_SGODSR */
+ u32 ouput_data_select[8];
+/* Remainder of memory space 256 bytes */
+ u32 reserved_1444_14ff[0x31];
+
+};
+
+/*
+ * *****************************************************************************
+ * * Defines for VIIT entry offsets
+ * * Access additional entries by SCU_VIIT_BASE + index * 0x10
+ * ***************************************************************************** */
+#define SCU_VIIT_BASE 0x1c00
+
+struct scu_viit_registers {
+ u32 registers[256];
+};
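+
+/*
+ * Sketch only: per the comment above, VIIT entry n is reached at
+ * SCU_VIIT_BASE + n * 0x10; a hypothetical helper for that arithmetic:
+ */
+#if 0
+static inline u32 scu_viit_entry_offset(u32 index)
+{
+ return SCU_VIIT_BASE + index * 0x10;
+}
+#endif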
+
+/*
+ * *****************************************************************************
+ * * SCU PORT TASK SCHEDULER REGISTERS
+ * ***************************************************************************** */
+
+#define SCU_PTSG_BASE 0x1000
+
+#define SCU_PTSG_PTSGCR_OFFSET 0x0000
+#define SCU_PTSG_RTCR_OFFSET 0x0004
+#define SCU_PTSG_RTCCR_OFFSET 0x0008
+#define SCU_PTSG_PTS0CR_OFFSET 0x0010
+#define SCU_PTSG_PTS0SR_OFFSET 0x0014
+#define SCU_PTSG_PTS1CR_OFFSET 0x0018
+#define SCU_PTSG_PTS1SR_OFFSET 0x001C
+#define SCU_PTSG_PTS2CR_OFFSET 0x0020
+#define SCU_PTSG_PTS2SR_OFFSET 0x0024
+#define SCU_PTSG_PTS3CR_OFFSET 0x0028
+#define SCU_PTSG_PTS3SR_OFFSET 0x002C
+#define SCU_PTSG_PCSPE0CR_OFFSET 0x0030
+#define SCU_PTSG_PCSPE1CR_OFFSET 0x0034
+#define SCU_PTSG_PCSPE2CR_OFFSET 0x0038
+#define SCU_PTSG_PCSPE3CR_OFFSET 0x003C
+#define SCU_PTSG_ETMTSCCR_OFFSET 0x0040
+#define SCU_PTSG_ETMRNSCCR_OFFSET 0x0044
+
+/**
+ * struct scu_port_task_scheduler_registers - These are the control/status pairs
+ * for each Port Task Scheduler.
+ *
+ *
+ */
+struct scu_port_task_scheduler_registers {
+ u32 control;
+ u32 status;
+};
+
+/**
+ * struct scu_port_task_scheduler_group_registers - These are the PORT Task
+ * Scheduler registers
+ *
+ *
+ */
+struct scu_port_task_scheduler_group_registers {
+/* 0x0000 PTSGCR */
+ u32 control;
+/* 0x0004 RTCR */
+ u32 real_time_clock;
+/* 0x0008 RTCCR */
+ u32 real_time_clock_control;
+/* 0x000C */
+ u32 reserved_0C;
+/*
+ * 0x0010 PTS0CR
+ * 0x0014 PTS0SR
+ * 0x0018 PTS1CR
+ * 0x001C PTS1SR
+ * 0x0020 PTS2CR
+ * 0x0024 PTS2SR
+ * 0x0028 PTS3CR
+ * 0x002C PTS3SR */
+ struct scu_port_task_scheduler_registers port[4];
+/*
+ * 0x0030 PCSPE0CR
+ * 0x0034 PCSPE1CR
+ * 0x0038 PCSPE2CR
+ * 0x003C PCSPE3CR */
+ u32 protocol_engine[4];
+/* 0x0040 ETMTSCCR */
+ u32 tc_scanning_interval_control;
+/* 0x0044 ETMRNSCCR */
+ u32 rnc_scanning_interval_control;
+/* Remainder of memory space 128 bytes */
+ u32 reserved_1048_107f[0x0E];
+
+};
+
+#define SCU_PTSG_SCUVZECR_OFFSET 0x003C
+
+/*
+ * *****************************************************************************
+ * * AFE REGISTERS
+ * ***************************************************************************** */
+#define SCU_AFE_MMR_BASE 0xE000
+
+/*
+ * AFE 0 is at offset 0x0800
+ * AFE 1 is at offset 0x0900
+ * AFE 2 is at offset 0x0a00
+ * AFE 3 is at offset 0x0b00 */
+struct scu_afe_transceiver {
+ /* 0x0000 AFE_XCVR_CTRL0 */
+ u32 afe_xcvr_control0;
+ /* 0x0004 AFE_XCVR_CTRL1 */
+ u32 afe_xcvr_control1;
+ /* 0x0008 */
+ u32 reserved_0008;
+ /* 0x000c afe_dfx_rx_control0 */
+ u32 afe_dfx_rx_control0;
+ /* 0x0010 AFE_DFX_RX_CTRL1 */
+ u32 afe_dfx_rx_control1;
+ /* 0x0014 */
+ u32 reserved_0014;
+ /* 0x0018 AFE_DFX_RX_STS0 */
+ u32 afe_dfx_rx_status0;
+ /* 0x001c AFE_DFX_RX_STS1 */
+ u32 afe_dfx_rx_status1;
+ /* 0x0020 */
+ u32 reserved_0020;
+ /* 0x0024 AFE_TX_CTRL */
+ u32 afe_tx_control;
+ /* 0x0028 AFE_TX_AMP_CTRL0 */
+ u32 afe_tx_amp_control0;
+ /* 0x002c AFE_TX_AMP_CTRL1 */
+ u32 afe_tx_amp_control1;
+ /* 0x0030 AFE_TX_AMP_CTRL2 */
+ u32 afe_tx_amp_control2;
+ /* 0x0034 AFE_TX_AMP_CTRL3 */
+ u32 afe_tx_amp_control3;
+ /* 0x0038 afe_tx_ssc_control */
+ u32 afe_tx_ssc_control;
+ /* 0x003c */
+ u32 reserved_003c;
+ /* 0x0040 AFE_RX_SSC_CTRL0 */
+ u32 afe_rx_ssc_control0;
+ /* 0x0044 AFE_RX_SSC_CTRL1 */
+ u32 afe_rx_ssc_control1;
+ /* 0x0048 AFE_RX_SSC_CTRL2 */
+ u32 afe_rx_ssc_control2;
+ /* 0x004c AFE_RX_EQ_STS0 */
+ u32 afe_rx_eq_status0;
+ /* 0x0050 AFE_RX_EQ_STS1 */
+ u32 afe_rx_eq_status1;
+ /* 0x0054 AFE_RX_CDR_STS */
+ u32 afe_rx_cdr_status;
+ /* 0x0058 */
+ u32 reserved_0058;
+ /* 0x005c AFE_CHAN_CTRL */
+ u32 afe_channel_control;
+ /* 0x0060-0x006c */
+ u32 reserved_0060_006c[0x04];
+ /* 0x0070 AFE_XCVR_EC_STS0 */
+ u32 afe_xcvr_error_capture_status0;
+ /* 0x0074 AFE_XCVR_EC_STS1 */
+ u32 afe_xcvr_error_capture_status1;
+ /* 0x0078 AFE_XCVR_EC_STS2 */
+ u32 afe_xcvr_error_capture_status2;
+ /* 0x007c afe_xcvr_ec_status3 */
+ u32 afe_xcvr_error_capture_status3;
+ /* 0x0080 AFE_XCVR_EC_STS4 */
+ u32 afe_xcvr_error_capture_status4;
+ /* 0x0084 AFE_XCVR_EC_STS5 */
+ u32 afe_xcvr_error_capture_status5;
+ /* 0x0088-0x00fc */
+ u32 reserved_008c_00fc[0x1e];
+};
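+
+/*
+ * Sketch only: per the layout comment above, transceiver n lives at
+ * 0x0800 + n * 0x100 within the AFE block (matching scu_afe_xcvr[n] in
+ * struct scu_afe_registers below); the hypothetical helper restates this.
+ */
+#if 0
+static inline u32 scu_afe_xcvr_offset(u32 n)
+{
+ return 0x0800 + (n * 0x100);
+}
+#endif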
+
+/**
+ * struct scu_afe_registers - AFE Registers
+ *
+ *
+ */
+struct scu_afe_registers {
+ /* 0x0000 AFE_BIAS_CTRL */
+ u32 afe_bias_control;
+ u32 reserved_0004;
+ /* 0x0008 AFE_PLL_CTRL0 */
+ u32 afe_pll_control0;
+ /* 0x000c AFE_PLL_CTRL1 */
+ u32 afe_pll_control1;
+ /* 0x0010 AFE_PLL_CTRL2 */
+ u32 afe_pll_control2;
+ /* 0x0014 AFE_CB_STS */
+ u32 afe_common_block_status;
+ /* 0x0018-0x007c */
+ u32 reserved_18_7c[0x1a];
+ /* 0x0080 AFE_PMSN_MCTRL0 */
+ u32 afe_pmsn_master_control0;
+ /* 0x0084 AFE_PMSN_MCTRL1 */
+ u32 afe_pmsn_master_control1;
+ /* 0x0088 AFE_PMSN_MCTRL2 */
+ u32 afe_pmsn_master_control2;
+ /* 0x008C-0x00fc */
+ u32 reserved_008c_00fc[0x1D];
+ /* 0x0100 AFE_DFX_MST_CTRL0 */
+ u32 afe_dfx_master_control0;
+ /* 0x0104 AFE_DFX_MST_CTRL1 */
+ u32 afe_dfx_master_control1;
+ /* 0x0108 AFE_DFX_DCL_CTRL */
+ u32 afe_dfx_dcl_control;
+ /* 0x010c AFE_DFX_DMON_CTRL */
+ u32 afe_dfx_digital_monitor_control;
+ /* 0x0110 AFE_DFX_AMONP_CTRL */
+ u32 afe_dfx_analog_p_monitor_control;
+ /* 0x0114 AFE_DFX_AMONN_CTRL */
+ u32 afe_dfx_analog_n_monitor_control;
+ /* 0x0118 AFE_DFX_NTL_STS */
+ u32 afe_dfx_ntl_status;
+ /* 0x011c AFE_DFX_FIFO_STS0 */
+ u32 afe_dfx_fifo_status0;
+ /* 0x0120 AFE_DFX_FIFO_STS1 */
+ u32 afe_dfx_fifo_status1;
+ /* 0x0124 AFE_DFX_MPAT_CTRL */
+ u32 afe_dfx_master_pattern_control;
+ /* 0x0128 AFE_DFX_P0_CTRL */
+ u32 afe_dfx_p0_control;
+ /* 0x012c-0x01a8 AFE_DFX_P0_DRx */
+ u32 afe_dfx_p0_data[32];
+ /* 0x01ac */
+ u32 reserved_01ac;
+ /* 0x01b0-0x020c AFE_DFX_P0_IRx */
+ u32 afe_dfx_p0_instruction[24];
+ /* 0x0210 */
+ u32 reserved_0210;
+ /* 0x0214 AFE_DFX_P1_CTRL */
+ u32 afe_dfx_p1_control;
+ /* 0x0218-0x0254 AFE_DFX_P1_DRx */
+ u32 afe_dfx_p1_data[16];
+ /* 0x0258-0x029c */
+ u32 reserved_0258_029c[0x12];
+ /* 0x02a0-0x02bc AFE_DFX_P1_IRx */
+ u32 afe_dfx_p1_instruction[8];
+ /* 0x02c0-0x2fc */
+ u32 reserved_02c0_02fc[0x10];
+ /* 0x0300 AFE_DFX_TX_PMSN_CTRL */
+ u32 afe_dfx_tx_pmsn_control;
+ /* 0x0304 AFE_DFX_RX_PMSN_CTRL */
+ u32 afe_dfx_rx_pmsn_control;
+ u32 reserved_0308;
+ /* 0x030c AFE_DFX_NOA_CTRL0 */
+ u32 afe_dfx_noa_control0;
+ /* 0x0310 AFE_DFX_NOA_CTRL1 */
+ u32 afe_dfx_noa_control1;
+ /* 0x0314 AFE_DFX_NOA_CTRL2 */
+ u32 afe_dfx_noa_control2;
+ /* 0x0318 AFE_DFX_NOA_CTRL3 */
+ u32 afe_dfx_noa_control3;
+ /* 0x031c AFE_DFX_NOA_CTRL4 */
+ u32 afe_dfx_noa_control4;
+ /* 0x0320 AFE_DFX_NOA_CTRL5 */
+ u32 afe_dfx_noa_control5;
+ /* 0x0324 AFE_DFX_NOA_CTRL6 */
+ u32 afe_dfx_noa_control6;
+ /* 0x0328 AFE_DFX_NOA_CTRL7 */
+ u32 afe_dfx_noa_control7;
+ /* 0x032c-0x07fc */
+ u32 reserved_032c_07fc[0x135];
+
+ /* 0x0800-0x0bfc */
+ struct scu_afe_transceiver scu_afe_xcvr[4];
+
+ /* 0x0c00-0x0ffc */
+ u32 reserved_0c00_0ffc[0x0100];
+};
+
+struct scu_protocol_engine_group_registers {
+ u32 table[0xE0];
+};
+
+
+struct scu_viit_iit {
+ u32 table[256];
+};
+
+/**
+ * Placeholder for the ZONE Partition Table information. Zoning will not be
+ * included in the 1.1 release.
+ *
+ *
+ */
+struct scu_zone_partition_table {
+ u32 table[2048];
+};
+
+/**
+ * Placeholder for the CRAM registers since I am not yet sure whether we
+ * need to read/write these registers.
+ *
+ *
+ */
+struct scu_completion_ram {
+ u32 ram[128];
+};
+
+/**
+ * Placeholder for the FBRAM registers since I am not yet sure whether we
+ * need to read/write these registers.
+ *
+ *
+ */
+struct scu_frame_buffer_ram {
+ u32 ram[128];
+};
+
+#define scu_scratch_ram_SIZE_IN_DWORDS 256
+
+/**
+ * Placeholder for the scratch RAM registers.
+ *
+ *
+ */
+struct scu_scratch_ram {
+ u32 ram[scu_scratch_ram_SIZE_IN_DWORDS];
+};
+
+/**
+ * Placeholder since I am not yet sure what these registers are here for.
+ *
+ *
+ */
+struct noa_protocol_engine_partition {
+ u32 reserved[64];
+};
+
+/**
+ * Placeholder since I am not yet sure what these registers are here for.
+ *
+ *
+ */
+struct noa_hub_partition {
+ u32 reserved[64];
+};
+
+/**
+ * Placeholder since I am not yet sure what these registers are here for.
+ *
+ *
+ */
+struct noa_host_interface_partition {
+ u32 reserved[64];
+};
+
+/**
+ * struct transport_link_layer_pair - The SCU Hardware pairs up the TL
+ * registers with the LL registers, so we must place them adjacent to form
+ * the array of registers in the PEG.
+ *
+ *
+ */
+struct transport_link_layer_pair {
+ struct scu_transport_layer_registers tl;
+ struct scu_link_layer_registers ll;
+};
+
+/**
+ * struct scu_peg_registers - SCU Protocol Engine Memory mapped register space.
+ * These registers are unique to each protocol engine group. There can be
+ * at most two PEGs in a single SCU part.
+ *
+ *
+ */
+struct scu_peg_registers {
+ struct transport_link_layer_pair pe[4];
+ struct scu_port_task_scheduler_group_registers ptsg;
+ struct scu_protocol_engine_group_registers peg;
+ struct scu_sgpio_registers sgpio;
+ u32 reserved_01500_1BFF[0x1C0];
+ struct scu_viit_entry viit[64];
+ struct scu_zone_partition_table zpt0;
+ struct scu_zone_partition_table zpt1;
+};
+
+/**
+ * struct scu_registers - SCU registers, including both PEG register sets if
+ * that compile option is turned on. All of these registers are in the
+ * memory-mapped space returned from BAR1.
+ *
+ *
+ */
+struct scu_registers {
+ /* 0x0000 - PEG 0 */
+ struct scu_peg_registers peg0;
+
+ /* 0x6000 - SDMA and Miscellaneous */
+ struct scu_sdma_registers sdma;
+ struct scu_completion_ram cram;
+ struct scu_frame_buffer_ram fbram;
+ u32 reserved_6800_69FF[0x80];
+ struct noa_protocol_engine_partition noa_pe;
+ struct noa_hub_partition noa_hub;
+ struct noa_host_interface_partition noa_if;
+ u32 reserved_6d00_7fff[0x4c0];
+
+ /* 0x8000 - PEG 1 */
+ struct scu_peg_registers peg1;
+
+ /* 0xE000 - AFE Registers */
+ struct scu_afe_registers afe;
+
+ /* 0xF000 - reserved */
+ u32 reserved_f000_211fff[0x80c00];
+
+ /* 0x212000 - scratch RAM */
+ struct scu_scratch_ram scratch_ram;
+};
+
+#endif /* _SCU_REGISTERS_HEADER_ */
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
new file mode 100644
index 00000000000..b6e6368c266
--- /dev/null
+++ b/drivers/scsi/isci/remote_device.c
@@ -0,0 +1,1501 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <scsi/sas.h>
+#include "isci.h"
+#include "port.h"
+#include "remote_device.h"
+#include "request.h"
+#include "remote_node_context.h"
+#include "scu_event_codes.h"
+#include "task.h"
+
+/**
+ * isci_remote_device_not_ready() - This function is called by the ihost when
+ * the remote device is not ready. We mark the isci device as not
+ * "ready_for_io" and signal the waiting process.
+ * @ihost: This parameter specifies the isci host object.
+ * @idev: This parameter specifies the remote device.
+ * @reason: This parameter specifies why the device is not ready.
+ *
+ * sci_lock is held on entrance to this function.
+ */
+static void isci_remote_device_not_ready(struct isci_host *ihost,
+ struct isci_remote_device *idev, u32 reason)
+{
+ struct isci_request *ireq;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p\n", __func__, idev);
+
+ switch (reason) {
+ case SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED:
+ set_bit(IDEV_GONE, &idev->flags);
+ break;
+ case SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED:
+ set_bit(IDEV_IO_NCQERROR, &idev->flags);
+
+ /* Kill all outstanding requests for the device. */
+ list_for_each_entry(ireq, &idev->reqs_in_process, dev_node) {
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p request = %p\n",
+ __func__, idev, ireq);
+
+ sci_controller_terminate_request(ihost,
+ idev,
+ ireq);
+ }
+ /* Fall through into the default case... */
+ default:
+ clear_bit(IDEV_IO_READY, &idev->flags);
+ break;
+ }
+}
+
+/**
+ * isci_remote_device_ready() - This function is called by the ihost when the
+ * remote device is ready. We mark the isci device as ready and signal the
+ * waiting process.
+ * @ihost: our valid isci_host
+ * @idev: remote device
+ *
+ */
+static void isci_remote_device_ready(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev = %p\n", __func__, idev);
+
+ clear_bit(IDEV_IO_NCQERROR, &idev->flags);
+ set_bit(IDEV_IO_READY, &idev->flags);
+ if (test_and_clear_bit(IDEV_START_PENDING, &idev->flags))
+ wake_up(&ihost->eventq);
+}
+
+/* called once the remote node context is ready to be freed.
+ * The remote device can now report that its stop operation is complete.
+ */
+static void rnc_destruct_done(void *_dev)
+{
+ struct isci_remote_device *idev = _dev;
+
+ BUG_ON(idev->started_request_count != 0);
+ sci_change_state(&idev->sm, SCI_DEV_STOPPED);
+}
+
+static enum sci_status sci_remote_device_terminate_requests(struct isci_remote_device *idev)
+{
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+ enum sci_status status = SCI_SUCCESS;
+ u32 i;
+
+ for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) {
+ struct isci_request *ireq = ihost->reqs[i];
+ enum sci_status s;
+
+ if (!test_bit(IREQ_ACTIVE, &ireq->flags) ||
+ ireq->target_device != idev)
+ continue;
+
+ s = sci_controller_terminate_request(ihost, idev, ireq);
+ if (s != SCI_SUCCESS)
+ status = s;
+ }
+
+ return status;
+}
+
+enum sci_status sci_remote_device_stop(struct isci_remote_device *idev,
+ u32 timeout)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_DEV_STOPPED:
+ return SCI_SUCCESS;
+ case SCI_DEV_STARTING:
+ /* device not started so there had better be no requests */
+ BUG_ON(idev->started_request_count != 0);
+ sci_remote_node_context_destruct(&idev->rnc,
+ rnc_destruct_done, idev);
+ /* Transition to the stopping state and wait for the
+ * remote node to complete being posted and invalidated.
+ */
+ sci_change_state(sm, SCI_DEV_STOPPING);
+ return SCI_SUCCESS;
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_IDLE:
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_SMP_DEV_CMD:
+ sci_change_state(sm, SCI_DEV_STOPPING);
+ if (idev->started_request_count == 0) {
+ sci_remote_node_context_destruct(&idev->rnc,
+ rnc_destruct_done, idev);
+ return SCI_SUCCESS;
+ } else
+ return sci_remote_device_terminate_requests(idev);
+ break;
+ case SCI_DEV_STOPPING:
+ /* All requests should have been terminated, but if there is an
+ * attempt to stop a device already in the stopping state, then
+ * try again to terminate.
+ */
+ return sci_remote_device_terminate_requests(idev);
+ case SCI_DEV_RESETTING:
+ sci_change_state(sm, SCI_DEV_STOPPING);
+ return SCI_SUCCESS;
+ }
+}
+
+enum sci_status sci_remote_device_reset(struct isci_remote_device *idev)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_SMP_DEV_CMD:
+ case SCI_DEV_STOPPING:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_RESETTING:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_IDLE:
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ sci_change_state(sm, SCI_DEV_RESETTING);
+ return SCI_SUCCESS;
+ }
+}
+
+enum sci_status sci_remote_device_reset_complete(struct isci_remote_device *idev)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+
+ if (state != SCI_DEV_RESETTING) {
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_change_state(sm, SCI_DEV_READY);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_remote_device_suspend(struct isci_remote_device *idev,
+ u32 suspend_type)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+
+ if (state != SCI_STP_DEV_CMD) {
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ return sci_remote_node_context_suspend(&idev->rnc,
+ suspend_type, NULL, NULL);
+}
+
+enum sci_status sci_remote_device_frame_handler(struct isci_remote_device *idev,
+ u32 frame_index)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+ enum sci_status status;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_STP_DEV_IDLE:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ /* Return the frame back to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ case SCI_DEV_STOPPING:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_RESETTING: {
+ struct isci_request *ireq;
+ struct ssp_frame_hdr hdr;
+ void *frame_header;
+ ssize_t word_cnt;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ &frame_header);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ word_cnt = sizeof(hdr) / sizeof(u32);
+ sci_swab32_cpy(&hdr, frame_header, word_cnt);
+
+ ireq = sci_request_by_tag(ihost, be16_to_cpu(hdr.tag));
+ if (ireq && ireq->target_device == idev) {
+ /* The IO request is now in charge of releasing the frame */
+ status = sci_io_request_frame_handler(ireq, frame_index);
+ } else {
+ /* We could not map this tag to a valid IO
+ * request. Just toss the frame and continue.
+ */
+ sci_controller_release_frame(ihost, frame_index);
+ }
+ break;
+ }
+ case SCI_STP_DEV_NCQ: {
+ struct dev_to_host_fis *hdr;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&hdr);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ if (hdr->fis_type == FIS_SETDEVBITS &&
+ (hdr->status & ATA_ERR)) {
+ idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
+
+ /* TODO Check sactive and complete associated IO if any. */
+ sci_change_state(sm, SCI_STP_DEV_NCQ_ERROR);
+ } else if (hdr->fis_type == FIS_REGD2H &&
+ (hdr->status & ATA_ERR)) {
+ /*
+ * Some devices return a D2H FIS when an NCQ error is detected.
+ * Treat this like the SDB error FIS not-ready reason.
+ */
+ idev->not_ready_reason = SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED;
+ sci_change_state(&idev->sm, SCI_STP_DEV_NCQ_ERROR);
+ } else
+ status = SCI_FAILURE;
+
+ sci_controller_release_frame(ihost, frame_index);
+ break;
+ }
+ case SCI_STP_DEV_CMD:
+ case SCI_SMP_DEV_CMD:
+ /* The device does not process any UF received from the hardware while
+ * in this state. All unsolicited frames are forwarded to the io request
+ * object.
+ */
+ status = sci_io_request_frame_handler(idev->working_request, frame_index);
+ break;
+ }
+
+ return status;
+}
+
+static bool is_remote_device_ready(struct isci_remote_device *idev)
+{
+
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+
+ switch (state) {
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_IDLE:
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_SMP_DEV_CMD:
+ return true;
+ default:
+ return false;
+ }
+}
+
+enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
+ u32 event_code)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ enum sci_status status;
+
+ switch (scu_get_event_type(event_code)) {
+ case SCU_EVENT_TYPE_RNC_OPS_MISC:
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+ status = sci_remote_node_context_event_handler(&idev->rnc, event_code);
+ break;
+ case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
+ if (scu_get_event_code(event_code) == SCU_EVENT_IT_NEXUS_TIMEOUT) {
+ status = SCI_SUCCESS;
+
+ /* Suspend the associated RNC */
+ sci_remote_node_context_suspend(&idev->rnc,
+ SCI_SOFTWARE_SUSPENSION,
+ NULL, NULL);
+
+ dev_dbg(scirdev_to_dev(idev),
+ "%s: device: %p event code: %x: %s\n",
+ __func__, idev, event_code,
+ is_remote_device_ready(idev)
+ ? "I_T_Nexus_Timeout event"
+ : "I_T_Nexus_Timeout event in wrong state");
+
+ break;
+ }
+ /* Else, fall through and treat as unhandled... */
+ default:
+ dev_dbg(scirdev_to_dev(idev),
+ "%s: device: %p event code: %x: %s\n",
+ __func__, idev, event_code,
+ is_remote_device_ready(idev)
+ ? "unexpected event"
+ : "unexpected event in wrong state");
+ status = SCI_FAILURE_INVALID_STATE;
+ break;
+ }
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ if (state == SCI_STP_DEV_IDLE) {
+
+ /* We pick up suspension events to handle specifically in this
+ * state. We resume the RNC right away.
+ */
+ if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
+ scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX)
+ status = sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
+ }
+
+ return status;
+}
+
+static void sci_remote_device_start_request(struct isci_remote_device *idev,
+ struct isci_request *ireq,
+ enum sci_status status)
+{
+ struct isci_port *iport = idev->owning_port;
+
+ /* cleanup requests that failed after starting on the port */
+ if (status != SCI_SUCCESS)
+ sci_port_complete_io(iport, idev, ireq);
+ else {
+ kref_get(&idev->kref);
+ idev->started_request_count++;
+ }
+}
+
+enum sci_status sci_remote_device_start_io(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ struct isci_port *iport = idev->owning_port;
+ enum sci_status status;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_DEV_STOPPING:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_RESETTING:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_DEV_READY:
+ /* attempt to start an io request for this device object. The remote
+ * device object will issue the start request for the io and, if
+ * successful, will start the request for the port object and then
+ * increment its own request count.
+ */
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ status = sci_request_start(ireq);
+ break;
+ case SCI_STP_DEV_IDLE: {
+ /* Handle the start io operation for a sata device that is in
+ * the command idle state:
+ * - Evaluate the type of IO request to be started
+ * - If it is an NCQ request change to the NCQ substate
+ * - If it is any other command change to the CMD substate
+ *
+ * If this is a softreset we may want to have a different
+ * substate.
+ */
+ enum sci_remote_device_states new_state;
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ status = sci_request_start(ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ if (task->ata_task.use_ncq)
+ new_state = SCI_STP_DEV_NCQ;
+ else {
+ idev->working_request = ireq;
+ new_state = SCI_STP_DEV_CMD;
+ }
+ sci_change_state(sm, new_state);
+ break;
+ }
+ case SCI_STP_DEV_NCQ: {
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ if (task->ata_task.use_ncq) {
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ status = sci_request_start(ireq);
+ } else
+ return SCI_FAILURE_INVALID_STATE;
+ break;
+ }
+ case SCI_STP_DEV_AWAIT_RESET:
+ return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+ case SCI_SMP_DEV_IDLE:
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_io(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ status = sci_request_start(ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ idev->working_request = ireq;
+ sci_change_state(&idev->sm, SCI_SMP_DEV_CMD);
+ break;
+ case SCI_STP_DEV_CMD:
+ case SCI_SMP_DEV_CMD:
+ /* device is already handling a command; it cannot accept new commands
+ * until this one is complete.
+ */
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_remote_device_start_request(idev, ireq, status);
+ return status;
+}
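
Every ready substate in sci_remote_device_start_io() repeats the same three-step start sequence: the port, then the RNC, then the request itself. A sketch of that common sequence factored into one helper, using only functions already called in this file (illustrative only; the driver keeps the steps inline because each state diverges in what it does after a failure):

/* Sketch: run the common port -> RNC -> request start sequence and
 * stop at the first failure. Not part of the driver itself.
 */
static enum sci_status ex_start_io_seq(struct isci_port *iport,
				       struct isci_remote_device *idev,
				       struct isci_request *ireq)
{
	enum sci_status status;

	status = sci_port_start_io(iport, idev, ireq);
	if (status != SCI_SUCCESS)
		return status;

	status = sci_remote_node_context_start_io(&idev->rnc, ireq);
	if (status != SCI_SUCCESS)
		return status;

	return sci_request_start(ireq);
}
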
+
+static enum sci_status common_complete_io(struct isci_port *iport,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ enum sci_status status;
+
+ status = sci_request_complete(ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_port_complete_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ sci_remote_device_decrement_request_count(idev);
+ return status;
+}
+
+enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ struct isci_port *iport = idev->owning_port;
+ enum sci_status status;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_STP_DEV_IDLE:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_DEV_READY:
+ case SCI_STP_DEV_AWAIT_RESET:
+ case SCI_DEV_RESETTING:
+ status = common_complete_io(iport, idev, ireq);
+ break;
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ status = common_complete_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ if (ireq->sci_status == SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+ /* This request caused a hardware error; the device needs a LUN
+ * reset. Force the state machine into the AWAIT_RESET state so the
+ * remaining IOs reach the RNC state handler and are completed with
+ * a status of "DEVICE_RESET_REQUIRED" instead of "INVALID STATE".
+ */
+ sci_change_state(sm, SCI_STP_DEV_AWAIT_RESET);
+ } else if (idev->started_request_count == 0)
+ sci_change_state(sm, SCI_STP_DEV_IDLE);
+ break;
+ case SCI_SMP_DEV_CMD:
+ status = common_complete_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+ sci_change_state(sm, SCI_SMP_DEV_IDLE);
+ break;
+ case SCI_DEV_STOPPING:
+ status = common_complete_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ if (idev->started_request_count == 0)
+ sci_remote_node_context_destruct(&idev->rnc,
+ rnc_destruct_done,
+ idev);
+ break;
+ }
+
+ if (status != SCI_SUCCESS)
+ dev_err(scirdev_to_dev(idev),
+ "%s: Port:0x%p Device:0x%p Request:0x%p Status:0x%x "
+ "could not complete\n", __func__, iport,
+ idev, ireq, status);
+ else
+ isci_put_device(idev);
+
+ return status;
+}
+
+static void sci_remote_device_continue_request(void *dev)
+{
+ struct isci_remote_device *idev = dev;
+
+ /* we need to check if this request is still valid to continue. */
+ if (idev->working_request)
+ sci_controller_continue_io(idev->working_request);
+}
+
+enum sci_status sci_remote_device_start_task(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ struct isci_port *iport = idev->owning_port;
+ enum sci_status status;
+
+ switch (state) {
+ case SCI_DEV_INITIAL:
+ case SCI_DEV_STOPPED:
+ case SCI_DEV_STARTING:
+ case SCI_SMP_DEV_IDLE:
+ case SCI_SMP_DEV_CMD:
+ case SCI_DEV_STOPPING:
+ case SCI_DEV_FAILED:
+ case SCI_DEV_RESETTING:
+ case SCI_DEV_FINAL:
+ default:
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ case SCI_STP_DEV_IDLE:
+ case SCI_STP_DEV_CMD:
+ case SCI_STP_DEV_NCQ:
+ case SCI_STP_DEV_NCQ_ERROR:
+ case SCI_STP_DEV_AWAIT_RESET:
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_task(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ goto out;
+
+ status = sci_request_start(ireq);
+ if (status != SCI_SUCCESS)
+ goto out;
+
+ /* Note: If the remote device state is not IDLE this will
+ * replace the request that probably resulted in the task
+ * management request.
+ */
+ idev->working_request = ireq;
+ sci_change_state(sm, SCI_STP_DEV_CMD);
+
+ /* The remote node context must cleanup the TCi to NCQ mapping
+ * table. The only way to do this correctly is to either write
+ * to the TLCR register or to invalidate and repost the RNC. In
+ * either case the remote node context state machine will take
+ * the correct action when the remote node context is suspended
+ * and later resumed.
+ */
+ sci_remote_node_context_suspend(&idev->rnc,
+ SCI_SOFTWARE_SUSPENSION, NULL, NULL);
+ sci_remote_node_context_resume(&idev->rnc,
+ sci_remote_device_continue_request,
+ idev);
+
+ out:
+ sci_remote_device_start_request(idev, ireq, status);
+ /* Let the controller's start request handler know that it
+ * can't post the TC yet. We will provide a callback function
+ * to post the TC when the RNC gets resumed.
+ */
+ return SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS;
+ case SCI_DEV_READY:
+ status = sci_port_start_io(iport, idev, ireq);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ status = sci_remote_node_context_start_task(&idev->rnc, ireq);
+ if (status != SCI_SUCCESS)
+ break;
+
+ status = sci_request_start(ireq);
+ break;
+ }
+ sci_remote_device_start_request(idev, ireq, status);
+
+ return status;
+}
+
+void sci_remote_device_post_request(struct isci_remote_device *idev, u32 request)
+{
+ struct isci_port *iport = idev->owning_port;
+ u32 context;
+
+ context = request |
+ (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (iport->physical_port_index << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ idev->rnc.remote_node_index;
+
+ sci_controller_post_request(iport->owning_controller, context);
+}
+
+/* called once the remote node context has transitioned to a
+ * ready state. This is the indication that the remote device object can also
+ * transition to ready.
+ */
+static void remote_device_resume_done(void *_dev)
+{
+ struct isci_remote_device *idev = _dev;
+
+ if (is_remote_device_ready(idev))
+ return;
+
+ /* go 'ready' if we are not already in a ready state */
+ sci_change_state(&idev->sm, SCI_DEV_READY);
+}
+
+static void sci_stp_remote_device_ready_idle_substate_resume_complete_handler(void *_dev)
+{
+ struct isci_remote_device *idev = _dev;
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ /* For NCQ operation we do not issue an isci_remote_device_not_ready();
+ * as a result, avoid sending the matching ready notification.
+ */
+ if (idev->sm.previous_state_id != SCI_STP_DEV_NCQ)
+ isci_remote_device_ready(ihost, idev);
+}
+
+static void sci_remote_device_initial_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+ /* Initial state is a transitional state to the stopped state */
+ sci_change_state(&idev->sm, SCI_DEV_STOPPED);
+}
+
+/**
+ * sci_remote_device_destruct() - free remote node context and destruct
+ * @remote_device: This parameter specifies the remote device to be destructed.
+ *
+ * Remote device objects are a limited resource. As such, they must be
+ * protected. Thus calls to construct and destruct are mutually exclusive and
+ * non-reentrant. The return value indicates whether the device was
+ * successfully destructed or some failure occurred:
+ * SCI_SUCCESS is returned if the device is successfully destructed.
+ * SCI_FAILURE_INVALID_REMOTE_DEVICE is returned if the supplied
+ * device isn't valid (e.g. it has already been destroyed, the handle
+ * isn't valid, etc.).
+ */
+static enum sci_status sci_remote_device_destruct(struct isci_remote_device *idev)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ struct isci_host *ihost;
+
+ if (state != SCI_DEV_STOPPED) {
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ ihost = idev->owning_port->owning_controller;
+ sci_controller_free_remote_node_context(ihost, idev,
+ idev->rnc.remote_node_index);
+ idev->rnc.remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+ sci_change_state(sm, SCI_DEV_FINAL);
+
+ return SCI_SUCCESS;
+}
+
+/**
+ * isci_remote_device_deconstruct() - This function frees an isci_remote_device.
+ * @ihost: This parameter specifies the isci host object.
+ * @idev: This parameter specifies the remote device to be freed.
+ *
+ */
+static void isci_remote_device_deconstruct(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p\n", __func__, idev);
+
+ /* There should not be any outstanding IOs. All paths to
+ * here should go through isci_remote_device_nuke_requests.
+ * If we hit this condition, we will need a way to complete
+ * in-process io requests. */
+ BUG_ON(!list_empty(&idev->reqs_in_process));
+
+ sci_remote_device_destruct(idev);
+ list_del_init(&idev->node);
+ isci_put_device(idev);
+}
+
+static void sci_remote_device_stopped_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+ u32 prev_state;
+
+ /* If we are entering from the stopping state let the SCI User know that
+ * the stop operation has completed.
+ */
+ prev_state = idev->sm.previous_state_id;
+ if (prev_state == SCI_DEV_STOPPING)
+ isci_remote_device_deconstruct(ihost, idev);
+
+ sci_controller_remote_device_stopped(ihost, idev);
+}
+
+static void sci_remote_device_starting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ isci_remote_device_not_ready(ihost, idev,
+ SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED);
+}
+
+static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+ struct domain_device *dev = idev->domain_dev;
+
+ if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) {
+ sci_change_state(&idev->sm, SCI_STP_DEV_IDLE);
+ } else if (dev_is_expander(dev)) {
+ sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE);
+ } else
+ isci_remote_device_ready(ihost, idev);
+}
+
+static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct domain_device *dev = idev->domain_dev;
+
+ if (dev->dev_type == SAS_END_DEV) {
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ isci_remote_device_not_ready(ihost, idev,
+ SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED);
+ }
+}
+
+static void sci_remote_device_resetting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+ sci_remote_node_context_suspend(
+ &idev->rnc, SCI_SOFTWARE_SUSPENSION, NULL, NULL);
+}
+
+static void sci_remote_device_resetting_state_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+ sci_remote_node_context_resume(&idev->rnc, NULL, NULL);
+}
+
+static void sci_stp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+ idev->working_request = NULL;
+ if (sci_remote_node_context_is_ready(&idev->rnc)) {
+ /*
+ * Since the RNC is ready, it's alright to finish completion
+ * processing (e.g. signal the remote device is ready). */
+ sci_stp_remote_device_ready_idle_substate_resume_complete_handler(idev);
+ } else {
+ sci_remote_node_context_resume(&idev->rnc,
+ sci_stp_remote_device_ready_idle_substate_resume_complete_handler,
+ idev);
+ }
+}
+
+static void sci_stp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ BUG_ON(idev->working_request == NULL);
+
+ isci_remote_device_not_ready(ihost, idev,
+ SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED);
+}
+
+static void sci_stp_remote_device_ready_ncq_error_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ if (idev->not_ready_reason == SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED)
+ isci_remote_device_not_ready(ihost, idev,
+ idev->not_ready_reason);
+}
+
+static void sci_smp_remote_device_ready_idle_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ isci_remote_device_ready(ihost, idev);
+}
+
+static void sci_smp_remote_device_ready_cmd_substate_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ BUG_ON(idev->working_request == NULL);
+
+ isci_remote_device_not_ready(ihost, idev,
+ SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED);
+}
+
+static void sci_smp_remote_device_ready_cmd_substate_exit(struct sci_base_state_machine *sm)
+{
+ struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm);
+
+ idev->working_request = NULL;
+}
+
+static const struct sci_base_state sci_remote_device_state_table[] = {
+ [SCI_DEV_INITIAL] = {
+ .enter_state = sci_remote_device_initial_state_enter,
+ },
+ [SCI_DEV_STOPPED] = {
+ .enter_state = sci_remote_device_stopped_state_enter,
+ },
+ [SCI_DEV_STARTING] = {
+ .enter_state = sci_remote_device_starting_state_enter,
+ },
+ [SCI_DEV_READY] = {
+ .enter_state = sci_remote_device_ready_state_enter,
+ .exit_state = sci_remote_device_ready_state_exit
+ },
+ [SCI_STP_DEV_IDLE] = {
+ .enter_state = sci_stp_remote_device_ready_idle_substate_enter,
+ },
+ [SCI_STP_DEV_CMD] = {
+ .enter_state = sci_stp_remote_device_ready_cmd_substate_enter,
+ },
+ [SCI_STP_DEV_NCQ] = { },
+ [SCI_STP_DEV_NCQ_ERROR] = {
+ .enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
+ },
+ [SCI_STP_DEV_AWAIT_RESET] = { },
+ [SCI_SMP_DEV_IDLE] = {
+ .enter_state = sci_smp_remote_device_ready_idle_substate_enter,
+ },
+ [SCI_SMP_DEV_CMD] = {
+ .enter_state = sci_smp_remote_device_ready_cmd_substate_enter,
+ .exit_state = sci_smp_remote_device_ready_cmd_substate_exit,
+ },
+ [SCI_DEV_STOPPING] = { },
+ [SCI_DEV_FAILED] = { },
+ [SCI_DEV_RESETTING] = {
+ .enter_state = sci_remote_device_resetting_state_enter,
+ .exit_state = sci_remote_device_resetting_state_exit
+ },
+ [SCI_DEV_FINAL] = { },
+};
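
The table above is the complete state machine definition: each state maps to optional enter/exit hooks, and transitions are requested with sci_change_state(). A generic, self-contained sketch of the table-driven pattern this relies on (not the actual sci_base_state_machine implementation):

/* Generic sketch of table-driven state transitions: run the old
 * state's exit hook, update the current state id, then run the new
 * state's enter hook.
 */
struct ex_state {
	void (*enter_state)(void *sm);
	void (*exit_state)(void *sm);
};

static void ex_change_state(const struct ex_state *table, u32 *cur,
			    u32 next, void *sm)
{
	if (table[*cur].exit_state)
		table[*cur].exit_state(sm);
	*cur = next;
	if (table[next].enter_state)
		table[next].enter_state(sm);
}
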
+
+/**
+ * sci_remote_device_construct() - common construction
+ * @sci_port: SAS/SATA port through which this device is accessed.
+ * @sci_dev: remote device to construct
+ *
+ * This routine just performs benign initialization and does not
+ * allocate the remote_node_context which is left to
+ * sci_remote_device_[de]a_construct(). sci_remote_device_destruct()
+ * frees the remote_node_context(s) for the device.
+ */
+static void sci_remote_device_construct(struct isci_port *iport,
+ struct isci_remote_device *idev)
+{
+ idev->owning_port = iport;
+ idev->started_request_count = 0;
+
+ sci_init_sm(&idev->sm, sci_remote_device_state_table, SCI_DEV_INITIAL);
+
+ sci_remote_node_context_construct(&idev->rnc,
+ SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX);
+}
+
+/**
+ * sci_remote_device_da_construct() - construct direct attached device.
+ *
+ * The information (e.g. IAF, Signature FIS, etc.) necessary to build
+ * the device is known to the SCI Core since it is contained in the
+ * sci_phy object. Remote node context(s) is/are a global resource
+ * allocated by this routine, freed by sci_remote_device_destruct().
+ *
+ * Returns:
+ * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
+ * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
+ * sata-only controller instance.
+ * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
+ */
+static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
+ struct isci_remote_device *idev)
+{
+ enum sci_status status;
+ struct domain_device *dev = idev->domain_dev;
+
+ sci_remote_device_construct(iport, idev);
+
+ /*
+ * This information is requested to determine how many remote node context
+ * entries will be needed to store the remote node.
+ */
+ idev->is_direct_attached = true;
+ status = sci_controller_allocate_remote_node_context(iport->owning_controller,
+ idev,
+ &idev->rnc.remote_node_index);
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
+ (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
+ /* pass */;
+ else
+ return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
+ idev->connection_rate = sci_port_get_max_allowed_speed(iport);
+
+ /* @todo Should I assign the port width by reading all of the phys on the port? */
+ idev->device_port_width = 1;
+
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_remote_device_ea_construct() - construct expander attached device
+ *
+ * Remote node context(s) is/are a global resource allocated by this
+ * routine, freed by sci_remote_device_destruct().
+ *
+ * Returns:
+ * SCI_FAILURE_DEVICE_EXISTS - device has already been constructed.
+ * SCI_FAILURE_UNSUPPORTED_PROTOCOL - e.g. sas device attached to
+ * sata-only controller instance.
+ * SCI_FAILURE_INSUFFICIENT_RESOURCES - remote node contexts exhausted.
+ */
+static enum sci_status sci_remote_device_ea_construct(struct isci_port *iport,
+ struct isci_remote_device *idev)
+{
+ struct domain_device *dev = idev->domain_dev;
+ enum sci_status status;
+
+ sci_remote_device_construct(iport, idev);
+
+ status = sci_controller_allocate_remote_node_context(iport->owning_controller,
+ idev,
+ &idev->rnc.remote_node_index);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ if (dev->dev_type == SAS_END_DEV || dev->dev_type == SATA_DEV ||
+ (dev->tproto & SAS_PROTOCOL_STP) || dev_is_expander(dev))
+ /* pass */;
+ else
+ return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
+ /*
+ * For SAS-2 the physical link rate is actually a logical link
+ * rate that incorporates multiplexing. The SCU doesn't
+ * incorporate multiplexing and for the purposes of the
+ * connection the logical link rate is the same as the
+ * physical. Furthermore, the SAS-2 and SAS-1.1 fields overlay
+ * one another, so this code works for both situations. */
+ idev->connection_rate = min_t(u16, sci_port_get_max_allowed_speed(iport),
+ dev->linkrate);
+
+ /* @todo Should I assign the port width by reading all of the phys on the port? */
+ idev->device_port_width = 1;
+
+ return SCI_SUCCESS;
+}
+
+/**
+ * sci_remote_device_start() - This method will start the supplied remote
+ * device. This method enables normal IO requests to flow through to the
+ * remote device.
+ * @remote_device: This parameter specifies the device to be started.
+ * @timeout: This parameter specifies the number of milliseconds in which the
+ * start operation should complete.
+ *
+ * An indication of whether the device was successfully started. SCI_SUCCESS
+ * This value is returned if the device was successfully started.
+ * SCI_FAILURE_INVALID_PHY This value is returned if the user attempts to start
+ * the device when there have been no phys added to it.
+ */
+static enum sci_status sci_remote_device_start(struct isci_remote_device *idev,
+ u32 timeout)
+{
+ struct sci_base_state_machine *sm = &idev->sm;
+ enum sci_remote_device_states state = sm->current_state_id;
+ enum sci_status status;
+
+ if (state != SCI_DEV_STOPPED) {
+ dev_warn(scirdev_to_dev(idev), "%s: in wrong state: %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ status = sci_remote_node_context_resume(&idev->rnc,
+ remote_device_resume_done,
+ idev);
+ if (status != SCI_SUCCESS)
+ return status;
+
+ sci_change_state(sm, SCI_DEV_STARTING);
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status isci_remote_device_construct(struct isci_port *iport,
+ struct isci_remote_device *idev)
+{
+ struct isci_host *ihost = iport->isci_host;
+ struct domain_device *dev = idev->domain_dev;
+ enum sci_status status;
+
+ if (dev->parent && dev_is_expander(dev->parent))
+ status = sci_remote_device_ea_construct(iport, idev);
+ else
+ status = sci_remote_device_da_construct(iport, idev);
+
+ if (status != SCI_SUCCESS) {
+ dev_dbg(&ihost->pdev->dev, "%s: construct failed: %d\n",
+ __func__, status);
+
+ return status;
+ }
+
+ /* start the device. */
+ status = sci_remote_device_start(idev, ISCI_REMOTE_DEVICE_START_TIMEOUT);
+
+ if (status != SCI_SUCCESS)
+ dev_warn(&ihost->pdev->dev, "remote device start failed: %d\n",
+ status);
+
+ return status;
+}
+
+void isci_remote_device_nuke_requests(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ DECLARE_COMPLETION_ONSTACK(aborted_task_completion);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev = %p\n", __func__, idev);
+
+ /* Cleanup all requests pending for this device. */
+ isci_terminate_pending_requests(ihost, idev);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev = %p, done\n", __func__, idev);
+}
+
+/**
+ * This function builds the isci_remote_device when a libsas dev_found message
+ * is received.
+ * @isci_host: This parameter specifies the isci host object.
+ * @port: This parameter specifies the isci_port connected to this device.
+ *
+ * Returns a pointer to the new isci_remote_device, or NULL on failure.
+ */
+static struct isci_remote_device *
+isci_remote_device_alloc(struct isci_host *ihost, struct isci_port *iport)
+{
+ struct isci_remote_device *idev;
+ int i;
+
+ for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) {
+ idev = &ihost->devices[i];
+ if (!test_and_set_bit(IDEV_ALLOCATED, &idev->flags))
+ break;
+ }
+
+ if (i >= SCI_MAX_REMOTE_DEVICES) {
+ dev_warn(&ihost->pdev->dev, "%s: failed\n", __func__);
+ return NULL;
+ }
+
+ if (WARN_ONCE(!list_empty(&idev->reqs_in_process), "found requests in process\n"))
+ return NULL;
+
+ if (WARN_ONCE(!list_empty(&idev->node), "found non-idle remote device\n"))
+ return NULL;
+
+ return idev;
+}
+
+void isci_remote_device_release(struct kref *kref)
+{
+ struct isci_remote_device *idev = container_of(kref, typeof(*idev), kref);
+ struct isci_host *ihost = idev->isci_port->isci_host;
+
+ idev->domain_dev = NULL;
+ idev->isci_port = NULL;
+ clear_bit(IDEV_START_PENDING, &idev->flags);
+ clear_bit(IDEV_STOP_PENDING, &idev->flags);
+ clear_bit(IDEV_IO_READY, &idev->flags);
+ clear_bit(IDEV_GONE, &idev->flags);
+ clear_bit(IDEV_EH, &idev->flags);
+ smp_mb__before_clear_bit();
+ clear_bit(IDEV_ALLOCATED, &idev->flags);
+ wake_up(&ihost->eventq);
+}
+
+/**
+ * isci_remote_device_stop() - This function is called internally to stop the
+ * remote device.
+ * @isci_host: This parameter specifies the isci host object.
+ * @isci_device: This parameter specifies the remote device.
+ *
+ * The status of the ihost request to stop.
+ */
+enum sci_status isci_remote_device_stop(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ enum sci_status status;
+ unsigned long flags;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_device = %p\n", __func__, idev);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev->domain_dev->lldd_dev = NULL; /* disable new lookups */
+ set_bit(IDEV_GONE, &idev->flags);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /* Kill all outstanding requests. */
+ isci_remote_device_nuke_requests(ihost, idev);
+
+ set_bit(IDEV_STOP_PENDING, &idev->flags);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ status = sci_remote_device_stop(idev, 50);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /* Wait for the stop complete callback. */
+ if (WARN_ONCE(status != SCI_SUCCESS, "failed to stop device\n"))
+ /* nothing to wait for */;
+ else
+ wait_for_device_stop(ihost, idev);
+
+ return status;
+}
+
+/**
+ * isci_remote_device_gone() - This function is called by libsas when a domain
+ * device is removed.
+ * @domain_device: This parameter specifies the libsas domain device.
+ *
+ */
+void isci_remote_device_gone(struct domain_device *dev)
+{
+ struct isci_host *ihost = dev_to_ihost(dev);
+ struct isci_remote_device *idev = dev->lldd_dev;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: domain_device = %p, isci_device = %p, isci_port = %p\n",
+ __func__, dev, idev, idev->isci_port);
+
+ isci_remote_device_stop(ihost, idev);
+}
+
+
+/**
+ * isci_remote_device_found() - This function is called by libsas when a remote
+ * device is discovered. A remote device object is created and started. The
+ * function then sleeps until the sci core device started message is
+ * received.
+ * @domain_device: This parameter specifies the libsas domain device.
+ *
+ * status, zero indicates success.
+ */
+int isci_remote_device_found(struct domain_device *domain_dev)
+{
+ struct isci_host *isci_host = dev_to_ihost(domain_dev);
+ struct isci_port *isci_port;
+ struct isci_phy *isci_phy;
+ struct asd_sas_port *sas_port;
+ struct asd_sas_phy *sas_phy;
+ struct isci_remote_device *isci_device;
+ enum sci_status status;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: domain_device = %p\n", __func__, domain_dev);
+
+ wait_for_start(isci_host);
+
+ sas_port = domain_dev->port;
+ sas_phy = list_first_entry(&sas_port->phy_list, struct asd_sas_phy,
+ port_phy_el);
+ isci_phy = to_iphy(sas_phy);
+ isci_port = isci_phy->isci_port;
+
+ /* we are being called for a device on this port,
+ * so it has to come up eventually
+ */
+ wait_for_completion(&isci_port->start_complete);
+
+ if ((isci_stopping == isci_port_get_state(isci_port)) ||
+ (isci_stopped == isci_port_get_state(isci_port)))
+ return -ENODEV;
+
+ isci_device = isci_remote_device_alloc(isci_host, isci_port);
+ if (!isci_device)
+ return -ENODEV;
+
+ kref_init(&isci_device->kref);
+ INIT_LIST_HEAD(&isci_device->node);
+
+ spin_lock_irq(&isci_host->scic_lock);
+ isci_device->domain_dev = domain_dev;
+ isci_device->isci_port = isci_port;
+ list_add_tail(&isci_device->node, &isci_port->remote_dev_list);
+
+ set_bit(IDEV_START_PENDING, &isci_device->flags);
+ status = isci_remote_device_construct(isci_port, isci_device);
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_device = %p\n",
+ __func__, isci_device);
+
+ if (status == SCI_SUCCESS) {
+ /* device came up, advertise it to the world */
+ domain_dev->lldd_dev = isci_device;
+ } else
+ isci_put_device(isci_device);
+ spin_unlock_irq(&isci_host->scic_lock);
+
+ /* wait for the device ready callback. */
+ wait_for_device_start(isci_host, isci_device);
+
+ return status == SCI_SUCCESS ? 0 : -ENODEV;
+}
+/**
+ * isci_device_is_reset_pending() - This function will check if there is any
+ * pending reset condition on the device.
+ * @isci_host: This parameter is the isci_host object.
+ * @isci_device: This parameter is the isci_device object.
+ *
+ * Returns true if there is a reset pending for the device.
+ */
+bool isci_device_is_reset_pending(
+ struct isci_host *isci_host,
+ struct isci_remote_device *isci_device)
+{
+ struct isci_request *isci_request;
+ struct isci_request *tmp_req;
+ bool reset_is_pending = false;
+ unsigned long flags;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_device = %p\n", __func__, isci_device);
+
+ spin_lock_irqsave(&isci_host->scic_lock, flags);
+
+ /* Check for reset on all pending requests. */
+ list_for_each_entry_safe(isci_request, tmp_req,
+ &isci_device->reqs_in_process, dev_node) {
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_device = %p request = %p\n",
+ __func__, isci_device, isci_request);
+
+ if (isci_request->ttype == io_task) {
+ struct sas_task *task = isci_request_access_task(
+ isci_request);
+
+ spin_lock(&task->task_state_lock);
+ if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
+ reset_is_pending = true;
+ spin_unlock(&task->task_state_lock);
+ }
+ }
+
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_device = %p reset_is_pending = %d\n",
+ __func__, isci_device, reset_is_pending);
+
+ return reset_is_pending;
+}
+
+/**
+ * isci_device_clear_reset_pending() - This function clears any pending reset
+ * condition flags on the device.
+ * @ihost: This parameter is the isci_host object.
+ * @idev: This parameter is the isci_device object.
+ */
+void isci_device_clear_reset_pending(struct isci_host *ihost, struct isci_remote_device *idev)
+{
+ struct isci_request *isci_request;
+ struct isci_request *tmp_req;
+ unsigned long flags = 0;
+
+ dev_dbg(&ihost->pdev->dev, "%s: idev=%p, ihost=%p\n",
+ __func__, idev, ihost);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ /* Clear reset pending on all pending requests. */
+ list_for_each_entry_safe(isci_request, tmp_req,
+ &idev->reqs_in_process, dev_node) {
+ dev_dbg(&ihost->pdev->dev, "%s: idev = %p request = %p\n",
+ __func__, idev, isci_request);
+
+ if (isci_request->ttype == io_task) {
+
+ unsigned long flags2;
+ struct sas_task *task = isci_request_access_task(
+ isci_request);
+
+ spin_lock_irqsave(&task->task_state_lock, flags2);
+ task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
+ spin_unlock_irqrestore(&task->task_state_lock, flags2);
+ }
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
new file mode 100644
index 00000000000..57ccfc3d6ad
--- /dev/null
+++ b/drivers/scsi/isci/remote_device.h
@@ -0,0 +1,352 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ISCI_REMOTE_DEVICE_H_
+#define _ISCI_REMOTE_DEVICE_H_
+#include <scsi/libsas.h>
+#include <linux/kref.h>
+#include "scu_remote_node_context.h"
+#include "remote_node_context.h"
+#include "port.h"
+
+enum sci_remote_device_not_ready_reason_code {
+ SCIC_REMOTE_DEVICE_NOT_READY_START_REQUESTED,
+ SCIC_REMOTE_DEVICE_NOT_READY_STOP_REQUESTED,
+ SCIC_REMOTE_DEVICE_NOT_READY_SATA_REQUEST_STARTED,
+ SCIC_REMOTE_DEVICE_NOT_READY_SATA_SDB_ERROR_FIS_RECEIVED,
+ SCIC_REMOTE_DEVICE_NOT_READY_SMP_REQUEST_STARTED,
+ SCIC_REMOTE_DEVICE_NOT_READY_REASON_CODE_MAX
+};
+
+/**
+ * isci_remote_device - isci representation of a sas expander / end point
+ * @device_port_width: hw setting for number of simultaneous connections
+ * @connection_rate: per-taskcontext connection rate for this device
+ * @working_request: SATA requests have no tag; for unaccelerated
+ * protocols we need a method to associate unsolicited
+ * frames with a pending request
+ */
+struct isci_remote_device {
+ #define IDEV_START_PENDING 0
+ #define IDEV_STOP_PENDING 1
+ #define IDEV_ALLOCATED 2
+ #define IDEV_EH 3
+ #define IDEV_GONE 4
+ #define IDEV_IO_READY 5
+ #define IDEV_IO_NCQERROR 6
+ unsigned long flags;
+ struct kref kref;
+ struct isci_port *isci_port;
+ struct domain_device *domain_dev;
+ struct list_head node;
+ struct list_head reqs_in_process;
+ struct sci_base_state_machine sm;
+ u32 device_port_width;
+ enum sas_linkrate connection_rate;
+ bool is_direct_attached;
+ struct isci_port *owning_port;
+ struct sci_remote_node_context rnc;
+ /* XXX unify with device reference counting and delete */
+ u32 started_request_count;
+ struct isci_request *working_request;
+ u32 not_ready_reason;
+};
+
+#define ISCI_REMOTE_DEVICE_START_TIMEOUT 5000
+
+/* device reference routines must be called under sci_lock */
+static inline struct isci_remote_device *isci_lookup_device(struct domain_device *dev)
+{
+ struct isci_remote_device *idev = dev->lldd_dev;
+
+ if (idev && !test_bit(IDEV_GONE, &idev->flags)) {
+ kref_get(&idev->kref);
+ return idev;
+ }
+
+ return NULL;
+}
+
+void isci_remote_device_release(struct kref *kref);
+static inline void isci_put_device(struct isci_remote_device *idev)
+{
+ if (idev)
+ kref_put(&idev->kref, isci_remote_device_release);
+}
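
Per the comment above these helpers, the lookup must happen under sci_lock so that the IDEV_GONE test and the kref_get() are atomic with respect to device teardown. A minimal usage sketch, assuming a caller that already holds pointers to the isci_host and the domain_device:

/* Sketch: take a reference under the lock, use the device outside it,
 * then drop the reference; isci_put_device() tolerates NULL.
 */
static void ex_use_device(struct isci_host *ihost, struct domain_device *dev)
{
	struct isci_remote_device *idev;
	unsigned long flags;

	spin_lock_irqsave(&ihost->scic_lock, flags);
	idev = isci_lookup_device(dev);	/* NULL if gone or unattached */
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (!idev)
		return;

	/* ... issue IO against idev here ... */

	isci_put_device(idev);	/* may end up in isci_remote_device_release() */
}
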
+
+enum sci_status isci_remote_device_stop(struct isci_host *ihost,
+ struct isci_remote_device *idev);
+void isci_remote_device_nuke_requests(struct isci_host *ihost,
+ struct isci_remote_device *idev);
+void isci_remote_device_gone(struct domain_device *domain_dev);
+int isci_remote_device_found(struct domain_device *domain_dev);
+bool isci_device_is_reset_pending(struct isci_host *ihost,
+ struct isci_remote_device *idev);
+void isci_device_clear_reset_pending(struct isci_host *ihost,
+ struct isci_remote_device *idev);
+/**
+ * sci_remote_device_stop() - This method will stop both transmission and
+ * reception of link activity for the supplied remote device. This method
+ * disables normal IO requests from flowing through to the remote device.
+ * @remote_device: This parameter specifies the device to be stopped.
+ * @timeout: This parameter specifies the number of milliseconds in which the
+ * stop operation should complete.
+ *
+ * An indication of whether the device was successfully stopped. SCI_SUCCESS
+ * This value is returned if the transmission and reception for the device was
+ * successfully stopped.
+ */
+enum sci_status sci_remote_device_stop(
+ struct isci_remote_device *idev,
+ u32 timeout);
+
+/**
+ * sci_remote_device_reset() - This method will reset the device making it
+ * ready for operation. This method must be called anytime the device is
+ * reset either through a SMP phy control or a port hard reset request.
+ * @remote_device: This parameter specifies the device to be reset.
+ *
+ * This method does not actually cause the device hardware to be reset. This
+ * method resets the software object so that it will be operational after a
+ * device hardware reset completes. An indication of whether the device reset
+ * was accepted. SCI_SUCCESS This value is returned if the device reset is
+ * started.
+ */
+enum sci_status sci_remote_device_reset(
+ struct isci_remote_device *idev);
+
+/**
+ * sci_remote_device_reset_complete() - This method informs the device object
+ * that the reset operation is complete and the device can resume operation
+ * again.
+ * @remote_device: This parameter specifies the device which is to be informed
+ * of the reset complete operation.
+ *
+ * An indication that the device is resuming operation. SCI_SUCCESS the device
+ * is resuming operation.
+ */
+enum sci_status sci_remote_device_reset_complete(
+ struct isci_remote_device *idev);
+
+/**
+ * enum sci_remote_device_states - This enumeration depicts all the states
+ * for the common remote device state machine.
+ *
+ *
+ */
+enum sci_remote_device_states {
+ /**
+ * Simply the initial state for the base remote device state machine.
+ */
+ SCI_DEV_INITIAL,
+
+ /**
+ * This state indicates that the remote device has successfully been
+ * stopped. In this state no new IO operations are permitted.
+ * This state is entered from the INITIAL state.
+ * This state is entered from the STOPPING state.
+ */
+ SCI_DEV_STOPPED,
+
+ /**
+ * This state indicates that the remote device is in the process of
+ * becoming ready (i.e. starting). In this state no new IO operations
+ * are permitted.
+ * This state is entered from the STOPPED state.
+ */
+ SCI_DEV_STARTING,
+
+ /**
+ * This state indicates the remote device is now ready. Thus, the user
+ * is able to perform IO operations on the remote device.
+ * This state is entered from the STARTING state.
+ */
+ SCI_DEV_READY,
+
+ /**
+ * This is the idle substate for the stp remote device. When there are no
+ * active IO for the device it is in this state.
+ */
+ SCI_STP_DEV_IDLE,
+
+ /**
+ * This is the command state for the STP remote device. This state is
+ * entered when the device is processing a non-NCQ command. The device object
+ * will fail any new start IO requests until this command is complete.
+ */
+ SCI_STP_DEV_CMD,
+
+ /**
+ * This is the NCQ state for the STP remote device. This state is entered
+ * when the device is processing an NCQ request. It will remain in this state
+ * so long as there is one or more NCQ requests being processed.
+ */
+ SCI_STP_DEV_NCQ,
+
+ /**
+ * This is the NCQ error state for the STP remote device. This state is
+ * entered when an SDB error FIS is received by the device object while in the
+ * NCQ state. The device object will only accept a READ LOG command while in
+ * this state.
+ */
+ SCI_STP_DEV_NCQ_ERROR,
+
+ /**
+ * This READY substate indicates the device is waiting for a RESET task
+ * in order to recover from a certain hardware specific error.
+ */
+ SCI_STP_DEV_AWAIT_RESET,
+
+ /**
+ * This is the idle substate for the SMP remote device. When there is
+ * no active IO for the device it is in this state.
+ */
+ SCI_SMP_DEV_IDLE,
+
+ /**
+ * This is the command substate for the SMP remote device. The device
+ * object will fail any new start IO requests until the current request
+ * is complete.
+ */
+ SCI_SMP_DEV_CMD,
+
+ /**
+ * This state indicates that the remote device is in the process of
+ * stopping. In this state no new IO operations are permitted, but
+ * existing IO operations are allowed to complete.
+ * This state is entered from the READY state.
+ * This state is entered from the FAILED state.
+ */
+ SCI_DEV_STOPPING,
+
+ /**
+ * This state indicates that the remote device has failed.
+ * In this state no new IO operations are permitted.
+ * This state is entered from the INITIALIZING state.
+ * This state is entered from the READY state.
+ */
+ SCI_DEV_FAILED,
+
+ /**
+ * This state indicates the device is being reset.
+ * In this state no new IO operations are permitted.
+ * This state is entered from the READY state.
+ */
+ SCI_DEV_RESETTING,
+
+ /**
+ * Simply the final state for the base remote device state machine.
+ */
+ SCI_DEV_FINAL,
+};
+
+static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_context *rnc)
+{
+ struct isci_remote_device *idev;
+
+ idev = container_of(rnc, typeof(*idev), rnc);
+
+ return idev;
+}
+
+static inline bool dev_is_expander(struct domain_device *dev)
+{
+ return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV;
+}
+
+static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev)
+{
+ /* XXX delete this voodoo when converting to the top-level device
+ * reference count
+ */
+ if (WARN_ONCE(idev->started_request_count == 0,
+ "%s: tried to decrement started_request_count past 0!?",
+ __func__))
+ /* pass */;
+ else
+ idev->started_request_count--;
+}
+
+enum sci_status sci_remote_device_frame_handler(
+ struct isci_remote_device *idev,
+ u32 frame_index);
+
+enum sci_status sci_remote_device_event_handler(
+ struct isci_remote_device *idev,
+ u32 event_code);
+
+enum sci_status sci_remote_device_start_io(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_remote_device_start_task(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_remote_device_complete_io(
+ struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq);
+
+enum sci_status sci_remote_device_suspend(
+ struct isci_remote_device *idev,
+ u32 suspend_type);
+
+void sci_remote_device_post_request(
+ struct isci_remote_device *idev,
+ u32 request);
+
+#endif /* !defined(_ISCI_REMOTE_DEVICE_H_) */
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
new file mode 100644
index 00000000000..748e8339d1e
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -0,0 +1,627 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "host.h"
+#include "isci.h"
+#include "remote_device.h"
+#include "remote_node_context.h"
+#include "scu_event_codes.h"
+#include "scu_task_context.h"
+
+
+/**
+ *
+ * @sci_rnc: The RNC for which the posted request is being made.
+ *
+ * This method will return true if the RNC is not in the initial state; in
+ * all other states the RNC is considered active. The destroy request of
+ * the state machine drives the RNC back to the initial state. If the state
+ * machine changes then this routine will also have to be changed.
+ * Returns: true if the state machine is not in the initial state, false
+ * if it is in the initial state.
+ */
+
+/**
+ *
+ * @sci_rnc: The state of the remote node context object to check.
+ *
+ * This method will return true if the remote node context is in a READY
+ * state; otherwise it will return false.
+ */
+bool sci_remote_node_context_is_ready(
+ struct sci_remote_node_context *sci_rnc)
+{
+ u32 current_state = sci_rnc->sm.current_state_id;
+
+ return current_state == SCI_RNC_READY;
+}
+
+static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id)
+{
+ if (id < ihost->remote_node_entries &&
+ ihost->device_table[id])
+ return &ihost->remote_node_context_table[id];
+
+ return NULL;
+}
+
+static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc)
+{
+ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+ struct domain_device *dev = idev->domain_dev;
+ int rni = sci_rnc->remote_node_index;
+ union scu_remote_node_context *rnc;
+ struct isci_host *ihost;
+ __le64 sas_addr;
+
+ ihost = idev->owning_port->owning_controller;
+ rnc = sci_rnc_by_id(ihost, rni);
+
+ memset(rnc, 0, sizeof(union scu_remote_node_context)
+ * sci_remote_device_node_count(idev));
+
+ rnc->ssp.remote_node_index = rni;
+ rnc->ssp.remote_node_port_width = idev->device_port_width;
+ rnc->ssp.logical_port_index = idev->owning_port->physical_port_index;
+
+ /* sas address is __be64, context ram format is __le64 */
+ sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr));
+ rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr);
+ rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr);
+
+ rnc->ssp.nexus_loss_timer_enable = true;
+ rnc->ssp.check_bit = false;
+ rnc->ssp.is_valid = false;
+ rnc->ssp.is_remote_node_context = true;
+ rnc->ssp.function_number = 0;
+
+ rnc->ssp.arbitration_wait_time = 0;
+
+ if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+ rnc->ssp.connection_occupancy_timeout =
+ ihost->user_parameters.stp_max_occupancy_timeout;
+ rnc->ssp.connection_inactivity_timeout =
+ ihost->user_parameters.stp_inactivity_timeout;
+ } else {
+ rnc->ssp.connection_occupancy_timeout =
+ ihost->user_parameters.ssp_max_occupancy_timeout;
+ rnc->ssp.connection_inactivity_timeout =
+ ihost->user_parameters.ssp_inactivity_timeout;
+ }
+
+ rnc->ssp.initial_arbitration_wait_time = 0;
+
+ /* Open Address Frame Parameters */
+ rnc->ssp.oaf_connection_rate = idev->connection_rate;
+ rnc->ssp.oaf_features = 0;
+ rnc->ssp.oaf_source_zone_group = 0;
+ rnc->ssp.oaf_more_compatibility_features = 0;
+}
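
The address handling in the function above is the subtle part: dev->sas_addr is stored big-endian (__be64) while the context RAM wants a little-endian quadword split into two 32-bit words. A sketch isolating just that conversion, using the same kernel helpers as the code above:

/* Sketch of the endianness conversion in isolation: SAS_ADDR() yields
 * the address as a cpu-order value, cpu_to_le64() produces the context
 * ram format, and upper/lower_32_bits() split it into the two words.
 */
static void ex_pack_sas_addr(const u8 *be_addr, u32 *hi, u32 *lo)
{
	__le64 sas_addr = cpu_to_le64(SAS_ADDR(be_addr));

	*hi = upper_32_bits(sas_addr);
	*lo = lower_32_bits(sas_addr);
}
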
+
+/**
+ *
+ * @sci_rnc:
+ * @callback:
+ * @callback_parameter:
+ *
+ * This method will setup the remote node context object so it will transition
+ * to its ready state. If the remote node context is already setup to
+ * transition to its final state then this function does nothing.
+ */
+static void sci_remote_node_context_setup_to_resume(
+ struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback callback,
+ void *callback_parameter)
+{
+ if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) {
+ sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY;
+ sci_rnc->user_callback = callback;
+ sci_rnc->user_cookie = callback_parameter;
+ }
+}
+
+static void sci_remote_node_context_setup_to_destroy(
+ struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback callback,
+ void *callback_parameter)
+{
+ sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL;
+ sci_rnc->user_callback = callback;
+ sci_rnc->user_cookie = callback_parameter;
+}
+
+/**
+ *
+ *
+ * This method just calls the user callback function and then resets the
+ * callback.
+ */
+static void sci_remote_node_context_notify_user(
+ struct sci_remote_node_context *rnc)
+{
+ if (rnc->user_callback != NULL) {
+ (*rnc->user_callback)(rnc->user_cookie);
+
+ rnc->user_callback = NULL;
+ rnc->user_cookie = NULL;
+ }
+}
+
+static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc)
+{
+ if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
+ sci_remote_node_context_resume(rnc, rnc->user_callback,
+ rnc->user_cookie);
+}
+
+static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc)
+{
+ union scu_remote_node_context *rnc_buffer;
+ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+ struct domain_device *dev = idev->domain_dev;
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
+
+ rnc_buffer->ssp.is_valid = true;
+
+ if (!idev->is_direct_attached &&
+ (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) {
+ sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96);
+ } else {
+ sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32);
+
+ if (idev->is_direct_attached)
+ sci_port_setup_transports(idev->owning_port,
+ sci_rnc->remote_node_index);
+ }
+}
+
+static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc)
+{
+ union scu_remote_node_context *rnc_buffer;
+ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+ struct isci_host *ihost = idev->owning_port->owning_controller;
+
+ rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index);
+
+ rnc_buffer->ssp.is_valid = false;
+
+ sci_remote_device_post_request(rnc_to_dev(sci_rnc),
+ SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE);
+}
+
+static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+ /* Check to see if we have gotten back to the initial state because
+ * someone requested to destroy the remote node context object.
+ */
+ if (sm->previous_state_id == SCI_RNC_INVALIDATING) {
+ rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+ sci_remote_node_context_notify_user(rnc);
+ }
+}
+
+static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm);
+
+ sci_remote_node_context_validate_context_buffer(sci_rnc);
+}
+
+static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+ sci_remote_node_context_invalidate_context_buffer(rnc);
+}
+
+static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+ struct isci_remote_device *idev;
+ struct domain_device *dev;
+
+ idev = rnc_to_dev(rnc);
+ dev = idev->domain_dev;
+
+ /*
+ * For direct attached SATA devices we need to clear the TLCR
+ * NCQ to TCi tag mapping on the phy and in cases where we
+ * resume because of a target reset we also need to update
+ * the STPTLDARNI register with the RNi of the device
+ */
+ if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) &&
+ idev->is_direct_attached)
+ sci_port_setup_transports(idev->owning_port,
+ rnc->remote_node_index);
+
+ sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME);
+}
+
+static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+ rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+
+ if (rnc->user_callback)
+ sci_remote_node_context_notify_user(rnc);
+}
+
+static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+ sci_remote_node_context_continue_state_transitions(rnc);
+}
+
+static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm)
+{
+ struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm);
+
+ sci_remote_node_context_continue_state_transitions(rnc);
+}
+
+static const struct sci_base_state sci_remote_node_context_state_table[] = {
+ [SCI_RNC_INITIAL] = {
+ .enter_state = sci_remote_node_context_initial_state_enter,
+ },
+ [SCI_RNC_POSTING] = {
+ .enter_state = sci_remote_node_context_posting_state_enter,
+ },
+ [SCI_RNC_INVALIDATING] = {
+ .enter_state = sci_remote_node_context_invalidating_state_enter,
+ },
+ [SCI_RNC_RESUMING] = {
+ .enter_state = sci_remote_node_context_resuming_state_enter,
+ },
+ [SCI_RNC_READY] = {
+ .enter_state = sci_remote_node_context_ready_state_enter,
+ },
+ [SCI_RNC_TX_SUSPENDED] = {
+ .enter_state = sci_remote_node_context_tx_suspended_state_enter,
+ },
+ [SCI_RNC_TX_RX_SUSPENDED] = {
+ .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter,
+ },
+ [SCI_RNC_AWAIT_SUSPENSION] = { },
+};
+
+void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
+ u16 remote_node_index)
+{
+ memset(rnc, 0, sizeof(struct sci_remote_node_context));
+
+ rnc->remote_node_index = remote_node_index;
+ rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED;
+
+ sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL);
+}
+
+enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
+ u32 event_code)
+{
+ enum scis_sds_remote_node_context_states state;
+
+ state = sci_rnc->sm.current_state_id;
+ switch (state) {
+ case SCI_RNC_POSTING:
+ switch (scu_get_event_code(event_code)) {
+ case SCU_EVENT_POST_RNC_COMPLETE:
+ sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
+ break;
+ default:
+ goto out;
+ }
+ break;
+ case SCI_RNC_INVALIDATING:
+ if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) {
+ if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL)
+ state = SCI_RNC_INITIAL;
+ else
+ state = SCI_RNC_POSTING;
+ sci_change_state(&sci_rnc->sm, state);
+ } else {
+ switch (scu_get_event_type(event_code)) {
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+ /* We really don't care if the hardware is going to suspend
+ * the device since it's being invalidated anyway */
+ dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: SCIC Remote Node Context 0x%p was "
+ "suspeneded by hardware while being "
+ "invalidated.\n", __func__, sci_rnc);
+ break;
+ default:
+ goto out;
+ }
+ }
+ break;
+ case SCI_RNC_RESUMING:
+ if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) {
+ sci_change_state(&sci_rnc->sm, SCI_RNC_READY);
+ } else {
+ switch (scu_get_event_type(event_code)) {
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
+ case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
+ /* We really don't care if the hardware is going to suspend
+ * the device since it's being resumed anyway */
+ dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: SCIC Remote Node Context 0x%p was "
+ "suspeneded by hardware while being resumed.\n",
+ __func__, sci_rnc);
+ break;
+ default:
+ goto out;
+ }
+ }
+ break;
+ case SCI_RNC_READY:
+ switch (scu_get_event_type(event_code)) {
+ case SCU_EVENT_TL_RNC_SUSPEND_TX:
+ sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
+ sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+ break;
+ case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
+ sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
+ sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+ break;
+ default:
+ goto out;
+ }
+ break;
+ case SCI_RNC_AWAIT_SUSPENSION:
+ switch (scu_get_event_type(event_code)) {
+ case SCU_EVENT_TL_RNC_SUSPEND_TX:
+ sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED);
+ sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+ break;
+ case SCU_EVENT_TL_RNC_SUSPEND_TX_RX:
+ sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED);
+ sci_rnc->suspension_code = scu_get_event_specifier(event_code);
+ break;
+ default:
+ goto out;
+ }
+ break;
+ default:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+ return SCI_SUCCESS;
+
+ out:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: code: %#x state: %d\n", __func__, event_code, state);
+ return SCI_FAILURE;
+
+}
+
+enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p)
+{
+ enum scis_sds_remote_node_context_states state;
+
+ state = sci_rnc->sm.current_state_id;
+ switch (state) {
+ case SCI_RNC_INVALIDATING:
+ sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
+ return SCI_SUCCESS;
+ case SCI_RNC_POSTING:
+ case SCI_RNC_RESUMING:
+ case SCI_RNC_READY:
+ case SCI_RNC_TX_SUSPENDED:
+ case SCI_RNC_TX_RX_SUSPENDED:
+ case SCI_RNC_AWAIT_SUSPENSION:
+ sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
+ return SCI_SUCCESS;
+ case SCI_RNC_INITIAL:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %d\n", __func__, state);
+ /* We have decided that the destruct request on the remote node context
+ * can not fail since it is either in the initial/destroyed state or
+ * can be destroyed.
+ */
+ return SCI_SUCCESS;
+ default:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
+ u32 suspend_type,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p)
+{
+ enum scis_sds_remote_node_context_states state;
+
+ state = sci_rnc->sm.current_state_id;
+ if (state != SCI_RNC_READY) {
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ sci_rnc->user_callback = cb_fn;
+ sci_rnc->user_cookie = cb_p;
+ sci_rnc->suspension_code = suspend_type;
+
+ if (suspend_type == SCI_SOFTWARE_SUSPENSION) {
+ sci_remote_device_post_request(rnc_to_dev(sci_rnc),
+ SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX);
+ }
+
+ sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p)
+{
+ enum scis_sds_remote_node_context_states state;
+
+ state = sci_rnc->sm.current_state_id;
+ switch (state) {
+ case SCI_RNC_INITIAL:
+ if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
+ return SCI_FAILURE_INVALID_STATE;
+
+ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+ sci_remote_node_context_construct_buffer(sci_rnc);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING);
+ return SCI_SUCCESS;
+ case SCI_RNC_POSTING:
+ case SCI_RNC_INVALIDATING:
+ case SCI_RNC_RESUMING:
+ if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY)
+ return SCI_FAILURE_INVALID_STATE;
+
+ sci_rnc->user_callback = cb_fn;
+ sci_rnc->user_cookie = cb_p;
+ return SCI_SUCCESS;
+ case SCI_RNC_TX_SUSPENDED: {
+ struct isci_remote_device *idev = rnc_to_dev(sci_rnc);
+ struct domain_device *dev = idev->domain_dev;
+
+ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+
+ /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */
+ if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev))
+ sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
+ else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+ if (idev->is_direct_attached) {
+ /* @todo Fix this since I am being silly in writing to the STPTLDARNI register. */
+ sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
+ } else {
+ sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING);
+ }
+ } else
+ return SCI_FAILURE;
+ return SCI_SUCCESS;
+ }
+ case SCI_RNC_TX_RX_SUSPENDED:
+ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+ sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING);
+ return SCI_SUCCESS;
+ case SCI_RNC_AWAIT_SUSPENSION:
+ sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
+ struct isci_request *ireq)
+{
+ enum scis_sds_remote_node_context_states state;
+
+ state = sci_rnc->sm.current_state_id;
+
+ switch (state) {
+ case SCI_RNC_READY:
+ return SCI_SUCCESS;
+ case SCI_RNC_TX_SUSPENDED:
+ case SCI_RNC_TX_RX_SUSPENDED:
+ case SCI_RNC_AWAIT_SUSPENSION:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %d\n", __func__, state);
+ return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+ default:
+ break;
+ }
+ dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: requested to start IO while still resuming, %d\n",
+ __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+}
+
+enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
+ struct isci_request *ireq)
+{
+ enum scis_sds_remote_node_context_states state;
+
+ state = sci_rnc->sm.current_state_id;
+ switch (state) {
+ case SCI_RNC_RESUMING:
+ case SCI_RNC_READY:
+ case SCI_RNC_AWAIT_SUSPENSION:
+ return SCI_SUCCESS;
+ case SCI_RNC_TX_SUSPENDED:
+ case SCI_RNC_TX_RX_SUSPENDED:
+ sci_remote_node_context_resume(sci_rnc, NULL, NULL);
+ return SCI_SUCCESS;
+ default:
+ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)),
+ "%s: invalid state %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
diff --git a/drivers/scsi/isci/remote_node_context.h b/drivers/scsi/isci/remote_node_context.h
new file mode 100644
index 00000000000..41580ad1252
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_context.h
@@ -0,0 +1,224 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCIC_SDS_REMOTE_NODE_CONTEXT_H_
+#define _SCIC_SDS_REMOTE_NODE_CONTEXT_H_
+
+/**
+ * This file contains the structures, constants, and prototypes associated
+ * with the remote node context in the silicon.  It exists to model and
+ * manage that context.
+ *
+ *
+ */
+
+#include "isci.h"
+
+/**
+ *
+ *
+ * This constant represents an invalid remote device id.  It is used to program
+ * the STPTLDARNI register so the driver knows when it has received a SIGNATURE
+ * FIS from the SCU.
+ */
+#define SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX 0x0FFF
+
+#define SCU_HARDWARE_SUSPENSION (0)
+#define SCI_SOFTWARE_SUSPENSION (1)
+
+struct isci_request;
+struct isci_remote_device;
+struct sci_remote_node_context;
+
+typedef void (*scics_sds_remote_node_context_callback)(void *);
+
+/**
+ * This is the enumeration of the remote node context states.
+ */
+enum scis_sds_remote_node_context_states {
+ /**
+ * This state is the initial state for a remote node context. On a resume
+ * request the remote node context will transition to the posting state.
+ */
+ SCI_RNC_INITIAL,
+
+ /**
+ * This is a transition state that posts the RNi to the hardware. Once the RNC
+ * is posted the remote node context will be made ready.
+ */
+ SCI_RNC_POSTING,
+
+ /**
+ * This is a transition state that will post an RNC invalidate to the
+ * hardware. Once the invalidate is complete the remote node context will
+ * transition to the posting state.
+ */
+ SCI_RNC_INVALIDATING,
+
+ /**
+ * This is a transition state that will post an RNC resume to the hardware.
+ * Once the event notification of resume complete is received the remote node
+ * context will transition to the ready state.
+ */
+ SCI_RNC_RESUMING,
+
+ /**
+ * This is the state that the remote node context must be in to accept io
+ * request operations.
+ */
+ SCI_RNC_READY,
+
+ /**
+ * This is the state that the remote node context transitions to when it gets
+ * a TX suspend notification from the hardware.
+ */
+ SCI_RNC_TX_SUSPENDED,
+
+ /**
+ * This is the state that the remote node context transitions to when it gets
+ * a TX RX suspend notification from the hardware.
+ */
+ SCI_RNC_TX_RX_SUSPENDED,
+
+ /**
+ * This state is a wait state for the remote node context that waits for a
+ * suspend notification from the hardware. This state is entered when either
+ * there is a request to suspend the remote node context or when there is a TC
+ * completion where the remote node will be suspended by the hardware.
+ */
+ SCI_RNC_AWAIT_SUSPENSION
+};
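+
+/*
+ * A rough sketch of the expected transitions, derived from the request and
+ * event handlers in remote_node_context.c (not exhaustive):
+ *
+ *   INITIAL --resume--> POSTING --post complete--> READY
+ *   READY --suspend event--> TX_SUSPENDED / TX_RX_SUSPENDED
+ *   TX(_RX)_SUSPENDED --resume--> RESUMING --release event--> READY
+ *   non-initial states --destruct--> INVALIDATING --invalidate complete--> INITIAL
+ *   READY --suspend request--> AWAIT_SUSPENSION --suspend event--> TX(_RX)_SUSPENDED
+ */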
+
+/**
+ *
+ *
+ * This enumeration is used to define the end destination state for the remote
+ * node context.
+ */
+enum sci_remote_node_context_destination_state {
+ SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED,
+ SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY,
+ SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL
+};
+
+/**
+ * struct sci_remote_node_context - This structure contains the data
+ * associated with the remote node context object. The remote node context
+ * (RNC) object models the remote device information necessary to manage
+ * the silicon RNC.
+ */
+struct sci_remote_node_context {
+ /**
+ * This field indicates the remote node index (RNI) associated with
+ * this RNC.
+ */
+ u16 remote_node_index;
+
+ /**
+ * This field is the recorded suspension code or the reason for the remote node
+ * context suspension.
+ */
+ u32 suspension_code;
+
+ /**
+ * This field records the destination state the remote node context should
+ * reach once the current transition completes.  A ready destination can
+ * cause an automatic resume on receiving a suspension notification.
+ */
+ enum sci_remote_node_context_destination_state destination_state;
+
+ /**
+ * This field contains the callback function that the user requested to be
+ * called when the requested state transition is complete.
+ */
+ scics_sds_remote_node_context_callback user_callback;
+
+ /**
+ * This field contains the parameter that is passed to the user callback
+ * when the requested state transition is completed.
+ */
+ void *user_cookie;
+
+ /**
+ * This field contains the data for the object's state machine.
+ */
+ struct sci_base_state_machine sm;
+};
+
+void sci_remote_node_context_construct(struct sci_remote_node_context *rnc,
+ u16 remote_node_index);
+
+
+bool sci_remote_node_context_is_ready(
+ struct sci_remote_node_context *sci_rnc);
+
+enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc,
+ u32 event_code);
+enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback callback,
+ void *callback_parameter);
+enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc,
+ u32 suspend_type,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p);
+enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc,
+ scics_sds_remote_node_context_callback cb_fn,
+ void *cb_p);
+enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc,
+ struct isci_request *ireq);
+enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc,
+ struct isci_request *ireq);
+
+#endif /* _SCIC_SDS_REMOTE_NODE_CONTEXT_H_ */
diff --git a/drivers/scsi/isci/remote_node_table.c b/drivers/scsi/isci/remote_node_table.c
new file mode 100644
index 00000000000..301b3141945
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_table.c
@@ -0,0 +1,598 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * This file contains the implementation of the SCIC_SDS_REMOTE_NODE_TABLE
+ * public, protected, and private methods.
+ *
+ *
+ */
+#include "remote_node_table.h"
+#include "remote_node_context.h"
+
+/**
+ *
+ * @remote_node_table: This is the remote node index table from which the
+ * selection will be made.
+ * @group_table_index: This is the index to the group table from which to
+ * search for an available selection.
+ *
+ * This routine will find the absolute bit position of the next available
+ * group, i.e. (dword_index * 32) + bit_index of the first set bit found.
+ * It returns SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX if no group is
+ * available.
+ */
+static u32 sci_remote_node_table_get_group_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_table_index)
+{
+ u32 dword_index;
+ u32 *group_table;
+ u32 bit_index;
+
+ group_table = remote_node_table->remote_node_groups[group_table_index];
+
+ for (dword_index = 0; dword_index < remote_node_table->group_array_size; dword_index++) {
+ if (group_table[dword_index] != 0) {
+ for (bit_index = 0; bit_index < 32; bit_index++) {
+ if ((group_table[dword_index] & (1 << bit_index)) != 0) {
+ return (dword_index * 32) + bit_index;
+ }
+ }
+ }
+ }
+
+ return SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX;
+}
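+
+/*
+ * Worked example of the scan above (illustrative values only): with
+ * group_table[0] == 0 and group_table[1] == 0x00000008, dword 0 is skipped,
+ * bit 3 is found set in dword 1, and (1 * 32) + 3 == 35 is returned as the
+ * group index.
+ */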
+
+/**
+ *
+ * @remote_node_table: [out] This is the remote node table in which to clear
+ * the selector.
+ * @group_table_index: This is the remote node selector in which the change
+ * will be made.
+ * @group_index: This is the bit index in the table to be modified.
+ *
+ * This method will clear the group index entry in the specified group index
+ * table.
+ */
+static void sci_remote_node_table_clear_group_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_table_index,
+ u32 group_index)
+{
+ u32 dword_index;
+ u32 bit_index;
+ u32 *group_table;
+
+ BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
+ BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));
+
+ dword_index = group_index / 32;
+ bit_index = group_index % 32;
+ group_table = remote_node_table->remote_node_groups[group_table_index];
+
+ group_table[dword_index] = group_table[dword_index] & ~(1 << bit_index);
+}
+
+/**
+ *
+ * @remote_node_table: [out] This is the remote node table in which to set
+ * the selector.
+ * @group_table_index: This is the remote node selector in which the change
+ * will be made.
+ * @group_index: This is the bit position in the table to be modified.
+ *
+ * This method will set the group index bit entry in the specified group index
+ * table.
+ */
+static void sci_remote_node_table_set_group_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_table_index,
+ u32 group_index)
+{
+ u32 dword_index;
+ u32 bit_index;
+ u32 *group_table;
+
+ BUG_ON(group_table_index >= SCU_STP_REMOTE_NODE_COUNT);
+ BUG_ON(group_index >= (u32)(remote_node_table->group_array_size * 32));
+
+ dword_index = group_index / 32;
+ bit_index = group_index % 32;
+ group_table = remote_node_table->remote_node_groups[group_table_index];
+
+ group_table[dword_index] = group_table[dword_index] | (1 << bit_index);
+}
+
+/**
+ *
+ * @remote_node_table: [out] This is the remote node table in which to modify
+ * the remote node availability.
+ * @remote_node_index: This is the remote node index that is being returned to
+ * the table.
+ *
+ * This method will mark the remote node as available in the remote node
+ * allocation table.
+ */
+static void sci_remote_node_table_set_node_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_index)
+{
+ u32 dword_location;
+ u32 dword_remainder;
+ u32 slot_normalized;
+ u32 slot_position;
+
+ BUG_ON(
+ (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+ <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
+ );
+
+ dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
+ dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
+ slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
+ slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;
+
+ remote_node_table->available_remote_nodes[dword_location] |=
+ 1 << (slot_normalized + slot_position);
+}
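+
+/*
+ * Worked example of the bit math above (illustrative): for
+ * remote_node_index == 13, with SCIC_SDS_REMOTE_NODES_PER_DWORD == 24 and
+ * SCU_STP_REMOTE_NODE_COUNT == 3:
+ *
+ *   dword_location  = 13 / 24      == 0
+ *   dword_remainder = 13 % 24      == 13
+ *   slot_normalized = (13 / 3) * 4 == 16   (base bit of the fifth nibble)
+ *   slot_position   = 13 % 3       == 1
+ *
+ * so bit 17 of available_remote_nodes[0] is set.
+ */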
+
+/**
+ *
+ * @remote_node_table: [out] This is the remote node table from which to clear
+ * the available remote node bit.
+ * @remote_node_index: This is the remote node index which is to be cleared
+ * from the table.
+ *
+ * This method clears the remote node index from the table of available remote
+ * nodes.
+ */
+static void sci_remote_node_table_clear_node_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_index)
+{
+ u32 dword_location;
+ u32 dword_remainder;
+ u32 slot_position;
+ u32 slot_normalized;
+
+ BUG_ON(
+ (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+ <= (remote_node_index / SCU_STP_REMOTE_NODE_COUNT)
+ );
+
+ dword_location = remote_node_index / SCIC_SDS_REMOTE_NODES_PER_DWORD;
+ dword_remainder = remote_node_index % SCIC_SDS_REMOTE_NODES_PER_DWORD;
+ slot_normalized = (dword_remainder / SCU_STP_REMOTE_NODE_COUNT) * sizeof(u32);
+ slot_position = remote_node_index % SCU_STP_REMOTE_NODE_COUNT;
+
+ remote_node_table->available_remote_nodes[dword_location] &=
+ ~(1 << (slot_normalized + slot_position));
+}
+
+/**
+ *
+ * @remote_node_table: [out] The remote node table from which the slot will be
+ * cleared.
+ * @group_index: The index for the slot that is to be cleared.
+ *
+ * This method clears the entire table slot at the specified slot index.
+ */
+static void sci_remote_node_table_clear_group(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_index)
+{
+ u32 dword_location;
+ u32 dword_remainder;
+ u32 dword_value;
+
+ BUG_ON(
+ (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+ <= (group_index / SCU_STP_REMOTE_NODE_COUNT)
+ );
+
+ dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+ dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+ dword_value = remote_node_table->available_remote_nodes[dword_location];
+ dword_value &= ~(SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+ remote_node_table->available_remote_nodes[dword_location] = dword_value;
+}
+
+/**
+ *
+ * @remote_node_table: The remote node table in which the group is to be set.
+ * @group_index: The index for the slot that is to be set.
+ *
+ * This method sets an entire remote node group in the remote node table.
+ */
+static void sci_remote_node_table_set_group(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_index)
+{
+ u32 dword_location;
+ u32 dword_remainder;
+ u32 dword_value;
+
+ BUG_ON(
+ (remote_node_table->available_nodes_array_size * SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD)
+ <= (group_index / SCU_STP_REMOTE_NODE_COUNT)
+ );
+
+ dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+ dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+ dword_value = remote_node_table->available_remote_nodes[dword_location];
+ dword_value |= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+ remote_node_table->available_remote_nodes[dword_location] = dword_value;
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table for which the group
+ * value is to be returned.
+ * @group_index: This is the group index to use to find the group value.
+ *
+ * This method will return the group value, i.e. the bit values at the
+ * specified remote node group index.
+ */
+static u8 sci_remote_node_table_get_group_value(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_index)
+{
+ u32 dword_location;
+ u32 dword_remainder;
+ u32 dword_value;
+
+ dword_location = group_index / SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+ dword_remainder = group_index % SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD;
+
+ dword_value = remote_node_table->available_remote_nodes[dword_location];
+ dword_value &= (SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE << (dword_remainder * 4));
+ dword_value = dword_value >> (dword_remainder * 4);
+
+ return (u8)dword_value;
+}
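+
+/*
+ * Worked example (illustrative): with available_remote_nodes[0] ==
+ * 0x00070000 and group_index == 4, dword_remainder == 4, so the nibble in
+ * bits 16..19 is masked out, shifted down and returned as 0x7 -- a fully
+ * free triple.
+ */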
+
+/**
+ *
+ * @remote_node_table: [out] The remote node table which is to be initialized.
+ * @remote_node_entries: The number of entries to put in the table.
+ *
+ * This method will initialize the remote node table for use.
+ */
+void sci_remote_node_table_initialize(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_entries)
+{
+ u32 index;
+
+ /*
+ * Initialize the raw data.  We could improve the speed by only initializing
+ * those entries that are actually going to be used */
+ memset(
+ remote_node_table->available_remote_nodes,
+ 0x00,
+ sizeof(remote_node_table->available_remote_nodes)
+ );
+
+ memset(
+ remote_node_table->remote_node_groups,
+ 0x00,
+ sizeof(remote_node_table->remote_node_groups)
+ );
+
+ /* Initialize the available remote node sets */
+ remote_node_table->available_nodes_array_size = (u16)
+ (remote_node_entries / SCIC_SDS_REMOTE_NODES_PER_DWORD)
+ + ((remote_node_entries % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0);
+
+
+ /* Initialize each full DWORD to a FULL SET of remote nodes */
+ for (index = 0; index < remote_node_entries; index++) {
+ sci_remote_node_table_set_node_index(remote_node_table, index);
+ }
+
+ remote_node_table->group_array_size = (u16)
+ (remote_node_entries / (SCU_STP_REMOTE_NODE_COUNT * 32))
+ + ((remote_node_entries % (SCU_STP_REMOTE_NODE_COUNT * 32)) != 0);
+
+ for (index = 0; index < (remote_node_entries / SCU_STP_REMOTE_NODE_COUNT); index++) {
+ /*
+ * These are all guaranteed to be full slot values so fill them in the
+ * available sets of 3 remote nodes */
+ sci_remote_node_table_set_group_index(remote_node_table, 2, index);
+ }
+
+ /* Now fill in any remainders that we may find */
+ if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 2) {
+ sci_remote_node_table_set_group_index(remote_node_table, 1, index);
+ } else if ((remote_node_entries % SCU_STP_REMOTE_NODE_COUNT) == 1) {
+ sci_remote_node_table_set_group_index(remote_node_table, 0, index);
+ }
+}
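+
+/*
+ * Worked example of the sizing above (illustrative): for
+ * remote_node_entries == 32, available_nodes_array_size == 32/24 + 1 == 2
+ * and group_array_size == 1.  The loop marks group indexes 0..9 (ten full
+ * triples) in group table 2, and the 32 % 3 == 2 left-over entries place
+ * group index 10 in group table 1.
+ */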
+
+/**
+ *
+ * @remote_node_table: [out] The remote node table from which to allocate a
+ * remote node.
+ * @group_table_index: The group index that is to be used for the search.
+ *
+ * This method will allocate a single RNi from the remote node table.  The
+ * table index determines which remote node group table to search.  This
+ * search may fail, in which case another group table can be specified.  The
+ * function is designed to allow a search from the available single remote
+ * node group up to the triple remote node group.  If an entry is found in
+ * the specified table the remote node is removed and the remote node groups
+ * are updated.  It returns the RNi value, or an invalid remote node context
+ * index if no RNi can be found.
+ */
+static u16 sci_remote_node_table_allocate_single_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_table_index)
+{
+ u8 index;
+ u8 group_value;
+ u32 group_index;
+ u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+
+ group_index = sci_remote_node_table_get_group_index(
+ remote_node_table, group_table_index);
+
+ /* If we found an available group, select a free node from it */
+ if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
+ group_value = sci_remote_node_table_get_group_value(
+ remote_node_table, group_index);
+
+ for (index = 0; index < SCU_STP_REMOTE_NODE_COUNT; index++) {
+ if (((1 << index) & group_value) != 0) {
+ /* We have selected a bit now clear it */
+ remote_node_index = (u16)(group_index * SCU_STP_REMOTE_NODE_COUNT
+ + index);
+
+ sci_remote_node_table_clear_group_index(
+ remote_node_table, group_table_index, group_index
+ );
+
+ sci_remote_node_table_clear_node_index(
+ remote_node_table, remote_node_index
+ );
+
+ if (group_table_index > 0) {
+ sci_remote_node_table_set_group_index(
+ remote_node_table, group_table_index - 1, group_index
+ );
+ }
+
+ break;
+ }
+ }
+ }
+
+ return remote_node_index;
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table from which to allocate the
+ * remote node entries.
+ * @group_table_index: This is the group table index, which must equal two (2)
+ * for this operation.
+ *
+ * This method will allocate three consecutive remote node context entries. If
+ * there are no remaining triple entries the function will return a failure.
+ * It returns the remote node index that represents three consecutive remote
+ * node entries, or an invalid remote node context index if none can be found.
+ */
+static u16 sci_remote_node_table_allocate_triple_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u32 group_table_index)
+{
+ u32 group_index;
+ u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+
+ group_index = sci_remote_node_table_get_group_index(
+ remote_node_table, group_table_index);
+
+ if (group_index != SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX) {
+ remote_node_index = (u16)group_index * SCU_STP_REMOTE_NODE_COUNT;
+
+ sci_remote_node_table_clear_group_index(
+ remote_node_table, group_table_index, group_index
+ );
+
+ sci_remote_node_table_clear_group(
+ remote_node_table, group_index
+ );
+ }
+
+ return remote_node_index;
+}
+
+/**
+ *
+ * @remote_node_table: This is the remote node table from which the remote node
+ * allocation is to take place.
+ * @remote_node_count: This is the remote node count, which is one of
+ * SCU_SSP_REMOTE_NODE_COUNT(1) or SCU_STP_REMOTE_NODE_COUNT(3).
+ *
+ * This method will allocate a remote node that matches the remote node count
+ * specified by the caller.  It returns the allocated remote node index, or
+ * an invalid remote node context index if the allocation fails.
+ */
+u16 sci_remote_node_table_allocate_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_count)
+{
+ u16 remote_node_index = SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX;
+
+ if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
+ remote_node_index =
+ sci_remote_node_table_allocate_single_remote_node(
+ remote_node_table, 0);
+
+ if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+ remote_node_index =
+ sci_remote_node_table_allocate_single_remote_node(
+ remote_node_table, 1);
+ }
+
+ if (remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
+ remote_node_index =
+ sci_remote_node_table_allocate_single_remote_node(
+ remote_node_table, 2);
+ }
+ } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
+ remote_node_index =
+ sci_remote_node_table_allocate_triple_remote_node(
+ remote_node_table, 2);
+ }
+
+ return remote_node_index;
+}
+
+/**
+ *
+ * @remote_node_table: The remote node table to which the remote node index
+ * is to be freed.
+ * @remote_node_index: The remote node index that is being returned.
+ *
+ * This method will free a single remote node index back to the remote node
+ * table and update the remote node groups accordingly.
+ */
+static void sci_remote_node_table_release_single_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u16 remote_node_index)
+{
+ u32 group_index;
+ u8 group_value;
+
+ group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
+
+ group_value = sci_remote_node_table_get_group_value(remote_node_table, group_index);
+
+ /*
+ * Assert that we are not trying to add an entry to a slot that is already
+ * full. */
+ BUG_ON(group_value == SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE);
+
+ if (group_value == 0x00) {
+ /*
+ * There are no entries in this slot so it must be added to the single
+ * slot table. */
+ sci_remote_node_table_set_group_index(remote_node_table, 0, group_index);
+ } else if ((group_value & (group_value - 1)) == 0) {
+ /*
+ * There is only one entry in this slot so it must be moved from the
+ * single slot table to the dual slot table */
+ sci_remote_node_table_clear_group_index(remote_node_table, 0, group_index);
+ sci_remote_node_table_set_group_index(remote_node_table, 1, group_index);
+ } else {
+ /*
+ * There are two entries in the slot so it must be moved from the dual
+ * slot table to the triple slot table. */
+ sci_remote_node_table_clear_group_index(remote_node_table, 1, group_index);
+ sci_remote_node_table_set_group_index(remote_node_table, 2, group_index);
+ }
+
+ sci_remote_node_table_set_node_index(remote_node_table, remote_node_index);
+}
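+
+/*
+ * The cascade above, summarized (group_value is the nibble before this node
+ * is returned):
+ *
+ *   000 (empty)   -> group is listed in the single-slot table (selector 0)
+ *   one bit set   -> group moves from the single (0) to the dual (1) table
+ *   two bits set  -> group moves from the dual (1) to the triple (2) table
+ *   111 (full)    -> BUG_ON, nothing more can be returned to this slot
+ */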
+
+/**
+ *
+ * @remote_node_table: This is the remote node table to which the remote node
+ * index is to be freed.
+ * @remote_node_index: The first of the three consecutive remote node indexes
+ * being released.
+ *
+ * This method will release a group of three consecutive remote nodes back to
+ * the free remote nodes.
+ */
+static void sci_remote_node_table_release_triple_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u16 remote_node_index)
+{
+ u32 group_index;
+
+ group_index = remote_node_index / SCU_STP_REMOTE_NODE_COUNT;
+
+ sci_remote_node_table_set_group_index(
+ remote_node_table, 2, group_index
+ );
+
+ sci_remote_node_table_set_group(remote_node_table, group_index);
+}
+
+/**
+ *
+ * @remote_node_table: The remote node table to which the remote node index is
+ * to be freed.
+ * @remote_node_count: This is the count of consecutive remote nodes that are
+ * to be freed.
+ *
+ * This method will release the remote node index back into the remote node
+ * table free pool.
+ */
+void sci_remote_node_table_release_remote_node_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_count,
+ u16 remote_node_index)
+{
+ if (remote_node_count == SCU_SSP_REMOTE_NODE_COUNT) {
+ sci_remote_node_table_release_single_remote_node(
+ remote_node_table, remote_node_index);
+ } else if (remote_node_count == SCU_STP_REMOTE_NODE_COUNT) {
+ sci_remote_node_table_release_triple_remote_node(
+ remote_node_table, remote_node_index);
+ }
+}
+
diff --git a/drivers/scsi/isci/remote_node_table.h b/drivers/scsi/isci/remote_node_table.h
new file mode 100644
index 00000000000..721ab982d2a
--- /dev/null
+++ b/drivers/scsi/isci/remote_node_table.h
@@ -0,0 +1,188 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCIC_SDS_REMOTE_NODE_TABLE_H_
+#define _SCIC_SDS_REMOTE_NODE_TABLE_H_
+
+#include "isci.h"
+
+/**
+ *
+ *
+ * Remote node sets are sets of remote node indexes in the remote node table.
+ * The SCU hardware requires that STP remote node entries take three
+ * consecutive remote node indexes, so the table is arranged in sets of three.
+ * The bits are used as 0111 0111 to make a byte and the bits define the set
+ * of three remote nodes to use as a sequence.
+ */
+#define SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE 2
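+
+/*
+ * For example (illustrative): a byte of 0x77 == 0111 0111 encodes two
+ * completely free STP sets, while a nibble of 011 means only two of the
+ * three consecutive RNis in that set are still available.
+ */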
+
+/**
+ *
+ *
+ * Since the remote node table is organized as DWORDs, take the remote node
+ * sets in bytes and represent them in DWORDs. The lowest ordered bits are the
+ * ones used in case the full DWORD is not being used, i.e.
+ * 0000 0000 0000 0000 0111 0111 0111 0111 if only a single WORD is in use in
+ * the DWORD.
+ */
+#define SCIC_SDS_REMOTE_NODE_SETS_PER_DWORD \
+ (sizeof(u32) * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE)
+/**
+ *
+ *
+ * This is a count of the number of remote nodes that can be represented in a
+ * byte
+ */
+#define SCIC_SDS_REMOTE_NODES_PER_BYTE \
+ (SCU_STP_REMOTE_NODE_COUNT * SCIC_SDS_REMOTE_NODE_SETS_PER_BYTE)
+
+/**
+ *
+ *
+ * This is a count of the number of remote nodes that can be represented in a
+ * DWORD
+ */
+#define SCIC_SDS_REMOTE_NODES_PER_DWORD \
+ (sizeof(u32) * SCIC_SDS_REMOTE_NODES_PER_BYTE)
+
+/**
+ *
+ *
+ * This is the number of bits in a remote node group
+ */
+#define SCIC_SDS_REMOTE_NODES_BITS_PER_GROUP 4
+
+#define SCIC_SDS_REMOTE_NODE_TABLE_INVALID_INDEX (0xFFFFFFFF)
+#define SCIC_SDS_REMOTE_NODE_TABLE_FULL_SLOT_VALUE (0x07)
+#define SCIC_SDS_REMOTE_NODE_TABLE_EMPTY_SLOT_VALUE (0x00)
+
+/**
+ *
+ *
+ * Expander attached SATA remote node count
+ */
+#define SCU_STP_REMOTE_NODE_COUNT 3
+
+/**
+ *
+ *
+ * Expander or direct attached SSP remote node count
+ */
+#define SCU_SSP_REMOTE_NODE_COUNT 1
+
+/**
+ *
+ *
+ * Direct attached STP remote node count
+ */
+#define SCU_SATA_REMOTE_NODE_COUNT 1
+
+/**
+ * struct sci_remote_node_table - This structure tracks which remote node
+ * indexes are available for allocation, along with the group tables used
+ * to find free sets of consecutive indexes quickly.
+ */
+struct sci_remote_node_table {
+ /**
+ * This field contains the array size in dwords
+ */
+ u16 available_nodes_array_size;
+
+ /**
+ * This field contains the array size, in dwords, of each remote node group table
+ */
+ u16 group_array_size;
+
+ /**
+ * This field is the array of available remote node entries in bits.
+ * Because of the way STP remote node data is allocated on the SCU hardware
+ * the remote nodes must occupy three consecutive remote node context
+ * entries. For ease of allocation and de-allocation we have broken the
+ * sets of three into a single nibble. When the STP RNi is allocated all
+ * of the bits in the nibble are cleared. This math results in a table size
+ * of MAX_REMOTE_NODES / CONSECUTIVE RNi ENTRIES for STP / 2 entries per byte.
+ */
+ u32 available_remote_nodes[
+ (SCI_MAX_REMOTE_DEVICES / SCIC_SDS_REMOTE_NODES_PER_DWORD)
+ + ((SCI_MAX_REMOTE_DEVICES % SCIC_SDS_REMOTE_NODES_PER_DWORD) != 0)];
+
+ /**
+ * This field is the nibble selector for the above table. There are three
+ * possible selectors each for fast lookup when trying to find one, two or
+ * three remote node entries.
+ */
+ u32 remote_node_groups[
+ SCU_STP_REMOTE_NODE_COUNT][
+ (SCI_MAX_REMOTE_DEVICES / (32 * SCU_STP_REMOTE_NODE_COUNT))
+ + ((SCI_MAX_REMOTE_DEVICES % (32 * SCU_STP_REMOTE_NODE_COUNT)) != 0)];
+
+};
+
+/* --------------------------------------------------------------------------- */
+
+void sci_remote_node_table_initialize(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_entries);
+
+u16 sci_remote_node_table_allocate_remote_node(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_count);
+
+void sci_remote_node_table_release_remote_node_index(
+ struct sci_remote_node_table *remote_node_table,
+ u32 remote_node_count,
+ u16 remote_node_index);
+
+#endif /* _SCIC_SDS_REMOTE_NODE_TABLE_H_ */
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
new file mode 100644
index 00000000000..a46e07ac789
--- /dev/null
+++ b/drivers/scsi/isci/request.c
@@ -0,0 +1,3391 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "isci.h"
+#include "task.h"
+#include "request.h"
+#include "scu_completion_codes.h"
+#include "scu_event_codes.h"
+#include "sas.h"
+
+static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
+ int idx)
+{
+ if (idx == 0)
+ return &ireq->tc->sgl_pair_ab;
+ else if (idx == 1)
+ return &ireq->tc->sgl_pair_cd;
+ else if (idx < 0)
+ return NULL;
+ else
+ return &ireq->sg_table[idx - 2];
+}
+
+static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
+ struct isci_request *ireq, u32 idx)
+{
+ u32 offset;
+
+ if (idx == 0) {
+ offset = (void *) &ireq->tc->sgl_pair_ab -
+ (void *) &ihost->task_context_table[0];
+ return ihost->task_context_dma + offset;
+ } else if (idx == 1) {
+ offset = (void *) &ireq->tc->sgl_pair_cd -
+ (void *) &ihost->task_context_table[0];
+ return ihost->task_context_dma + offset;
+ }
+
+ return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
+}
+
+static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
+{
+ e->length = sg_dma_len(sg);
+ e->address_upper = upper_32_bits(sg_dma_address(sg));
+ e->address_lower = lower_32_bits(sg_dma_address(sg));
+ e->address_modifier = 0;
+}
+
+static void sci_request_build_sgl(struct isci_request *ireq)
+{
+ struct isci_host *ihost = ireq->isci_host;
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct scatterlist *sg = NULL;
+ dma_addr_t dma_addr;
+ u32 sg_idx = 0;
+ struct scu_sgl_element_pair *scu_sg = NULL;
+ struct scu_sgl_element_pair *prev_sg = NULL;
+
+ if (task->num_scatter > 0) {
+ sg = task->scatter;
+
+ while (sg) {
+ scu_sg = to_sgl_element_pair(ireq, sg_idx);
+ init_sgl_element(&scu_sg->A, sg);
+ sg = sg_next(sg);
+ if (sg) {
+ init_sgl_element(&scu_sg->B, sg);
+ sg = sg_next(sg);
+ } else
+ memset(&scu_sg->B, 0, sizeof(scu_sg->B));
+
+ if (prev_sg) {
+ dma_addr = to_sgl_element_pair_dma(ihost,
+ ireq,
+ sg_idx);
+
+ prev_sg->next_pair_upper =
+ upper_32_bits(dma_addr);
+ prev_sg->next_pair_lower =
+ lower_32_bits(dma_addr);
+ }
+
+ prev_sg = scu_sg;
+ sg_idx++;
+ }
+ } else { /* handle when no sg */
+ scu_sg = to_sgl_element_pair(ireq, sg_idx);
+
+ dma_addr = dma_map_single(&ihost->pdev->dev,
+ task->scatter,
+ task->total_xfer_len,
+ task->data_dir);
+
+ ireq->zero_scatter_daddr = dma_addr;
+
+ scu_sg->A.length = task->total_xfer_len;
+ scu_sg->A.address_upper = upper_32_bits(dma_addr);
+ scu_sg->A.address_lower = lower_32_bits(dma_addr);
+ }
+
+ if (scu_sg) {
+ scu_sg->next_pair_upper = 0;
+ scu_sg->next_pair_lower = 0;
+ }
+}
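+
+/*
+ * Sketch of the resulting chain (see to_sgl_element_pair() above): pairs 0
+ * and 1 live inside the task context itself (sgl_pair_ab/sgl_pair_cd),
+ * pairs 2 and up come from ireq->sg_table, and each pair's
+ * next_pair_{upper,lower} holds the dma address of the following pair; the
+ * final pair's link is left zeroed.
+ */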
+
+static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
+{
+ struct ssp_cmd_iu *cmd_iu;
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ cmd_iu = &ireq->ssp.cmd;
+
+ memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
+ cmd_iu->add_cdb_len = 0;
+ cmd_iu->_r_a = 0;
+ cmd_iu->_r_b = 0;
+ cmd_iu->en_fburst = 0; /* unsupported */
+ cmd_iu->task_prio = task->ssp_task.task_prio;
+ cmd_iu->task_attr = task->ssp_task.task_attr;
+ cmd_iu->_r_c = 0;
+
+ sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
+ sizeof(task->ssp_task.cdb) / sizeof(u32));
+}
+
+static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
+{
+ struct ssp_task_iu *task_iu;
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
+
+ task_iu = &ireq->ssp.tmf;
+
+ memset(task_iu, 0, sizeof(struct ssp_task_iu));
+
+ memcpy(task_iu->LUN, task->ssp_task.LUN, 8);
+
+ task_iu->task_func = isci_tmf->tmf_code;
+ task_iu->task_tag =
+ (ireq->ttype == tmf_task) ?
+ isci_tmf->io_tag :
+ SCI_CONTROLLER_INVALID_IO_TAG;
+}
+
+/**
+ * This method will fill in the SCU Task Context for any type of SSP request.
+ * @ireq: The IO request being constructed.
+ * @task_context: The SCU task context being constructed.
+ *
+ */
+static void scu_ssp_reqeust_construct_task_context(
+ struct isci_request *ireq,
+ struct scu_task_context *task_context)
+{
+ dma_addr_t dma_addr;
+ struct isci_remote_device *idev;
+ struct isci_port *iport;
+
+ idev = ireq->target_device;
+ iport = idev->owning_port;
+
+ /* Fill in the TC with its required data */
+ task_context->abort = 0;
+ task_context->priority = 0;
+ task_context->initiator_request = 1;
+ task_context->connection_rate = idev->connection_rate;
+ task_context->protocol_engine_index = ISCI_PEG;
+ task_context->logical_port_index = iport->physical_port_index;
+ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
+ task_context->valid = SCU_TASK_CONTEXT_VALID;
+ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+ task_context->remote_node_index = idev->rnc.remote_node_index;
+ task_context->command_code = 0;
+
+ task_context->link_layer_control = 0;
+ task_context->do_not_dma_ssp_good_response = 1;
+ task_context->strict_ordering = 0;
+ task_context->control_frame = 0;
+ task_context->timeout_enable = 0;
+ task_context->block_guard_enable = 0;
+
+ task_context->address_modifier = 0;
+
+ /* task_context->type.ssp.tag = ireq->io_tag; */
+ task_context->task_phase = 0x01;
+
+ ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (iport->physical_port_index <<
+ SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ ISCI_TAG_TCI(ireq->io_tag));
+
+ /*
+ * Copy the physical address for the command buffer to the
+ * SCU Task Context
+ */
+ dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);
+
+ task_context->command_iu_upper = upper_32_bits(dma_addr);
+ task_context->command_iu_lower = lower_32_bits(dma_addr);
+
+ /*
+ * Copy the physical address for the response buffer to the
+ * SCU Task Context
+ */
+ dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);
+
+ task_context->response_iu_upper = upper_32_bits(dma_addr);
+ task_context->response_iu_lower = lower_32_bits(dma_addr);
+}
+
+/**
+ * This method will fill in the SCU Task Context for an SSP IO request.
+ * @ireq: The IO request being constructed.
+ * @dir: The direction of the data transfer.
+ * @len: The transfer length in bytes.
+ *
+ */
+static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
+ enum dma_data_direction dir,
+ u32 len)
+{
+ struct scu_task_context *task_context = ireq->tc;
+
+ scu_ssp_reqeust_construct_task_context(ireq, task_context);
+
+ task_context->ssp_command_iu_length =
+ sizeof(struct ssp_cmd_iu) / sizeof(u32);
+ task_context->type.ssp.frame_type = SSP_COMMAND;
+
+ switch (dir) {
+ case DMA_FROM_DEVICE:
+ case DMA_NONE:
+ default:
+ task_context->task_type = SCU_TASK_TYPE_IOREAD;
+ break;
+ case DMA_TO_DEVICE:
+ task_context->task_type = SCU_TASK_TYPE_IOWRITE;
+ break;
+ }
+
+ task_context->transfer_length_bytes = len;
+
+ if (task_context->transfer_length_bytes > 0)
+ sci_request_build_sgl(ireq);
+}
+
+/**
+ * This method will fill in the SCU Task Context for an SSP Task request.
+ * The following important settings are utilized:
+ *   -# priority == SCU_TASK_PRIORITY_HIGH.  This ensures that the task
+ *      request is issued ahead of other tasks destined for the same
+ *      Remote Node.
+ *   -# task_type == SCU_TASK_TYPE_RAW_FRAME.  This matches the raw task
+ *      frame that is actually constructed to perform task management.
+ *   -# control_frame == 1.  This ensures that the proper endianness is set
+ *      so that the bytes are transmitted in the right order for a task
+ *      frame.
+ * @ireq: This parameter specifies the task request object being
+ * constructed.
+ *
+ */
+static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
+{
+ struct scu_task_context *task_context = ireq->tc;
+
+ scu_ssp_reqeust_construct_task_context(ireq, task_context);
+
+ task_context->control_frame = 1;
+ task_context->priority = SCU_TASK_PRIORITY_HIGH;
+ task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
+ task_context->transfer_length_bytes = 0;
+ task_context->type.ssp.frame_type = SSP_TASK;
+ task_context->ssp_command_iu_length =
+ sizeof(struct ssp_task_iu) / sizeof(u32);
+}
+
+/**
+ * This method will fill in the SCU Task Context for any type of SATA
+ * request. This is called from the various SATA constructors.
+ * @ireq: The general IO request object which is to be used in
+ * constructing the SCU task context.
+ * @task_context: The buffer pointer for the SCU task context which is being
+ * constructed.
+ *
+ * On return, the general IO request construction and the buffer assignment
+ * for the command buffer are complete.  TODO: Revisit task context
+ * construction to determine what is common for SSP/SMP/STP task context
+ * structures.
+ */
+static void scu_sata_reqeust_construct_task_context(
+ struct isci_request *ireq,
+ struct scu_task_context *task_context)
+{
+ dma_addr_t dma_addr;
+ struct isci_remote_device *idev;
+ struct isci_port *iport;
+
+ idev = ireq->target_device;
+ iport = idev->owning_port;
+
+ /* Fill in the TC with its required data */
+ task_context->abort = 0;
+ task_context->priority = SCU_TASK_PRIORITY_NORMAL;
+ task_context->initiator_request = 1;
+ task_context->connection_rate = idev->connection_rate;
+ task_context->protocol_engine_index = ISCI_PEG;
+ task_context->logical_port_index = iport->physical_port_index;
+ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
+ task_context->valid = SCU_TASK_CONTEXT_VALID;
+ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+ task_context->remote_node_index = idev->rnc.remote_node_index;
+ task_context->command_code = 0;
+
+ task_context->link_layer_control = 0;
+ task_context->do_not_dma_ssp_good_response = 1;
+ task_context->strict_ordering = 0;
+ task_context->control_frame = 0;
+ task_context->timeout_enable = 0;
+ task_context->block_guard_enable = 0;
+
+ task_context->address_modifier = 0;
+ task_context->task_phase = 0x01;
+
+ task_context->ssp_command_iu_length =
+ (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
+
+ /* Set the first word of the H2D REG FIS */
+ task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;
+
+ ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (iport->physical_port_index <<
+ SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ ISCI_TAG_TCI(ireq->io_tag));
+ /*
+ * Copy the physical address for the command buffer to the SCU Task
+ * Context. We must offset the command buffer by 4 bytes because the
+ * first 4 bytes are transferred in the body of the TC.
+ */
+ dma_addr = sci_io_request_get_dma_addr(ireq,
+ ((char *) &ireq->stp.cmd) +
+ sizeof(u32));
+
+ task_context->command_iu_upper = upper_32_bits(dma_addr);
+ task_context->command_iu_lower = lower_32_bits(dma_addr);
+
+ /* SATA Requests do not have a response buffer */
+ task_context->response_iu_upper = 0;
+ task_context->response_iu_lower = 0;
+}
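+
+/* Editorial worked example (illustrative field values only): with
+ * ISCI_PEG == 0, physical_port_index == 1 and ISCI_TAG_TCI(io_tag) == 5,
+ * the post_context composed above packs to
+ *
+ *	SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC
+ *		| (0 << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT)
+ *		| (1 << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT)
+ *		| 5;
+ *
+ * and the command_iu address deliberately skips the first dword of the H2D
+ * FIS, because type.words[0] already carries it inside the task context.
+ */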
+
+static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
+{
+ struct scu_task_context *task_context = ireq->tc;
+
+ scu_sata_reqeust_construct_task_context(ireq, task_context);
+
+ task_context->control_frame = 0;
+ task_context->priority = SCU_TASK_PRIORITY_NORMAL;
+ task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
+ task_context->type.stp.fis_type = FIS_REGH2D;
+ task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
+}
+
+static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
+ bool copy_rx_frame)
+{
+ struct isci_stp_request *stp_req = &ireq->stp.req;
+
+ scu_stp_raw_request_construct_task_context(ireq);
+
+ stp_req->status = 0;
+ stp_req->sgl.offset = 0;
+ stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;
+
+ if (copy_rx_frame) {
+ sci_request_build_sgl(ireq);
+ stp_req->sgl.index = 0;
+ } else {
+ /* The user does not want the data copied to the SGL buffer location */
+ stp_req->sgl.index = -1;
+ }
+
+ return SCI_SUCCESS;
+}
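+
+/* Editorial usage note: callers pass copy_rx_frame == false when received
+ * data should remain in the unsolicited frame buffer; sgl.index is parked
+ * at -1 as a sentinel, and the PIO data-in frame handler later checks that
+ * sentinel and saves the frame index instead of copying the payload out.
+ */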
+
+/**
+ *
+ * @ireq: This parameter specifies the request to be constructed as an
+ *    optimized request.
+ * @optimized_task_type: This parameter specifies whether the request is to be
+ *    a UDMA request or an NCQ request: a value of 0 indicates UDMA, and a
+ *    value of 1 indicates NCQ.
+ *
+ * This method will perform request construction common to all types of STP
+ * requests that are optimized by the silicon (i.e. UDMA, NCQ).
+ */
+static void sci_stp_optimized_request_construct(struct isci_request *ireq,
+ u8 optimized_task_type,
+ u32 len,
+ enum dma_data_direction dir)
+{
+ struct scu_task_context *task_context = ireq->tc;
+
+ /* Build the STP task context structure */
+ scu_sata_reqeust_construct_task_context(ireq, task_context);
+
+ /* Copy over the SGL elements */
+ sci_request_build_sgl(ireq);
+
+	/* Copy over the number of bytes to be transferred */
+ task_context->transfer_length_bytes = len;
+
+ if (dir == DMA_TO_DEVICE) {
+		/*
+		 * The difference between the DMA IN and DMA OUT request task
+		 * type values is consistent with the difference between FPDMA
+		 * READ and FPDMA WRITE values. Add that difference to the
+		 * supplied task type parameter to set the task type properly
+		 * for this DATA OUT (WRITE) case.
+		 */
+ task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
+ - SCU_TASK_TYPE_DMA_IN);
+ } else {
+ /*
+ * For the DATA IN (READ) case, simply save the supplied
+ * optimized task type. */
+ task_context->task_type = optimized_task_type;
+ }
+}
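+
+/* Editorial example of the task-type arithmetic above (a sketch that assumes
+ * the SCU task-type enums keep the READ/WRITE spacing the comment relies on):
+ * for an NCQ write, the caller passes SCU_TASK_TYPE_FPDMAQ_READ with
+ * dir == DMA_TO_DEVICE, so the assignment evaluates as
+ *
+ *	task_type = SCU_TASK_TYPE_FPDMAQ_READ
+ *		    + (SCU_TASK_TYPE_DMA_OUT - SCU_TASK_TYPE_DMA_IN);
+ *
+ * which lands on the matching FPDMA write task type.
+ */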
+
+
+static enum sci_status
+sci_io_request_construct_sata(struct isci_request *ireq,
+ u32 len,
+ enum dma_data_direction dir,
+ bool copy)
+{
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ /* check for management protocols */
+ if (ireq->ttype == tmf_task) {
+ struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+
+ if (tmf->tmf_code == isci_tmf_sata_srst_high ||
+ tmf->tmf_code == isci_tmf_sata_srst_low) {
+ scu_stp_raw_request_construct_task_context(ireq);
+ return SCI_SUCCESS;
+ } else {
+ dev_err(&ireq->owning_controller->pdev->dev,
+ "%s: Request 0x%p received un-handled SAT "
+ "management protocol 0x%x.\n",
+ __func__, ireq, tmf->tmf_code);
+
+ return SCI_FAILURE;
+ }
+ }
+
+ if (!sas_protocol_ata(task->task_proto)) {
+ dev_err(&ireq->owning_controller->pdev->dev,
+ "%s: Non-ATA protocol in SATA path: 0x%x\n",
+ __func__,
+ task->task_proto);
+ return SCI_FAILURE;
+
+ }
+
+ /* non data */
+ if (task->data_dir == DMA_NONE) {
+ scu_stp_raw_request_construct_task_context(ireq);
+ return SCI_SUCCESS;
+ }
+
+ /* NCQ */
+ if (task->ata_task.use_ncq) {
+ sci_stp_optimized_request_construct(ireq,
+ SCU_TASK_TYPE_FPDMAQ_READ,
+ len, dir);
+ return SCI_SUCCESS;
+ }
+
+ /* DMA */
+ if (task->ata_task.dma_xfer) {
+ sci_stp_optimized_request_construct(ireq,
+ SCU_TASK_TYPE_DMA_IN,
+ len, dir);
+ return SCI_SUCCESS;
+	}
+
+	/* PIO */
+	return sci_stp_pio_request_construct(ireq, copy);
+}
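+
+/* Editorial summary of the dispatch above, derived from the code itself:
+ *
+ *	SRST high/low TMF	-> raw frame construction
+ *	non-ATA protocol	-> SCI_FAILURE
+ *	DMA_NONE (non-data)	-> raw frame construction
+ *	ata_task.use_ncq	-> optimized construct, FPDMAQ_READ base
+ *	ata_task.dma_xfer	-> optimized construct, DMA_IN base
+ *	otherwise (PIO)		-> sci_stp_pio_request_construct()
+ */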
+
+static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
+{
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ ireq->protocol = SCIC_SSP_PROTOCOL;
+
+ scu_ssp_io_request_construct_task_context(ireq,
+ task->data_dir,
+ task->total_xfer_len);
+
+ sci_io_request_build_ssp_command_iu(ireq);
+
+ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_task_request_construct_ssp(
+ struct isci_request *ireq)
+{
+ /* Construct the SSP Task SCU Task Context */
+ scu_ssp_task_request_construct_task_context(ireq);
+
+ /* Fill in the SSP Task IU */
+ sci_task_request_build_ssp_task_iu(ireq);
+
+ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
+{
+ enum sci_status status;
+ bool copy = false;
+ struct sas_task *task = isci_request_access_task(ireq);
+
+ ireq->protocol = SCIC_STP_PROTOCOL;
+
+	copy = (task->data_dir != DMA_NONE);
+
+ status = sci_io_request_construct_sata(ireq,
+ task->total_xfer_len,
+ task->data_dir,
+ copy);
+
+ if (status == SCI_SUCCESS)
+ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+ return status;
+}
+
+enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
+{
+ enum sci_status status = SCI_SUCCESS;
+
+ /* check for management protocols */
+ if (ireq->ttype == tmf_task) {
+ struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+
+ if (tmf->tmf_code == isci_tmf_sata_srst_high ||
+ tmf->tmf_code == isci_tmf_sata_srst_low) {
+ scu_stp_raw_request_construct_task_context(ireq);
+ } else {
+ dev_err(&ireq->owning_controller->pdev->dev,
+ "%s: Request 0x%p received un-handled SAT "
+ "Protocol 0x%x.\n",
+ __func__, ireq, tmf->tmf_code);
+
+ return SCI_FAILURE;
+ }
+ }
+
+ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+ return status;
+}
+
+/**
+ * sci_req_tx_bytes - bytes transferred when reply underruns request
+ * @ireq: request that was terminated early
+ */
+#define SCU_TASK_CONTEXT_SRAM 0x200000
+static u32 sci_req_tx_bytes(struct isci_request *ireq)
+{
+ struct isci_host *ihost = ireq->owning_controller;
+ u32 ret_val = 0;
+
+ if (readl(&ihost->smu_registers->address_modifier) == 0) {
+ void __iomem *scu_reg_base = ihost->scu_registers;
+
+		/* get the bytes of data from the address == BAR1 + 0x20002C + (256 * TCi), where
+ * BAR1 is the scu_registers
+ * 0x20002C = 0x200000 + 0x2c
+ * = start of task context SRAM + offset of (type.ssp.data_offset)
+ * TCi is the io_tag of struct sci_request
+ */
+ ret_val = readl(scu_reg_base +
+ (SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
+ ((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
+ }
+
+ return ret_val;
+}
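+
+/* Editorial worked example for the read above (assuming the 256-byte task
+ * context implied by the "256 * TCi" formula): for ISCI_TAG_TCI(io_tag) == 3,
+ *
+ *	scu_reg_base + 0x200000			(task context SRAM)
+ *		     + 0x2c			(type.ssp.data_offset)
+ *		     + 3 * 256			(== 0x300)
+ *	= scu_reg_base + 0x20032c
+ *
+ * i.e. the per-TC slice of BAR1 + 0x20002C described in the comment.
+ */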
+
+enum sci_status sci_request_start(struct isci_request *ireq)
+{
+ enum sci_base_request_states state;
+ struct scu_task_context *tc = ireq->tc;
+ struct isci_host *ihost = ireq->owning_controller;
+
+ state = ireq->sm.current_state_id;
+ if (state != SCI_REQ_CONSTRUCTED) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC IO Request requested to start while in wrong "
+ "state %d\n", __func__, state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ tc->task_index = ISCI_TAG_TCI(ireq->io_tag);
+
+ switch (tc->protocol_type) {
+ case SCU_TASK_CONTEXT_PROTOCOL_SMP:
+ case SCU_TASK_CONTEXT_PROTOCOL_SSP:
+ /* SSP/SMP Frame */
+ tc->type.ssp.tag = ireq->io_tag;
+ tc->type.ssp.target_port_transfer_tag = 0xFFFF;
+ break;
+
+ case SCU_TASK_CONTEXT_PROTOCOL_STP:
+ /* STP/SATA Frame
+ * tc->type.stp.ncq_tag = ireq->ncq_tag;
+ */
+ break;
+
+ case SCU_TASK_CONTEXT_PROTOCOL_NONE:
+		/* @todo When do we set no protocol type? */
+ break;
+
+ default:
+ /* This should never happen since we build the IO
+ * requests */
+ break;
+ }
+
+ /* Add to the post_context the io tag value */
+ ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);
+
+ /* Everything is good go ahead and change state */
+ sci_change_state(&ireq->sm, SCI_REQ_STARTED);
+
+ return SCI_SUCCESS;
+}
+
+enum sci_status
+sci_io_request_terminate(struct isci_request *ireq)
+{
+ enum sci_base_request_states state;
+
+ state = ireq->sm.current_state_id;
+
+ switch (state) {
+ case SCI_REQ_CONSTRUCTED:
+ ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
+ ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ return SCI_SUCCESS;
+ case SCI_REQ_STARTED:
+ case SCI_REQ_TASK_WAIT_TC_COMP:
+ case SCI_REQ_SMP_WAIT_RESP:
+ case SCI_REQ_SMP_WAIT_TC_COMP:
+ case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+ case SCI_REQ_STP_UDMA_WAIT_D2H:
+ case SCI_REQ_STP_NON_DATA_WAIT_H2D:
+ case SCI_REQ_STP_NON_DATA_WAIT_D2H:
+ case SCI_REQ_STP_PIO_WAIT_H2D:
+ case SCI_REQ_STP_PIO_WAIT_FRAME:
+ case SCI_REQ_STP_PIO_DATA_IN:
+ case SCI_REQ_STP_PIO_DATA_OUT:
+ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
+ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
+ case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
+ sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
+ return SCI_SUCCESS;
+ case SCI_REQ_TASK_WAIT_TC_RESP:
+ sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ return SCI_SUCCESS;
+ case SCI_REQ_ABORTING:
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ return SCI_SUCCESS;
+ case SCI_REQ_COMPLETED:
+ default:
+ dev_warn(&ireq->owning_controller->pdev->dev,
+ "%s: SCIC IO Request requested to abort while in wrong "
+ "state %d\n",
+ __func__,
+ ireq->sm.current_state_id);
+ break;
+ }
+
+ return SCI_FAILURE_INVALID_STATE;
+}
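+
+/* Editorial state summary, derived from the switch above: a CONSTRUCTED
+ * request completes immediately as TASK_ABORT/IO_TERMINATED; every in-flight
+ * state detours through SCI_REQ_ABORTING to let the silicon finish; an
+ * already-COMPLETED request (or any other state) is an invalid-state failure.
+ */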
+
+enum sci_status sci_request_complete(struct isci_request *ireq)
+{
+ enum sci_base_request_states state;
+ struct isci_host *ihost = ireq->owning_controller;
+
+ state = ireq->sm.current_state_id;
+ if (WARN_ONCE(state != SCI_REQ_COMPLETED,
+ "isci: request completion from wrong state (%d)\n", state))
+ return SCI_FAILURE_INVALID_STATE;
+
+ if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
+ sci_controller_release_frame(ihost,
+ ireq->saved_rx_frame_index);
+
+ /* XXX can we just stop the machine and remove the 'final' state? */
+ sci_change_state(&ireq->sm, SCI_REQ_FINAL);
+ return SCI_SUCCESS;
+}
+
+enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
+ u32 event_code)
+{
+ enum sci_base_request_states state;
+ struct isci_host *ihost = ireq->owning_controller;
+
+ state = ireq->sm.current_state_id;
+
+ if (state != SCI_REQ_STP_PIO_DATA_IN) {
+ dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %d\n",
+ __func__, event_code, state);
+
+ return SCI_FAILURE_INVALID_STATE;
+ }
+
+ switch (scu_get_event_specifier(event_code)) {
+ case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
+ /* We are waiting for data and the SCU has R_ERR the data frame.
+ * Go back to waiting for the D2H Register FIS
+ */
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ return SCI_SUCCESS;
+ default:
+ dev_err(&ihost->pdev->dev,
+ "%s: pio request unexpected event %#x\n",
+ __func__, event_code);
+
+ /* TODO Should we fail the PIO request when we get an
+ * unexpected event?
+ */
+ return SCI_FAILURE;
+ }
+}
+
+/*
+ * This function copies response data for requests returning response data
+ * instead of sense data.
+ * @ireq: This parameter specifies the request object for which to copy
+ * the response data.
+ */
+static void sci_io_request_copy_response(struct isci_request *ireq)
+{
+ void *resp_buf;
+ u32 len;
+ struct ssp_response_iu *ssp_response;
+ struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);
+
+ ssp_response = &ireq->ssp.rsp;
+
+ resp_buf = &isci_tmf->resp.resp_iu;
+
+ len = min_t(u32,
+ SSP_RESP_IU_MAX_SIZE,
+ be32_to_cpu(ssp_response->response_data_len));
+
+ memcpy(resp_buf, ssp_response->resp_data, len);
+}
+
+static enum sci_status
+request_started_state_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ struct ssp_response_iu *resp_iu;
+ u8 datapres;
+
+	/* TODO: Any SDMA return code other than 0 is bad; decode 0x003C0000
+	 * to determine the SDMA status.
+	 */
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ break;
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
+ /* There are times when the SCU hardware will return an early
+ * response because the io request specified more data than is
+ * returned by the target device (mode pages, inquiry data,
+		 * etc.). We must check the response status to see if this is
+ * truly a failed request or a good request that just got
+ * completed early.
+ */
+ struct ssp_response_iu *resp = &ireq->ssp.rsp;
+ ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+ sci_swab32_cpy(&ireq->ssp.rsp,
+ &ireq->ssp.rsp,
+ word_cnt);
+
+ if (resp->status == 0) {
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
+ } else {
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ }
+ break;
+ }
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
+ ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+ sci_swab32_cpy(&ireq->ssp.rsp,
+ &ireq->ssp.rsp,
+ word_cnt);
+
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ break;
+ }
+
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
+ /* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
+ * guaranteed to be received before this completion status is
+ * posted?
+ */
+ resp_iu = &ireq->ssp.rsp;
+ datapres = resp_iu->datapres;
+
+ if (datapres == 1 || datapres == 2) {
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ } else {
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ }
+ break;
+ /* only stp device gets suspended. */
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
+ if (ireq->protocol == SCIC_STP_PROTOCOL) {
+ ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+ SCU_COMPLETION_TL_STATUS_SHIFT;
+ ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+ } else {
+ ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+ SCU_COMPLETION_TL_STATUS_SHIFT;
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ }
+ break;
+
+	/* both stp and ssp devices get suspended */
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
+ ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+ SCU_COMPLETION_TL_STATUS_SHIFT;
+ ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
+ break;
+
+ /* neither ssp nor stp gets suspended. */
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
+ default:
+ ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
+ SCU_COMPLETION_TL_STATUS_SHIFT;
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ break;
+ }
+
+ /*
+ * TODO: This is probably wrong for ACK/NAK timeout conditions
+ */
+
+ /* In all cases we will treat this as the completion of the IO req. */
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+request_aborting_state_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
+ case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
+ ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
+ ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+
+ default:
+ /* Unless we get some strange error wait for the task abort to complete
+ * TODO: Should there be a state change for this completion?
+ */
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
+ break;
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
+ /* Currently, the decision is to simply allow the task request
+ * to timeout if the task IU wasn't received successfully.
+ * There is a potential for receiving multiple task responses if
+ * we decide to send the task IU again.
+ */
+ dev_warn(&ireq->owning_controller->pdev->dev,
+ "%s: TaskRequest:0x%p CompletionCode:%x - "
+ "ACK/NAK timeout\n", __func__, ireq,
+ completion_code);
+
+ sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
+ break;
+ default:
+ /*
+ * All other completion status cause the IO to be complete.
+ * If a NAK was received, then it is up to the user to retry
+ * the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+smp_request_await_response_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ /* In the AWAIT RESPONSE state, any TC completion is
+		 * unexpected, but if the TC has success status, we
+ * complete the IO anyway.
+ */
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
+		/* These statuses have been seen in a specific LSI
+		 * expander, which sometimes is not able to send an smp
+		 * response within 2 ms. This causes our hardware to break
+		 * the connection and set the TC completion with one of
+		 * these SMP_XXX_XX_ERR statuses. For this type of error,
+		 * we ask the ihost user to retry the request.
+		 */
+ ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
+ ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ default:
+ /* All other completion status cause the IO to be complete. If a NAK
+ * was received, then it is up to the user to retry the request
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+smp_request_await_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ default:
+ /* All other completion status cause the IO to be
+ * complete. If a NAK was received, then it is up to
+ * the user to retry the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req)
+{
+ struct scu_sgl_element *sgl;
+ struct scu_sgl_element_pair *sgl_pair;
+ struct isci_request *ireq = to_ireq(stp_req);
+ struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl;
+
+ sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
+ if (!sgl_pair)
+ sgl = NULL;
+ else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) {
+ if (sgl_pair->B.address_lower == 0 &&
+ sgl_pair->B.address_upper == 0) {
+ sgl = NULL;
+ } else {
+ pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B;
+ sgl = &sgl_pair->B;
+ }
+ } else {
+ if (sgl_pair->next_pair_lower == 0 &&
+ sgl_pair->next_pair_upper == 0) {
+ sgl = NULL;
+ } else {
+ pio_sgl->index++;
+ pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A;
+ sgl_pair = to_sgl_element_pair(ireq, pio_sgl->index);
+ sgl = &sgl_pair->A;
+ }
+ }
+
+ return sgl;
+}
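+
+/* Editorial walking example for pio_sgl_next() above: starting from pair 0
+ * element A, successive calls visit
+ *
+ *	pair0.A -> pair0.B -> pair1.A -> pair1.B -> ...
+ *
+ * returning NULL either when element B of the current pair is zeroed (no
+ * second element) or when next_pair_lower/upper are both zero (end of the
+ * chain).
+ */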
+
+static enum sci_status
+stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_STP_NON_DATA_WAIT_D2H);
+ break;
+
+ default:
+ /* All other completion status cause the IO to be
+ * complete. If a NAK was received, then it is up to
+ * the user to retry the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
+
+/* transmit DATA_FIS from (current sgl + offset) for input
+ * parameter length. current sgl and offset are already stored in the IO request
+ */
+static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame(
+ struct isci_request *ireq,
+ u32 length)
+{
+ struct isci_stp_request *stp_req = &ireq->stp.req;
+ struct scu_task_context *task_context = ireq->tc;
+ struct scu_sgl_element_pair *sgl_pair;
+ struct scu_sgl_element *current_sgl;
+
+	/* Recycle the TC and reconstruct it to send out a DATA FIS containing
+	 * the data from current_sgl + offset for the input length
+	 */
+ sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
+ if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A)
+ current_sgl = &sgl_pair->A;
+ else
+ current_sgl = &sgl_pair->B;
+
+ /* update the TC */
+ task_context->command_iu_upper = current_sgl->address_upper;
+ task_context->command_iu_lower = current_sgl->address_lower;
+ task_context->transfer_length_bytes = length;
+ task_context->type.stp.fis_type = FIS_DATA;
+
+ /* send the new TC out. */
+ return sci_controller_continue_io(ireq);
+}
+
+static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq)
+{
+ struct isci_stp_request *stp_req = &ireq->stp.req;
+ struct scu_sgl_element_pair *sgl_pair;
+ struct scu_sgl_element *sgl;
+ enum sci_status status;
+ u32 offset;
+ u32 len = 0;
+
+ offset = stp_req->sgl.offset;
+ sgl_pair = to_sgl_element_pair(ireq, stp_req->sgl.index);
+ if (WARN_ONCE(!sgl_pair, "%s: null sgl element", __func__))
+ return SCI_FAILURE;
+
+ if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) {
+ sgl = &sgl_pair->A;
+ len = sgl_pair->A.length - offset;
+ } else {
+ sgl = &sgl_pair->B;
+ len = sgl_pair->B.length - offset;
+ }
+
+ if (stp_req->pio_len == 0)
+ return SCI_SUCCESS;
+
+ if (stp_req->pio_len >= len) {
+ status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, len);
+ if (status != SCI_SUCCESS)
+ return status;
+ stp_req->pio_len -= len;
+
+ /* update the current sgl, offset and save for future */
+ sgl = pio_sgl_next(stp_req);
+ offset = 0;
+ } else if (stp_req->pio_len < len) {
+		status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq,
+									 stp_req->pio_len);
+
+ /* Sgl offset will be adjusted and saved for future */
+ offset += stp_req->pio_len;
+ sgl->address_lower += stp_req->pio_len;
+ stp_req->pio_len = 0;
+ }
+
+ stp_req->sgl.offset = offset;
+
+ return status;
+}
+
+/**
+ *
+ * @stp_req: The request that is used for the SGL processing.
+ * @data_buf: The buffer of data to be copied.
+ * @len: The length of the data transfer.
+ *
+ * Copy the data from the buffer for the length specified to the IO request
+ * SGL specified data region. Returns an enum sci_status.
+ */
+static enum sci_status
+sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
+ u8 *data_buf, u32 len)
+{
+ struct isci_request *ireq;
+ u8 *src_addr;
+ int copy_len;
+ struct sas_task *task;
+ struct scatterlist *sg;
+ void *kaddr;
+ int total_len = len;
+
+ ireq = to_ireq(stp_req);
+ task = isci_request_access_task(ireq);
+ src_addr = data_buf;
+
+ if (task->num_scatter > 0) {
+ sg = task->scatter;
+
+ while (total_len > 0) {
+ struct page *page = sg_page(sg);
+
+ copy_len = min_t(int, total_len, sg_dma_len(sg));
+ kaddr = kmap_atomic(page, KM_IRQ0);
+ memcpy(kaddr + sg->offset, src_addr, copy_len);
+ kunmap_atomic(kaddr, KM_IRQ0);
+ total_len -= copy_len;
+ src_addr += copy_len;
+ sg = sg_next(sg);
+ }
+ } else {
+ BUG_ON(task->total_xfer_len < total_len);
+ memcpy(task->scatter, src_addr, total_len);
+ }
+
+ return SCI_SUCCESS;
+}
+
+/**
+ *
+ * @stp_req: The PIO DATA IN request that is to receive the data.
+ * @data_buffer: The buffer to copy from.
+ *
+ * Copy the data buffer to the io request data region. Returns an enum
+ * sci_status.
+ */
+static enum sci_status sci_stp_request_pio_data_in_copy_data(
+ struct isci_stp_request *stp_req,
+ u8 *data_buffer)
+{
+ enum sci_status status;
+
+ /*
+	 * If there is less than 1K remaining in the transfer request,
+	 * copy just the data for the transfer */
+ if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) {
+ status = sci_stp_request_pio_data_in_copy_data_buffer(
+ stp_req, data_buffer, stp_req->pio_len);
+
+ if (status == SCI_SUCCESS)
+ stp_req->pio_len = 0;
+ } else {
+		/* We are transferring the whole frame so copy */
+ status = sci_stp_request_pio_data_in_copy_data_buffer(
+ stp_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
+
+ if (status == SCI_SUCCESS)
+ stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE;
+ }
+
+ return status;
+}
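+
+/* Editorial worked example of the 1K framing above (illustrative numbers):
+ * with stp_req->pio_len == 2600 bytes, successive data frames copy
+ *
+ *	1024 bytes (pio_len -> 1576)
+ *	1024 bytes (pio_len -> 552)
+ *	 552 bytes (pio_len -> 0, final short frame)
+ *
+ * since SCU_MAX_FRAME_BUFFER_SIZE caps each unsolicited frame at 0x400.
+ */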
+
+static enum sci_status
+stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ enum sci_status status = SCI_SUCCESS;
+
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ break;
+
+ default:
+ /* All other completion status cause the IO to be
+ * complete. If a NAK was received, then it is up to
+ * the user to retry the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return status;
+}
+
+static enum sci_status
+pio_data_out_tx_done_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ enum sci_status status = SCI_SUCCESS;
+ bool all_frames_transferred = false;
+ struct isci_stp_request *stp_req = &ireq->stp.req;
+
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ /* Transmit data */
+ if (stp_req->pio_len != 0) {
+ status = sci_stp_request_pio_data_out_transmit_data(ireq);
+ if (status == SCI_SUCCESS) {
+ if (stp_req->pio_len == 0)
+ all_frames_transferred = true;
+ }
+ } else if (stp_req->pio_len == 0) {
+			/*
+			 * this will happen if all the data is written the
+			 * first time after the PIO setup fis is received
+			 */
+ all_frames_transferred = true;
+ }
+
+ /* all data transferred. */
+ if (all_frames_transferred) {
+			/*
+			 * Change the state to SCI_REQ_STP_PIO_WAIT_FRAME
+			 * and wait for a PIO_SETUP FIS or a D2H Reg FIS.
+			 */
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ }
+ break;
+
+ default:
+ /*
+ * All other completion status cause the IO to be complete.
+ * If a NAK was received, then it is up to the user to retry
+ * the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return status;
+}
+
+static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq,
+ u32 frame_index)
+{
+ struct isci_host *ihost = ireq->owning_controller;
+ struct dev_to_host_fis *frame_header;
+ enum sci_status status;
+ u32 *frame_buffer;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if ((status == SCI_SUCCESS) &&
+ (frame_header->fis_type == FIS_REGD2H)) {
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
+ frame_header,
+ frame_buffer);
+ }
+
+ sci_controller_release_frame(ihost, frame_index);
+
+ return status;
+}
+
+enum sci_status
+sci_io_request_frame_handler(struct isci_request *ireq,
+ u32 frame_index)
+{
+ struct isci_host *ihost = ireq->owning_controller;
+ struct isci_stp_request *stp_req = &ireq->stp.req;
+ enum sci_base_request_states state;
+ enum sci_status status;
+ ssize_t word_cnt;
+
+ state = ireq->sm.current_state_id;
+ switch (state) {
+ case SCI_REQ_STARTED: {
+ struct ssp_frame_hdr ssp_hdr;
+ void *frame_header;
+
+ sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ &frame_header);
+
+ word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
+ sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
+
+ if (ssp_hdr.frame_type == SSP_RESPONSE) {
+ struct ssp_response_iu *resp_iu;
+ ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
+
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&resp_iu);
+
+ sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
+
+ resp_iu = &ireq->ssp.rsp;
+
+ if (resp_iu->datapres == 0x01 ||
+ resp_iu->datapres == 0x02) {
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ } else {
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ }
+ } else {
+ /* not a response frame, why did it get forwarded? */
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC IO Request 0x%p received unexpected "
+ "frame %d type 0x%02x\n", __func__, ireq,
+ frame_index, ssp_hdr.frame_type);
+ }
+
+ /*
+ * In any case we are done with this frame buffer return it to
+ * the controller
+ */
+ sci_controller_release_frame(ihost, frame_index);
+
+ return SCI_SUCCESS;
+ }
+
+ case SCI_REQ_TASK_WAIT_TC_RESP:
+ sci_io_request_copy_response(ireq);
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ sci_controller_release_frame(ihost, frame_index);
+ return SCI_SUCCESS;
+
+ case SCI_REQ_SMP_WAIT_RESP: {
+ struct smp_resp *rsp_hdr = &ireq->smp.rsp;
+ void *frame_header;
+
+ sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ &frame_header);
+
+ /* byte swap the header. */
+ word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
+ sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
+
+ if (rsp_hdr->frame_type == SMP_RESPONSE) {
+ void *smp_resp;
+
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ &smp_resp);
+
+ word_cnt = (sizeof(struct smp_resp) - SMP_RESP_HDR_SZ) /
+ sizeof(u32);
+
+ sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
+ smp_resp, word_cnt);
+
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
+ } else {
+ /*
+ * This was not a response frame why did it get
+ * forwarded?
+ */
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC SMP Request 0x%p received unexpected "
+ "frame %d type 0x%02x\n",
+ __func__,
+ ireq,
+ frame_index,
+ rsp_hdr->frame_type);
+
+ ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ }
+
+ sci_controller_release_frame(ihost, frame_index);
+
+ return SCI_SUCCESS;
+ }
+
+ case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+ return sci_stp_request_udma_general_frame_handler(ireq,
+ frame_index);
+
+ case SCI_REQ_STP_UDMA_WAIT_D2H:
+		/* Use the general frame handler to copy the response data */
+ status = sci_stp_request_udma_general_frame_handler(ireq, frame_index);
+
+ if (status != SCI_SUCCESS)
+ return status;
+
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ return SCI_SUCCESS;
+
+ case SCI_REQ_STP_NON_DATA_WAIT_D2H: {
+ struct dev_to_host_fis *frame_header;
+ u32 *frame_buffer;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if (status != SCI_SUCCESS) {
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
+ __func__,
+ stp_req,
+ frame_index,
+ status);
+
+ return status;
+ }
+
+ switch (frame_header->fis_type) {
+ case FIS_REGD2H:
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
+ frame_header,
+ frame_buffer);
+
+ /* The command has completed with error */
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ break;
+
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "%s: IO Request:0x%p Frame Id:%d protocol "
+ "violation occurred\n", __func__, stp_req,
+ frame_index);
+
+ ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
+ ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
+ break;
+ }
+
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+
+ /* Frame has been decoded return it to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+
+ return status;
+ }
+
+ case SCI_REQ_STP_PIO_WAIT_FRAME: {
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct dev_to_host_fis *frame_header;
+ u32 *frame_buffer;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if (status != SCI_SUCCESS) {
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
+ __func__, stp_req, frame_index, status);
+ return status;
+ }
+
+ switch (frame_header->fis_type) {
+ case FIS_PIO_SETUP:
+ /* Get from the frame buffer the PIO Setup Data */
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+			/* Get the data from the PIO Setup. The SCU hardware
+			 * returns the first word in the frame_header and the
+			 * rest of the data in the frame buffer, so we need to
+			 * back up one dword
+			 */
+
+ /* transfer_count: first 16bits in the 4th dword */
+ stp_req->pio_len = frame_buffer[3] & 0xffff;
+
+ /* status: 4th byte in the 3rd dword */
+ stp_req->status = (frame_buffer[2] >> 24) & 0xff;
+
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
+ frame_header,
+ frame_buffer);
+
+ ireq->stp.rsp.status = stp_req->status;
+
+ /* The next state is dependent on whether the
+ * request was PIO Data-in or Data out
+ */
+ if (task->data_dir == DMA_FROM_DEVICE) {
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
+ } else if (task->data_dir == DMA_TO_DEVICE) {
+ /* Transmit data */
+ status = sci_stp_request_pio_data_out_transmit_data(ireq);
+ if (status != SCI_SUCCESS)
+ break;
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
+ }
+ break;
+
+ case FIS_SETDEVBITS:
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ break;
+
+ case FIS_REGD2H:
+ if (frame_header->status & ATA_BUSY) {
+ /*
+ * Now why is the drive sending a D2H Register
+ * FIS when it is still busy? Do nothing since
+ * we are still in the right state.
+ */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCIC PIO Request 0x%p received "
+ "D2H Register FIS with BSY status "
+ "0x%x\n",
+ __func__,
+ stp_req,
+ frame_header->status);
+ break;
+ }
+
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+			sci_controller_copy_sata_response(&ireq->stp.rsp,
+ frame_header,
+ frame_buffer);
+
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+
+ default:
+ /* FIXME: what do we do here? */
+ break;
+ }
+
+ /* Frame is decoded return it to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+
+ return status;
+ }
+
+ case SCI_REQ_STP_PIO_DATA_IN: {
+ struct dev_to_host_fis *frame_header;
+ struct sata_fis_data *frame_buffer;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+
+ if (status != SCI_SUCCESS) {
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
+ __func__,
+ stp_req,
+ frame_index,
+ status);
+ return status;
+ }
+
+ if (frame_header->fis_type != FIS_DATA) {
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC PIO Request 0x%p received frame %d "
+ "with fis type 0x%02x when expecting a data "
+ "fis.\n",
+ __func__,
+ stp_req,
+ frame_index,
+ frame_header->fis_type);
+
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+
+ /* Frame is decoded return it to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+ return status;
+ }
+
+ if (stp_req->sgl.index < 0) {
+ ireq->saved_rx_frame_index = frame_index;
+ stp_req->pio_len = 0;
+ } else {
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ status = sci_stp_request_pio_data_in_copy_data(stp_req,
+ (u8 *)frame_buffer);
+
+ /* Frame is decoded return it to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+ }
+
+ /* Check for the end of the transfer, are there more
+ * bytes remaining for this data transfer
+ */
+ if (status != SCI_SUCCESS || stp_req->pio_len != 0)
+ return status;
+
+ if ((stp_req->status & ATA_BUSY) == 0) {
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ } else {
+ sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
+ }
+ return status;
+ }
+
+ case SCI_REQ_STP_SOFT_RESET_WAIT_D2H: {
+ struct dev_to_host_fis *frame_header;
+ u32 *frame_buffer;
+
+ status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_header);
+ if (status != SCI_SUCCESS) {
+ dev_err(&ihost->pdev->dev,
+ "%s: SCIC IO Request 0x%p could not get frame "
+ "header for frame index %d, status %x\n",
+ __func__,
+ stp_req,
+ frame_index,
+ status);
+ return status;
+ }
+
+ switch (frame_header->fis_type) {
+ case FIS_REGD2H:
+ sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+ frame_index,
+ (void **)&frame_buffer);
+
+ sci_controller_copy_sata_response(&ireq->stp.rsp,
+ frame_header,
+ frame_buffer);
+
+ /* The command has completed with error */
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ break;
+
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "%s: IO Request:0x%p Frame Id:%d protocol "
+ "violation occurred\n",
+ __func__,
+ stp_req,
+ frame_index);
+
+ ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS;
+ ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION;
+ break;
+ }
+
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+
+ /* Frame has been decoded return it to the controller */
+ sci_controller_release_frame(ihost, frame_index);
+
+ return status;
+ }
+ case SCI_REQ_ABORTING:
+ /*
+ * TODO: Is it even possible to get an unsolicited frame in the
+ * aborting state?
+ */
+ sci_controller_release_frame(ihost, frame_index);
+ return SCI_SUCCESS;
+
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC IO Request given unexpected frame %x while "
+ "in state %d\n",
+ __func__,
+ frame_index,
+ state);
+
+ sci_controller_release_frame(ihost, frame_index);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
+
+static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ enum sci_status status = SCI_SUCCESS;
+
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
+		/* We must check the response buffer to see if the D2H
+ * Register FIS was received before we got the TC
+ * completion.
+ */
+ if (ireq->stp.rsp.fis_type == FIS_REGD2H) {
+ sci_remote_device_suspend(ireq->target_device,
+ SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
+
+ ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+ ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ } else {
+ /* If we have an error completion status for the
+ * TC then we can expect a D2H register FIS from
+ * the device so we must change state to wait
+ * for it
+ */
+ sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
+ }
+ break;
+
+ /* TODO Check to see if any of these completion status need to
+ * wait for the device to host register fis.
+ */
+ /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
+ * - this comes only for B0
+ */
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
+ sci_remote_device_suspend(ireq->target_device,
+ SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
+ /* Fall through to the default case */
+ default:
+ /* All other completion status cause the IO to be complete. */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return status;
+}
+
+static enum sci_status
+stp_request_soft_reset_await_h2d_asserted_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG);
+ break;
+
+ default:
+ /*
+ * All other completion status cause the IO to be complete.
+ * If a NAK was received, then it is up to the user to retry
+ * the request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static enum sci_status
+stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
+ u32 completion_code)
+{
+ switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+ case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+ ireq->scu_status = SCU_TASK_DONE_GOOD;
+ ireq->sci_status = SCI_SUCCESS;
+ sci_change_state(&ireq->sm, SCI_REQ_STP_SOFT_RESET_WAIT_D2H);
+ break;
+
+ default:
+ /* All other completion status cause the IO to be complete. If
+ * a NAK was received, then it is up to the user to retry the
+ * request.
+ */
+ ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+ ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+ sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+ break;
+ }
+
+ return SCI_SUCCESS;
+}
+
+enum sci_status
+sci_io_request_tc_completion(struct isci_request *ireq,
+ u32 completion_code)
+{
+ enum sci_base_request_states state;
+ struct isci_host *ihost = ireq->owning_controller;
+
+ state = ireq->sm.current_state_id;
+
+ switch (state) {
+ case SCI_REQ_STARTED:
+ return request_started_state_tc_event(ireq, completion_code);
+
+ case SCI_REQ_TASK_WAIT_TC_COMP:
+ return ssp_task_request_await_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_SMP_WAIT_RESP:
+ return smp_request_await_response_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_SMP_WAIT_TC_COMP:
+ return smp_request_await_tc_event(ireq, completion_code);
+
+ case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
+ return stp_request_udma_await_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_STP_NON_DATA_WAIT_H2D:
+ return stp_request_non_data_await_h2d_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_STP_PIO_WAIT_H2D:
+ return stp_request_pio_await_h2d_completion_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_STP_PIO_DATA_OUT:
+ return pio_data_out_tx_done_tc_event(ireq, completion_code);
+
+ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
+ return stp_request_soft_reset_await_h2d_asserted_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
+ return stp_request_soft_reset_await_h2d_diagnostic_tc_event(ireq,
+ completion_code);
+
+ case SCI_REQ_ABORTING:
+ return request_aborting_state_tc_event(ireq,
+ completion_code);
+
+ default:
+ dev_warn(&ihost->pdev->dev,
+ "%s: SCIC IO Request given task completion "
+ "notification %x while in wrong state %d\n",
+ __func__,
+ completion_code,
+ state);
+ return SCI_FAILURE_INVALID_STATE;
+ }
+}
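+
+/* Editorial note: the switch above is a straight state -> handler table.
+ * Each handler normalizes the raw completion_code with
+ * SCU_GET_COMPLETION_TL_STATUS()/SCU_NORMALIZE_COMPLETION_STATUS() and then
+ * drives the request state machine; unknown states fail here with
+ * SCI_FAILURE_INVALID_STATE rather than in the handlers.
+ */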
+
+/**
+ * isci_request_process_response_iu() - This function sets the status and
+ * response iu, in the task struct, from the request object for the upper
+ * layer driver.
+ * @task: This parameter is the task struct from the upper layer driver.
+ * @resp_iu: This parameter points to the response iu of the completed request.
+ * @dev: This parameter specifies the linux device struct.
+ *
+ * Return: none.
+ */
+static void isci_request_process_response_iu(
+ struct sas_task *task,
+ struct ssp_response_iu *resp_iu,
+ struct device *dev)
+{
+ dev_dbg(dev,
+ "%s: resp_iu = %p "
+ "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
+ "resp_iu->response_data_len = %x, "
+ "resp_iu->sense_data_len = %x\nrepsonse data: ",
+ __func__,
+ resp_iu,
+ resp_iu->status,
+ resp_iu->datapres,
+ resp_iu->response_data_len,
+ resp_iu->sense_data_len);
+
+ task->task_status.stat = resp_iu->status;
+
+ /* libsas updates the task status fields based on the response iu. */
+ sas_ssp_task_response(dev, task, resp_iu);
+}
+
+/**
+ * isci_request_set_open_reject_status() - This function prepares the I/O
+ * completion for OPEN_REJECT conditions.
+ * @request: This parameter is the completed isci_request object.
+ * @task: This parameter is the sas_task associated with the completed request.
+ * @response_ptr: This parameter specifies the service response for the I/O.
+ * @status_ptr: This parameter specifies the exec status for the I/O.
+ * @complete_to_host_ptr: This parameter specifies the action to be taken by
+ * the LLDD with respect to completing this request or forcing an abort
+ * condition on the I/O.
+ * @open_rej_reason: This parameter specifies the encoded reason for the
+ * abandon-class reject.
+ *
+ * Return: none.
+ */
+static void isci_request_set_open_reject_status(
+ struct isci_request *request,
+ struct sas_task *task,
+ enum service_response *response_ptr,
+ enum exec_status *status_ptr,
+ enum isci_completion_selection *complete_to_host_ptr,
+ enum sas_open_rej_reason open_rej_reason)
+{
+ /* Task in the target is done. */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ *response_ptr = SAS_TASK_UNDELIVERED;
+ *status_ptr = SAS_OPEN_REJECT;
+ *complete_to_host_ptr = isci_perform_normal_io_completion;
+ task->task_status.open_rej_reason = open_rej_reason;
+}
+
+/**
+ * isci_request_handle_controller_specific_errors() - This function decodes
+ * controller-specific I/O completion error conditions.
+ * @idev: This parameter is the remote device, if any, associated with the I/O.
+ * @request: This parameter is the completed isci_request object.
+ * @task: This parameter is the sas_task associated with the completed request.
+ * @response_ptr: This parameter specifies the service response for the I/O.
+ * @status_ptr: This parameter specifies the exec status for the I/O.
+ * @complete_to_host_ptr: This parameter specifies the action to be taken by
+ * the LLDD with respect to completing this request or forcing an abort
+ * condition on the I/O.
+ *
+ * Return: none.
+ */
+static void isci_request_handle_controller_specific_errors(
+ struct isci_remote_device *idev,
+ struct isci_request *request,
+ struct sas_task *task,
+ enum service_response *response_ptr,
+ enum exec_status *status_ptr,
+ enum isci_completion_selection *complete_to_host_ptr)
+{
+ unsigned int cstatus;
+
+ cstatus = request->scu_status;
+
+ dev_dbg(&request->isci_host->pdev->dev,
+ "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
+ "- controller status = 0x%x\n",
+ __func__, request, cstatus);
+
+ /* Decode the controller-specific errors; most
+ * important is to recognize those conditions in which
+ * the target may still have a task outstanding that
+ * must be aborted.
+ *
+ * Note that there are SCU completion codes being
+ * named in the decode below for which SCIC has already
+ * done work to handle them in a way other than as
+ * a controller-specific completion code; these are left
+ * in the decode below for completeness sake.
+ */
+ switch (cstatus) {
+ case SCU_TASK_DONE_DMASETUP_DIRERR:
+ /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
+ case SCU_TASK_DONE_XFERCNT_ERR:
+ /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
+ if (task->task_proto == SAS_PROTOCOL_SMP) {
+ /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
+ *response_ptr = SAS_TASK_COMPLETE;
+
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ *status_ptr = SAS_DEVICE_UNKNOWN;
+ else
+ *status_ptr = SAS_ABORTED_TASK;
+
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+ *complete_to_host_ptr =
+ isci_perform_normal_io_completion;
+ } else {
+ /* Task in the target is not done. */
+ *response_ptr = SAS_TASK_UNDELIVERED;
+
+ if (!idev)
+ *status_ptr = SAS_DEVICE_UNKNOWN;
+ else
+ *status_ptr = SAM_STAT_TASK_ABORTED;
+
+ clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+ *complete_to_host_ptr =
+ isci_perform_error_io_completion;
+ }
+
+ break;
+
+ case SCU_TASK_DONE_CRC_ERR:
+ case SCU_TASK_DONE_NAK_CMD_ERR:
+ case SCU_TASK_DONE_EXCESS_DATA:
+ case SCU_TASK_DONE_UNEXP_FIS:
+ /* Also SCU_TASK_DONE_UNEXP_RESP: */
+ case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */
+ case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */
+ case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */
+ /* These are conditions in which the target
+ * has completed the task, so that no cleanup
+ * is necessary.
+ */
+ *response_ptr = SAS_TASK_COMPLETE;
+
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ *status_ptr = SAS_DEVICE_UNKNOWN;
+ else
+ *status_ptr = SAS_ABORTED_TASK;
+
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+ *complete_to_host_ptr = isci_perform_normal_io_completion;
+ break;
+
+
+ /* Note that the only open reject completion codes seen here will be
+ * abandon-class codes; all others are automatically retried in the SCU.
+ */
+ case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
+
+ /* Note - the return of AB0 will change when
+ * libsas implements detection of zone violations.
+ */
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_RESV_AB0);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_RESV_AB1);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_RESV_AB2);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_RESV_AB3);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_BAD_DEST);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_STP_NORES);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_EPROTO);
+ break;
+
+ case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
+
+ isci_request_set_open_reject_status(
+ request, task, response_ptr, status_ptr,
+ complete_to_host_ptr, SAS_OREJ_CONN_RATE);
+ break;
+
+ case SCU_TASK_DONE_LL_R_ERR:
+ /* Also SCU_TASK_DONE_ACK_NAK_TO: */
+ case SCU_TASK_DONE_LL_PERR:
+ case SCU_TASK_DONE_LL_SY_TERM:
+ /* Also SCU_TASK_DONE_NAK_ERR:*/
+ case SCU_TASK_DONE_LL_LF_TERM:
+ /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
+ case SCU_TASK_DONE_LL_ABORT_ERR:
+ case SCU_TASK_DONE_SEQ_INV_TYPE:
+ /* Also SCU_TASK_DONE_UNEXP_XR: */
+ case SCU_TASK_DONE_XR_IU_LEN_ERR:
+ case SCU_TASK_DONE_INV_FIS_LEN:
+ /* Also SCU_TASK_DONE_XR_WD_LEN: */
+ case SCU_TASK_DONE_SDMA_ERR:
+ case SCU_TASK_DONE_OFFSET_ERR:
+ case SCU_TASK_DONE_MAX_PLD_ERR:
+ case SCU_TASK_DONE_LF_ERR:
+ case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */
+ case SCU_TASK_DONE_SMP_LL_RX_ERR:
+ case SCU_TASK_DONE_UNEXP_DATA:
+ case SCU_TASK_DONE_UNEXP_SDBFIS:
+ case SCU_TASK_DONE_REG_ERR:
+ case SCU_TASK_DONE_SDB_ERR:
+ case SCU_TASK_DONE_TASK_ABORT:
+ default:
+ /* Task in the target is not done. */
+ *response_ptr = SAS_TASK_UNDELIVERED;
+ *status_ptr = SAM_STAT_TASK_ABORTED;
+
+ if (task->task_proto == SAS_PROTOCOL_SMP) {
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+ *complete_to_host_ptr = isci_perform_normal_io_completion;
+ } else {
+ clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+ *complete_to_host_ptr = isci_perform_error_io_completion;
+ }
+ break;
+ }
+}
+
+/**
+ * isci_task_save_for_upper_layer_completion() - This function saves the
+ * request for later completion to the upper layer driver.
+ * @host: This parameter is a pointer to the host on which the request
+ * should be queued (either as an error or success).
+ * @request: This parameter is the completed request.
+ * @response: This parameter is the response code for the completed task.
+ * @status: This parameter is the status code for the completed task.
+ * @task_notification_selection: This parameter specifies which completion
+ *    path (normal, aborted, or error) the request should take.
+ *
+ * none.
+ */
+static void isci_task_save_for_upper_layer_completion(
+ struct isci_host *host,
+ struct isci_request *request,
+ enum service_response response,
+ enum exec_status status,
+ enum isci_completion_selection task_notification_selection)
+{
+ struct sas_task *task = isci_request_access_task(request);
+
+ task_notification_selection
+ = isci_task_set_completion_status(task, response, status,
+ task_notification_selection);
+
+ /* Tasks aborted specifically by a call to the lldd_abort_task
+ * function should not be completed to the host in the regular path.
+ */
+ switch (task_notification_selection) {
+
+ case isci_perform_normal_io_completion:
+
+ /* Normal notification (task_done) */
+ dev_dbg(&host->pdev->dev,
+ "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
+ __func__,
+ task,
+ task->task_status.resp, response,
+ task->task_status.stat, status);
+ /* Add to the completed list. */
+ list_add(&request->completed_node,
+ &host->requests_to_complete);
+
+ /* Take the request off the device's pending request list. */
+ list_del_init(&request->dev_node);
+ break;
+
+ case isci_perform_aborted_io_completion:
+ /* No notification to libsas because this request is
+ * already in the abort path.
+ */
+ dev_dbg(&host->pdev->dev,
+ "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
+ __func__,
+ task,
+ task->task_status.resp, response,
+ task->task_status.stat, status);
+
+ /* Wake up whatever process was waiting for this
+ * request to complete.
+ */
+ WARN_ON(request->io_request_completion == NULL);
+
+ if (request->io_request_completion != NULL) {
+
+ /* Signal whoever is waiting that this
+ * request is complete.
+ */
+ complete(request->io_request_completion);
+ }
+ break;
+
+ case isci_perform_error_io_completion:
+ /* Use sas_task_abort */
+ dev_dbg(&host->pdev->dev,
+ "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
+ __func__,
+ task,
+ task->task_status.resp, response,
+ task->task_status.stat, status);
+ /* Add to the aborted list. */
+ list_add(&request->completed_node,
+ &host->requests_to_errorback);
+ break;
+
+ default:
+ dev_dbg(&host->pdev->dev,
+ "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
+ __func__,
+ task,
+ task->task_status.resp, response,
+ task->task_status.stat, status);
+
+ /* Add to the error to libsas list. */
+ list_add(&request->completed_node,
+ &host->requests_to_errorback);
+ break;
+ }
+}
+
+static void isci_request_process_stp_response(struct sas_task *task,
+ void *response_buffer)
+{
+ struct dev_to_host_fis *d2h_reg_fis = response_buffer;
+ struct task_status_struct *ts = &task->task_status;
+ struct ata_task_resp *resp = (void *)&ts->buf[0];
+
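+	/* The core's response buffer carries the frame length at byte
+	 * offset 6 and the ending D2H register FIS at byte offset 16.
+	 */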
+ resp->frame_len = le16_to_cpu(*(__le16 *)(response_buffer + 6));
+ memcpy(&resp->ending_fis[0], response_buffer + 16, 24);
+ ts->buf_valid_size = sizeof(*resp);
+
+	/* If the device fault bit is set in the status register, then
+	 * report a protocol response so that libsas decodes the returned
+	 * D2H register FIS.
+	 */
+ if (d2h_reg_fis->status & ATA_DF)
+ ts->stat = SAS_PROTO_RESPONSE;
+ else
+ ts->stat = SAM_STAT_GOOD;
+
+ ts->resp = SAS_TASK_COMPLETE;
+}
+
+static void isci_request_io_request_complete(struct isci_host *ihost,
+ struct isci_request *request,
+ enum sci_io_status completion_status)
+{
+ struct sas_task *task = isci_request_access_task(request);
+ struct ssp_response_iu *resp_iu;
+ void *resp_buf;
+ unsigned long task_flags;
+ struct isci_remote_device *idev = isci_lookup_device(task->dev);
+ enum service_response response = SAS_TASK_UNDELIVERED;
+ enum exec_status status = SAS_ABORTED_TASK;
+ enum isci_request_status request_status;
+ enum isci_completion_selection complete_to_host
+ = isci_perform_normal_io_completion;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: request = %p, task = %p,\n"
+ "task->data_dir = %d completion_status = 0x%x\n",
+ __func__,
+ request,
+ task,
+ task->data_dir,
+ completion_status);
+
+ spin_lock(&request->state_lock);
+ request_status = request->status;
+
+ /* Decode the request status. Note that if the request has been
+ * aborted by a task management function, we don't care
+ * what the status is.
+ */
+ switch (request_status) {
+
+ case aborted:
+ /* "aborted" indicates that the request was aborted by a task
+ * management function, since once a task management request is
+	 * performed by the device, the request only completes because
+ * of the subsequent driver terminate.
+ *
+ * Aborted also means an external thread is explicitly managing
+ * this request, so that we do not complete it up the stack.
+ *
+ * The target is still there (since the TMF was successful).
+ */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ response = SAS_TASK_COMPLETE;
+
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
+
+ complete_to_host = isci_perform_aborted_io_completion;
+ /* This was an aborted request. */
+
+ spin_unlock(&request->state_lock);
+ break;
+
+ case aborting:
+ /* aborting means that the task management function tried and
+ * failed to abort the request. We need to note the request
+ * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
+ * target as down.
+ *
+ * Aborting also means an external thread is explicitly managing
+ * this request, so that we do not complete it up the stack.
+ */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ response = SAS_TASK_UNDELIVERED;
+
+ if (!idev)
+			/* The device has been/is being stopped. Note that
+ * we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_PHY_DOWN;
+
+ complete_to_host = isci_perform_aborted_io_completion;
+
+ /* This was an aborted request. */
+
+ spin_unlock(&request->state_lock);
+ break;
+
+ case terminating:
+
+		/* This was a terminated request. This happens when
+ * the I/O is being terminated because of an action on
+ * the device (reset, tear down, etc.), and the I/O needs
+ * to be completed up the stack.
+ */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ response = SAS_TASK_UNDELIVERED;
+
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
+
+ complete_to_host = isci_perform_aborted_io_completion;
+
+ /* This was a terminated request. */
+
+ spin_unlock(&request->state_lock);
+ break;
+
+ case dead:
+		/* This was a terminated request that timed out during the
+ * termination process. There is no task to complete to
+ * libsas.
+ */
+ complete_to_host = isci_perform_normal_io_completion;
+ spin_unlock(&request->state_lock);
+ break;
+
+ default:
+
+ /* The request is done from an SCU HW perspective. */
+ request->status = completed;
+
+ spin_unlock(&request->state_lock);
+
+ /* This is an active request being completed from the core. */
+ switch (completion_status) {
+
+ case SCI_IO_FAILURE_RESPONSE_VALID:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
+ __func__,
+ request,
+ task);
+
+ if (sas_protocol_ata(task->task_proto)) {
+ resp_buf = &request->stp.rsp;
+ isci_request_process_stp_response(task,
+ resp_buf);
+ } else if (SAS_PROTOCOL_SSP == task->task_proto) {
+
+ /* crack the iu response buffer. */
+ resp_iu = &request->ssp.rsp;
+ isci_request_process_response_iu(task, resp_iu,
+ &ihost->pdev->dev);
+
+ } else if (SAS_PROTOCOL_SMP == task->task_proto) {
+
+ dev_err(&ihost->pdev->dev,
+ "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
+ "SAS_PROTOCOL_SMP protocol\n",
+ __func__);
+
+ } else
+ dev_err(&ihost->pdev->dev,
+ "%s: unknown protocol\n", __func__);
+
+ /* use the task status set in the task struct by the
+ * isci_request_process_response_iu call.
+ */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ response = task->task_status.resp;
+ status = task->task_status.stat;
+ break;
+
+ case SCI_IO_SUCCESS:
+ case SCI_IO_SUCCESS_IO_DONE_EARLY:
+
+ response = SAS_TASK_COMPLETE;
+ status = SAM_STAT_GOOD;
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+
+ if (task->task_proto == SAS_PROTOCOL_SMP) {
+ void *rsp = &request->smp.rsp;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SMP protocol completion\n",
+ __func__);
+
+ sg_copy_from_buffer(
+ &task->smp_task.smp_resp, 1,
+ rsp, sizeof(struct smp_resp));
+ } else if (completion_status
+ == SCI_IO_SUCCESS_IO_DONE_EARLY) {
+
+ /* This was an SSP / STP / SATA transfer.
+ * There is a possibility that less data than
+ * the maximum was transferred.
+ */
+ u32 transferred_length = sci_req_tx_bytes(request);
+
+ task->task_status.residual
+ = task->total_xfer_len - transferred_length;
+
+ /* If there were residual bytes, call this an
+ * underrun.
+ */
+ if (task->task_status.residual != 0)
+ status = SAS_DATA_UNDERRUN;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
+ __func__,
+ status);
+
+ } else
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCI_IO_SUCCESS\n",
+ __func__);
+
+ break;
+
+ case SCI_IO_FAILURE_TERMINATED:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
+ __func__,
+ request,
+ task);
+
+ /* The request was terminated explicitly. No handling
+ * is needed in the SCSI error handler path.
+ */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ response = SAS_TASK_UNDELIVERED;
+
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
+
+ complete_to_host = isci_perform_normal_io_completion;
+ break;
+
+ case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
+
+ isci_request_handle_controller_specific_errors(
+ idev, request, task, &response, &status,
+ &complete_to_host);
+
+ break;
+
+ case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
+ /* This is a special case, in that the I/O completion
+ * is telling us that the device needs a reset.
+ * In order for the device reset condition to be
+ * noticed, the I/O has to be handled in the error
+ * handler. Set the reset flag and cause the
+ * SCSI error thread to be scheduled.
+ */
+ spin_lock_irqsave(&task->task_state_lock, task_flags);
+ task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+ spin_unlock_irqrestore(&task->task_state_lock, task_flags);
+
+ /* Fail the I/O. */
+ response = SAS_TASK_UNDELIVERED;
+ status = SAM_STAT_TASK_ABORTED;
+
+ complete_to_host = isci_perform_error_io_completion;
+ clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ break;
+
+ case SCI_FAILURE_RETRY_REQUIRED:
+
+ /* Fail the I/O so it can be retried. */
+ response = SAS_TASK_UNDELIVERED;
+ if (!idev)
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
+
+ complete_to_host = isci_perform_normal_io_completion;
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ break;
+
+ default:
+ /* Catch any otherwise unhandled error codes here. */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: invalid completion code: 0x%x - "
+ "isci_request = %p\n",
+ __func__, completion_status, request);
+
+ response = SAS_TASK_UNDELIVERED;
+
+ /* See if the device has been/is being stopped. Note
+ * that we ignore the quiesce state, since we are
+ * concerned about the actual device state.
+ */
+ if (!idev)
+ status = SAS_DEVICE_UNKNOWN;
+ else
+ status = SAS_ABORTED_TASK;
+
+ if (SAS_PROTOCOL_SMP == task->task_proto) {
+ set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ complete_to_host = isci_perform_normal_io_completion;
+ } else {
+ clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
+ complete_to_host = isci_perform_error_io_completion;
+ }
+ break;
+ }
+ break;
+ }
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SSP:
+ if (task->data_dir == DMA_NONE)
+ break;
+ if (task->num_scatter == 0)
+ /* 0 indicates a single dma address */
+ dma_unmap_single(&ihost->pdev->dev,
+ request->zero_scatter_daddr,
+ task->total_xfer_len, task->data_dir);
+ else /* unmap the sgl dma addresses */
+ dma_unmap_sg(&ihost->pdev->dev, task->scatter,
+ request->num_sg_entries, task->data_dir);
+ break;
+ case SAS_PROTOCOL_SMP: {
+ struct scatterlist *sg = &task->smp_task.smp_req;
+ struct smp_req *smp_req;
+ void *kaddr;
+
+ dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
+
+ /* need to swab it back in case the command buffer is re-used */
+ kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
+ smp_req = kaddr + sg->offset;
+ sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
+ kunmap_atomic(kaddr, KM_IRQ0);
+ break;
+ }
+ default:
+ break;
+ }
+
+ /* Put the completed request on the correct list */
+ isci_task_save_for_upper_layer_completion(ihost, request, response,
+	isci_task_save_for_upper_layer_completion(ihost, request, response,
+						   status, complete_to_host);
+
+ /* complete the io request to the core. */
+ sci_controller_complete_io(ihost, request->target_device, request);
+ isci_put_device(idev);
+
+	/* Set the terminated flag so the request cannot be completed or
+	 * terminated again, and so that any call into abort task
+	 * recognizes the already-completed case.
+	 */
+ set_bit(IREQ_TERMINATED, &request->flags);
+}
+
+static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+ struct domain_device *dev = ireq->target_device->domain_dev;
+ struct sas_task *task;
+
+ /* XXX as hch said always creating an internal sas_task for tmf
+ * requests would simplify the driver
+ */
+ task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;
+
+	/* All unaccelerated request types (anything other than SSP or
+	 * NCQ) are handled with substates.
+	 */
+ if (!task && dev->dev_type == SAS_END_DEV) {
+ sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP);
+ } else if (!task &&
+ (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
+ isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
+ sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED);
+ } else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
+ sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP);
+ } else if (task && sas_protocol_ata(task->task_proto) &&
+ !task->ata_task.use_ncq) {
+ u32 state;
+
+ if (task->data_dir == DMA_NONE)
+ state = SCI_REQ_STP_NON_DATA_WAIT_H2D;
+ else if (task->ata_task.dma_xfer)
+ state = SCI_REQ_STP_UDMA_WAIT_TC_COMP;
+ else /* PIO */
+ state = SCI_REQ_STP_PIO_WAIT_H2D;
+
+ sci_change_state(sm, state);
+ }
+}
+
+static void sci_request_completed_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+ struct isci_host *ihost = ireq->owning_controller;
+
+ /* Tell the SCI_USER that the IO request is complete */
+ if (!test_bit(IREQ_TMF, &ireq->flags))
+ isci_request_io_request_complete(ihost, ireq,
+ ireq->sci_status);
+ else
+ isci_task_request_complete(ihost, ireq, ireq->sci_status);
+}
+
+static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+ /* Setting the abort bit in the Task Context is required by the silicon. */
+ ireq->tc->abort = 1;
+}
+
+static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+ ireq->target_device->working_request = ireq;
+}
+
+static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+ ireq->target_device->working_request = ireq;
+}
+
+static void sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+
+ ireq->target_device->working_request = ireq;
+}
+
+static void sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(struct sci_base_state_machine *sm)
+{
+ struct isci_request *ireq = container_of(sm, typeof(*ireq), sm);
+ struct scu_task_context *tc = ireq->tc;
+ struct host_to_dev_fis *h2d_fis;
+ enum sci_status status;
+
+ /* Clear the SRST bit */
+ h2d_fis = &ireq->stp.cmd;
+ h2d_fis->control = 0;
+
+ /* Clear the TC control bit */
+ tc->control_frame = 0;
+
+ status = sci_controller_continue_io(ireq);
+ WARN_ONCE(status != SCI_SUCCESS, "isci: continue io failure\n");
+}
+
+static const struct sci_base_state sci_request_state_table[] = {
+ [SCI_REQ_INIT] = { },
+ [SCI_REQ_CONSTRUCTED] = { },
+ [SCI_REQ_STARTED] = {
+ .enter_state = sci_request_started_state_enter,
+ },
+ [SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
+ .enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
+ },
+ [SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
+ [SCI_REQ_STP_PIO_WAIT_H2D] = {
+ .enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
+ },
+ [SCI_REQ_STP_PIO_WAIT_FRAME] = { },
+ [SCI_REQ_STP_PIO_DATA_IN] = { },
+ [SCI_REQ_STP_PIO_DATA_OUT] = { },
+ [SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
+ [SCI_REQ_STP_UDMA_WAIT_D2H] = { },
+ [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED] = {
+ .enter_state = sci_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
+ },
+ [SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG] = {
+ .enter_state = sci_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
+ },
+ [SCI_REQ_STP_SOFT_RESET_WAIT_D2H] = { },
+ [SCI_REQ_TASK_WAIT_TC_COMP] = { },
+ [SCI_REQ_TASK_WAIT_TC_RESP] = { },
+ [SCI_REQ_SMP_WAIT_RESP] = { },
+ [SCI_REQ_SMP_WAIT_TC_COMP] = { },
+ [SCI_REQ_COMPLETED] = {
+ .enter_state = sci_request_completed_state_enter,
+ },
+ [SCI_REQ_ABORTING] = {
+ .enter_state = sci_request_aborting_state_enter,
+ },
+ [SCI_REQ_FINAL] = { },
+};
+
+static void
+sci_general_request_construct(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);
+
+ ireq->target_device = idev;
+ ireq->protocol = SCIC_NO_PROTOCOL;
+ ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
+
+ ireq->sci_status = SCI_SUCCESS;
+ ireq->scu_status = 0;
+ ireq->post_context = 0xFFFFFFFF;
+}
+
+static enum sci_status
+sci_io_request_construct(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *ireq)
+{
+ struct domain_device *dev = idev->domain_dev;
+ enum sci_status status = SCI_SUCCESS;
+
+ /* Build the common part of the request */
+ sci_general_request_construct(ihost, idev, ireq);
+
+ if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
+ return SCI_FAILURE_INVALID_REMOTE_DEVICE;
+
+ if (dev->dev_type == SAS_END_DEV)
+ /* pass */;
+ else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
+ memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
+ else if (dev_is_expander(dev))
+ /* pass */;
+ else
+ return SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
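+	/* Zero the task context up to, but not including, the SGL pair
+	 * region; the SGLs are filled in later during protocol-specific
+	 * construction.
+	 */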
+ memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));
+
+ return status;
+}
+
+enum sci_status sci_task_request_construct(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 io_tag, struct isci_request *ireq)
+{
+ struct domain_device *dev = idev->domain_dev;
+ enum sci_status status = SCI_SUCCESS;
+
+ /* Build the common part of the request */
+ sci_general_request_construct(ihost, idev, ireq);
+
+ if (dev->dev_type == SAS_END_DEV ||
+ dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+ set_bit(IREQ_TMF, &ireq->flags);
+ memset(ireq->tc, 0, sizeof(struct scu_task_context));
+ } else
+ status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+
+ return status;
+}
+
+static enum sci_status isci_request_ssp_request_construct(
+ struct isci_request *request)
+{
+ enum sci_status status;
+
+ dev_dbg(&request->isci_host->pdev->dev,
+ "%s: request = %p\n",
+ __func__,
+ request);
+ status = sci_io_request_construct_basic_ssp(request);
+ return status;
+}
+
+static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
+{
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct host_to_dev_fis *fis = &ireq->stp.cmd;
+ struct ata_queued_cmd *qc = task->uldd_task;
+ enum sci_status status;
+
+ dev_dbg(&ireq->isci_host->pdev->dev,
+ "%s: ireq = %p\n",
+ __func__,
+ ireq);
+
+ memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
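+	/* In the H2D register FIS the flags byte carries the C bit
+	 * (bit 7, command register update) and the PM port in the low
+	 * nibble, which is cleared here.
+	 */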
+ if (!task->ata_task.device_control_reg_update)
+ fis->flags |= 0x80;
+ fis->flags &= 0xF0;
+
+ status = sci_io_request_construct_basic_sata(ireq);
+
+ if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+ qc->tf.command == ATA_CMD_FPDMA_READ)) {
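+		/* For NCQ (FPDMA) commands the tag is carried in bits 7:3
+		 * of the sector count field, hence the shift by 3.
+		 */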
+ fis->sector_count = qc->tag << 3;
+ ireq->tc->type.stp.ncq_tag = qc->tag;
+ }
+
+ return status;
+}
+
+static enum sci_status
+sci_io_request_construct_smp(struct device *dev,
+ struct isci_request *ireq,
+ struct sas_task *task)
+{
+ struct scatterlist *sg = &task->smp_task.smp_req;
+ struct isci_remote_device *idev;
+ struct scu_task_context *task_context;
+ struct isci_port *iport;
+ struct smp_req *smp_req;
+ void *kaddr;
+ u8 req_len;
+ u32 cmd;
+
+ kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
+ smp_req = kaddr + sg->offset;
+ /*
+	 * Look at the SMP request's header fields; for certain SAS 1.x SMP
+ * functions under SAS 2.0, a zero request length really indicates
+ * a non-zero default length.
+ */
+ if (smp_req->req_len == 0) {
+ switch (smp_req->func) {
+ case SMP_DISCOVER:
+ case SMP_REPORT_PHY_ERR_LOG:
+ case SMP_REPORT_PHY_SATA:
+ case SMP_REPORT_ROUTE_INFO:
+ smp_req->req_len = 2;
+ break;
+ case SMP_CONF_ROUTE_INFO:
+ case SMP_PHY_CONTROL:
+ case SMP_PHY_TEST_FUNCTION:
+ smp_req->req_len = 9;
+ break;
+ /* Default - zero is a valid default for 2.0. */
+ }
+ }
+ req_len = smp_req->req_len;
+ sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
+ cmd = *(u32 *) smp_req;
+ kunmap_atomic(kaddr, KM_IRQ0);
+
+ if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
+ return SCI_FAILURE;
+
+ ireq->protocol = SCIC_SMP_PROTOCOL;
+
+ task_context = ireq->tc;
+
+ idev = ireq->target_device;
+ iport = idev->owning_port;
+
+ /*
+	 * Fill in the TC with its required data
+ * 00h
+ */
+ task_context->priority = 0;
+ task_context->initiator_request = 1;
+ task_context->connection_rate = idev->connection_rate;
+ task_context->protocol_engine_index = ISCI_PEG;
+ task_context->logical_port_index = iport->physical_port_index;
+ task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
+ task_context->abort = 0;
+ task_context->valid = SCU_TASK_CONTEXT_VALID;
+ task_context->context_type = SCU_TASK_CONTEXT_TYPE;
+
+ /* 04h */
+ task_context->remote_node_index = idev->rnc.remote_node_index;
+ task_context->command_code = 0;
+ task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;
+
+ /* 08h */
+ task_context->link_layer_control = 0;
+ task_context->do_not_dma_ssp_good_response = 1;
+ task_context->strict_ordering = 0;
+ task_context->control_frame = 1;
+ task_context->timeout_enable = 0;
+ task_context->block_guard_enable = 0;
+
+ /* 0ch */
+ task_context->address_modifier = 0;
+
+ /* 10h */
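+	/* For SMP the command IU length is the request length in dwords. */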
+ task_context->ssp_command_iu_length = req_len;
+
+ /* 14h */
+ task_context->transfer_length_bytes = 0;
+
+ /*
+ * 18h ~ 30h, protocol specific
+	 * since the command IU has been built by the framework at this point,
+	 * we just copy the first dword of the command IU to this location. */
+ memcpy(&task_context->type.smp, &cmd, sizeof(u32));
+
+ /*
+ * 40h
+ * "For SMP you could program it to zero. We would prefer that way
+ * so that done code will be consistent." - Venki
+ */
+ task_context->task_phase = 0;
+
+ ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
+ (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ (iport->physical_port_index <<
+ SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ ISCI_TAG_TCI(ireq->io_tag));
+	/*
+	 * Copy the physical address of the command buffer to the SCU Task
+	 * Context. The first dword was copied into the TC above, so the DMA
+	 * address is advanced past it: the buffer given to the hardware
+	 * should not contain the command header.
+	 */
+ task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
+ task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));
+
+ /* SMP response comes as UF, so no need to set response IU address. */
+ task_context->response_iu_upper = 0;
+ task_context->response_iu_lower = 0;
+
+ sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
+
+ return SCI_SUCCESS;
+}
+
+/*
+ * isci_smp_request_build() - This function builds the smp request.
+ * @ireq: This parameter points to the isci_request allocated in the
+ * request construct function.
+ *
+ * SCI_SUCCESS on successful completion, or specific failure code.
+ */
+static enum sci_status isci_smp_request_build(struct isci_request *ireq)
+{
+ struct sas_task *task = isci_request_access_task(ireq);
+ struct device *dev = &ireq->isci_host->pdev->dev;
+ enum sci_status status = SCI_FAILURE;
+
+ status = sci_io_request_construct_smp(dev, ireq, task);
+ if (status != SCI_SUCCESS)
+ dev_dbg(&ireq->isci_host->pdev->dev,
+ "%s: failed with status = %d\n",
+ __func__,
+ status);
+
+ return status;
+}
+
+/**
+ * isci_io_request_build() - This function builds the io request object.
+ * @ihost: This parameter specifies the ISCI host object
+ * @request: This parameter points to the isci_request object allocated in the
+ * request construct function.
+ * @idev: This parameter is the handle for the sci core's remote device
+ * object that is the destination for this request.
+ *
+ * SCI_SUCCESS on successful completion, or specific failure code.
+ */
+static enum sci_status isci_io_request_build(struct isci_host *ihost,
+ struct isci_request *request,
+ struct isci_remote_device *idev)
+{
+ enum sci_status status = SCI_SUCCESS;
+ struct sas_task *task = isci_request_access_task(request);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev = 0x%p; request = %p, "
+ "num_scatter = %d\n",
+ __func__,
+ idev,
+ request,
+ task->num_scatter);
+
+ /* map the sgl addresses, if present.
+ * libata does the mapping for sata devices
+ * before we get the request.
+ */
+ if (task->num_scatter &&
+ !sas_protocol_ata(task->task_proto) &&
+ !(SAS_PROTOCOL_SMP & task->task_proto)) {
+
+ request->num_sg_entries = dma_map_sg(
+ &ihost->pdev->dev,
+ task->scatter,
+ task->num_scatter,
+ task->data_dir
+ );
+
+ if (request->num_sg_entries == 0)
+ return SCI_FAILURE_INSUFFICIENT_RESOURCES;
+ }
+
+ status = sci_io_request_construct(ihost, idev, request);
+
+ if (status != SCI_SUCCESS) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: failed request construct\n",
+ __func__);
+ return SCI_FAILURE;
+ }
+
+ switch (task->task_proto) {
+ case SAS_PROTOCOL_SMP:
+ status = isci_smp_request_build(request);
+ break;
+ case SAS_PROTOCOL_SSP:
+ status = isci_request_ssp_request_construct(request);
+ break;
+ case SAS_PROTOCOL_SATA:
+ case SAS_PROTOCOL_STP:
+ case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+ status = isci_request_stp_request_construct(request);
+ break;
+ default:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: unknown protocol\n", __func__);
+ return SCI_FAILURE;
+ }
+
+ return SCI_SUCCESS;
+}
+
+static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
+{
+ struct isci_request *ireq;
+
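+	/* Requests are preallocated; the tag's task context index (TCI)
+	 * selects the entry in the host's request array.
+	 */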
+ ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
+ ireq->io_tag = tag;
+ ireq->io_request_completion = NULL;
+ ireq->flags = 0;
+ ireq->num_sg_entries = 0;
+ INIT_LIST_HEAD(&ireq->completed_node);
+ INIT_LIST_HEAD(&ireq->dev_node);
+ isci_request_change_state(ireq, allocated);
+
+ return ireq;
+}
+
+static struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
+ struct sas_task *task,
+ u16 tag)
+{
+ struct isci_request *ireq;
+
+ ireq = isci_request_from_tag(ihost, tag);
+ ireq->ttype_ptr.io_task_ptr = task;
+ ireq->ttype = io_task;
+ task->lldd_task = ireq;
+
+ return ireq;
+}
+
+struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
+ struct isci_tmf *isci_tmf,
+ u16 tag)
+{
+ struct isci_request *ireq;
+
+ ireq = isci_request_from_tag(ihost, tag);
+ ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
+ ireq->ttype = tmf_task;
+
+ return ireq;
+}
+
+int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
+ struct sas_task *task, u16 tag)
+{
+ enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
+ struct isci_request *ireq;
+ unsigned long flags;
+ int ret = 0;
+
+ /* do common allocation and init of request object. */
+ ireq = isci_io_request_from_tag(ihost, task, tag);
+
+ status = isci_io_request_build(ihost, ireq, idev);
+ if (status != SCI_SUCCESS) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: request_construct failed - status = 0x%x\n",
+ __func__,
+ status);
+ return status;
+ }
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {
+
+ if (isci_task_is_ncq_recovery(task)) {
+
+ /* The device is in an NCQ recovery state. Issue the
+ * request on the task side. Note that it will
+ * complete on the I/O request side because the
+			 * request was built that way (i.e.,
+ * ireq->is_task_management_request is false).
+ */
+ status = sci_controller_start_task(ihost,
+ idev,
+ ireq);
+ } else {
+ status = SCI_FAILURE;
+ }
+ } else {
+		/* Send the request to the core; the IO tag was already assigned. */
+ status = sci_controller_start_io(ihost, idev,
+ ireq);
+ }
+
+ if (status != SCI_SUCCESS &&
+ status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: failed request start (0x%x)\n",
+ __func__, status);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ return status;
+ }
+
+ /* Either I/O started OK, or the core has signaled that
+ * the device needs a target reset.
+ *
+ * In either case, hold onto the I/O for later.
+ *
+	 * Update its status and add it to the list in the
+ * remote device object.
+ */
+ list_add(&ireq->dev_node, &idev->reqs_in_process);
+
+ if (status == SCI_SUCCESS) {
+ isci_request_change_state(ireq, started);
+ } else {
+ /* The request did not really start in the
+ * hardware, so clear the request handle
+ * here so no terminations will be done.
+ */
+ set_bit(IREQ_TERMINATED, &ireq->flags);
+ isci_request_change_state(ireq, completed);
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (status ==
+ SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
+ /* Signal libsas that we need the SCSI error
+ * handler thread to work on this I/O and that
+ * we want a device reset.
+ */
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ /* Cause this task to be scheduled in the SCSI error
+ * handler thread.
+ */
+ isci_execpath_callback(ihost, task,
+ sas_task_abort);
+
+ /* Change the status, since we are holding
+ * the I/O until it is managed by the SCSI
+ * error handler.
+ */
+ status = SCI_SUCCESS;
+ }
+
+ return ret;
+}
diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h
new file mode 100644
index 00000000000..7a1d5a9778e
--- /dev/null
+++ b/drivers/scsi/isci/request.h
@@ -0,0 +1,448 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _ISCI_REQUEST_H_
+#define _ISCI_REQUEST_H_
+
+#include "isci.h"
+#include "host.h"
+#include "scu_task_context.h"
+
+/**
+ * enum isci_request_status - This enum defines the possible states of an I/O
+ * request.
+ *
+ *
+ */
+enum isci_request_status {
+ unallocated = 0x00,
+ allocated = 0x01,
+ started = 0x02,
+ completed = 0x03,
+ aborting = 0x04,
+ aborted = 0x05,
+ terminating = 0x06,
+ dead = 0x07
+};
+
+enum task_type {
+ io_task = 0,
+ tmf_task = 1
+};
+
+enum sci_request_protocol {
+ SCIC_NO_PROTOCOL,
+ SCIC_SMP_PROTOCOL,
+ SCIC_SSP_PROTOCOL,
+ SCIC_STP_PROTOCOL
+}; /* XXX remove me, use sas_task.{dev|task_proto} instead */
+
+/**
+ * struct isci_stp_request - extra request infrastructure to handle pio/atapi
+ * protocol
+ * @pio_len: number of bytes requested at PIO setup
+ * @status: pio setup ending status value to tell us if we need
+ * to wait for another fis or if the transfer is complete. Upon
+ * receipt of a d2h fis this will be the status field of that fis.
+ * @sgl: track pio transfer progress as we iterate through the sgl
+ * @device_cdb_len: atapi device advertises its transfer constraints at setup
+ */
+struct isci_stp_request {
+ u32 pio_len;
+ u8 status;
+
+ struct isci_stp_pio_sgl {
+ int index;
+ u8 set;
+ u32 offset;
+ } sgl;
+ u32 device_cdb_len;
+};
+
+struct isci_request {
+ enum isci_request_status status;
+ #define IREQ_COMPLETE_IN_TARGET 0
+ #define IREQ_TERMINATED 1
+ #define IREQ_TMF 2
+ #define IREQ_ACTIVE 3
+ unsigned long flags;
+ /* XXX kill ttype and ttype_ptr, allocate full sas_task */
+ enum task_type ttype;
+ union ttype_ptr_union {
+ struct sas_task *io_task_ptr; /* When ttype==io_task */
+ struct isci_tmf *tmf_task_ptr; /* When ttype==tmf_task */
+ } ttype_ptr;
+ struct isci_host *isci_host;
+ /* For use in the requests_to_{complete|abort} lists: */
+ struct list_head completed_node;
+ /* For use in the reqs_in_process list: */
+ struct list_head dev_node;
+ spinlock_t state_lock;
+ dma_addr_t request_daddr;
+ dma_addr_t zero_scatter_daddr;
+ unsigned int num_sg_entries;
+ /* Note: "io_request_completion" is completed in two different ways
+ * depending on whether this is a TMF or regular request.
+ * - TMF requests are completed in the thread that started them;
+ * - regular requests are completed in the request completion callback
+ * function.
+ * This difference in operation allows the aborter of a TMF request
+ * to be sure that once the TMF request completes, the I/O that the
+ * TMF was aborting is guaranteed to have completed.
+ *
+ * XXX kill io_request_completion
+ */
+ struct completion *io_request_completion;
+ struct sci_base_state_machine sm;
+ struct isci_host *owning_controller;
+ struct isci_remote_device *target_device;
+ u16 io_tag;
+ enum sci_request_protocol protocol;
+ u32 scu_status; /* hardware result */
+ u32 sci_status; /* upper layer disposition */
+ u32 post_context;
+ struct scu_task_context *tc;
+ /* could be larger with sg chaining */
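+	/* Each scu_sgl_element_pair holds two SGL elements, hence the
+	 * rounded-up halving of the element count.
+	 */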
+ #define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2)
+ struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));
+	/* This field is the index of the stored rx frame data. It is used in
+	 * STP internal requests and SMP response frames. If this field is
+	 * not SCU_INVALID_FRAME_INDEX, the saved frame must be released on
+	 * IO request completion.
+ */
+ u32 saved_rx_frame_index;
+
+ union {
+ struct {
+ union {
+ struct ssp_cmd_iu cmd;
+ struct ssp_task_iu tmf;
+ };
+ union {
+ struct ssp_response_iu rsp;
+ u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
+ };
+ } ssp;
+ struct {
+ struct smp_resp rsp;
+ } smp;
+ struct {
+ struct isci_stp_request req;
+ struct host_to_dev_fis cmd;
+ struct dev_to_host_fis rsp;
+ } stp;
+ };
+};
+
+static inline struct isci_request *to_ireq(struct isci_stp_request *stp_req)
+{
+ struct isci_request *ireq;
+
+ ireq = container_of(stp_req, typeof(*ireq), stp.req);
+ return ireq;
+}
+
+/**
+ * enum sci_base_request_states - This enumeration depicts all the states for
+ * the common request state machine.
+ *
+ *
+ */
+enum sci_base_request_states {
+ /*
+ * Simply the initial state for the base request state machine.
+ */
+ SCI_REQ_INIT,
+
+ /*
+ * This state indicates that the request has been constructed.
+ * This state is entered from the INITIAL state.
+ */
+ SCI_REQ_CONSTRUCTED,
+
+ /*
+ * This state indicates that the request has been started. This state
+ * is entered from the CONSTRUCTED state.
+ */
+ SCI_REQ_STARTED,
+
+ SCI_REQ_STP_UDMA_WAIT_TC_COMP,
+ SCI_REQ_STP_UDMA_WAIT_D2H,
+
+ SCI_REQ_STP_NON_DATA_WAIT_H2D,
+ SCI_REQ_STP_NON_DATA_WAIT_D2H,
+
+ SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED,
+ SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG,
+ SCI_REQ_STP_SOFT_RESET_WAIT_D2H,
+
+ /*
+ * While in this state the IO request object is waiting for the TC
+ * completion notification for the H2D Register FIS
+ */
+ SCI_REQ_STP_PIO_WAIT_H2D,
+
+ /*
+ * While in this state the IO request object is waiting for either a
+ * PIO Setup FIS or a D2H register FIS. The type of frame received is
+ * based on the result of the prior frame and line conditions.
+ */
+ SCI_REQ_STP_PIO_WAIT_FRAME,
+
+ /*
+ * While in this state the IO request object is waiting for a DATA
+ * frame from the device.
+ */
+ SCI_REQ_STP_PIO_DATA_IN,
+
+ /*
+ * While in this state the IO request object is waiting to transmit
+ * the next data frame to the device.
+ */
+ SCI_REQ_STP_PIO_DATA_OUT,
+
+ /*
+ * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
+ * task management request is waiting for the transmission of the
+ * initial frame (i.e. command, task, etc.).
+ */
+ SCI_REQ_TASK_WAIT_TC_COMP,
+
+ /*
+ * This sub-state indicates that the started task management request
+ * is waiting for the reception of an unsolicited frame
+ * (i.e. response IU).
+ */
+ SCI_REQ_TASK_WAIT_TC_RESP,
+
+ /*
+	 * This sub-state indicates that the started SMP request
+ * is waiting for the reception of an unsolicited frame
+ * (i.e. response IU).
+ */
+ SCI_REQ_SMP_WAIT_RESP,
+
+ /*
+ * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
+ * request is waiting for the transmission of the initial frame
+ * (i.e. command, task, etc.).
+ */
+ SCI_REQ_SMP_WAIT_TC_COMP,
+
+ /*
+ * This state indicates that the request has completed.
+ * This state is entered from the STARTED state. This state is entered
+ * from the ABORTING state.
+ */
+ SCI_REQ_COMPLETED,
+
+ /*
+ * This state indicates that the request is in the process of being
+ * terminated/aborted.
+ * This state is entered from the CONSTRUCTED state.
+ * This state is entered from the STARTED state.
+ */
+ SCI_REQ_ABORTING,
+
+ /*
+ * Simply the final state for the base request state machine.
+ */
+ SCI_REQ_FINAL,
+};
+
+enum sci_status sci_request_start(struct isci_request *ireq);
+enum sci_status sci_io_request_terminate(struct isci_request *ireq);
+enum sci_status
+sci_io_request_event_handler(struct isci_request *ireq,
+ u32 event_code);
+enum sci_status
+sci_io_request_frame_handler(struct isci_request *ireq,
+ u32 frame_index);
+enum sci_status
+sci_task_request_terminate(struct isci_request *ireq);
+extern enum sci_status
+sci_request_complete(struct isci_request *ireq);
+extern enum sci_status
+sci_io_request_tc_completion(struct isci_request *ireq, u32 code);
+
+/* XXX open code in caller */
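+/* The isci_request and its embedded IU buffers come from one contiguous
+ * DMA-coherent allocation, so a CPU-address offset from the request base
+ * maps directly onto an offset from request_daddr.
+ */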
+static inline dma_addr_t
+sci_io_request_get_dma_addr(struct isci_request *ireq, void *virt_addr)
+{
+ char *requested_addr = (char *)virt_addr;
+ char *base_addr = (char *)ireq;
+
+ BUG_ON(requested_addr < base_addr);
+ BUG_ON((requested_addr - base_addr) >= sizeof(*ireq));
+
+ return ireq->request_daddr + (requested_addr - base_addr);
+}
+
+/**
+ * isci_request_change_state() - This function sets the status of the request
+ * object.
+ * @request: This parameter points to the isci_request object
+ * @status: This parameter is the new status of the object
+ *
+ * Returns the state previous to the change.
+ */
+static inline enum isci_request_status
+isci_request_change_state(struct isci_request *isci_request,
+ enum isci_request_status status)
+{
+ enum isci_request_status old_state;
+ unsigned long flags;
+
+ dev_dbg(&isci_request->isci_host->pdev->dev,
+ "%s: isci_request = %p, state = 0x%x\n",
+ __func__,
+ isci_request,
+ status);
+
+ BUG_ON(isci_request == NULL);
+
+ spin_lock_irqsave(&isci_request->state_lock, flags);
+ old_state = isci_request->status;
+ isci_request->status = status;
+ spin_unlock_irqrestore(&isci_request->state_lock, flags);
+
+ return old_state;
+}
+
+/**
+ * isci_request_change_started_to_newstate() - This function sets the status of
+ * the request object.
+ * @request: This parameter points to the isci_request object
+ * @completion_ptr: This parameter is saved as the kernel completion structure
+ * signalled when the request completes.
+ * @newstate: This parameter is the new status of the object
+ *
+ * Returns the state previous to any change.
+ */
+static inline enum isci_request_status
+isci_request_change_started_to_newstate(struct isci_request *isci_request,
+ struct completion *completion_ptr,
+ enum isci_request_status newstate)
+{
+ enum isci_request_status old_state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&isci_request->state_lock, flags);
+
+ old_state = isci_request->status;
+
+ if (old_state == started || old_state == aborting) {
+ BUG_ON(isci_request->io_request_completion != NULL);
+
+ isci_request->io_request_completion = completion_ptr;
+ isci_request->status = newstate;
+ }
+
+ spin_unlock_irqrestore(&isci_request->state_lock, flags);
+
+ dev_dbg(&isci_request->isci_host->pdev->dev,
+ "%s: isci_request = %p, old_state = 0x%x\n",
+ __func__,
+ isci_request,
+ old_state);
+
+ return old_state;
+}
+
+/**
+ * isci_request_change_started_to_aborted() - This function sets the status of
+ * the request object.
+ * @request: This parameter points to the isci_request object
+ * @completion_ptr: This parameter is saved as the kernel completion structure
+ * signalled when the old request completes.
+ *
+ * Returns the state previous to any change.
+ */
+static inline enum isci_request_status
+isci_request_change_started_to_aborted(struct isci_request *isci_request,
+ struct completion *completion_ptr)
+{
+ return isci_request_change_started_to_newstate(isci_request,
+ completion_ptr,
+ aborted);
+}
+
+#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)
+
+#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)
+
+struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
+ struct isci_tmf *isci_tmf,
+ u16 tag);
+int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
+ struct sas_task *task, u16 tag);
+void isci_terminate_pending_requests(struct isci_host *ihost,
+ struct isci_remote_device *idev);
+enum sci_status
+sci_task_request_construct(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 io_tag,
+ struct isci_request *ireq);
+enum sci_status
+sci_task_request_construct_ssp(struct isci_request *ireq);
+enum sci_status
+sci_task_request_construct_sata(struct isci_request *ireq);
+void sci_smp_request_copy_response(struct isci_request *ireq);
+
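+/* NCQ error recovery is signalled by a READ LOG EXT of the NCQ error
+ * log page (ATA_LOG_SATA_NCQ, log address 10h).
+ */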
+static inline int isci_task_is_ncq_recovery(struct sas_task *task)
+{
+ return (sas_protocol_ata(task->task_proto) &&
+ task->ata_task.fis.command == ATA_CMD_READ_LOG_EXT &&
+ task->ata_task.fis.lbal == ATA_LOG_SATA_NCQ);
+}
+
+#endif /* !defined(_ISCI_REQUEST_H_) */
diff --git a/drivers/scsi/isci/sas.h b/drivers/scsi/isci/sas.h
new file mode 100644
index 00000000000..462b15174d3
--- /dev/null
+++ b/drivers/scsi/isci/sas.h
@@ -0,0 +1,219 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCI_SAS_H_
+#define _SCI_SAS_H_
+
+#include <linux/kernel.h>
+
+/*
+ * SATA FIS Types: These constants depict the various SATA FIS types defined
+ * in the Serial ATA specification.
+ * XXX: This needs to go into <scsi/sas.h>
+ */
+#define FIS_REGH2D 0x27
+#define FIS_REGD2H 0x34
+#define FIS_SETDEVBITS 0xA1
+#define FIS_DMA_ACTIVATE 0x39
+#define FIS_DMA_SETUP 0x41
+#define FIS_BIST_ACTIVATE 0x58
+#define FIS_PIO_SETUP 0x5F
+#define FIS_DATA 0x46
+
+/**************************************************************************/
+#define SSP_RESP_IU_MAX_SIZE 280
+
+/*
+ * contents of the SSP COMMAND INFORMATION UNIT.
+ * For specific information on each of these individual fields please
+ * reference the SAS specification SSP transport layer section.
+ * XXX: This needs to go into <scsi/sas.h>
+ */
+struct ssp_cmd_iu {
+ u8 LUN[8];
+ u8 add_cdb_len:6;
+ u8 _r_a:2;
+ u8 _r_b;
+ u8 en_fburst:1;
+ u8 task_prio:4;
+ u8 task_attr:3;
+ u8 _r_c;
+
+ u8 cdb[16];
+} __packed;
+
+/*
+ * contents of the SSP TASK INFORMATION UNIT.
+ * For specific information on each of these individual fields please
+ * reference the SAS specification SSP transport layer section.
+ * XXX: This needs to go into <scsi/sas.h>
+ */
+struct ssp_task_iu {
+ u8 LUN[8];
+ u8 _r_a;
+ u8 task_func;
+ u8 _r_b[4];
+ u16 task_tag;
+ u8 _r_c[12];
+} __packed;
+
+
+/*
+ * struct smp_req_phy_id - This structure defines the contents of
+ * an SMP request that is composed of the SMP request header and a
+ * phy identifier.
+ * Examples: SMP_REQUEST_DISCOVER, SMP_REQUEST_REPORT_PHY_SATA.
+ *
+ * For specific information on each of these individual fields please reference
+ * the SAS specification.
+ */
+struct smp_req_phy_id {
+ u8 _r_a[4]; /* bytes 4-7 */
+
+ u8 ign_zone_grp:1; /* byte 8 */
+ u8 _r_b:7;
+
+ u8 phy_id; /* byte 9 */
+ u8 _r_c; /* byte 10 */
+ u8 _r_d; /* byte 11 */
+} __packed;
+
+/*
+ * struct smp_req_config_route_info - This structure defines the
+ * contents of an SMP Configure Route Information request.
+ *
+ * For specific information on each of these individual fields please reference
+ * the SAS specification.
+ */
+struct smp_req_conf_rtinfo {
+ u16 exp_change_cnt; /* bytes 4-5 */
+ u8 exp_rt_idx_hi; /* byte 6 */
+ u8 exp_rt_idx; /* byte 7 */
+
+ u8 _r_a; /* byte 8 */
+ u8 phy_id; /* byte 9 */
+ u16 _r_b; /* bytes 10-11 */
+
+ u8 _r_c:7; /* byte 12 */
+ u8 dis_rt_entry:1;
+ u8 _r_d[3]; /* bytes 13-15 */
+
+ u8 rt_sas_addr[8]; /* bytes 16-23 */
+ u8 _r_e[16]; /* bytes 24-39 */
+} __packed;
+
+/*
+ * struct smp_req_phycntl - This structure defines the contents of an
+ * SMP Phy Controller request.
+ *
+ * For specific information on each of these individual fields please reference
+ * the SAS specification.
+ */
+struct smp_req_phycntl {
+ u16 exp_change_cnt; /* byte 4-5 */
+
+ u8 _r_a[3]; /* bytes 6-8 */
+
+ u8 phy_id; /* byte 9 */
+ u8 phy_op; /* byte 10 */
+
+ u8 upd_pathway:1; /* byte 11 */
+ u8 _r_b:7;
+
+ u8 _r_c[12]; /* byte 12-23 */
+
+ u8 att_dev_name[8]; /* byte 24-31 */
+
+ u8 _r_d:4; /* byte 32 */
+ u8 min_linkrate:4;
+
+ u8 _r_e:4; /* byte 33 */
+ u8 max_linkrate:4;
+
+ u8 _r_f[2]; /* byte 34-35 */
+
+ u8 pathway:4; /* byte 36 */
+ u8 _r_g:4;
+
+ u8 _r_h[3]; /* bytes 37-39 */
+} __packed;
+
+/*
+ * struct smp_req - This structure defines the common SMP request header;
+ * the function-specific request body follows in req_data.
+ *
+ * XXX: This data structure may need to go to scsi/sas.h
+ */
+struct smp_req {
+ u8 type; /* byte 0 */
+ u8 func; /* byte 1 */
+ u8 alloc_resp_len; /* byte 2 */
+ u8 req_len; /* byte 3 */
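+	/* Zero-length array: the function-specific request body follows. */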
+ u8 req_data[0];
+} __packed;
+
+#define SMP_RESP_HDR_SZ 4
+
+/*
+ * struct sci_sas_address - This structure depicts how a SAS address is
+ * represented by SCI.
+ * XXX convert this to u8 [SAS_ADDR_SIZE] like the rest of libsas
+ *
+ */
+struct sci_sas_address {
+ u32 high;
+ u32 low;
+};
+#endif
diff --git a/drivers/scsi/isci/scu_completion_codes.h b/drivers/scsi/isci/scu_completion_codes.h
new file mode 100644
index 00000000000..c8b329c695f
--- /dev/null
+++ b/drivers/scsi/isci/scu_completion_codes.h
@@ -0,0 +1,283 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCU_COMPLETION_CODES_HEADER_
+#define _SCU_COMPLETION_CODES_HEADER_
+
+/**
+ * This file contains the constants and macros for the SCU hardware completion
+ * codes.
+ *
+ *
+ */
+
+#define SCU_COMPLETION_TYPE_SHIFT 28
+#define SCU_COMPLETION_TYPE_MASK 0x70000000
+
+/**
+ * SCU_COMPLETION_TYPE() -
+ *
+ * This macro constructs an SCU completion type
+ */
+#define SCU_COMPLETION_TYPE(type) \
+ ((u32)(type) << SCU_COMPLETION_TYPE_SHIFT)
+
+/**
+ * SCU_COMPLETION_TYPE() -
+ *
+ * These macros define the SCU completion types, built with
+ * SCU_COMPLETION_TYPE().
+ */
+#define SCU_COMPLETION_TYPE_TASK SCU_COMPLETION_TYPE(0)
+#define SCU_COMPLETION_TYPE_SDMA SCU_COMPLETION_TYPE(1)
+#define SCU_COMPLETION_TYPE_UFI SCU_COMPLETION_TYPE(2)
+#define SCU_COMPLETION_TYPE_EVENT SCU_COMPLETION_TYPE(3)
+#define SCU_COMPLETION_TYPE_NOTIFY SCU_COMPLETION_TYPE(4)
+
+/**
+ *
+ *
+ * These constants provide the shift and mask values for the various parts of
+ * an SCU completion code.
+ */
+#define SCU_COMPLETION_STATUS_MASK 0x0FFC0000
+#define SCU_COMPLETION_TL_STATUS_MASK 0x0FC00000
+#define SCU_COMPLETION_TL_STATUS_SHIFT 22
+#define SCU_COMPLETION_SDMA_STATUS_MASK 0x003C0000
+#define SCU_COMPLETION_PEG_MASK 0x00010000
+#define SCU_COMPLETION_PORT_MASK 0x00007000
+#define SCU_COMPLETION_PE_MASK SCU_COMPLETION_PORT_MASK
+#define SCU_COMPLETION_PE_SHIFT 12
+#define SCU_COMPLETION_INDEX_MASK 0x00000FFF
+
+/**
+ * SCU_GET_COMPLETION_TYPE() -
+ *
+ * This macro returns the SCU completion type.
+ */
+#define SCU_GET_COMPLETION_TYPE(completion_code) \
+ ((completion_code) & SCU_COMPLETION_TYPE_MASK)
+
+/**
+ * SCU_GET_COMPLETION_STATUS() -
+ *
+ * This macro returns the SCU completion status.
+ */
+#define SCU_GET_COMPLETION_STATUS(completion_code) \
+ ((completion_code) & SCU_COMPLETION_STATUS_MASK)
+
+/**
+ * SCU_GET_COMPLETION_TL_STATUS() -
+ *
+ * This macro returns the transport layer completion status.
+ */
+#define SCU_GET_COMPLETION_TL_STATUS(completion_code) \
+ ((completion_code) & SCU_COMPLETION_TL_STATUS_MASK)
+
+/**
+ * SCU_MAKE_COMPLETION_STATUS() -
+ *
+ * This macro takes a completion code and performs the shift and mask
+ * operations to turn it into a completion code that can be compared to a
+ * SCU_GET_COMPLETION_TL_STATUS.
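+ * For example: SCU_GET_COMPLETION_TL_STATUS(code) ==
+ * SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD).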
+ */
+#define SCU_MAKE_COMPLETION_STATUS(completion_code) \
+ ((u32)(completion_code) << SCU_COMPLETION_TL_STATUS_SHIFT)
+
+/**
+ * SCU_NORMALIZE_COMPLETION_STATUS() -
+ *
+ * This macro takes a SCU_GET_COMPLETION_TL_STATUS and normalizes it for a
+ * return code.
+ */
+#define SCU_NORMALIZE_COMPLETION_STATUS(completion_code) \
+ (\
+ ((completion_code) & SCU_COMPLETION_TL_STATUS_MASK) \
+ >> SCU_COMPLETION_TL_STATUS_SHIFT \
+ )
+
+/**
+ * SCU_GET_COMPLETION_SDMA_STATUS() -
+ *
+ * This macro returns the SDMA completion status.
+ */
+#define SCU_GET_COMPLETION_SDMA_STATUS(completion_code) \
+ ((completion_code) & SCU_COMPLETION_SDMA_STATUS_MASK)
+
+/**
+ * SCU_GET_COMPLETION_PEG() -
+ *
+ * This macro returns the Protocol Engine Group from the completion code.
+ */
+#define SCU_GET_COMPLETION_PEG(completion_code) \
+ ((completion_code) & SCU_COMPLETION_PEG_MASK)
+
+/**
+ * SCU_GET_COMPLETION_PORT() -
+ *
+ * This macro returns the logical port index from the completion code.
+ */
+#define SCU_GET_COMPLETION_PORT(completion_code) \
+ ((completion_code) & SCU_COMPLETION_PORT_MASK)
+
+/**
+ * SCU_GET_PROTOCOL_ENGINE_INDEX() -
+ *
+ * This macro returns the PE index from the completion code.
+ */
+#define SCU_GET_PROTOCOL_ENGINE_INDEX(completion_code) \
+ (((completion_code) & SCU_COMPLETION_PE_MASK) >> SCU_COMPLETION_PE_SHIFT)
+
+/**
+ * SCU_GET_COMPLETION_INDEX() -
+ *
+ * This macro returns the index of the completion which is either a TCi or an
+ * RNi depending on the completion type.
+ */
+#define SCU_GET_COMPLETION_INDEX(completion_code) \
+ ((completion_code) & SCU_COMPLETION_INDEX_MASK)
+
+#define SCU_UNSOLICITED_FRAME_MASK 0x0FFF0000
+#define SCU_UNSOLICITED_FRAME_SHIFT 16
+
+/**
+ * SCU_GET_FRAME_INDEX() -
+ *
+ * This macro returns a normalized frame index from an unsolicited frame
+ * completion.
+ */
+#define SCU_GET_FRAME_INDEX(completion_code) \
+ (\
+ ((completion_code) & SCU_UNSOLICITED_FRAME_MASK) \
+ >> SCU_UNSOLICITED_FRAME_SHIFT \
+ )
+
+#define SCU_UNSOLICITED_FRAME_ERROR_MASK 0x00008000
+
+/**
+ * SCU_GET_FRAME_ERROR() -
+ *
+ * This macro returns zero (0) if there is no frame error; otherwise it
+ * returns non-zero (!0).
+ */
+#define SCU_GET_FRAME_ERROR(completion_code) \
+ ((completion_code) & SCU_UNSOLICITED_FRAME_ERROR_MASK)
+
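+/*
+ * Usage sketch (illustrative only, not used by the driver): decoding a raw
+ * 32-bit completion code with the macros above.  For task completions the
+ * index is a TCi; for notifications it is an RNi.  'completion_entry' and
+ * 'complete_task' are hypothetical names.
+ *
+ *	u32 type  = SCU_GET_COMPLETION_TYPE(completion_entry);
+ *	u32 index = SCU_GET_COMPLETION_INDEX(completion_entry);
+ *
+ *	if (type == SCU_COMPLETION_TYPE_TASK)
+ *		complete_task(index);
+ */
+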
+/**
+ *
+ *
+ * These constants represent normalized completion codes which must be shifted
+ * 18 bits to match them to the hardware completion code. With a 16-bit
+ * compiler, immediate constants are 16-bit values (the size of an int);
+ * shifting those by 18 bits loses the value entirely. To ensure each value is
+ * the 32-bit value we want, every immediate value must be cast to a u32.
+ */
+#define SCU_TASK_DONE_GOOD ((u32)0x00)
+#define SCU_TASK_DONE_CRC_ERR ((u32)0x14)
+#define SCU_TASK_DONE_CHECK_RESPONSE ((u32)0x14)
+#define SCU_TASK_DONE_GEN_RESPONSE ((u32)0x15)
+#define SCU_TASK_DONE_NAK_CMD_ERR ((u32)0x16)
+#define SCU_TASK_DONE_CMD_LL_R_ERR ((u32)0x16)
+#define SCU_TASK_DONE_LL_R_ERR ((u32)0x17)
+#define SCU_TASK_DONE_ACK_NAK_TO ((u32)0x17)
+#define SCU_TASK_DONE_LL_PERR ((u32)0x18)
+#define SCU_TASK_DONE_LL_SY_TERM ((u32)0x19)
+#define SCU_TASK_DONE_NAK_ERR ((u32)0x19)
+#define SCU_TASK_DONE_LL_LF_TERM ((u32)0x1A)
+#define SCU_TASK_DONE_DATA_LEN_ERR ((u32)0x1A)
+#define SCU_TASK_DONE_LL_CL_TERM ((u32)0x1B)
+#define SCU_TASK_DONE_LL_ABORT_ERR ((u32)0x1B)
+#define SCU_TASK_DONE_SEQ_INV_TYPE ((u32)0x1C)
+#define SCU_TASK_DONE_UNEXP_XR ((u32)0x1C)
+#define SCU_TASK_DONE_INV_FIS_TYPE ((u32)0x1D)
+#define SCU_TASK_DONE_XR_IU_LEN_ERR ((u32)0x1D)
+#define SCU_TASK_DONE_INV_FIS_LEN ((u32)0x1E)
+#define SCU_TASK_DONE_XR_WD_LEN ((u32)0x1E)
+#define SCU_TASK_DONE_SDMA_ERR ((u32)0x1F)
+#define SCU_TASK_DONE_OFFSET_ERR ((u32)0x20)
+#define SCU_TASK_DONE_MAX_PLD_ERR ((u32)0x21)
+#define SCU_TASK_DONE_EXCESS_DATA ((u32)0x22)
+#define SCU_TASK_DONE_LF_ERR ((u32)0x23)
+#define SCU_TASK_DONE_UNEXP_FIS ((u32)0x24)
+#define SCU_TASK_DONE_UNEXP_RESP ((u32)0x24)
+#define SCU_TASK_DONE_EARLY_RESP ((u32)0x25)
+#define SCU_TASK_DONE_SMP_RESP_TO_ERR ((u32)0x26)
+#define SCU_TASK_DONE_DMASETUP_DIRERR ((u32)0x27)
+#define SCU_TASK_DONE_SMP_UFI_ERR ((u32)0x27)
+#define SCU_TASK_DONE_XFERCNT_ERR ((u32)0x28)
+#define SCU_TASK_DONE_SMP_FRM_TYPE_ERR ((u32)0x28)
+#define SCU_TASK_DONE_SMP_LL_RX_ERR ((u32)0x29)
+#define SCU_TASK_DONE_RESP_LEN_ERR ((u32)0x2A)
+#define SCU_TASK_DONE_UNEXP_DATA ((u32)0x2B)
+#define SCU_TASK_DONE_OPEN_FAIL ((u32)0x2C)
+#define SCU_TASK_DONE_UNEXP_SDBFIS ((u32)0x2D)
+#define SCU_TASK_DONE_REG_ERR ((u32)0x2E)
+#define SCU_TASK_DONE_SDB_ERR ((u32)0x2F)
+#define SCU_TASK_DONE_TASK_ABORT ((u32)0x30)
+#define SCU_TASK_DONE_CMD_SDMA_ERR ((u32)0x32)
+#define SCU_TASK_DONE_CMD_LL_ABORT_ERR ((u32)0x33)
+#define SCU_TASK_OPEN_REJECT_WRONG_DESTINATION ((u32)0x34)
+#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1 ((u32)0x35)
+#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2 ((u32)0x36)
+#define SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3 ((u32)0x37)
+#define SCU_TASK_OPEN_REJECT_BAD_DESTINATION ((u32)0x38)
+#define SCU_TASK_OPEN_REJECT_ZONE_VIOLATION ((u32)0x39)
+#define SCU_TASK_DONE_VIIT_ENTRY_NV ((u32)0x3A)
+#define SCU_TASK_DONE_IIT_ENTRY_NV ((u32)0x3B)
+#define SCU_TASK_DONE_RNCNV_OUTBOUND ((u32)0x3C)
+#define SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY ((u32)0x3D)
+#define SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED ((u32)0x3E)
+#define SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED ((u32)0x3F)
+
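+/*
+ * Usage sketch (illustrative only): testing a raw completion code for a
+ * successful transport layer status using the normalized constants above.
+ * Both sides of the compare end up positioned at the TL status field.
+ *
+ *	bool success = SCU_GET_COMPLETION_TL_STATUS(completion_code) ==
+ *		       SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD);
+ */
+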
+#endif /* _SCU_COMPLETION_CODES_HEADER_ */
diff --git a/drivers/scsi/isci/scu_event_codes.h b/drivers/scsi/isci/scu_event_codes.h
new file mode 100644
index 00000000000..36a945ad572
--- /dev/null
+++ b/drivers/scsi/isci/scu_event_codes.h
@@ -0,0 +1,336 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SCU_EVENT_CODES_HEADER__
+#define __SCU_EVENT_CODES_HEADER__
+
+/**
+ * This file contains the constants and macros for the SCU event codes.
+ *
+ *
+ */
+
+#define SCU_EVENT_TYPE_CODE_SHIFT 24
+#define SCU_EVENT_TYPE_CODE_MASK 0x0F000000
+
+#define SCU_EVENT_SPECIFIC_CODE_SHIFT 18
+#define SCU_EVENT_SPECIFIC_CODE_MASK 0x00FC0000
+
+#define SCU_EVENT_CODE_MASK \
+ (SCU_EVENT_TYPE_CODE_MASK | SCU_EVENT_SPECIFIC_CODE_MASK)
+
+/**
+ * SCU_EVENT_TYPE() -
+ *
+ * This macro constructs an SCU event type from the type value.
+ */
+#define SCU_EVENT_TYPE(type) \
+ ((u32)(type) << SCU_EVENT_TYPE_CODE_SHIFT)
+
+/**
+ * SCU_EVENT_SPECIFIC() -
+ *
+ * This macro constructs an SCU event specifier from the code value.
+ */
+#define SCU_EVENT_SPECIFIC(code) \
+ ((u32)(code) << SCU_EVENT_SPECIFIC_CODE_SHIFT)
+
+/**
+ * SCU_EVENT_MESSAGE() -
+ *
+ * This macro combines an SCU event type and an SCU event specifier from the
+ * type and code values into a single event message.
+ */
+#define SCU_EVENT_MESSAGE(type, code) \
+ ((type) | SCU_EVENT_SPECIFIC(code))
+
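+/*
+ * For example (worked out, illustrative only), this is how the named event
+ * constants later in this file expand:
+ *
+ *	SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x01)
+ *		== SCU_EVENT_TYPE(0x02) | SCU_EVENT_SPECIFIC(0x01)
+ *		== (0x02 << 24) | (0x01 << 18)
+ *		== 0x02040000
+ */
+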
+/**
+ *
+ *
+ * SCU_EVENT_TYPES These macros define the SCU event types.
+ */
+#define SCU_EVENT_TYPE_SMU_COMMAND_ERROR SCU_EVENT_TYPE(0x08)
+#define SCU_EVENT_TYPE_SMU_PCQ_ERROR SCU_EVENT_TYPE(0x09)
+#define SCU_EVENT_TYPE_SMU_ERROR SCU_EVENT_TYPE(0x00)
+#define SCU_EVENT_TYPE_TRANSPORT_ERROR SCU_EVENT_TYPE(0x01)
+#define SCU_EVENT_TYPE_BROADCAST_CHANGE SCU_EVENT_TYPE(0x02)
+#define SCU_EVENT_TYPE_OSSP_EVENT SCU_EVENT_TYPE(0x03)
+#define SCU_EVENT_TYPE_FATAL_MEMORY_ERROR SCU_EVENT_TYPE(0x0F)
+#define SCU_EVENT_TYPE_RNC_SUSPEND_TX SCU_EVENT_TYPE(0x04)
+#define SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX SCU_EVENT_TYPE(0x05)
+#define SCU_EVENT_TYPE_RNC_OPS_MISC SCU_EVENT_TYPE(0x06)
+#define SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT SCU_EVENT_TYPE(0x07)
+#define SCU_EVENT_TYPE_ERR_CNT_EVENT SCU_EVENT_TYPE(0x0A)
+
+/**
+ *
+ *
+ * SCU_EVENT_SPECIFIERS
+ */
+#define SCU_EVENT_SPECIFIER_DRIVER_SUSPEND 0x20
+#define SCU_EVENT_SPECIFIER_RNC_RELEASE 0x00
+
+/**
+ *
+ *
+ * SMU_COMMAND_EVENTS
+ */
+#define SCU_EVENT_INVALID_CONTEXT_COMMAND \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_COMMAND_ERROR, 0x00)
+
+/**
+ *
+ *
+ * SMU_PCQ_EVENTS
+ */
+#define SCU_EVENT_UNCORRECTABLE_PCQ_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_PCQ_ERROR, 0x00)
+
+/**
+ *
+ *
+ * SMU_EVENTS
+ */
+#define SCU_EVENT_UNCORRECTABLE_REGISTER_WRITE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x02)
+#define SCU_EVENT_UNCORRECTABLE_REGISTER_READ \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x03)
+#define SCU_EVENT_PCIE_INTERFACE_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x04)
+#define SCU_EVENT_FUNCTION_LEVEL_RESET \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_SMU_ERROR, 0x05)
+
+/**
+ *
+ *
+ * TRANSPORT_LEVEL_ERRORS
+ */
+#define SCU_EVENT_ACK_NAK_TIMEOUT_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_TRANSPORT_ERROR, 0x00)
+
+/**
+ *
+ *
+ * BROADCAST_CHANGE_EVENTS
+ */
+#define SCU_EVENT_BROADCAST_CHANGE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x01)
+#define SCU_EVENT_BROADCAST_RESERVED0 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x02)
+#define SCU_EVENT_BROADCAST_RESERVED1 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x03)
+#define SCU_EVENT_BROADCAST_SES \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x04)
+#define SCU_EVENT_BROADCAST_EXPANDER \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x05)
+#define SCU_EVENT_BROADCAST_AEN \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x06)
+#define SCU_EVENT_BROADCAST_RESERVED3 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x07)
+#define SCU_EVENT_BROADCAST_RESERVED4 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x08)
+#define SCU_EVENT_PE_SUSPENDED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_BROADCAST_CHANGE, 0x09)
+
+/**
+ *
+ *
+ * OSSP_EVENTS
+ */
+#define SCU_EVENT_PORT_SELECTOR_DETECTED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x10)
+#define SCU_EVENT_SENT_PORT_SELECTION \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x11)
+#define SCU_EVENT_HARD_RESET_TRANSMITTED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x12)
+#define SCU_EVENT_HARD_RESET_RECEIVED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x13)
+#define SCU_EVENT_RECEIVED_IDENTIFY_TIMEOUT \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x15)
+#define SCU_EVENT_LINK_FAILURE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x16)
+#define SCU_EVENT_SATA_SPINUP_HOLD \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x17)
+#define SCU_EVENT_SAS_15_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x18)
+#define SCU_EVENT_SAS_15 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x19)
+#define SCU_EVENT_SAS_30_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1A)
+#define SCU_EVENT_SAS_30 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1B)
+#define SCU_EVENT_SAS_60_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1C)
+#define SCU_EVENT_SAS_60 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1D)
+#define SCU_EVENT_SATA_15_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1E)
+#define SCU_EVENT_SATA_15 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x1F)
+#define SCU_EVENT_SATA_30_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x20)
+#define SCU_EVENT_SATA_30 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x21)
+#define SCU_EVENT_SATA_60_SSC \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x22)
+#define SCU_EVENT_SATA_60 \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x23)
+#define SCU_EVENT_SAS_PHY_DETECTED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x24)
+#define SCU_EVENT_SATA_PHY_DETECTED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_OSSP_EVENT, 0x25)
+
+/**
+ *
+ *
+ * FATAL_INTERNAL_MEMORY_ERROR_EVENTS
+ */
+#define SCU_EVENT_TSC_RNSC_UNCORRECTABLE_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x00)
+#define SCU_EVENT_TC_RNC_UNCORRECTABLE_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x01)
+#define SCU_EVENT_ZPT_UNCORRECTABLE_ERROR \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_FATAL_MEMORY_ERROR, 0x02)
+
+/**
+ *
+ *
+ * REMOTE_NODE_SUSPEND_EVENTS
+ */
+#define SCU_EVENT_TL_RNC_SUSPEND_TX \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x00)
+#define SCU_EVENT_TL_RNC_SUSPEND_TX_RX \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x00)
+#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX, 0x20)
+#define SCU_EVENT_DRIVER_POST_RNC_SUSPEND_TX_RX \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX, 0x20)
+
+/**
+ *
+ *
+ * REMOTE_NODE_MISC_EVENTS
+ */
+#define SCU_EVENT_POST_RCN_RELEASE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, SCU_EVENT_SPECIFIER_RNC_RELEASE)
+#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_ENABLE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x01)
+#define SCU_EVENT_POST_IT_NEXUS_LOSS_TIMER_DISABLE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x02)
+#define SCU_EVENT_POST_RNC_COMPLETE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x03)
+#define SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_RNC_OPS_MISC, 0x04)
+
+/**
+ *
+ *
+ * ERROR_COUNT_EVENT
+ */
+#define SCU_EVENT_RX_CREDIT_BLOCKED_RECEIVED \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x00)
+#define SCU_EVENT_TX_DONE_CREDIT_TIMEOUT \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x01)
+#define SCU_EVENT_RX_DONE_CREDIT_TIMEOUT \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_ERR_CNT_EVENT, 0x02)
+
+/**
+ * scu_get_event_type() -
+ *
+ * This macro returns the SCU event type from the event code.
+ */
+#define scu_get_event_type(event_code) \
+ ((event_code) & SCU_EVENT_TYPE_CODE_MASK)
+
+/**
+ * scu_get_event_specifier() -
+ *
+ * This macro returns the SCU event specifier from the event code.
+ */
+#define scu_get_event_specifier(event_code) \
+ ((event_code) & SCU_EVENT_SPECIFIC_CODE_MASK)
+
+/**
+ * scu_get_event_code() -
+ *
+ * This macro returns the combined SCU event type and SCU event specifier from
+ * the event code.
+ */
+#define scu_get_event_code(event_code) \
+ ((event_code) & SCU_EVENT_CODE_MASK)
+
+
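+/*
+ * Usage sketch (illustrative only): dispatching on an incoming event code.
+ * 'event_code' and the handler names are hypothetical; the accessors above
+ * mask off the reserved low-order bits.
+ *
+ *	switch (scu_get_event_type(event_code)) {
+ *	case SCU_EVENT_TYPE_BROADCAST_CHANGE:
+ *		if (scu_get_event_code(event_code) == SCU_EVENT_BROADCAST_CHANGE)
+ *			handle_broadcast_change();
+ *		break;
+ *	case SCU_EVENT_TYPE_OSSP_EVENT:
+ *		handle_phy_event(scu_get_event_specifier(event_code));
+ *		break;
+ *	}
+ */
+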
+/**
+ *
+ *
+ * PTS_SCHEDULE_EVENT
+ */
+#define SCU_EVENT_SMP_RESPONSE_NO_PE \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x00)
+#define SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE \
+ scu_get_event_specifier(SCU_EVENT_SMP_RESPONSE_NO_PE)
+
+#define SCU_EVENT_TASK_TIMEOUT \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x01)
+#define SCU_EVENT_SPECIFIC_TASK_TIMEOUT \
+ scu_get_event_specifier(SCU_EVENT_TASK_TIMEOUT)
+
+#define SCU_EVENT_IT_NEXUS_TIMEOUT \
+ SCU_EVENT_MESSAGE(SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT, 0x02)
+#define SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT \
+ scu_get_event_specifier(SCU_EVENT_IT_NEXUS_TIMEOUT)
+
+
+#endif /* __SCU_EVENT_CODES_HEADER__ */
diff --git a/drivers/scsi/isci/scu_remote_node_context.h b/drivers/scsi/isci/scu_remote_node_context.h
new file mode 100644
index 00000000000..33745adc826
--- /dev/null
+++ b/drivers/scsi/isci/scu_remote_node_context.h
@@ -0,0 +1,229 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __SCU_REMOTE_NODE_CONTEXT_HEADER__
+#define __SCU_REMOTE_NODE_CONTEXT_HEADER__
+
+/**
+ * This file contains the structures and constants used by the SCU hardware to
+ * describe a remote node context.
+ *
+ *
+ */
+
+/**
+ * struct ssp_remote_node_context - This structure contains the SCU hardware
+ * definition for an SSP remote node.
+ *
+ *
+ */
+struct ssp_remote_node_context {
+ /* WORD 0 */
+
+ /**
+ * This field is the remote node index assigned for this remote node. All
+ * remote nodes must have a unique remote node index. The value of the remote
+	 * node index cannot exceed the maximum number of remote nodes reported in
+ * the SCU device context capacity register.
+ */
+ u32 remote_node_index:12;
+ u32 reserved0_1:4;
+
+ /**
+	 * This field tells the SCU hardware how many simultaneous connections
+	 * this remote node will support.
+ */
+ u32 remote_node_port_width:4;
+
+ /**
+ * This field tells the SCU hardware which logical port to associate with this
+ * remote node.
+ */
+ u32 logical_port_index:3;
+ u32 reserved0_2:5;
+
+ /**
+ * This field will enable the I_T nexus loss timer for this remote node.
+ */
+ u32 nexus_loss_timer_enable:1;
+
+ /**
+	 * This field is for driver debug only and is not used.
+ */
+ u32 check_bit:1;
+
+ /**
+ * This field must be set to true when the hardware DMAs the remote node
+ * context to the hardware SRAM. When the remote node is being invalidated
+ * this field must be set to false.
+ */
+ u32 is_valid:1;
+
+ /**
+ * This field must be set to true.
+ */
+ u32 is_remote_node_context:1;
+
+ /* WORD 1 - 2 */
+
+ /**
+	 * This field is the low word of the remote device SAS Address
+ */
+ u32 remote_sas_address_lo;
+
+ /**
+ * This field is the high word of the remote device SAS Address
+ */
+ u32 remote_sas_address_hi;
+
+ /* WORD 3 */
+ /**
+	 * This field represents the function number assigned to this remote device.
+ * This value must match the virtual function number that is being used to
+ * communicate to the device.
+ */
+ u32 function_number:8;
+ u32 reserved3_1:8;
+
+ /**
+	 * This field provides the driver a way to override the arbitration wait
+	 * time for this remote node.
+ */
+ u32 arbitration_wait_time:16;
+
+ /* WORD 4 */
+ /**
+ * This field tells the SCU hardware how long this device may occupy the
+ * connection before it must be closed.
+ */
+ u32 connection_occupancy_timeout:16;
+
+ /**
+ * This field tells the SCU hardware how long to maintain a connection when
+ * there are no frames being transmitted on the link.
+ */
+ u32 connection_inactivity_timeout:16;
+
+ /* WORD 5 */
+ /**
+	 * This field allows the driver to override the arbitration wait time for
+	 * this remote node.
+ */
+ u32 initial_arbitration_wait_time:16;
+
+ /**
+	 * This field tells the hardware what to program for the connection rate in
+ * the open address frame. See the SAS spec for valid values.
+ */
+ u32 oaf_connection_rate:4;
+
+ /**
+ * This field tells the SCU hardware what to program for the features in the
+ * open address frame. See the SAS spec for valid values.
+ */
+ u32 oaf_features:4;
+
+ /**
+ * This field tells the SCU hardware what to use for the source zone group in
+ * the open address frame. See the SAS spec for more details on zoning.
+ */
+ u32 oaf_source_zone_group:8;
+
+ /* WORD 6 */
+ /**
+	 * This field tells the SCU hardware what to use for the more compatible
+	 * features field in the open address frame. See the SAS Spec for details.
+ */
+ u32 oaf_more_compatibility_features;
+
+ /* WORD 7 */
+ u32 reserved7;
+
+};
+
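+/*
+ * Illustrative sketch (not driver code): the minimum word-0 fields to
+ * populate before this context is DMAed to the hardware.  'rnc', 'rni' and
+ * 'port' are hypothetical; the timeout and open address frame fields must
+ * also be programmed per the SAS spec.
+ *
+ *	rnc->remote_node_index = rni;
+ *	rnc->remote_node_port_width = 1;
+ *	rnc->logical_port_index = port;
+ *	rnc->is_remote_node_context = 1;
+ *	rnc->is_valid = 1;
+ */
+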
+/**
+ * struct stp_remote_node_context - This structure contains the SCU hardware
+ * definition for a STP remote node.
+ *
+ * STP Targets are not yet supported so this definition is a placeholder until
+ * we do support them.
+ */
+struct stp_remote_node_context {
+ /**
+ * Placeholder data for the STP remote node.
+ */
+ u32 data[8];
+
+};
+
+/**
+ * This union combines the SAS and SATA remote node definitions.
+ *
+ * union scu_remote_node_context
+ */
+union scu_remote_node_context {
+ /**
+ * SSP Remote Node
+ */
+ struct ssp_remote_node_context ssp;
+
+ /**
+ * STP Remote Node
+ */
+ struct stp_remote_node_context stp;
+
+};
+
+#endif /* __SCU_REMOTE_NODE_CONTEXT_HEADER__ */
diff --git a/drivers/scsi/isci/scu_task_context.h b/drivers/scsi/isci/scu_task_context.h
new file mode 100644
index 00000000000..7df87d92328
--- /dev/null
+++ b/drivers/scsi/isci/scu_task_context.h
@@ -0,0 +1,942 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCU_TASK_CONTEXT_H_
+#define _SCU_TASK_CONTEXT_H_
+
+/**
+ * This file contains the structures and constants for the SCU hardware task
+ * context.
+ *
+ *
+ */
+
+
+/**
+ * enum scu_ssp_task_type - This enumeration defines the various SSP task
+ * types the SCU hardware will accept. The definition for the various task
+ * types the SCU hardware will accept can be found in the DS specification.
+ *
+ *
+ */
+typedef enum {
+	SCU_TASK_TYPE_IOREAD,		/* IO READ direction or no direction */
+	SCU_TASK_TYPE_IOWRITE,		/* IO Write direction */
+	SCU_TASK_TYPE_SMP_REQUEST,	/* SMP Request type */
+	SCU_TASK_TYPE_RESPONSE,		/* Driver generated response frame (target mode) */
+	SCU_TASK_TYPE_RAW_FRAME,	/* Raw frame request type */
+	SCU_TASK_TYPE_PRIMITIVE		/* Request for a primitive to be transmitted */
+} scu_ssp_task_type;
+
+/**
+ * enum scu_sata_task_type - This enumeration defines the various SATA task
+ * types the SCU hardware will accept. The definition for the various task
+ * types the SCU hardware will accept can be found in the DS specification.
+ *
+ *
+ */
+typedef enum {
+	SCU_TASK_TYPE_DMA_IN,		/* Read request */
+	SCU_TASK_TYPE_FPDMAQ_READ,	/* NCQ read request */
+	SCU_TASK_TYPE_PACKET_DMA_IN,	/* Packet read request */
+	SCU_TASK_TYPE_SATA_RAW_FRAME,	/* Raw frame request */
+	RESERVED_4,
+	RESERVED_5,
+	RESERVED_6,
+	RESERVED_7,
+	SCU_TASK_TYPE_DMA_OUT,		/* Write request */
+	SCU_TASK_TYPE_FPDMAQ_WRITE,	/* NCQ write request */
+	SCU_TASK_TYPE_PACKET_DMA_OUT	/* Packet write request */
+} scu_sata_task_type;
+
+
+/**
+ *
+ *
+ * SCU_CONTEXT_TYPE
+ */
+#define SCU_TASK_CONTEXT_TYPE 0
+#define SCU_RNC_CONTEXT_TYPE 1
+
+/**
+ *
+ *
+ * SCU_TASK_CONTEXT_VALIDITY
+ */
+#define SCU_TASK_CONTEXT_INVALID 0
+#define SCU_TASK_CONTEXT_VALID 1
+
+/**
+ *
+ *
+ * SCU_COMMAND_CODE
+ */
+#define SCU_COMMAND_CODE_INITIATOR_NEW_TASK 0
+#define SCU_COMMAND_CODE_ACTIVE_TASK 1
+#define SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK 2
+#define SCU_COMMAND_CODE_TARGET_RAW_FRAMES 3
+
+/**
+ *
+ *
+ * SCU_TASK_PRIORITY
+ */
+/**
+ *
+ *
+ * This priority is used when no specific priority is requested for the task.
+ */
+#define SCU_TASK_PRIORITY_NORMAL 0
+
+/**
+ *
+ *
+ * This priority indicates that the task should be scheduled to the head of the
+ * queue. The task will NOT be executed if the TX is suspended for the remote
+ * node.
+ */
+#define SCU_TASK_PRIORITY_HEAD_OF_Q 1
+
+/**
+ *
+ *
+ * This priority indicates that the task will be executed before all
+ * SCU_TASK_PRIORITY_NORMAL and SCU_TASK_PRIORITY_HEAD_OF_Q tasks. The task
+ * WILL be executed if the TX is suspended for the remote node.
+ */
+#define SCU_TASK_PRIORITY_HIGH 2
+
+/**
+ *
+ *
+ * This task priority is reserved and should not be used.
+ */
+#define SCU_TASK_PRIORITY_RESERVED 3
+
+#define SCU_TASK_INITIATOR_MODE 1
+#define SCU_TASK_TARGET_MODE 0
+
+#define SCU_TASK_REGULAR 0
+#define SCU_TASK_ABORTED 1
+
+/* direction bit definition */
+/**
+ *
+ *
+ * SATA_DIRECTION
+ */
+#define SCU_SATA_WRITE_DATA_DIRECTION 0
+#define SCU_SATA_READ_DATA_DIRECTION 1
+
+/**
+ *
+ *
+ * SCU_COMMAND_CONTEXT_MACROS These macros provide the mask and shift
+ * operations to construct the various SCU commands
+ */
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT 21
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK 0x00E00000
+#define scu_get_command_request_type(x) \
+ ((x) & SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK)
+
+#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT 18
+#define SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK 0x001C0000
+#define scu_get_command_request_subtype(x) \
+ ((x) & SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK)
+
+#define SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK \
+ (\
+ SCU_CONTEXT_COMMAND_REQUEST_TYPE_MASK \
+ | SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_MASK \
+ )
+#define scu_get_command_request_full_type(x) \
+ ((x) & SCU_CONTEXT_COMMAND_REQUEST_FULLTYPE_MASK)
+
+#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT 16
+#define SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK 0x00010000
+#define scu_get_command_protocl_engine_group(x) \
+ ((x) & SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_MASK)
+
+#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT 12
+#define SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK 0x00007000
+#define scu_get_command_reqeust_logical_port(x) \
+ ((x) & SCU_CONTEXT_COMMAND_LOGICAL_PORT_MASK)
+
+
+#define MAKE_SCU_CONTEXT_COMMAND_TYPE(type) \
+ ((u32)(type) << SCU_CONTEXT_COMMAND_REQUEST_TYPE_SHIFT)
+
+/**
+ * MAKE_SCU_CONTEXT_COMMAND_TYPE() -
+ *
+ * SCU_COMMAND_TYPES These constants provide the grouping of the different SCU
+ * command types.
+ */
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC MAKE_SCU_CONTEXT_COMMAND_TYPE(0)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC MAKE_SCU_CONTEXT_COMMAND_TYPE(1)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(2)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(3)
+#define SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC MAKE_SCU_CONTEXT_COMMAND_TYPE(6)
+
+#define MAKE_SCU_CONTEXT_COMMAND_REQUEST(type, command) \
+ ((type) | ((command) << SCU_CONTEXT_COMMAND_REQUEST_SUBTYPE_SHIFT))
+
+/**
+ *
+ *
+ * SCU_REQUEST_TYPES These constants are the various request types that can be
+ * posted to the SCU hardware.
+ */
+#define SCU_CONTEXT_COMMAND_REQUST_POST_TC \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 0))
+
+#define SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC, 1))
+
+#define SCU_CONTEXT_COMMAND_REQUST_DUMP_TC \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC, 0))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_32 \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 0))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_96 \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 1))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC, 2))
+
+#define SCU_CONTEXT_COMMAND_DUMP_RNC_32 \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 0))
+
+#define SCU_CONTEXT_COMMAND_DUMP_RNC_96 \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC, 1))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 0))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX_RX \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 1))
+
+#define SCU_CONTEXT_COMMAND_POST_RNC_RESUME \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 2))
+
+#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_ENABLE \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 3))
+
+#define SCU_CONTEXT_IT_NEXUS_LOSS_TIMER_DISABLE \
+ (MAKE_SCU_CONTEXT_COMMAND_REQUEST(SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC, 4))
+
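+/*
+ * Usage sketch (illustrative only): composing a full post request from the
+ * constants and field definitions above.  'peg', 'port' and 'tci' are
+ * hypothetical values for the protocol engine group, logical port and task
+ * context index.
+ *
+ *	u32 command = SCU_CONTEXT_COMMAND_REQUST_POST_TC |
+ *		      (peg << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
+ *		      (port << SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
+ *		      tci;
+ */
+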
+/**
+ *
+ *
+ * SCU_TASK_CONTEXT_PROTOCOL SCU Task context protocol types; this is used to
+ * program the SCU Task context protocol field in word 0x00.
+ */
+#define SCU_TASK_CONTEXT_PROTOCOL_SMP 0x00
+#define SCU_TASK_CONTEXT_PROTOCOL_SSP 0x01
+#define SCU_TASK_CONTEXT_PROTOCOL_STP 0x02
+#define SCU_TASK_CONTEXT_PROTOCOL_NONE 0x07
+
+/**
+ * struct ssp_task_context - This is the SCU hardware definition for an SSP
+ * request.
+ *
+ *
+ */
+struct ssp_task_context {
+ /* OFFSET 0x18 */
+ u32 reserved00:24;
+ u32 frame_type:8;
+
+ /* OFFSET 0x1C */
+ u32 reserved01;
+
+ /* OFFSET 0x20 */
+ u32 fill_bytes:2;
+ u32 reserved02:6;
+ u32 changing_data_pointer:1;
+ u32 retransmit:1;
+ u32 retry_data_frame:1;
+ u32 tlr_control:2;
+ u32 reserved03:19;
+
+ /* OFFSET 0x24 */
+ u32 uiRsvd4;
+
+ /* OFFSET 0x28 */
+ u32 target_port_transfer_tag:16;
+ u32 tag:16;
+
+ /* OFFSET 0x2C */
+ u32 data_offset;
+};
+
+/**
+ * struct stp_task_context - This is the SCU hardware definition for an STP
+ * request.
+ *
+ *
+ */
+struct stp_task_context {
+ /* OFFSET 0x18 */
+ u32 fis_type:8;
+ u32 pm_port:4;
+ u32 reserved0:3;
+ u32 control:1;
+ u32 command:8;
+ u32 features:8;
+
+ /* OFFSET 0x1C */
+ u32 reserved1;
+
+ /* OFFSET 0x20 */
+ u32 reserved2;
+
+ /* OFFSET 0x24 */
+ u32 reserved3;
+
+ /* OFFSET 0x28 */
+ u32 ncq_tag:5;
+ u32 reserved4:27;
+
+ /* OFFSET 0x2C */
+ u32 data_offset; /* TODO: What is this used for? */
+};
+
+/**
+ * struct smp_task_context - This is the SCU hardware definition for an SMP
+ * request.
+ *
+ *
+ */
+struct smp_task_context {
+ /* OFFSET 0x18 */
+ u32 response_length:8;
+ u32 function_result:8;
+ u32 function:8;
+ u32 frame_type:8;
+
+ /* OFFSET 0x1C */
+ u32 smp_response_ufi:12;
+ u32 reserved1:20;
+
+ /* OFFSET 0x20 */
+ u32 reserved2;
+
+ /* OFFSET 0x24 */
+ u32 reserved3;
+
+ /* OFFSET 0x28 */
+ u32 reserved4;
+
+ /* OFFSET 0x2C */
+ u32 reserved5;
+};
+
+/**
+ * struct primitive_task_context - This is the SCU hardware definition used
+ * when the driver wants to send a primitive on the link.
+ *
+ *
+ */
+struct primitive_task_context {
+ /* OFFSET 0x18 */
+ /**
+ * This field is the control word and it must be 0.
+ */
+	u32 control;		/* must be set to 0 */
+
+ /* OFFSET 0x1C */
+ /**
+ * This field specifies the primitive that is to be transmitted.
+ */
+ u32 sequence;
+
+ /* OFFSET 0x20 */
+ u32 reserved0;
+
+ /* OFFSET 0x24 */
+ u32 reserved1;
+
+ /* OFFSET 0x28 */
+ u32 reserved2;
+
+ /* OFFSET 0x2C */
+ u32 reserved3;
+};
+
+/**
+ * The union of the protocols that can be selected in the SCU task context
+ * field.
+ *
+ * protocol_context
+ */
+union protocol_context {
+ struct ssp_task_context ssp;
+ struct stp_task_context stp;
+ struct smp_task_context smp;
+ struct primitive_task_context primitive;
+ u32 words[6];
+};
+
+/**
+ * struct scu_sgl_element - This structure represents a single SCU defined SGL
+ *    element. SCU SGLs contain a 64 bit address, with the maximum data
+ *    transfer length being a 24 bit value. The SGL cannot cross a 4GB
+ *    boundary.
+ *
+ * struct scu_sgl_element
+ */
+struct scu_sgl_element {
+ /**
+ * This field is the upper 32 bits of the 64 bit physical address.
+ */
+ u32 address_upper;
+
+ /**
+ * This field is the lower 32 bits of the 64 bit physical address.
+ */
+ u32 address_lower;
+
+ /**
+ * This field is the number of bytes to transfer.
+ */
+ u32 length:24;
+
+ /**
+ * This field is the address modifier to be used when a virtual function is
+ * requesting a data transfer.
+ */
+ u32 address_modifier:8;
+
+};
+
+#define SCU_SGL_ELEMENT_PAIR_A 0
+#define SCU_SGL_ELEMENT_PAIR_B 1
+
+/**
+ * struct scu_sgl_element_pair - This structure is the SCU hardware definition
+ * of a pair of SGL elements. The SCU hardware always works on SGL pairs.
+ *    They are referred to in the DS specification as SGL A and SGL B. Each SGL
+ * pair is followed by the address of the next pair.
+ *
+ *
+ */
+struct scu_sgl_element_pair {
+ /* OFFSET 0x60-0x68 */
+ /**
+ * This field is the SGL element A of the SGL pair.
+ */
+ struct scu_sgl_element A;
+
+ /* OFFSET 0x6C-0x74 */
+ /**
+ * This field is the SGL element B of the SGL pair.
+ */
+ struct scu_sgl_element B;
+
+ /* OFFSET 0x78-0x7C */
+ /**
+ * This field is the upper 32 bits of the 64 bit address to the next SGL
+ * element pair.
+ */
+ u32 next_pair_upper;
+
+ /**
+ * This field is the lower 32 bits of the 64 bit address to the next SGL
+ * element pair.
+ */
+ u32 next_pair_lower;
+
+};
+
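+/*
+ * Illustrative sketch (not driver code): filling element A of a pair from
+ * a DMA mapping.  'sgl_pair', 'dma' and 'len' are hypothetical; 'len' must
+ * fit in the 24 bit length field and the element may not cross a 4GB
+ * boundary.
+ *
+ *	sgl_pair->A.address_upper = upper_32_bits(dma);
+ *	sgl_pair->A.address_lower = lower_32_bits(dma);
+ *	sgl_pair->A.length = len;
+ *	sgl_pair->A.address_modifier = 0;
+ */
+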
+/**
+ * struct transport_snapshot - This structure is the SCU hardware scratch area
+ * for the task context. This is set to 0 by the driver but can be read by
+ * issuing a dump TC request to the SCU.
+ *
+ *
+ */
+struct transport_snapshot {
+ /* OFFSET 0x48 */
+ u32 xfer_rdy_write_data_length;
+
+ /* OFFSET 0x4C */
+ u32 data_offset;
+
+ /* OFFSET 0x50 */
+ u32 data_transfer_size:24;
+ u32 reserved_50_0:8;
+
+ /* OFFSET 0x54 */
+ u32 next_initiator_write_data_offset;
+
+ /* OFFSET 0x58 */
+ u32 next_initiator_write_data_xfer_size:24;
+ u32 reserved_58_0:8;
+};
+
+/**
+ * struct scu_task_context - This structure defines the contents of the SCU
+ * silicon task context. It lays out all of the fields according to the
+ * expected order and location for the Storage Controller unit.
+ *
+ *
+ */
+struct scu_task_context {
+ /* OFFSET 0x00 ------ */
+ /**
+ * This field must be encoded to one of the valid SCU task priority values
+ * - SCU_TASK_PRIORITY_NORMAL
+ * - SCU_TASK_PRIORITY_HEAD_OF_Q
+ * - SCU_TASK_PRIORITY_HIGH
+ */
+ u32 priority:2;
+
+ /**
+ * This field must be set to true if this is an initiator generated request.
+ * Until target mode is supported all task requests are initiator requests.
+ */
+ u32 initiator_request:1;
+
+ /**
+	 * This field must be set to one of the valid connection rates; valid
+	 * values are 0x8, 0x9, and 0xA.
+ */
+ u32 connection_rate:4;
+
+ /**
+	 * This field must be programmed when generating an SMP response since the
+	 * SMP connection remains open until the SMP response is generated.
+ */
+ u32 protocol_engine_index:3;
+
+ /**
+ * This field must contain the logical port for the task request.
+ */
+ u32 logical_port_index:3;
+
+ /**
+ * This field must be set to one of the SCU_TASK_CONTEXT_PROTOCOL values
+ * - SCU_TASK_CONTEXT_PROTOCOL_SMP
+ * - SCU_TASK_CONTEXT_PROTOCOL_SSP
+ * - SCU_TASK_CONTEXT_PROTOCOL_STP
+ * - SCU_TASK_CONTEXT_PROTOCOL_NONE
+ */
+ u32 protocol_type:3;
+
+ /**
+	 * This field must be set to the TCi allocated for this task
+ */
+ u32 task_index:12;
+
+ /**
+ * This field is reserved and must be set to 0x00
+ */
+ u32 reserved_00_0:1;
+
+ /**
+ * For a normal task request this must be set to 0. If this is an abort of
+ * this task request it must be set to 1.
+ */
+ u32 abort:1;
+
+ /**
+ * This field must be set to true for the SCU hardware to process the task.
+ */
+ u32 valid:1;
+
+ /**
+ * This field must be set to SCU_TASK_CONTEXT_TYPE
+ */
+ u32 context_type:1;
+
+ /* OFFSET 0x04 */
+ /**
+ * This field contains the RNi that is the target of this request.
+ */
+ u32 remote_node_index:12;
+
+ /**
+	 * This field is programmed only for a mirrored request (which this
+	 * driver does not use), in which case it is the RNi of the mirrored
+	 * target.
+ */
+ u32 mirrored_node_index:12;
+
+ /**
+	 * This field is programmed with the direction of the SATA request
+ * - SCU_SATA_WRITE_DATA_DIRECTION
+ * - SCU_SATA_READ_DATA_DIRECTION
+ */
+ u32 sata_direction:1;
+
+ /**
+	 * This field is programmed with one of the following SCU_COMMAND_CODE
+ * - SCU_COMMAND_CODE_INITIATOR_NEW_TASK
+ * - SCU_COMMAND_CODE_ACTIVE_TASK
+ * - SCU_COMMAND_CODE_PRIMITIVE_SEQ_TASK
+ * - SCU_COMMAND_CODE_TARGET_RAW_FRAMES
+ */
+ u32 command_code:2;
+
+ /**
+ * This field is set to true if the remote node should be suspended.
+ * This bit is only valid for SSP & SMP target devices.
+ */
+ u32 suspend_node:1;
+
+ /**
+ * This field is programmed with one of the following command type codes
+ *
+ * For SAS requests use the scu_ssp_task_type
+ * - SCU_TASK_TYPE_IOREAD
+ * - SCU_TASK_TYPE_IOWRITE
+ * - SCU_TASK_TYPE_SMP_REQUEST
+ * - SCU_TASK_TYPE_RESPONSE
+ * - SCU_TASK_TYPE_RAW_FRAME
+ * - SCU_TASK_TYPE_PRIMITIVE
+ *
+ * For SATA requests use the scu_sata_task_type
+ * - SCU_TASK_TYPE_DMA_IN
+ * - SCU_TASK_TYPE_FPDMAQ_READ
+ * - SCU_TASK_TYPE_PACKET_DMA_IN
+ * - SCU_TASK_TYPE_SATA_RAW_FRAME
+ * - SCU_TASK_TYPE_DMA_OUT
+ * - SCU_TASK_TYPE_FPDMAQ_WRITE
+ * - SCU_TASK_TYPE_PACKET_DMA_OUT
+ */
+ u32 task_type:4;
+
+ /* OFFSET 0x08 */
+ /**
+	 * This field is reserved and must be set to 0x00
+ */
+ u32 link_layer_control:8; /* presently all reserved */
+
+ /**
+ * This field is set to true when TLR is to be enabled
+ */
+ u32 ssp_tlr_enable:1;
+
+ /**
+	 * This field specifies whether the SCU DMAs a response frame to host
+	 * memory for good response frames when operating in target mode.
+ */
+ u32 dma_ssp_target_good_response:1;
+
+ /**
+ * This field indicates if the SCU should DMA the response frame to
+ * host memory.
+ */
+ u32 do_not_dma_ssp_good_response:1;
+
+ /**
+ * This field is set to true when strict ordering is to be enabled
+ */
+ u32 strict_ordering:1;
+
+ /**
+	 * This field indicates the endianness to be used for the frame.
+	 * Command, task, and response frames use control_frame set to 1.
+ */
+ u32 control_frame:1;
+
+ /**
+ * This field is reserved and the driver should set to 0x00
+ */
+ u32 tl_control_reserved:3;
+
+ /**
+ * This field is set to true when the SCU hardware task timeout control is to
+ * be enabled
+ */
+ u32 timeout_enable:1;
+
+ /**
+ * This field is reserved and the driver should set it to 0x00
+ */
+ u32 pts_control_reserved:7;
+
+ /**
+ * This field should be set to true when block guard is to be enabled
+ */
+ u32 block_guard_enable:1;
+
+ /**
+ * This field is reserved and the driver should set to 0x00
+ */
+ u32 sdma_control_reserved:7;
+
+ /* OFFSET 0x0C */
+ /**
+	 * This field is the address modifier for this io request; it should be
+	 * programmed with the virtual function that is making the request.
+ */
+ u32 address_modifier:16;
+
+ /**
+	 * @todo Do we support mirrored SMP response frames?
+ */
+ u32 mirrored_protocol_engine:3; /* mirrored protocol Engine Index */
+
+ /**
+ * If this is a mirrored request the logical port index for the mirrored RNi
+ * must be programmed.
+ */
+ u32 mirrored_logical_port:4; /* mirrored local port index */
+
+ /**
+ * This field is reserved and the driver must set it to 0x00
+ */
+ u32 reserved_0C_0:8;
+
+ /**
+ * This field must be set to true if the mirrored request processing is to be
+ * enabled.
+ */
+ u32 mirror_request_enable:1; /* Mirrored request Enable */
+
+ /* OFFSET 0x10 */
+ /**
+ * This field is the command iu length in dwords
+ */
+ u32 ssp_command_iu_length:8;
+
+ /**
+	 * This is the target TLR enable bit; it must be set to 0 when creating
+	 * the task context.
+ */
+ u32 xfer_ready_tlr_enable:1;
+
+ /**
+ * This field is reserved and the driver must set it to 0x00
+ */
+ u32 reserved_10_0:7;
+
+ /**
+	 * This is the maximum burst size that the SCU hardware will send in one
+	 * connection. Its value is (N x 512) and N must be a multiple of 2. If
+	 * the value is 0x00 then the maximum burst size is disabled.
+ */
+ u32 ssp_max_burst_size:16;
+
+ /* OFFSET 0x14 */
+ /**
+	 * This field is set to the number of bytes to be transferred in the request.
+ */
+ u32 transfer_length_bytes:24; /* In terms of bytes */
+
+ /**
+ * This field is reserved and the driver should set it to 0x00
+ */
+ u32 reserved_14_0:8;
+
+ /* OFFSET 0x18-0x2C */
+ /**
+	 * This union provides for the protocol specific part of the SCU Task Context.
+ */
+ union protocol_context type;
+
+ /* OFFSET 0x30-0x34 */
+ /**
+ * This field is the upper 32 bits of the 64 bit physical address of the
+ * command iu buffer
+ */
+ u32 command_iu_upper;
+
+ /**
+ * This field is the lower 32 bits of the 64 bit physical address of the
+ * command iu buffer
+ */
+ u32 command_iu_lower;
+
+ /* OFFSET 0x38-0x3C */
+ /**
+ * This field is the upper 32 bits of the 64 bit physical address of the
+ * response iu buffer
+ */
+ u32 response_iu_upper;
+
+ /**
+ * This field is the lower 32 bits of the 64 bit physical address of the
+ * response iu buffer
+ */
+ u32 response_iu_lower;
+
+ /* OFFSET 0x40 */
+ /**
+ * This field is set to the task phase of the SCU hardware. The driver must
+ * set this to 0x01
+ */
+ u32 task_phase:8;
+
+ /**
+ * This field is set to the transport layer task status. The driver must set
+ * this to 0x00
+ */
+ u32 task_status:8;
+
+ /**
+ * This field is used during initiator write TLR
+ */
+ u32 previous_extended_tag:4;
+
+ /**
+	 * This field sets the maximum number of retries for a STP non-data FIS
+ */
+ u32 stp_retry_count:2;
+
+ /**
+ * This field is reserved and the driver must set it to 0x00
+ */
+ u32 reserved_40_1:2;
+
+ /**
+	 * This field is used by the SCU TL to determine when to take a snapshot
+	 * when transmitting read data frames.
+ * - 0x00 The entire IO
+ * - 0x01 32k
+ * - 0x02 64k
+ * - 0x04 128k
+ * - 0x08 256k
+ */
+ u32 ssp_tlr_threshold:4;
+
+ /**
+ * This field is reserved and the driver must set it to 0x00
+ */
+ u32 reserved_40_2:4;
+
+ /* OFFSET 0x44 */
+ u32 write_data_length; /* read only set to 0 */
+
+ /* OFFSET 0x48-0x58 */
+ struct transport_snapshot snapshot; /* read only set to 0 */
+
+ /* OFFSET 0x5C */
+ u32 block_protection_enable:1;
+ u32 block_size:2;
+ u32 block_protection_function:2;
+ u32 reserved_5C_0:9;
+ u32 active_sgl_element:2; /* read only set to 0 */
+ u32 sgl_exhausted:1; /* read only set to 0 */
+ u32 payload_data_transfer_error:4; /* read only set to 0 */
+ u32 frame_buffer_offset:11; /* read only set to 0 */
+
+ /* OFFSET 0x60-0x7C */
+ /**
+ * This field is the first SGL element pair found in the TC data structure.
+ */
+ struct scu_sgl_element_pair sgl_pair_ab;
+ /* OFFSET 0x80-0x9C */
+ /**
+ * This field is the second SGL element pair found in the TC data structure.
+ */
+ struct scu_sgl_element_pair sgl_pair_cd;
+
+ /* OFFSET 0xA0-BC */
+ struct scu_sgl_element_pair sgl_snapshot_ac;
+
+ /* OFFSET 0xC0 */
+ u32 active_sgl_element_pair; /* read only set to 0 */
+
+ /* OFFSET 0xC4-0xCC */
+ u32 reserved_C4_CC[3];
+
+ /* OFFSET 0xD0 */
+ u32 intermediate_crc_value:16;
+ u32 initial_crc_seed:16;
+
+ /* OFFSET 0xD4 */
+ u32 application_tag_for_verify:16;
+ u32 application_tag_for_generate:16;
+
+ /* OFFSET 0xD8 */
+ u32 reference_tag_seed_for_verify_function;
+
+ /* OFFSET 0xDC */
+ u32 reserved_DC;
+
+ /* OFFSET 0xE0 */
+ u32 reserved_E0_0:16;
+ u32 application_tag_mask_for_generate:16;
+
+ /* OFFSET 0xE4 */
+ u32 block_protection_control:16;
+ u32 application_tag_mask_for_verify:16;
+
+ /* OFFSET 0xE8 */
+ u32 block_protection_error:8;
+ u32 reserved_E8_0:24;
+
+ /* OFFSET 0xEC */
+ u32 reference_tag_seed_for_verify;
+
+ /* OFFSET 0xF0 */
+ u32 intermediate_crc_valid_snapshot:16;
+ u32 reserved_F0_0:16;
+
+ /* OFFSET 0xF4 */
+ u32 reference_tag_seed_for_verify_function_snapshot;
+
+ /* OFFSET 0xF8 */
+ u32 snapshot_of_reserved_dword_DC_of_tc;
+
+ /* OFFSET 0xFC */
+ u32 reference_tag_seed_for_generate_function_snapshot;
+
+};
+
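+/*
+ * Illustrative sketch (not driver code): the word-0 fields a driver must
+ * encode before a task context is valid for the hardware.  'tc' and 'tci'
+ * are hypothetical; 0x9 is one of the valid connection rates (0x8-0xA).
+ *
+ *	tc->priority = SCU_TASK_PRIORITY_NORMAL;
+ *	tc->initiator_request = 1;
+ *	tc->connection_rate = 0x9;
+ *	tc->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
+ *	tc->task_index = tci;
+ *	tc->context_type = SCU_TASK_CONTEXT_TYPE;
+ *	tc->valid = SCU_TASK_CONTEXT_VALID;
+ */
+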
+#endif /* _SCU_TASK_CONTEXT_H_ */
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
new file mode 100644
index 00000000000..d6bcdd013dc
--- /dev/null
+++ b/drivers/scsi/isci/task.c
@@ -0,0 +1,1676 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/completion.h>
+#include <linux/irqflags.h>
+#include "sas.h"
+#include <scsi/libsas.h>
+#include "remote_device.h"
+#include "remote_node_context.h"
+#include "isci.h"
+#include "request.h"
+#include "task.h"
+#include "host.h"
+
+/**
+* isci_task_refuse() - complete the request to the upper layer driver in
+* the case where an I/O needs to be completed back in the submit path.
+* @ihost: host on which the request was queued
+* @task: request to complete
+* @response: response code for the completed task.
+* @status: status code for the completed task.
+*
+*/
+static void isci_task_refuse(struct isci_host *ihost, struct sas_task *task,
+ enum service_response response,
+ enum exec_status status)
+{
+ enum isci_completion_selection disposition;
+
+ disposition = isci_perform_normal_io_completion;
+ disposition = isci_task_set_completion_status(task, response, status,
+ disposition);
+
+ /* Tasks aborted specifically by a call to the lldd_abort_task
+ * function should not be completed to the host in the regular path.
+ */
+ switch (disposition) {
+ case isci_perform_normal_io_completion:
+ /* Normal notification (task_done) */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: Normal - task = %p, response=%d, "
+ "status=%d\n",
+ __func__, task, response, status);
+
+ task->lldd_task = NULL;
+
+ isci_execpath_callback(ihost, task, task->task_done);
+ break;
+
+ case isci_perform_aborted_io_completion:
+ /*
+ * No notification because this request is already in the
+ * abort path.
+ */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: Aborted - task = %p, response=%d, "
+ "status=%d\n",
+ __func__, task, response, status);
+ break;
+
+ case isci_perform_error_io_completion:
+ /* Use sas_task_abort */
+ dev_dbg(&ihost->pdev->dev,
+ "%s: Error - task = %p, response=%d, "
+ "status=%d\n",
+ __func__, task, response, status);
+
+ isci_execpath_callback(ihost, task, sas_task_abort);
+ break;
+
+ default:
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci task notification default case!",
+			"%s: isci task notification default case!\n",
+ sas_task_abort(task);
+ break;
+ }
+}
+
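+/* Iterate over 'num' sas_tasks chained through their 'list' members,
+ * starting with 'task'; see the @num parameter of isci_task_execute_task().
+ */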
+#define for_each_sas_task(num, task) \
+ for (; num > 0; num--,\
+ task = list_entry(task->list.next, struct sas_task, list))
+
+
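+/* A device can accept I/O when it is flagged ready, or when it is in NCQ
+ * error recovery and the task is itself the NCQ recovery request.
+ */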
+static inline int isci_device_io_ready(struct isci_remote_device *idev,
+ struct sas_task *task)
+{
+ return idev ? test_bit(IDEV_IO_READY, &idev->flags) ||
+ (test_bit(IDEV_IO_NCQERROR, &idev->flags) &&
+ isci_task_is_ncq_recovery(task))
+ : 0;
+}
+/**
+ * isci_task_execute_task() - This function is one of the SAS Domain Template
+ *    functions. It is called by libsas to send a task down to the
+ *    hardware.
+ * @task: This parameter specifies the SAS task to send.
+ * @num: This parameter specifies the number of tasks to queue.
+ * @gfp_flags: This parameter specifies the context of this call.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_execute_task(struct sas_task *task, int num, gfp_t gfp_flags)
+{
+ struct isci_host *ihost = dev_to_ihost(task->dev);
+ struct isci_remote_device *idev;
+ unsigned long flags;
+ bool io_ready;
+ u16 tag;
+
+ dev_dbg(&ihost->pdev->dev, "%s: num=%d\n", __func__, num);
+
+ for_each_sas_task(num, task) {
+ enum sci_status status = SCI_FAILURE;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev = isci_lookup_device(task->dev);
+ io_ready = isci_device_io_ready(idev, task);
+ tag = isci_alloc_tag(ihost);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ dev_dbg(&ihost->pdev->dev,
+ "task: %p, num: %d dev: %p idev: %p:%#lx cmd = %p\n",
+ task, num, task->dev, idev, idev ? idev->flags : 0,
+ task->uldd_task);
+
+ if (!idev) {
+ isci_task_refuse(ihost, task, SAS_TASK_UNDELIVERED,
+ SAS_DEVICE_UNKNOWN);
+ } else if (!io_ready || tag == SCI_CONTROLLER_INVALID_IO_TAG) {
+ /* Indicate QUEUE_FULL so that the scsi midlayer
+ * retries.
+ */
+ isci_task_refuse(ihost, task, SAS_TASK_COMPLETE,
+ SAS_QUEUE_FULL);
+ } else {
+ /* There is a device and it's ready for I/O. */
+ spin_lock_irqsave(&task->task_state_lock, flags);
+
+ if (task->task_state_flags & SAS_TASK_STATE_ABORTED) {
+ /* The I/O was aborted. */
+ spin_unlock_irqrestore(&task->task_state_lock,
+ flags);
+
+ isci_task_refuse(ihost, task,
+ SAS_TASK_UNDELIVERED,
+ SAM_STAT_TASK_ABORTED);
+ } else {
+ task->task_state_flags |= SAS_TASK_AT_INITIATOR;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ /* build and send the request. */
+ status = isci_request_execute(ihost, idev, task, tag);
+
+ if (status != SCI_SUCCESS) {
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ /* Did not really start this command. */
+ task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ /* Indicate QUEUE_FULL so that the scsi
+					 * midlayer retries. If the request
+ * failed for remote device reasons,
+ * it gets returned as
+ * SAS_TASK_UNDELIVERED next time
+ * through.
+ */
+ isci_task_refuse(ihost, task,
+ SAS_TASK_COMPLETE,
+ SAS_QUEUE_FULL);
+ }
+ }
+ }
+ if (status != SCI_SUCCESS && tag != SCI_CONTROLLER_INVALID_IO_TAG) {
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ /* command never hit the device, so just free
+ * the tci and skip the sequence increment
+ */
+ isci_tci_free(ihost, ISCI_TAG_TCI(tag));
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ }
+ isci_put_device(idev);
+ }
+ return 0;
+}
+
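+/* For the SATA SRST task management functions the request is built by
+ * hand as a host-to-device register FIS (type 0x27): the C bit (bit 7
+ * of the flags byte) is cleared so the FIS updates the device control
+ * register, and SRST is set or cleared in the control field for the
+ * assert and deassert halves of the soft reset.
+ */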
+static enum sci_status isci_sata_management_task_request_build(struct isci_request *ireq)
+{
+ struct isci_tmf *isci_tmf;
+ enum sci_status status;
+
+ if (tmf_task != ireq->ttype)
+ return SCI_FAILURE;
+
+ isci_tmf = isci_request_access_tmf(ireq);
+
+ switch (isci_tmf->tmf_code) {
+
+ case isci_tmf_sata_srst_high:
+ case isci_tmf_sata_srst_low: {
+ struct host_to_dev_fis *fis = &ireq->stp.cmd;
+
+ memset(fis, 0, sizeof(*fis));
+
+ fis->fis_type = 0x27;
+ fis->flags &= ~0x80;
+ fis->flags &= 0xF0;
+ if (isci_tmf->tmf_code == isci_tmf_sata_srst_high)
+ fis->control |= ATA_SRST;
+ else
+ fis->control &= ~ATA_SRST;
+ break;
+ }
+	/* other management commands go here... */
+ default:
+ return SCI_FAILURE;
+ }
+
+ /* core builds the protocol specific request
+ * based on the h2d fis.
+ */
+ status = sci_task_request_construct_sata(ireq);
+
+ return status;
+}
+
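+/* Allocate and construct a TMF request: common initialization from the
+ * tag, core task-request construction, then protocol-specific
+ * construction (SSP task frame or SATA SRST FIS) chosen by the target
+ * device type.
+ */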
+static struct isci_request *isci_task_request_build(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ u16 tag, struct isci_tmf *isci_tmf)
+{
+ enum sci_status status = SCI_FAILURE;
+ struct isci_request *ireq = NULL;
+ struct domain_device *dev;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: isci_tmf = %p\n", __func__, isci_tmf);
+
+ dev = idev->domain_dev;
+
+ /* do common allocation and init of request object. */
+ ireq = isci_tmf_request_from_tag(ihost, isci_tmf, tag);
+ if (!ireq)
+ return NULL;
+
+	/* let the core do its construct. */
+ status = sci_task_request_construct(ihost, idev, tag,
+ ireq);
+
+ if (status != SCI_SUCCESS) {
+ dev_warn(&ihost->pdev->dev,
+ "%s: sci_task_request_construct failed - "
+ "status = 0x%x\n",
+ __func__,
+ status);
+ return NULL;
+ }
+
+ /* XXX convert to get this from task->tproto like other drivers */
+ if (dev->dev_type == SAS_END_DEV) {
+ isci_tmf->proto = SAS_PROTOCOL_SSP;
+ status = sci_task_request_construct_ssp(ireq);
+ if (status != SCI_SUCCESS)
+ return NULL;
+ }
+
+ if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
+ isci_tmf->proto = SAS_PROTOCOL_SATA;
+ status = isci_sata_management_task_request_build(ireq);
+
+ if (status != SCI_SUCCESS)
+ return NULL;
+ }
+ return ireq;
+}
+
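+/* Common TMF execution path: allocate a tag, build the request, start
+ * it in the core, then block on a completion.  If the timeout expires,
+ * the request is terminated and we wait again for the termination to
+ * post the same completion before inspecting the TMF status.
+ */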
+static int isci_task_execute_tmf(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_tmf *tmf, unsigned long timeout_ms)
+{
+ DECLARE_COMPLETION_ONSTACK(completion);
+ enum sci_task_status status = SCI_TASK_FAILURE;
+ struct isci_request *ireq;
+ int ret = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
+ unsigned long timeleft;
+ u16 tag;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ tag = isci_alloc_tag(ihost);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (tag == SCI_CONTROLLER_INVALID_IO_TAG)
+ return ret;
+
+ /* sanity check, return TMF_RESP_FUNC_FAILED
+ * if the device is not there and ready.
+ */
+ if (!idev ||
+ (!test_bit(IDEV_IO_READY, &idev->flags) &&
+ !test_bit(IDEV_IO_NCQERROR, &idev->flags))) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev = %p not ready (%#lx)\n",
+ __func__,
+ idev, idev ? idev->flags : 0);
+ goto err_tci;
+ } else
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev = %p\n",
+ __func__, idev);
+
+ /* Assign the pointer to the TMF's completion kernel wait structure. */
+ tmf->complete = &completion;
+
+ ireq = isci_task_request_build(ihost, idev, tag, tmf);
+ if (!ireq)
+ goto err_tci;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ /* start the TMF io. */
+ status = sci_controller_start_task(ihost, idev, ireq);
+
+ if (status != SCI_TASK_SUCCESS) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: start_io failed - status = 0x%x, request = %p\n",
+ __func__,
+ status,
+ ireq);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ goto err_tci;
+ }
+
+ if (tmf->cb_state_func != NULL)
+ tmf->cb_state_func(isci_tmf_started, tmf, tmf->cb_data);
+
+ isci_request_change_state(ireq, started);
+
+ /* add the request to the remote device request list. */
+ list_add(&ireq->dev_node, &idev->reqs_in_process);
+
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /* Wait for the TMF to complete, or a timeout. */
+ timeleft = wait_for_completion_timeout(&completion,
+ msecs_to_jiffies(timeout_ms));
+
+ if (timeleft == 0) {
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ if (tmf->cb_state_func != NULL)
+ tmf->cb_state_func(isci_tmf_timed_out, tmf, tmf->cb_data);
+
+ sci_controller_terminate_request(ihost,
+ idev,
+ ireq);
+
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ wait_for_completion(tmf->complete);
+ }
+
+ isci_print_tmf(tmf);
+
+ if (tmf->status == SCI_SUCCESS)
+ ret = TMF_RESP_FUNC_COMPLETE;
+ else if (tmf->status == SCI_FAILURE_IO_RESPONSE_VALID) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: tmf.status == "
+ "SCI_FAILURE_IO_RESPONSE_VALID\n",
+ __func__);
+ ret = TMF_RESP_FUNC_COMPLETE;
+ }
+ /* Else - leave the default "failed" status alone. */
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: completed request = %p\n",
+ __func__,
+ ireq);
+
+ return ret;
+
+ err_tci:
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ isci_tci_free(ihost, ISCI_TAG_TCI(tag));
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ return ret;
+}
+
+static void isci_task_build_tmf(struct isci_tmf *tmf,
+ enum isci_tmf_function_codes code,
+ void (*tmf_sent_cb)(enum isci_tmf_cb_state,
+ struct isci_tmf *,
+ void *),
+ void *cb_data)
+{
+ memset(tmf, 0, sizeof(*tmf));
+
+ tmf->tmf_code = code;
+ tmf->cb_state_func = tmf_sent_cb;
+ tmf->cb_data = cb_data;
+}
+
+static void isci_task_build_abort_task_tmf(struct isci_tmf *tmf,
+ enum isci_tmf_function_codes code,
+ void (*tmf_sent_cb)(enum isci_tmf_cb_state,
+ struct isci_tmf *,
+ void *),
+ struct isci_request *old_request)
+{
+ isci_task_build_tmf(tmf, code, tmf_sent_cb, old_request);
+ tmf->io_tag = old_request->io_tag;
+}
+
+/**
+ * isci_task_validate_request_to_abort() - This function checks the given I/O
+ * against the "started" state. If the request is still "started", it's
+ * state is changed to aborted. NOTE: isci_host->scic_lock MUST BE HELD
+ * BEFORE CALLING THIS FUNCTION.
+ * @isci_request: This parameter specifies the request object to control.
+ * @isci_host: This parameter specifies the ISCI host object
+ * @isci_device: This is the device to which the request is pending.
+ * @aborted_io_completion: This is a completion structure that will be added to
+ * the request in case it is changed to aborting; this completion is
+ * triggered when the request is fully completed.
+ *
+ * Either "started" on successful change of the task status to "aborted", or
+ * "unallocated" if the task cannot be controlled.
+ */
+static enum isci_request_status isci_task_validate_request_to_abort(
+ struct isci_request *isci_request,
+ struct isci_host *isci_host,
+ struct isci_remote_device *isci_device,
+ struct completion *aborted_io_completion)
+{
+ enum isci_request_status old_state = unallocated;
+
+ /* Only abort the task if it's in the
+ * device's request_in_process list
+ */
+ if (isci_request && !list_empty(&isci_request->dev_node)) {
+ old_state = isci_request_change_started_to_aborted(
+ isci_request, aborted_io_completion);
+
+ }
+
+ return old_state;
+}
+
+/**
+ * isci_request_cleanup_completed_loiterer() - This function will take care of
+ *    the final cleanup on any request which has been explicitly terminated.
+ * @isci_host: This parameter specifies the ISCI host object
+ * @isci_device: This is the device to which the request is pending.
+ * @isci_request: This parameter specifies the terminated request object.
+ * @task: This parameter is the libsas I/O request.
+ */
+static void isci_request_cleanup_completed_loiterer(
+ struct isci_host *isci_host,
+ struct isci_remote_device *isci_device,
+ struct isci_request *isci_request,
+ struct sas_task *task)
+{
+ unsigned long flags;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_device=%p, request=%p, task=%p\n",
+ __func__, isci_device, isci_request, task);
+
+ if (task != NULL) {
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ task->lldd_task = NULL;
+
+ task->task_state_flags &= ~SAS_TASK_NEED_DEV_RESET;
+
+ isci_set_task_doneflags(task);
+
+ /* If this task is not in the abort path, call task_done. */
+ if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ task->task_done(task);
+ } else
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ }
+
+ if (isci_request != NULL) {
+ spin_lock_irqsave(&isci_host->scic_lock, flags);
+ list_del_init(&isci_request->dev_node);
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+ }
+}
+
+/**
+ * isci_terminate_request_core() - This function will terminate the given
+ * request, and wait for it to complete. This function must only be called
+ * from a thread that can wait. Note that the request is terminated and
+ * completed (back to the host, if started there).
+ * @ihost: This SCU.
+ * @idev: The target.
+ * @isci_request: The I/O request to be terminated.
+ *
+ */
+static void isci_terminate_request_core(struct isci_host *ihost,
+ struct isci_remote_device *idev,
+ struct isci_request *isci_request)
+{
+ enum sci_status status = SCI_SUCCESS;
+ bool was_terminated = false;
+ bool needs_cleanup_handling = false;
+ enum isci_request_status request_status;
+ unsigned long flags;
+ unsigned long termination_completed = 1;
+ struct completion *io_request_completion;
+ struct sas_task *task;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: device = %p; request = %p\n",
+ __func__, idev, isci_request);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+
+ io_request_completion = isci_request->io_request_completion;
+
+ task = (isci_request->ttype == io_task)
+ ? isci_request_access_task(isci_request)
+ : NULL;
+
+ /* Note that we are not going to control
+ * the target to abort the request.
+ */
+ set_bit(IREQ_COMPLETE_IN_TARGET, &isci_request->flags);
+
+ /* Make sure the request wasn't just sitting around signalling
+	 * a device condition (if the request handle is NULL, then the
+ * request completed but needed additional handling here).
+ */
+ if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
+ was_terminated = true;
+ needs_cleanup_handling = true;
+ status = sci_controller_terminate_request(ihost,
+ idev,
+ isci_request);
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /*
+ * The only time the request to terminate will
+ * fail is when the io request is completed and
+ * being aborted.
+ */
+ if (status != SCI_SUCCESS) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: sci_controller_terminate_request"
+ " returned = 0x%x\n",
+ __func__, status);
+
+ isci_request->io_request_completion = NULL;
+
+ } else {
+ if (was_terminated) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: before completion wait (%p/%p)\n",
+ __func__, isci_request, io_request_completion);
+
+ /* Wait here for the request to complete. */
+ #define TERMINATION_TIMEOUT_MSEC 500
+ termination_completed
+ = wait_for_completion_timeout(
+ io_request_completion,
+ msecs_to_jiffies(TERMINATION_TIMEOUT_MSEC));
+
+ if (!termination_completed) {
+
+ /* The request to terminate has timed out. */
+ spin_lock_irqsave(&ihost->scic_lock,
+ flags);
+
+ /* Check for state changes. */
+ if (!test_bit(IREQ_TERMINATED, &isci_request->flags)) {
+
+ /* The best we can do is to have the
+ * request die a silent death if it
+ * ever really completes.
+ *
+ * Set the request state to "dead",
+ * and clear the task pointer so that
+ * an actual completion event callback
+ * doesn't do anything.
+ */
+ isci_request->status = dead;
+ isci_request->io_request_completion
+ = NULL;
+
+ if (isci_request->ttype == io_task) {
+
+ /* Break links with the
+ * sas_task.
+ */
+ isci_request->ttype_ptr.io_task_ptr
+ = NULL;
+ }
+ } else
+ termination_completed = 1;
+
+ spin_unlock_irqrestore(&ihost->scic_lock,
+ flags);
+
+ if (!termination_completed) {
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: *** Timeout waiting for "
+ "termination(%p/%p)\n",
+ __func__, io_request_completion,
+ isci_request);
+
+ /* The request can no longer be referenced
+ * safely since it may go away if the
+				 * termination ever really does complete.
+ */
+ isci_request = NULL;
+ }
+ }
+ if (termination_completed)
+ dev_dbg(&ihost->pdev->dev,
+ "%s: after completion wait (%p/%p)\n",
+ __func__, isci_request, io_request_completion);
+ }
+
+ if (termination_completed) {
+
+ isci_request->io_request_completion = NULL;
+
+ /* Peek at the status of the request. This will tell
+ * us if there was special handling on the request such that it
+ * needs to be detached and freed here.
+ */
+ spin_lock_irqsave(&isci_request->state_lock, flags);
+ request_status = isci_request->status;
+
+ if ((isci_request->ttype == io_task) /* TMFs are in their own thread */
+ && ((request_status == aborted)
+ || (request_status == aborting)
+ || (request_status == terminating)
+ || (request_status == completed)
+ || (request_status == dead)
+ )
+ ) {
+
+ /* The completion routine won't free a request in
+ * the aborted/aborting/etc. states, so we do
+ * it here.
+ */
+ needs_cleanup_handling = true;
+ }
+ spin_unlock_irqrestore(&isci_request->state_lock, flags);
+
+ }
+ if (needs_cleanup_handling)
+ isci_request_cleanup_completed_loiterer(
+ ihost, idev, isci_request, task);
+ }
+}
+
+/**
+ * isci_terminate_pending_requests() - This function will change the state of
+ *    all of the requests on the given device to "terminating", will terminate
+ *    the requests, and wait for them to complete. This function must only be
+ *    called from a thread that can wait. Note that the requests are all
+ *    terminated and completed (back to the host, if started there).
+ * @ihost: This parameter specifies the SCU.
+ * @idev: This parameter specifies the target.
+ *
+ */
+void isci_terminate_pending_requests(struct isci_host *ihost,
+ struct isci_remote_device *idev)
+{
+ struct completion request_completion;
+ enum isci_request_status old_state;
+ unsigned long flags;
+ LIST_HEAD(list);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ list_splice_init(&idev->reqs_in_process, &list);
+
+ /* assumes that isci_terminate_request_core deletes from the list */
+ while (!list_empty(&list)) {
+ struct isci_request *ireq = list_entry(list.next, typeof(*ireq), dev_node);
+
+ /* Change state to "terminating" if it is currently
+ * "started".
+ */
+ old_state = isci_request_change_started_to_newstate(ireq,
+ &request_completion,
+ terminating);
+ switch (old_state) {
+ case started:
+ case completed:
+ case aborting:
+ break;
+ default:
+ /* termination in progress, or otherwise dispositioned.
+ * We know the request was on 'list' so should be safe
+ * to move it back to reqs_in_process
+ */
+ list_move(&ireq->dev_node, &idev->reqs_in_process);
+ ireq = NULL;
+ break;
+ }
+
+ if (!ireq)
+ continue;
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ init_completion(&request_completion);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: idev=%p request=%p; task=%p old_state=%d\n",
+ __func__, idev, ireq,
+ ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL,
+ old_state);
+
+ /* If the old_state is started:
+ * This request was not already being aborted. If it had been,
+ * then the aborting I/O (ie. the TMF request) would not be in
+ * the aborting state, and thus would be terminated here. Note
+ * that since the TMF completion's call to the kernel function
+ * "complete()" does not happen until the pending I/O request
+ * terminate fully completes, we do not have to implement a
+ * special wait here for already aborting requests - the
+ * termination of the TMF request will force the request
+		 * to finish its already-started terminate.
+ *
+ * If old_state == completed:
+ * This request completed from the SCU hardware perspective
+ * and now just needs cleaning up in terms of freeing the
+ * request and potentially calling up to libsas.
+ *
+ * If old_state == aborting:
+ * This request has already gone through a TMF timeout, but may
+ * not have been terminated; needs cleaning up at least.
+ */
+ isci_terminate_request_core(ihost, idev, ireq);
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+}
+
+/**
+ * isci_task_send_lu_reset_sas() - This function sends a LUN reset TMF to a
+ *    SAS device on behalf of the SAS Domain Template functions.
+ * @isci_host: This parameter specifies the ISCI host object.
+ * @isci_device: This parameter specifies the remote device to be reset.
+ * @lun: This parameter specifies the lun to be reset.
+ *
+ * status, zero indicates success.
+ */
+static int isci_task_send_lu_reset_sas(
+ struct isci_host *isci_host,
+ struct isci_remote_device *isci_device,
+ u8 *lun)
+{
+ struct isci_tmf tmf;
+ int ret = TMF_RESP_FUNC_FAILED;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_host = %p, isci_device = %p\n",
+ __func__, isci_host, isci_device);
+	/* Send the LUN reset to the target. By the time the call returns,
+	 * the TMF has fully executed in the target (in which case the return
+	 * value is "TMF_RESP_FUNC_COMPLETE"), or the request timed out or was
+	 * otherwise unable to be executed ("TMF_RESP_FUNC_FAILED").
+ */
+ isci_task_build_tmf(&tmf, isci_tmf_ssp_lun_reset, NULL, NULL);
+
+ #define ISCI_LU_RESET_TIMEOUT_MS 2000 /* 2 second timeout. */
+ ret = isci_task_execute_tmf(isci_host, isci_device, &tmf, ISCI_LU_RESET_TIMEOUT_MS);
+
+ if (ret == TMF_RESP_FUNC_COMPLETE)
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: %p: TMF_LU_RESET passed\n",
+ __func__, isci_device);
+ else
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: %p: TMF_LU_RESET failed (%x)\n",
+ __func__, isci_device, ret);
+
+ return ret;
+}
+
+static int isci_task_send_lu_reset_sata(struct isci_host *ihost,
+ struct isci_remote_device *idev, u8 *lun)
+{
+ int ret = TMF_RESP_FUNC_FAILED;
+ struct isci_tmf tmf;
+
+ /* Send the soft reset to the target */
+ #define ISCI_SRST_TIMEOUT_MS 25000 /* 25 second timeout. */
+ isci_task_build_tmf(&tmf, isci_tmf_sata_srst_high, NULL, NULL);
+
+ ret = isci_task_execute_tmf(ihost, idev, &tmf, ISCI_SRST_TIMEOUT_MS);
+
+ if (ret != TMF_RESP_FUNC_COMPLETE) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: Assert SRST failed (%p) = %x",
+ __func__, idev, ret);
+
+ /* Return the failure so that the LUN reset is escalated
+ * to a target reset.
+ */
+ }
+ return ret;
+}
+
+/**
+ * isci_task_lu_reset() - This function is one of the SAS Domain Template
+ * functions. This is one of the Task Management functions called by libsas
+ * to reset the given lun. Note the assumption that while this call is
+ * executing, no I/O will be sent by the host to the device.
+ * @lun: This parameter specifies the lun to be reset.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_lu_reset(struct domain_device *domain_device, u8 *lun)
+{
+ struct isci_host *isci_host = dev_to_ihost(domain_device);
+ struct isci_remote_device *isci_device;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&isci_host->scic_lock, flags);
+ isci_device = isci_lookup_device(domain_device);
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: domain_device=%p, isci_host=%p; isci_device=%p\n",
+ __func__, domain_device, isci_host, isci_device);
+
+ if (isci_device)
+ set_bit(IDEV_EH, &isci_device->flags);
+
+ /* If there is a device reset pending on any request in the
+ * device's list, fail this LUN reset request in order to
+ * escalate to the device reset.
+ */
+ if (!isci_device ||
+ isci_device_is_reset_pending(isci_host, isci_device)) {
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: No dev (%p), or "
+ "RESET PENDING: domain_device=%p\n",
+ __func__, isci_device, domain_device);
+ ret = TMF_RESP_FUNC_FAILED;
+ goto out;
+ }
+
+ /* Send the task management part of the reset. */
+ if (sas_protocol_ata(domain_device->tproto)) {
+ ret = isci_task_send_lu_reset_sata(isci_host, isci_device, lun);
+ } else
+ ret = isci_task_send_lu_reset_sas(isci_host, isci_device, lun);
+
+ /* If the LUN reset worked, all the I/O can now be terminated. */
+ if (ret == TMF_RESP_FUNC_COMPLETE)
+ /* Terminate all I/O now. */
+ isci_terminate_pending_requests(isci_host,
+ isci_device);
+
+ out:
+ isci_put_device(isci_device);
+ return ret;
+}
+
+
+/* int (*lldd_clear_nexus_port)(struct asd_sas_port *); */
+int isci_task_clear_nexus_port(struct asd_sas_port *port)
+{
+ return TMF_RESP_FUNC_FAILED;
+}
+
+
+
+int isci_task_clear_nexus_ha(struct sas_ha_struct *ha)
+{
+ return TMF_RESP_FUNC_FAILED;
+}
+
+/* Task Management Functions. Must be called from process context. */
+
+/**
+ * isci_abort_task_process_cb() - This is a helper function for the abort task
+ * TMF command. It manages the request state with respect to the successful
+ * transmission / completion of the abort task request.
+ * @cb_state: This parameter specifies when this function was called - either
+ *    after the TMF request has been started or after it has timed out.
+ * @tmf: This parameter specifies the TMF in progress.
+ *
+ *
+ */
+static void isci_abort_task_process_cb(
+ enum isci_tmf_cb_state cb_state,
+ struct isci_tmf *tmf,
+ void *cb_data)
+{
+ struct isci_request *old_request;
+
+ old_request = (struct isci_request *)cb_data;
+
+ dev_dbg(&old_request->isci_host->pdev->dev,
+ "%s: tmf=%p, old_request=%p\n",
+ __func__, tmf, old_request);
+
+ switch (cb_state) {
+
+ case isci_tmf_started:
+ /* The TMF has been started. Nothing to do here, since the
+ * request state was already set to "aborted" by the abort
+ * task function.
+ */
+ if ((old_request->status != aborted)
+ && (old_request->status != completed))
+ dev_dbg(&old_request->isci_host->pdev->dev,
+ "%s: Bad request status (%d): tmf=%p, old_request=%p\n",
+ __func__, old_request->status, tmf, old_request);
+ break;
+
+ case isci_tmf_timed_out:
+
+ /* Set the task's state to "aborting", since the abort task
+ * function thread set it to "aborted" (above) in anticipation
+ * of the task management request working correctly. Since the
+ * timeout has now fired, the TMF request failed. We set the
+ * state such that the request completion will indicate the
+ * device is no longer present.
+ */
+ isci_request_change_state(old_request, aborting);
+ break;
+
+ default:
+ dev_dbg(&old_request->isci_host->pdev->dev,
+ "%s: Bad cb_state (%d): tmf=%p, old_request=%p\n",
+ __func__, cb_state, tmf, old_request);
+ break;
+ }
+}
+
+/**
+ * isci_task_abort_task() - This function is one of the SAS Domain Template
+ * functions. This function is called by libsas to abort a specified task.
+ * @task: This parameter specifies the SAS task to abort.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_abort_task(struct sas_task *task)
+{
+ struct isci_host *isci_host = dev_to_ihost(task->dev);
+ DECLARE_COMPLETION_ONSTACK(aborted_io_completion);
+ struct isci_request *old_request = NULL;
+ enum isci_request_status old_state;
+ struct isci_remote_device *isci_device = NULL;
+ struct isci_tmf tmf;
+ int ret = TMF_RESP_FUNC_FAILED;
+ unsigned long flags;
+ bool any_dev_reset = false;
+
+ /* Get the isci_request reference from the task. Note that
+ * this check does not depend on the pending request list
+ * in the device, because tasks driving resets may land here
+ * after completion in the core.
+ */
+ spin_lock_irqsave(&isci_host->scic_lock, flags);
+ spin_lock(&task->task_state_lock);
+
+ old_request = task->lldd_task;
+
+ /* If task is already done, the request isn't valid */
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
+ (task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
+ old_request)
+ isci_device = isci_lookup_device(task->dev);
+
+ spin_unlock(&task->task_state_lock);
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: task = %p\n", __func__, task);
+
+ if (!isci_device || !old_request)
+ goto out;
+
+ set_bit(IDEV_EH, &isci_device->flags);
+
+ /* This version of the driver will fail abort requests for
+ * SATA/STP. Failing the abort request this way will cause the
+	 * SCSI error handler thread to escalate to LUN reset.
+ */
+ if (sas_protocol_ata(task->task_proto)) {
+ dev_dbg(&isci_host->pdev->dev,
+ " task %p is for a STP/SATA device;"
+ " returning TMF_RESP_FUNC_FAILED\n"
+ " to cause a LUN reset...\n", task);
+ goto out;
+ }
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: old_request == %p\n", __func__, old_request);
+
+ any_dev_reset = isci_device_is_reset_pending(isci_host, isci_device);
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+
+ any_dev_reset = any_dev_reset || (task->task_state_flags & SAS_TASK_NEED_DEV_RESET);
+
+ /* If the extraction of the request reference from the task
+ * failed, then the request has been completed (or if there is a
+ * pending reset then this abort request function must be failed
+ * in order to escalate to the target reset).
+ */
+ if ((old_request == NULL) || any_dev_reset) {
+
+ /* If the device reset task flag is set, fail the task
+ * management request. Otherwise, the original request
+ * has completed.
+ */
+ if (any_dev_reset) {
+
+ /* Turn off the task's DONE to make sure this
+ * task is escalated to a target reset.
+ */
+ task->task_state_flags &= ~SAS_TASK_STATE_DONE;
+
+ /* Make the reset happen as soon as possible. */
+ task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
+
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ /* Fail the task management request in order to
+ * escalate to the target reset.
+ */
+ ret = TMF_RESP_FUNC_FAILED;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: Failing task abort in order to "
+ "escalate to target reset because\n"
+ "SAS_TASK_NEED_DEV_RESET is set for "
+ "task %p on dev %p\n",
+ __func__, task, isci_device);
+
+
+ } else {
+ /* The request has already completed and there
+ * is nothing to do here other than to set the task
+ * done bit, and indicate that the task abort function
+			 * was successful.
+ */
+ isci_set_task_doneflags(task);
+
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ ret = TMF_RESP_FUNC_COMPLETE;
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: abort task not needed for %p\n",
+ __func__, task);
+ }
+ goto out;
+ } else {
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+ }
+
+ spin_lock_irqsave(&isci_host->scic_lock, flags);
+
+ /* Check the request status and change to "aborted" if currently
+ * "starting"; if true then set the I/O kernel completion
+ * struct that will be triggered when the request completes.
+ */
+ old_state = isci_task_validate_request_to_abort(
+ old_request, isci_host, isci_device,
+ &aborted_io_completion);
+ if ((old_state != started) &&
+ (old_state != completed) &&
+ (old_state != aborting)) {
+
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+ /* The request was already being handled by someone else (because
+ * they got to set the state away from started).
+ */
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: device = %p; old_request %p already being aborted\n",
+ __func__,
+ isci_device, old_request);
+ ret = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+ }
+ if (task->task_proto == SAS_PROTOCOL_SMP ||
+ test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags)) {
+
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: SMP request (%d)"
+ " or complete_in_target (%d), thus no TMF\n",
+ __func__, (task->task_proto == SAS_PROTOCOL_SMP),
+ test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags));
+
+ /* Set the state on the task. */
+ isci_task_all_done(task);
+
+ ret = TMF_RESP_FUNC_COMPLETE;
+
+ /* Stopping and SMP devices are not sent a TMF, and are not
+ * reset, but the outstanding I/O request is terminated below.
+ */
+ } else {
+		/* Fill in the tmf structure */
+ isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
+ isci_abort_task_process_cb,
+ old_request);
+
+ spin_unlock_irqrestore(&isci_host->scic_lock, flags);
+
+ #define ISCI_ABORT_TASK_TIMEOUT_MS 500 /* half second timeout. */
+ ret = isci_task_execute_tmf(isci_host, isci_device, &tmf,
+ ISCI_ABORT_TASK_TIMEOUT_MS);
+
+ if (ret != TMF_RESP_FUNC_COMPLETE)
+ dev_dbg(&isci_host->pdev->dev,
+ "%s: isci_task_send_tmf failed\n",
+ __func__);
+ }
+ if (ret == TMF_RESP_FUNC_COMPLETE) {
+ set_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags);
+
+ /* Clean up the request on our side, and wait for the aborted
+ * I/O to complete.
+ */
+ isci_terminate_request_core(isci_host, isci_device, old_request);
+ }
+
+ /* Make sure we do not leave a reference to aborted_io_completion */
+ old_request->io_request_completion = NULL;
+ out:
+ isci_put_device(isci_device);
+ return ret;
+}
+
+/**
+ * isci_task_abort_task_set() - This function is one of the SAS Domain Template
+ * functions. This is one of the Task Management functions called by libsas
+ * to abort all tasks for the given lun.
+ * @d_device: This parameter specifies the domain device associated with this
+ * request.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_abort_task_set(
+ struct domain_device *d_device,
+ u8 *lun)
+{
+ return TMF_RESP_FUNC_FAILED;
+}
+
+
+/**
+ * isci_task_clear_aca() - This function is one of the SAS Domain Template
+ * functions. This is one of the Task Management functions called by libsas.
+ * @d_device: This parameter specifies the domain device associated with this
+ * request.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_clear_aca(
+ struct domain_device *d_device,
+ u8 *lun)
+{
+ return TMF_RESP_FUNC_FAILED;
+}
+
+
+
+/**
+ * isci_task_clear_task_set() - This function is one of the SAS Domain Template
+ * functions. This is one of the Task Management functions called by libsas.
+ * @d_device: This parameter specifies the domain device associated with this
+ * request.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_clear_task_set(
+ struct domain_device *d_device,
+ u8 *lun)
+{
+ return TMF_RESP_FUNC_FAILED;
+}
+
+
+/**
+ * isci_task_query_task() - This function is implemented to cause libsas to
+ * correctly escalate the failed abort to a LUN or target reset (this is
+ * because the libsas function sas_scsi_find_task() does not correctly
+ * interpret all return codes from the abort task call). When
+ * TMF_RESP_FUNC_SUCC is returned, libsas turns this into a LUN reset;
+ * when TMF_RESP_FUNC_FAILED is returned, libsas will turn this into a
+ * target reset.
+ * @task: This parameter specifies the sas task being queried.
+ * @lun: This parameter specifies the lun associated with this request.
+ *
+ * status, zero indicates success.
+ */
+int isci_task_query_task(
+ struct sas_task *task)
+{
+ /* See if there is a pending device reset for this device. */
+ if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET)
+ return TMF_RESP_FUNC_FAILED;
+ else
+ return TMF_RESP_FUNC_SUCC;
+}
+
+/**
+ * isci_task_request_complete() - This function is called by the sci core when
+ * a task request completes.
+ * @ihost: This parameter specifies the ISCI host object
+ * @ireq: This parameter is the completed isci_request object.
+ * @completion_status: This parameter specifies the completion status from the
+ * sci core.
+ *
+ * none.
+ */
+void
+isci_task_request_complete(struct isci_host *ihost,
+ struct isci_request *ireq,
+ enum sci_task_status completion_status)
+{
+ struct isci_tmf *tmf = isci_request_access_tmf(ireq);
+ struct completion *tmf_complete;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: request = %p, status=%d\n",
+ __func__, ireq, completion_status);
+
+ isci_request_change_state(ireq, completed);
+
+ tmf->status = completion_status;
+ set_bit(IREQ_COMPLETE_IN_TARGET, &ireq->flags);
+
+ if (tmf->proto == SAS_PROTOCOL_SSP) {
+ memcpy(&tmf->resp.resp_iu,
+ &ireq->ssp.rsp,
+ SSP_RESP_IU_MAX_SIZE);
+ } else if (tmf->proto == SAS_PROTOCOL_SATA) {
+ memcpy(&tmf->resp.d2h_fis,
+ &ireq->stp.rsp,
+ sizeof(struct dev_to_host_fis));
+ }
+
+ /* PRINT_TMF( ((struct isci_tmf *)request->task)); */
+ tmf_complete = tmf->complete;
+
+ sci_controller_complete_io(ihost, ireq->target_device, ireq);
+	/* Set the 'terminated' flag to make sure the request cannot be terminated
+ * or completed again.
+ */
+ set_bit(IREQ_TERMINATED, &ireq->flags);
+
+ isci_request_change_state(ireq, unallocated);
+ list_del_init(&ireq->dev_node);
+
+ /* The task management part completes last. */
+ complete(tmf_complete);
+}
+
+static void isci_smp_task_timedout(unsigned long _task)
+{
+ struct sas_task *task = (void *) _task;
+ unsigned long flags;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
+ task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ complete(&task->completion);
+}
+
+static void isci_smp_task_done(struct sas_task *task)
+{
+ if (!del_timer(&task->timer))
+ return;
+ complete(&task->completion);
+}
+
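+/* Minimal sas_task allocation and free for internally generated SMP
+ * requests; only the fields used by the execute and abort paths below
+ * are initialized.
+ */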
+static struct sas_task *isci_alloc_task(void)
+{
+ struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL);
+
+ if (task) {
+ INIT_LIST_HEAD(&task->list);
+ spin_lock_init(&task->task_state_lock);
+ task->task_state_flags = SAS_TASK_STATE_PENDING;
+ init_timer(&task->timer);
+ init_completion(&task->completion);
+ }
+
+ return task;
+}
+
+static void isci_free_task(struct isci_host *ihost, struct sas_task *task)
+{
+ if (task) {
+ BUG_ON(!list_empty(&task->list));
+ kfree(task);
+ }
+}
+
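+/* Synchronously execute an SMP request, retrying up to three times.
+ * Each attempt is bounded by a 10 second timer; on timeout the task is
+ * marked aborted and isci_task_abort_task() is invoked to reclaim it.
+ */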
+static int isci_smp_execute_task(struct isci_host *ihost,
+ struct domain_device *dev, void *req,
+ int req_size, void *resp, int resp_size)
+{
+ int res, retry;
+ struct sas_task *task = NULL;
+
+ for (retry = 0; retry < 3; retry++) {
+ task = isci_alloc_task();
+ if (!task)
+ return -ENOMEM;
+
+ task->dev = dev;
+ task->task_proto = dev->tproto;
+ sg_init_one(&task->smp_task.smp_req, req, req_size);
+ sg_init_one(&task->smp_task.smp_resp, resp, resp_size);
+
+ task->task_done = isci_smp_task_done;
+
+ task->timer.data = (unsigned long) task;
+ task->timer.function = isci_smp_task_timedout;
+ task->timer.expires = jiffies + 10*HZ;
+ add_timer(&task->timer);
+
+ res = isci_task_execute_task(task, 1, GFP_KERNEL);
+
+ if (res) {
+ del_timer(&task->timer);
+ dev_dbg(&ihost->pdev->dev,
+ "%s: executing SMP task failed:%d\n",
+ __func__, res);
+ goto ex_err;
+ }
+
+ wait_for_completion(&task->completion);
+ res = -ECOMM;
+ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: smp task timed out or aborted\n",
+ __func__);
+ isci_task_abort_task(task);
+ if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: SMP task aborted and not done\n",
+ __func__);
+ goto ex_err;
+ }
+ }
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAM_STAT_GOOD) {
+ res = 0;
+ break;
+ }
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_UNDERRUN) {
+ /* no error, but return the number of bytes of
+ * underrun */
+ res = task->task_status.residual;
+ break;
+ }
+ if (task->task_status.resp == SAS_TASK_COMPLETE &&
+ task->task_status.stat == SAS_DATA_OVERRUN) {
+ res = -EMSGSIZE;
+ break;
+ } else {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: task to dev %016llx response: 0x%x "
+ "status 0x%x\n", __func__,
+ SAS_ADDR(dev->sas_addr),
+ task->task_status.resp,
+ task->task_status.stat);
+ isci_free_task(ihost, task);
+ task = NULL;
+ }
+ }
+ex_err:
+ BUG_ON(retry == 3 && task != NULL);
+ isci_free_task(ihost, task);
+ return res;
+}
+
+#define DISCOVER_REQ_SIZE 16
+#define DISCOVER_RESP_SIZE 56
+
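+/* Issue an SMP DISCOVER for the given phy and report back the attached
+ * device type from the response.
+ */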
+int isci_smp_get_phy_attached_dev_type(struct isci_host *ihost,
+ struct domain_device *dev,
+ int phy_id, int *adt)
+{
+ struct smp_resp *disc_resp;
+ u8 *disc_req;
+ int res;
+
+ disc_resp = kzalloc(DISCOVER_RESP_SIZE, GFP_KERNEL);
+ if (!disc_resp)
+ return -ENOMEM;
+
+ disc_req = kzalloc(DISCOVER_REQ_SIZE, GFP_KERNEL);
+ if (disc_req) {
+ disc_req[0] = SMP_REQUEST;
+ disc_req[1] = SMP_DISCOVER;
+ disc_req[9] = phy_id;
+ } else {
+ kfree(disc_resp);
+ return -ENOMEM;
+ }
+ res = isci_smp_execute_task(ihost, dev, disc_req, DISCOVER_REQ_SIZE,
+ disc_resp, DISCOVER_RESP_SIZE);
+ if (!res) {
+ if (disc_resp->result != SMP_RESP_FUNC_ACC)
+ res = disc_resp->result;
+ else
+ *adt = disc_resp->disc.attached_dev_type;
+ }
+ kfree(disc_req);
+ kfree(disc_resp);
+
+ return res;
+}
+
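+/* Poll the expander with SMP DISCOVER until the attached phy reports
+ * the device gone (phy down) and then present again (phy up), or the
+ * 25 second deadline expires.  This keeps libsas discovery from acting
+ * on the transient link loss caused by an expander-attached reset.
+ */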
+static void isci_wait_for_smp_phy_reset(struct isci_remote_device *idev, int phy_num)
+{
+ struct domain_device *dev = idev->domain_dev;
+ struct isci_port *iport = idev->isci_port;
+ struct isci_host *ihost = iport->isci_host;
+ int res, iteration = 0, attached_device_type;
+ #define STP_WAIT_MSECS 25000
+ unsigned long tmo = msecs_to_jiffies(STP_WAIT_MSECS);
+ unsigned long deadline = jiffies + tmo;
+ enum {
+ SMP_PHYWAIT_PHYDOWN,
+ SMP_PHYWAIT_PHYUP,
+ SMP_PHYWAIT_DONE
+ } phy_state = SMP_PHYWAIT_PHYDOWN;
+
+ /* While there is time, wait for the phy to go away and come back */
+ while (time_is_after_jiffies(deadline) && phy_state != SMP_PHYWAIT_DONE) {
+ int event = atomic_read(&iport->event);
+
+ ++iteration;
+
+ tmo = wait_event_timeout(ihost->eventq,
+ event != atomic_read(&iport->event) ||
+ !test_bit(IPORT_BCN_BLOCKED, &iport->flags),
+ tmo);
+ /* link down, stop polling */
+ if (!test_bit(IPORT_BCN_BLOCKED, &iport->flags))
+ break;
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: iport %p, iteration %d,"
+ " phase %d: time_remaining %lu, bcns = %d\n",
+ __func__, iport, iteration, phy_state,
+ tmo, test_bit(IPORT_BCN_PENDING, &iport->flags));
+
+ res = isci_smp_get_phy_attached_dev_type(ihost, dev, phy_num,
+ &attached_device_type);
+ tmo = deadline - jiffies;
+
+ if (res) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: iteration %d, phase %d:"
+ " SMP error=%d, time_remaining=%lu\n",
+ __func__, iteration, phy_state, res, tmo);
+ break;
+ }
+ dev_dbg(&ihost->pdev->dev,
+ "%s: iport %p, iteration %d,"
+ " phase %d: time_remaining %lu, bcns = %d, "
+ "attdevtype = %x\n",
+ __func__, iport, iteration, phy_state,
+ tmo, test_bit(IPORT_BCN_PENDING, &iport->flags),
+ attached_device_type);
+
+ switch (phy_state) {
+ case SMP_PHYWAIT_PHYDOWN:
+ /* Has the device gone away? */
+ if (!attached_device_type)
+ phy_state = SMP_PHYWAIT_PHYUP;
+
+ break;
+
+ case SMP_PHYWAIT_PHYUP:
+ /* Has the device come back? */
+ if (attached_device_type)
+ phy_state = SMP_PHYWAIT_DONE;
+ break;
+
+ case SMP_PHYWAIT_DONE:
+ break;
+ }
+
+ }
+ dev_dbg(&ihost->pdev->dev, "%s: done\n", __func__);
+}
+
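+/* Device reset sequence: quiesce the remote node in the core, block
+ * BCN processing for expander-attached devices, drive the phy reset
+ * through libsas, terminate in-flight I/O, then resume the remote node
+ * and re-enable BCNs once the phy has come back up.
+ */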
+static int isci_reset_device(struct isci_host *ihost,
+ struct isci_remote_device *idev)
+{
+ struct sas_phy *phy = sas_find_local_phy(idev->domain_dev);
+ struct isci_port *iport = idev->isci_port;
+ enum sci_status status;
+ unsigned long flags;
+ int rc;
+
+ dev_dbg(&ihost->pdev->dev, "%s: idev %p\n", __func__, idev);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ status = sci_remote_device_reset(idev);
+ if (status != SCI_SUCCESS) {
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ dev_dbg(&ihost->pdev->dev,
+ "%s: sci_remote_device_reset(%p) returned %d!\n",
+ __func__, idev, status);
+
+ return TMF_RESP_FUNC_FAILED;
+ }
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /* Make sure all pending requests are able to be fully terminated. */
+ isci_device_clear_reset_pending(ihost, idev);
+
+ /* If this is a device on an expander, disable BCN processing. */
+ if (!scsi_is_sas_phy_local(phy))
+ set_bit(IPORT_BCN_BLOCKED, &iport->flags);
+
+ rc = sas_phy_reset(phy, true);
+
+ /* Terminate in-progress I/O now. */
+ isci_remote_device_nuke_requests(ihost, idev);
+
+ /* Since all pending TCs have been cleaned, resume the RNC. */
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ status = sci_remote_device_reset_complete(idev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ /* If this is a device on an expander, bring the phy back up. */
+ if (!scsi_is_sas_phy_local(phy)) {
+ /* A phy reset will cause the device to go away then reappear.
+ * Since libsas will take action on incoming BCNs (eg. remove
+ * a device going through an SMP phy-control driven reset),
+ * we need to wait until the phy comes back up before letting
+ * discovery proceed in libsas.
+ */
+ isci_wait_for_smp_phy_reset(idev, phy->number);
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ isci_port_bcn_enable(ihost, idev->isci_port);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+ }
+
+ if (status != SCI_SUCCESS) {
+ dev_dbg(&ihost->pdev->dev,
+ "%s: sci_remote_device_reset_complete(%p) "
+ "returned %d!\n", __func__, idev, status);
+ }
+
+ dev_dbg(&ihost->pdev->dev, "%s: idev %p complete.\n", __func__, idev);
+
+ return rc;
+}
+
+int isci_task_I_T_nexus_reset(struct domain_device *dev)
+{
+ struct isci_host *ihost = dev_to_ihost(dev);
+ struct isci_remote_device *idev;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev = isci_lookup_device(dev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (!idev || !test_bit(IDEV_EH, &idev->flags)) {
+ ret = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+ }
+
+ ret = isci_reset_device(ihost, idev);
+ out:
+ isci_put_device(idev);
+ return ret;
+}
+
+int isci_bus_reset_handler(struct scsi_cmnd *cmd)
+{
+ struct domain_device *dev = sdev_to_domain_dev(cmd->device);
+ struct isci_host *ihost = dev_to_ihost(dev);
+ struct isci_remote_device *idev;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ihost->scic_lock, flags);
+ idev = isci_lookup_device(dev);
+ spin_unlock_irqrestore(&ihost->scic_lock, flags);
+
+ if (!idev) {
+ ret = TMF_RESP_FUNC_COMPLETE;
+ goto out;
+ }
+
+ ret = isci_reset_device(ihost, idev);
+ out:
+ isci_put_device(idev);
+ return ret;
+}
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
new file mode 100644
index 00000000000..4a7fa90287e
--- /dev/null
+++ b/drivers/scsi/isci/task.h
@@ -0,0 +1,367 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _ISCI_TASK_H_
+#define _ISCI_TASK_H_
+
+#include <scsi/sas_ata.h>
+#include "host.h"
+
+struct isci_request;
+
+/**
+ * enum isci_tmf_cb_state - This enum defines the possible states in which the
+ * TMF callback function is invoked during the TMF execution process.
+ *
+ *
+ */
+enum isci_tmf_cb_state {
+
+ isci_tmf_init_state = 0,
+ isci_tmf_started,
+ isci_tmf_timed_out
+};
+
+/**
+ * enum isci_tmf_function_codes - This enum defines the possible preparations
+ * of task management requests.
+ *
+ *
+ */
+enum isci_tmf_function_codes {
+
+ isci_tmf_func_none = 0,
+ isci_tmf_ssp_task_abort = TMF_ABORT_TASK,
+ isci_tmf_ssp_lun_reset = TMF_LU_RESET,
+ isci_tmf_sata_srst_high = TMF_LU_RESET + 0x100, /* Non SCSI */
+ isci_tmf_sata_srst_low = TMF_LU_RESET + 0x101 /* Non SCSI */
+};
+/**
+ * struct isci_tmf - This class represents the task management object which
+ * acts as an interface to libsas for processing task management requests
+ *
+ *
+ */
+struct isci_tmf {
+
+ struct completion *complete;
+ enum sas_protocol proto;
+ union {
+ struct ssp_response_iu resp_iu;
+ struct dev_to_host_fis d2h_fis;
+ u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
+ } resp;
+ unsigned char lun[8];
+ u16 io_tag;
+ struct isci_remote_device *device;
+ enum isci_tmf_function_codes tmf_code;
+ int status;
+
+ /* The optional callback function allows the user process to
+ * track the TMF transmit / timeout conditions.
+ */
+ void (*cb_state_func)(
+ enum isci_tmf_cb_state,
+ struct isci_tmf *, void *);
+ void *cb_data;
+
+};
+
+static inline void isci_print_tmf(struct isci_tmf *tmf)
+{
+ if (SAS_PROTOCOL_SATA == tmf->proto)
+ dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+ "%s: status = %x\n"
+ "tmf->resp.d2h_fis.status = %x\n"
+ "tmf->resp.d2h_fis.error = %x\n",
+ __func__,
+ tmf->status,
+ tmf->resp.d2h_fis.status,
+ tmf->resp.d2h_fis.error);
+ else
+ dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+ "%s: status = %x\n"
+ "tmf->resp.resp_iu.data_present = %x\n"
+ "tmf->resp.resp_iu.status = %x\n"
+ "tmf->resp.resp_iu.data_length = %x\n"
+ "tmf->resp.resp_iu.data[0] = %x\n"
+ "tmf->resp.resp_iu.data[1] = %x\n"
+ "tmf->resp.resp_iu.data[2] = %x\n"
+ "tmf->resp.resp_iu.data[3] = %x\n",
+ __func__,
+ tmf->status,
+ tmf->resp.resp_iu.datapres,
+ tmf->resp.resp_iu.status,
+ be32_to_cpu(tmf->resp.resp_iu.response_data_len),
+ tmf->resp.resp_iu.resp_data[0],
+ tmf->resp.resp_iu.resp_data[1],
+ tmf->resp.resp_iu.resp_data[2],
+ tmf->resp.resp_iu.resp_data[3]);
+}
+
+
+int isci_task_execute_task(
+ struct sas_task *task,
+ int num,
+ gfp_t gfp_flags);
+
+int isci_task_abort_task(
+ struct sas_task *task);
+
+int isci_task_abort_task_set(
+ struct domain_device *d_device,
+ u8 *lun);
+
+int isci_task_clear_aca(
+ struct domain_device *d_device,
+ u8 *lun);
+
+int isci_task_clear_task_set(
+ struct domain_device *d_device,
+ u8 *lun);
+
+int isci_task_query_task(
+ struct sas_task *task);
+
+int isci_task_lu_reset(
+ struct domain_device *d_device,
+ u8 *lun);
+
+int isci_task_clear_nexus_port(
+ struct asd_sas_port *port);
+
+int isci_task_clear_nexus_ha(
+ struct sas_ha_struct *ha);
+
+int isci_task_I_T_nexus_reset(
+ struct domain_device *d_device);
+
+void isci_task_request_complete(
+ struct isci_host *isci_host,
+ struct isci_request *request,
+ enum sci_task_status completion_status);
+
+u16 isci_task_ssp_request_get_io_tag_to_manage(
+ struct isci_request *request);
+
+u8 isci_task_ssp_request_get_function(
+ struct isci_request *request);
+
+
+void *isci_task_ssp_request_get_response_data_address(
+ struct isci_request *request);
+
+u32 isci_task_ssp_request_get_response_data_length(
+ struct isci_request *request);
+
+int isci_queuecommand(
+ struct scsi_cmnd *scsi_cmd,
+ void (*donefunc)(struct scsi_cmnd *));
+
+int isci_bus_reset_handler(struct scsi_cmnd *cmd);
+
+/**
+ * enum isci_completion_selection - This enum defines the possible actions to
+ * take with respect to a given request's notification back to libsas.
+ *
+ *
+ */
+enum isci_completion_selection {
+
+ isci_perform_normal_io_completion, /* Normal notify (task_done) */
+ isci_perform_aborted_io_completion, /* No notification. */
+ isci_perform_error_io_completion /* Use sas_task_abort */
+};
+
+static inline void isci_set_task_doneflags(
+ struct sas_task *task)
+{
+	/* Since no further action will be taken on this task,
+ * make sure to mark it complete from the lldd perspective.
+ */
+ task->task_state_flags |= SAS_TASK_STATE_DONE;
+ task->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
+ task->task_state_flags &= ~SAS_TASK_STATE_PENDING;
+}
+/**
+ * isci_task_all_done() - This function updates the task state flags to
+ *    indicate that the LLDD is done with the task.
+ *
+ *
+ */
+static inline void isci_task_all_done(
+ struct sas_task *task)
+{
+ unsigned long flags;
+
+	/* Since no further action will be taken on this task,
+ * make sure to mark it complete from the lldd perspective.
+ */
+ spin_lock_irqsave(&task->task_state_lock, flags);
+ isci_set_task_doneflags(task);
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+}
+
+/**
+ * isci_task_set_completion_status() - This function sets the completion status
+ * for the request.
+ * @task: This parameter is the completed request.
+ * @response: This parameter is the response code for the completed task.
+ * @status: This parameter is the status code for the completed task.
+ *
+ * @return The new notification mode for the request.
+ */
+static inline enum isci_completion_selection
+isci_task_set_completion_status(
+ struct sas_task *task,
+ enum service_response response,
+ enum exec_status status,
+ enum isci_completion_selection task_notification_selection)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&task->task_state_lock, flags);
+
+ /* If a device reset is being indicated, make sure the I/O
+ * is in the error path.
+ */
+ if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
+ /* Fail the I/O to make sure it goes into the error path. */
+ response = SAS_TASK_UNDELIVERED;
+ status = SAM_STAT_TASK_ABORTED;
+
+ task_notification_selection = isci_perform_error_io_completion;
+ }
+ task->task_status.resp = response;
+ task->task_status.stat = status;
+
+ switch (task_notification_selection) {
+
+ case isci_perform_error_io_completion:
+
+ if (task->task_proto == SAS_PROTOCOL_SMP) {
+ /* There is no error escalation in the SMP case.
+ * Convert to a normal completion to avoid the
+ * timeout in the discovery path and to let the
+ * next action take place quickly.
+ */
+ task_notification_selection
+ = isci_perform_normal_io_completion;
+
+ /* Fall through to the normal case... */
+ } else {
+ /* Use sas_task_abort */
+ /* Leave SAS_TASK_STATE_DONE clear
+ * Leave SAS_TASK_AT_INITIATOR set.
+ */
+ break;
+ }
+
+ case isci_perform_aborted_io_completion:
+ /* This path can occur with task-managed requests as well as
+ * requests terminated because of LUN or device resets.
+ */
+ /* Fall through to the normal case... */
+ case isci_perform_normal_io_completion:
+ /* Normal notification (task_done) */
+ isci_set_task_doneflags(task);
+ break;
+ default:
+ WARN_ONCE(1, "unknown task_notification_selection: %d\n",
+ task_notification_selection);
+ break;
+ }
+
+ spin_unlock_irqrestore(&task->task_state_lock, flags);
+
+ return task_notification_selection;
+
+}
+/**
+ * isci_execpath_callback() - This function is called from the task
+ * execute path when the task needs to call back to libsas about the
+ * submit-time task failure. The callback occurs either through the task's
+ * done function or through sas_task_abort. In the case of regular
+ * non-discovery SATA/STP I/O requests, libsas takes the host lock before
+ * calling execute task. Therefore in this situation the host lock must be
+ * managed before calling the callback function.
+ *
+ * @ihost: This parameter is the controller to which the I/O request was sent.
+ * @task: This parameter is the I/O request.
+ * @func: This parameter is the function to call in the correct context.
+ *
+ */
+static inline void isci_execpath_callback(struct isci_host *ihost,
+ struct sas_task *task,
+ void (*func)(struct sas_task *))
+{
+ struct domain_device *dev = task->dev;
+
+ if (dev_is_sata(dev) && task->uldd_task) {
+ unsigned long flags;
+
+ /* Since we are still in the submit path, and since
+ * libsas takes the host lock on behalf of SATA
+ * devices before I/O starts (in the non-discovery case),
+ * we need to unlock before we can call the callback function.
+ */
+ raw_local_irq_save(flags);
+ spin_unlock(dev->sata_dev.ap->lock);
+ func(task);
+ spin_lock(dev->sata_dev.ap->lock);
+ raw_local_irq_restore(flags);
+ } else
+ func(task);
+}
+#endif /* !defined(_ISCI_TASK_H_) */
diff --git a/drivers/scsi/isci/unsolicited_frame_control.c b/drivers/scsi/isci/unsolicited_frame_control.c
new file mode 100644
index 00000000000..e9e1e2abacb
--- /dev/null
+++ b/drivers/scsi/isci/unsolicited_frame_control.c
@@ -0,0 +1,225 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "host.h"
+#include "unsolicited_frame_control.h"
+#include "registers.h"
+
+int sci_unsolicited_frame_control_construct(struct isci_host *ihost)
+{
+ struct sci_unsolicited_frame_control *uf_control = &ihost->uf_control;
+ struct sci_unsolicited_frame *uf;
+ u32 buf_len, header_len, i;
+ dma_addr_t dma;
+ size_t size;
+ void *virt;
+
+ /*
+ * Prepare all of the memory sizes for the UF headers, UF address
+ * table, and UF buffers themselves.
+ */
+ buf_len = SCU_MAX_UNSOLICITED_FRAMES * SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
+ header_len = SCU_MAX_UNSOLICITED_FRAMES * sizeof(struct scu_unsolicited_frame_header);
+ size = buf_len + header_len + SCU_MAX_UNSOLICITED_FRAMES * sizeof(dma_addr_t);
+
+ /*
+ * The Unsolicited Frame buffers are set at the start of the UF
+ * memory descriptor entry. The headers and address table will be
+ * placed after the buffers.
+ */
+ virt = dmam_alloc_coherent(&ihost->pdev->dev, size, &dma, GFP_KERNEL);
+ if (!virt)
+ return -ENOMEM;
+
+ /*
+ * Program the location of the UF header table into the SCU.
+ * Notes:
+	 * - The address must align on a 64-byte boundary.  This is already
+	 *   guaranteed, since the unsolicited frame buffers preceding it
+	 *   are 1KB-aligned and a multiple of 1KB in total size.
+ * - Program unused header entries to overlap with the last
+ * unsolicited frame. The silicon will never DMA to these unused
+ * headers, since we program the UF address table pointers to
+ * NULL.
+ */
+ uf_control->headers.physical_address = dma + buf_len;
+ uf_control->headers.array = virt + buf_len;
+
+ /*
+ * Program the location of the UF address table into the SCU.
+ * Notes:
+	 * - The address must align on a 64-bit boundary.  This is already
+	 *   guaranteed, since the header table above is 64-byte aligned and
+	 *   each header is 64 bytes in size.
+ */
+ uf_control->address_table.physical_address = dma + buf_len + header_len;
+ uf_control->address_table.array = virt + buf_len + header_len;
+ uf_control->get = 0;
+
+ /*
+ * UF buffer requirements are:
+ * - The last entry in the UF queue is not NULL.
+ * - There is a power of 2 number of entries (NULL or not-NULL)
+ * programmed into the queue.
+	 * - Aligned on a 1KB boundary.
+	 */
+
+ /*
+ * Program the actual used UF buffers into the UF address table and
+ * the controller's array of UFs.
+ */
+ for (i = 0; i < SCU_MAX_UNSOLICITED_FRAMES; i++) {
+ uf = &uf_control->buffers.array[i];
+
+ uf_control->address_table.array[i] = dma;
+
+ uf->buffer = virt;
+ uf->header = &uf_control->headers.array[i];
+ uf->state = UNSOLICITED_FRAME_EMPTY;
+
+ /*
+ * Increment the address of the physical and virtual memory
+ * pointers. Everything is aligned on 1k boundary with an
+ * increment of 1k.
+ */
+ virt += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
+ dma += SCU_UNSOLICITED_FRAME_BUFFER_SIZE;
+ }
+
+ return 0;
+}
+
+enum sci_status sci_unsolicited_frame_control_get_header(struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index,
+ void **frame_header)
+{
+ if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
+		/* Skip the first word in the frame, since it is a control word used
+ * by the hardware.
+ */
+ *frame_header = &uf_control->buffers.array[frame_index].header->data;
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+}
+
+enum sci_status sci_unsolicited_frame_control_get_buffer(struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index,
+ void **frame_buffer)
+{
+ if (frame_index < SCU_MAX_UNSOLICITED_FRAMES) {
+ *frame_buffer = uf_control->buffers.array[frame_index].buffer;
+
+ return SCI_SUCCESS;
+ }
+
+ return SCI_FAILURE_INVALID_PARAMETER_VALUE;
+}
+
+bool sci_unsolicited_frame_control_release_frame(struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index)
+{
+ u32 frame_get;
+ u32 frame_cycle;
+
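+	/*
+	 * SCU_MAX_UNSOLICITED_FRAMES is a power of 2 (a queue requirement
+	 * noted in sci_unsolicited_frame_control_construct()), so the low
+	 * bits of 'get' index the table while the next bit up acts as the
+	 * queue cycle (wrap) indicator.
+	 */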
+ frame_get = uf_control->get & (SCU_MAX_UNSOLICITED_FRAMES - 1);
+ frame_cycle = uf_control->get & SCU_MAX_UNSOLICITED_FRAMES;
+
+ /*
+ * In the event there are NULL entries in the UF table, we need to
+ * advance the get pointer in order to find out if this frame should
+ * be released (i.e. update the get pointer)
+ */
+	while (frame_get < SCU_MAX_UNSOLICITED_FRAMES &&
+	       lower_32_bits(uf_control->address_table.array[frame_get]) == 0 &&
+	       upper_32_bits(uf_control->address_table.array[frame_get]) == 0)
+ frame_get++;
+
+ /*
+	 * If we ran off the end of the table, then its last entry was
+	 * NULL, which is illegal.
+ */
+ BUG_ON(frame_get >= SCU_MAX_UNSOLICITED_FRAMES);
+ if (frame_index >= SCU_MAX_UNSOLICITED_FRAMES)
+ return false;
+
+ uf_control->buffers.array[frame_index].state = UNSOLICITED_FRAME_RELEASED;
+
+ if (frame_get != frame_index) {
+ /*
+ * Frames remain in use until we advance the get pointer
+ * so there is nothing we can do here
+ */
+ return false;
+ }
+
+ /*
+	 * The frame index is equal to the current get pointer, so we
+	 * can now free up all of the frame entries that have been released.
+ */
+ while (uf_control->buffers.array[frame_get].state == UNSOLICITED_FRAME_RELEASED) {
+ uf_control->buffers.array[frame_get].state = UNSOLICITED_FRAME_EMPTY;
+
+ if (frame_get+1 == SCU_MAX_UNSOLICITED_FRAMES-1) {
+ frame_cycle ^= SCU_MAX_UNSOLICITED_FRAMES;
+ frame_get = 0;
+ } else
+ frame_get++;
+ }
+
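+	/*
+	 * Re-arm the queue: the value written back combines the queue
+	 * enable gen bit, the current cycle bit, and the advanced get
+	 * index, which is the form the silicon expects.
+	 */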
+ uf_control->get = SCU_UFQGP_GEN_BIT(ENABLE_BIT) | frame_cycle | frame_get;
+
+ return true;
+}
diff --git a/drivers/scsi/isci/unsolicited_frame_control.h b/drivers/scsi/isci/unsolicited_frame_control.h
new file mode 100644
index 00000000000..31cb9506f52
--- /dev/null
+++ b/drivers/scsi/isci/unsolicited_frame_control.h
@@ -0,0 +1,278 @@
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_
+#define _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_
+
+#include "isci.h"
+
+#define SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS 15
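+
+/*
+ * One attribute DWORD plus the 15 data DWORDs above makes each header
+ * 64 bytes; the alignment notes in unsolicited_frame_control.c rely
+ * on this.
+ */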
+
+/**
+ * struct scu_unsolicited_frame_header -
+ *
+ * This structure delineates the format of an unsolicited frame header. The
+ * first DWORD contains UF attributes defined by the silicon architecture. The
+ * remaining data is the actual header information received on the link.
+ */
+struct scu_unsolicited_frame_header {
+ /**
+ * This field indicates if there is an Initiator Index Table entry with
+ * which this header is associated.
+ */
+ u32 iit_exists:1;
+
+ /**
+ * This field simply indicates the protocol type (i.e. SSP, STP, SMP).
+ */
+ u32 protocol_type:3;
+
+ /**
+ * This field indicates if the frame is an address frame (IAF or OAF)
+	 * or if it is an information unit frame.
+ */
+ u32 is_address_frame:1;
+
+ /**
+ * This field simply indicates the connection rate at which the frame
+ * was received.
+ */
+ u32 connection_rate:4;
+
+ u32 reserved:23;
+
+ /**
+ * This field represents the actual header data received on the link.
+ */
+ u32 data[SCU_UNSOLICITED_FRAME_HEADER_DATA_DWORDS];
+
+};
+
+/**
+ * enum unsolicited_frame_state -
+ *
+ * This enumeration represents the current unsolicited frame state. The
+ * controller object cannot update the hardware unsolicited frame put pointer
+ * unless it has already processed the prior unsolicited frames.
+ */
+enum unsolicited_frame_state {
+ /**
+ * This state is when the frame is empty and not in use. It is
+ * different from the released state in that the hardware could DMA
+ * data to this frame buffer.
+ */
+ UNSOLICITED_FRAME_EMPTY,
+
+ /**
+	 * This state is set when the frame buffer is in use by some
+ * object in the system.
+ */
+ UNSOLICITED_FRAME_IN_USE,
+
+ /**
+ * This state is set when the frame is returned to the free pool
+ * but one or more frames prior to this one are still in use.
+	 * Once all of the frames before this one are freed, it will go to
+ * the empty state.
+ */
+ UNSOLICITED_FRAME_RELEASED,
+
+ UNSOLICITED_FRAME_MAX_STATES
+};
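+
+/*
+ * Typical frame lifetime, as implied by the state descriptions above:
+ * EMPTY -> IN_USE (the silicon DMAs a frame in) -> RELEASED (the
+ * consumer is done with it) -> EMPTY (once all earlier frames have
+ * also been released).
+ */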
+
+/**
+ * struct sci_unsolicited_frame -
+ *
+ * This is the unsolicited frame data structure; it acts as the container
+ * for the current frame state, frame header, and frame buffer.
+ */
+struct sci_unsolicited_frame {
+ /**
+ * This field contains the current frame state
+ */
+ enum unsolicited_frame_state state;
+
+ /**
+ * This field points to the frame header data.
+ */
+ struct scu_unsolicited_frame_header *header;
+
+ /**
+ * This field points to the frame buffer data.
+ */
+ void *buffer;
+
+};
+
+/**
+ * struct sci_uf_header_array -
+ *
+ * This structure contains all of the unsolicited frame header information.
+ */
+struct sci_uf_header_array {
+ /**
+	 * This field represents a virtual pointer to the start
+	 * address of the UF header array.
+ */
+ struct scu_unsolicited_frame_header *array;
+
+ /**
+ * This field specifies the physical address location for the UF
+	 * header array.
+ */
+ dma_addr_t physical_address;
+
+};
+
+/**
+ * struct sci_uf_buffer_array -
+ *
+ * This structure contains all of the unsolicited frame buffer (actual payload)
+ * information.
+ */
+struct sci_uf_buffer_array {
+ /**
+	 * This field is the unsolicited frame data; it is used to manage
+ * the data for the unsolicited frame requests. It also represents
+ * the virtual address location that corresponds to the
+ * physical_address field.
+ */
+ struct sci_unsolicited_frame array[SCU_MAX_UNSOLICITED_FRAMES];
+
+ /**
+ * This field specifies the physical address location for the UF
+ * buffer array.
+ */
+ dma_addr_t physical_address;
+};
+
+/**
+ * struct sci_uf_address_table_array -
+ *
+ * This object maintains all of the unsolicited frame address table specific
+ * data. The address table is a collection of 64-bit pointers that point to
+ * 1KB buffers into which the silicon will DMA unsolicited frames.
+ */
+struct sci_uf_address_table_array {
+ /**
+ * This field represents a virtual pointer that refers to the
+ * starting address of the UF address table.
+ * 64-bit pointers are required by the hardware.
+ */
+ dma_addr_t *array;
+
+ /**
+ * This field specifies the physical address location for the UF
+ * address table.
+ */
+ dma_addr_t physical_address;
+
+};
+
+/**
+ * struct sci_unsolicited_frame_control -
+ *
+ * This object contains all of the data necessary to handle unsolicited frames.
+ */
+struct sci_unsolicited_frame_control {
+ /**
+ * This field is the software copy of the unsolicited frame queue
+ * get pointer. The controller object writes this value to the
+	 * hardware to let it put more unsolicited frames into the freed entries.
+ */
+ u32 get;
+
+ /**
+ * This field contains all of the unsolicited frame header
+ * specific fields.
+ */
+ struct sci_uf_header_array headers;
+
+ /**
+ * This field contains all of the unsolicited frame buffer
+ * specific fields.
+ */
+ struct sci_uf_buffer_array buffers;
+
+ /**
+ * This field contains all of the unsolicited frame address table
+ * specific fields.
+ */
+ struct sci_uf_address_table_array address_table;
+
+};
+
+struct isci_host;
+
+int sci_unsolicited_frame_control_construct(struct isci_host *ihost);
+
+enum sci_status sci_unsolicited_frame_control_get_header(
+ struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index,
+ void **frame_header);
+
+enum sci_status sci_unsolicited_frame_control_get_buffer(
+ struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index,
+ void **frame_buffer);
+
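+/*
+ * Note: per the implementation in unsolicited_frame_control.c, this
+ * returns true only when the get pointer actually advances past
+ * frame_index; it returns false while earlier frames are still
+ * outstanding or if frame_index is out of range.
+ */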
+bool sci_unsolicited_frame_control_release_frame(
+ struct sci_unsolicited_frame_control *uf_control,
+ u32 frame_index);
+
+#endif /* _SCIC_SDS_UNSOLICITED_FRAME_CONTROL_H_ */
diff --git a/drivers/spi/spi_s3c64xx.c b/drivers/spi/spi_s3c64xx.c
index 795828b90f4..8945e201e42 100644
--- a/drivers/spi/spi_s3c64xx.c
+++ b/drivers/spi/spi_s3c64xx.c
@@ -116,9 +116,7 @@
(((i)->fifo_lvl_mask + 1))) \
? 1 : 0)
-#define S3C64XX_SPI_ST_TX_DONE(v, i) ((((v) >> (i)->rx_lvl_offset) & \
- (((i)->fifo_lvl_mask + 1) << 1)) \
- ? 1 : 0)
+#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & (1 << (i)->tx_st_done)) ? 1 : 0)
#define TX_FIFO_LVL(v, i) (((v) >> 6) & (i)->fifo_lvl_mask)
#define RX_FIFO_LVL(v, i) (((v) >> (i)->rx_lvl_offset) & (i)->fifo_lvl_mask)
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 21b9465f71a..11d85bfd774 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -516,8 +516,17 @@ static void ssb_pcicore_pcie_setup_workarounds(struct ssb_pcicore *pc)
static void __devinit ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
{
+ ssb_pcicore_fix_sprom_core_index(pc);
+
/* Disable PCI interrupts. */
ssb_write32(pc->dev, SSB_INTVEC, 0);
+
+ /* Additional PCIe always once-executed workarounds */
+ if (pc->dev->id.coreid == SSB_DEV_PCIE) {
+ ssb_pcicore_serdes_workaround(pc);
+ /* TODO: ASPM */
+ /* TODO: Clock Request Update */
+ }
}
void __devinit ssb_pcicore_init(struct ssb_pcicore *pc)
@@ -529,8 +538,6 @@ void __devinit ssb_pcicore_init(struct ssb_pcicore *pc)
if (!ssb_device_is_enabled(dev))
ssb_device_enable(dev, 0);
- ssb_pcicore_fix_sprom_core_index(pc);
-
#ifdef CONFIG_SSB_PCICORE_HOSTMODE
pc->hostmode = pcicore_is_in_hostmode(pc);
if (pc->hostmode)
@@ -538,13 +545,6 @@ void __devinit ssb_pcicore_init(struct ssb_pcicore *pc)
#endif /* CONFIG_SSB_PCICORE_HOSTMODE */
if (!pc->hostmode)
ssb_pcicore_init_clientmode(pc);
-
- /* Additional PCIe always once-executed workarounds */
- if (dev->id.coreid == SSB_DEV_PCIE) {
- ssb_pcicore_serdes_workaround(pc);
- /* TODO: ASPM */
- /* TODO: Clock Request Update */
- }
}
static u32 ssb_pcie_read(struct ssb_pcicore *pc, u32 address)
diff --git a/drivers/staging/lirc/lirc_imon.c b/drivers/staging/lirc/lirc_imon.c
index 4039eda2a15..4a9e563f40f 100644
--- a/drivers/staging/lirc/lirc_imon.c
+++ b/drivers/staging/lirc/lirc_imon.c
@@ -672,8 +672,6 @@ static void imon_incoming_packet(struct imon_context *context,
static void usb_rx_callback(struct urb *urb)
{
struct imon_context *context;
- unsigned char *buf;
- int len;
int intfnum = 0;
if (!urb)
@@ -683,9 +681,6 @@ static void usb_rx_callback(struct urb *urb)
if (!context)
return;
- buf = urb->transfer_buffer;
- len = urb->actual_length;
-
switch (urb->status) {
case -ENOENT: /* usbcore unlink successful! */
return;
@@ -728,7 +723,6 @@ static int imon_probe(struct usb_interface *interface,
int ir_ep_found = 0;
int alloc_status = 0;
int vfd_proto_6p = 0;
- int code_length;
struct imon_context *context = NULL;
int i;
u16 vendor, product;
@@ -749,8 +743,6 @@ static int imon_probe(struct usb_interface *interface,
else
context->display = 1;
- code_length = BUF_CHUNK_SIZE * 8;
-
usbdev = usb_get_dev(interface_to_usbdev(interface));
iface_desc = interface->cur_altsetting;
num_endpts = iface_desc->desc.bNumEndpoints;
@@ -856,7 +848,7 @@ static int imon_probe(struct usb_interface *interface,
strcpy(driver->name, MOD_NAME);
driver->minor = -1;
- driver->code_length = sizeof(int) * 8;
+ driver->code_length = BUF_CHUNK_SIZE * 8;
driver->sample_rate = 0;
driver->features = LIRC_CAN_REC_MODE2;
driver->data = context;
diff --git a/drivers/staging/lirc/lirc_serial.c b/drivers/staging/lirc/lirc_serial.c
index 4a3cca03224..805df913bb6 100644
--- a/drivers/staging/lirc/lirc_serial.c
+++ b/drivers/staging/lirc/lirc_serial.c
@@ -838,7 +838,23 @@ static int hardware_init_port(void)
static int init_port(void)
{
- int i, nlow, nhigh;
+ int i, nlow, nhigh, result;
+
+ result = request_irq(irq, irq_handler,
+ IRQF_DISABLED | (share_irq ? IRQF_SHARED : 0),
+ LIRC_DRIVER_NAME, (void *)&hardware);
+
+ switch (result) {
+ case -EBUSY:
+ printk(KERN_ERR LIRC_DRIVER_NAME ": IRQ %d busy\n", irq);
+ return -EBUSY;
+ case -EINVAL:
+ printk(KERN_ERR LIRC_DRIVER_NAME
+ ": Bad irq number or handler\n");
+ return -EINVAL;
+ default:
+ break;
+	}
/* Reserve io region. */
/*
@@ -893,34 +909,17 @@ static int init_port(void)
printk(KERN_INFO LIRC_DRIVER_NAME ": Manually using active "
"%s receiver\n", sense ? "low" : "high");
+ dprintk("Interrupt %d, port %04x obtained\n", irq, io);
return 0;
}
static int set_use_inc(void *data)
{
- int result;
unsigned long flags;
/* initialize timestamp */
do_gettimeofday(&lasttv);
- result = request_irq(irq, irq_handler,
- IRQF_DISABLED | (share_irq ? IRQF_SHARED : 0),
- LIRC_DRIVER_NAME, (void *)&hardware);
-
- switch (result) {
- case -EBUSY:
- printk(KERN_ERR LIRC_DRIVER_NAME ": IRQ %d busy\n", irq);
- return -EBUSY;
- case -EINVAL:
- printk(KERN_ERR LIRC_DRIVER_NAME
- ": Bad irq number or handler\n");
- return -EINVAL;
- default:
- dprintk("Interrupt %d, port %04x obtained\n", irq, io);
- break;
- }
-
spin_lock_irqsave(&hardware[type].lock, flags);
/* Set DLAB 0. */
@@ -945,10 +944,6 @@ static void set_use_dec(void *data)
soutp(UART_IER, sinp(UART_IER) &
(~(UART_IER_MSI|UART_IER_RLSI|UART_IER_THRI|UART_IER_RDI)));
spin_unlock_irqrestore(&hardware[type].lock, flags);
-
- free_irq(irq, (void *)&hardware);
-
- dprintk("freed IRQ %d\n", irq);
}
static ssize_t lirc_write(struct file *file, const char *buf,
@@ -1256,6 +1251,9 @@ exit_serial_exit:
static void __exit lirc_serial_exit_module(void)
{
lirc_serial_exit();
+
+ free_irq(irq, (void *)&hardware);
+
if (iommap != 0)
release_mem_region(iommap, 8 << ioshift);
else
diff --git a/drivers/staging/lirc/lirc_sir.c b/drivers/staging/lirc/lirc_sir.c
index a7b46f24f24..0d3864594b1 100644
--- a/drivers/staging/lirc/lirc_sir.c
+++ b/drivers/staging/lirc/lirc_sir.c
@@ -739,23 +739,16 @@ static void send_space(unsigned long len)
static void send_pulse(unsigned long len)
{
long bytes_out = len / TIME_CONST;
- long time_left;
- time_left = (long)len - (long)bytes_out * (long)TIME_CONST;
- if (bytes_out == 0) {
+ if (bytes_out == 0)
bytes_out++;
- time_left = 0;
- }
+
while (bytes_out--) {
outb(PULSE, io + UART_TX);
/* FIXME treba seriozne cakanie z char/serial.c */
while (!(inb(io + UART_LSR) & UART_LSR_THRE))
;
}
-#if 0
- if (time_left > 0)
- safe_udelay(time_left);
-#endif
}
#endif
diff --git a/drivers/staging/lirc/lirc_zilog.c b/drivers/staging/lirc/lirc_zilog.c
index dd6a57c3c3a..4e051f6b52d 100644
--- a/drivers/staging/lirc/lirc_zilog.c
+++ b/drivers/staging/lirc/lirc_zilog.c
@@ -475,14 +475,14 @@ static int lirc_thread(void *arg)
dprintk("poll thread started\n");
while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
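+		/*
+		 * Note: marking the task interruptible before testing the
+		 * sleep condition ensures a wakeup arriving between the
+		 * test and schedule_timeout() is not lost.
+		 */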
+
/* if device not opened, we can sleep half a second */
if (atomic_read(&ir->open_count) == 0) {
schedule_timeout(HZ/2);
continue;
}
- set_current_state(TASK_INTERRUPTIBLE);
-
/*
* This is ~113*2 + 24 + jitter (2*repeat gap + code length).
* We use this interval as the chip resets every time you poll
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 6d5d6e679fc..af9b7814965 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1709,12 +1709,13 @@ static int atmel_serial_resume(struct platform_device *pdev)
static int __devinit atmel_serial_probe(struct platform_device *pdev)
{
struct atmel_uart_port *port;
+ struct atmel_uart_data *pdata = pdev->dev.platform_data;
void *data;
int ret;
BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
- port = &atmel_ports[pdev->id];
+ port = &atmel_ports[pdata->num];
port->backup_imr = 0;
atmel_init_port(port, pdev);
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 64c7ab4702d..0b5ec234c78 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1147,6 +1147,14 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
* any drivers bound to them (a key side effect)
*/
if (dev->actconfig) {
+ /*
+ * FIXME: In order to avoid self-deadlock involving the
+ * bandwidth_mutex, we have to mark all the interfaces
+ * before unregistering any of them.
+ */
+ for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++)
+ dev->actconfig->interface[i]->unregistering = 1;
+
for (i = 0; i < dev->actconfig->desc.bNumInterfaces; i++) {
struct usb_interface *interface;
@@ -1156,7 +1164,6 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
continue;
dev_dbg(&dev->dev, "unregistering interface %s\n",
dev_name(&interface->dev));
- interface->unregistering = 1;
remove_intf_ep_devs(interface);
device_del(&interface->dev);
}
@@ -1286,6 +1293,8 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
interface);
return -EINVAL;
}
+ if (iface->unregistering)
+ return -ENODEV;
alt = usb_altnum_to_altsetting(iface, alternate);
if (!alt) {
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index 2cd9a60c7f3..4e483316808 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -46,7 +46,6 @@
#include <asm/system.h>
#include <asm/unaligned.h>
#include <asm/dma.h>
-#include <asm/cacheflush.h>
#include "fsl_usb2_udc.h"
@@ -118,6 +117,17 @@ static void (*_fsl_writel)(u32 v, unsigned __iomem *p);
#define fsl_readl(p) (*_fsl_readl)((p))
#define fsl_writel(v, p) (*_fsl_writel)((v), (p))
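+/* Select big- or little-endian MMIO accessors based on platform data. */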
+static inline void fsl_set_accessors(struct fsl_usb2_platform_data *pdata)
+{
+ if (pdata->big_endian_mmio) {
+ _fsl_readl = _fsl_readl_be;
+ _fsl_writel = _fsl_writel_be;
+ } else {
+ _fsl_readl = _fsl_readl_le;
+ _fsl_writel = _fsl_writel_le;
+ }
+}
+
static inline u32 cpu_to_hc32(const u32 x)
{
return udc_controller->pdata->big_endian_desc
@@ -132,6 +142,8 @@ static inline u32 hc32_to_cpu(const u32 x)
: le32_to_cpu((__force __le32)x);
}
#else /* !CONFIG_PPC32 */
+static inline void fsl_set_accessors(struct fsl_usb2_platform_data *pdata) {}
+
#define fsl_readl(addr) readl(addr)
#define fsl_writel(val32, addr) writel(val32, addr)
#define cpu_to_hc32(x) cpu_to_le32(x)
@@ -1277,6 +1289,11 @@ static int ep0_prime_status(struct fsl_udc *udc, int direction)
req->req.complete = NULL;
req->dtd_count = 0;
+ req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
+ req->req.buf, req->req.length,
+ ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ req->mapped = 1;
+
if (fsl_req_to_dtd(req) == 0)
fsl_queue_td(ep, req);
else
@@ -1348,9 +1365,6 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
/* Fill in the reqest structure */
*((u16 *) req->req.buf) = cpu_to_le16(tmp);
- /* flush cache for the req buffer */
- flush_dcache_range((u32)req->req.buf, (u32)req->req.buf + 8);
-
req->ep = ep;
req->req.length = 2;
req->req.status = -EINPROGRESS;
@@ -1358,6 +1372,11 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value,
req->req.complete = NULL;
req->dtd_count = 0;
+ req->req.dma = dma_map_single(ep->udc->gadget.dev.parent,
+ req->req.buf, req->req.length,
+ ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ req->mapped = 1;
+
/* prime the data phase */
if ((fsl_req_to_dtd(req) == 0))
fsl_queue_td(ep, req);
@@ -2354,7 +2373,6 @@ static int __init struct_udc_setup(struct fsl_udc *udc,
struct fsl_req, req);
/* allocate a small amount of memory to get valid address */
udc->status_req->req.buf = kmalloc(8, GFP_KERNEL);
- udc->status_req->req.dma = virt_to_phys(udc->status_req->req.buf);
udc->resume_state = USB_STATE_NOTATTACHED;
udc->usb_state = USB_STATE_POWERED;
@@ -2470,13 +2488,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
}
/* Set accessors only after pdata->init() ! */
- if (pdata->big_endian_mmio) {
- _fsl_readl = _fsl_readl_be;
- _fsl_writel = _fsl_writel_be;
- } else {
- _fsl_readl = _fsl_readl_le;
- _fsl_writel = _fsl_writel_le;
- }
+ fsl_set_accessors(pdata);
#ifndef CONFIG_ARCH_MXC
if (pdata->have_sysif_regs)
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 5fc983c5b92..cf03ad06714 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -447,6 +447,8 @@ static int clcdfb_register(struct clcd_fb *fb)
goto out;
}
+ fb->fb.device = &fb->dev->dev;
+
fb->fb.fix.mmio_start = fb->dev->res.start;
fb->fb.fix.mmio_len = resource_size(&fb->dev->res);
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index bedf5be27f0..0acc7d65aea 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -555,8 +555,6 @@ static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
static int fsl_diu_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
- unsigned long htotal, vtotal;
-
pr_debug("check_var xres: %d\n", var->xres);
pr_debug("check_var yres: %d\n", var->yres);
@@ -635,20 +633,6 @@ static int fsl_diu_check_var(struct fb_var_screeninfo *var,
break;
}
- /* If the pixclock is below the minimum spec'd value then set to
- * refresh rate for 60Hz since this is supported by most monitors.
- * Refer to Documentation/fb/ for calculations.
- */
- if ((var->pixclock < MIN_PIX_CLK) || (var->pixclock > MAX_PIX_CLK)) {
- htotal = var->xres + var->right_margin + var->hsync_len +
- var->left_margin;
- vtotal = var->yres + var->lower_margin + var->vsync_len +
- var->upper_margin;
- var->pixclock = (vtotal * htotal * 6UL) / 100UL;
- var->pixclock = KHZ2PICOS(var->pixclock);
- pr_debug("pixclock set for 60Hz refresh = %u ps\n",
- var->pixclock);
- }
var->height = -1;
var->width = -1;
diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
index c6b554f72c6..5a5d0928df3 100644
--- a/drivers/video/geode/gx1fb_core.c
+++ b/drivers/video/geode/gx1fb_core.c
@@ -29,7 +29,7 @@ static int crt_option = 1;
static char panel_option[32] = "";
/* Modes relevant to the GX1 (taken from modedb.c) */
-static const struct fb_videomode __initdata gx1_modedb[] = {
+static const struct fb_videomode __devinitdata gx1_modedb[] = {
/* 640x480-60 VESA */
{ NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
@@ -195,7 +195,7 @@ static int gx1fb_blank(int blank_mode, struct fb_info *info)
return par->vid_ops->blank_display(info, blank_mode);
}
-static int __init gx1fb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
+static int __devinit gx1fb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
{
struct geodefb_par *par = info->par;
unsigned gx_base;
@@ -268,7 +268,7 @@ static struct fb_ops gx1fb_ops = {
.fb_imageblit = cfb_imageblit,
};
-static struct fb_info * __init gx1fb_init_fbinfo(struct device *dev)
+static struct fb_info * __devinit gx1fb_init_fbinfo(struct device *dev)
{
struct geodefb_par *par;
struct fb_info *info;
@@ -318,7 +318,7 @@ static struct fb_info * __init gx1fb_init_fbinfo(struct device *dev)
return info;
}
-static int __init gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int __devinit gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct geodefb_par *par;
struct fb_info *info;
@@ -382,7 +382,7 @@ static int __init gx1fb_probe(struct pci_dev *pdev, const struct pci_device_id *
return ret;
}
-static void gx1fb_remove(struct pci_dev *pdev)
+static void __devexit gx1fb_remove(struct pci_dev *pdev)
{
struct fb_info *info = pci_get_drvdata(pdev);
struct geodefb_par *par = info->par;
@@ -441,7 +441,7 @@ static struct pci_driver gx1fb_driver = {
.name = "gx1fb",
.id_table = gx1fb_id_table,
.probe = gx1fb_probe,
- .remove = gx1fb_remove,
+ .remove = __devexit_p(gx1fb_remove),
};
static int __init gx1fb_init(void)
@@ -456,7 +456,7 @@ static int __init gx1fb_init(void)
return pci_register_driver(&gx1fb_driver);
}
-static void __exit gx1fb_cleanup(void)
+static void __devexit gx1fb_cleanup(void)
{
pci_unregister_driver(&gx1fb_driver);
}
diff --git a/drivers/video/hecubafb.c b/drivers/video/hecubafb.c
index fbef15f7a21..614251a9af9 100644
--- a/drivers/video/hecubafb.c
+++ b/drivers/video/hecubafb.c
@@ -233,7 +233,7 @@ static int __devinit hecubafb_probe(struct platform_device *dev)
videomemory = vzalloc(videomemorysize);
if (!videomemory)
- return retval;
+ goto err_videomem_alloc;
info = framebuffer_alloc(sizeof(struct hecubafb_par), &dev->dev);
if (!info)
@@ -275,6 +275,7 @@ err_fbreg:
framebuffer_release(info);
err_fballoc:
vfree(videomemory);
+err_videomem_alloc:
module_put(board->owner);
return retval;
}
diff --git a/drivers/video/sh_mobile_meram.c b/drivers/video/sh_mobile_meram.c
index 9170c82b495..cc7d7329dc1 100644
--- a/drivers/video/sh_mobile_meram.c
+++ b/drivers/video/sh_mobile_meram.c
@@ -218,7 +218,7 @@ static inline void meram_get_next_icb_addr(struct sh_mobile_meram_info *pdata,
icb_offset = 0xc0000000 | (cfg->current_reg << 23);
*icb_addr_y = icb_offset | (cfg->icb[0].marker_icb << 24);
- if ((*icb_addr_c) && is_nvcolor(cfg->pixelformat))
+ if (is_nvcolor(cfg->pixelformat))
*icb_addr_c = icb_offset | (cfg->icb[1].marker_icb << 24);
}
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index 87f0be1e78b..6294dca9550 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -1664,7 +1664,7 @@ static void sm501fb_stop(struct sm501fb_info *info)
resource_size(info->regs_res));
}
-static int sm501fb_init_fb(struct fb_info *fb,
+static int __devinit sm501fb_init_fb(struct fb_info *fb,
enum sm501_controller head,
const char *fbname)
{
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index 52b0f3e8cca..816a4fda04f 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -1233,8 +1233,12 @@ static int dlfb_setup_modes(struct dlfb_data *dev,
if (dlfb_is_valid_mode(&info->monspecs.modedb[i], info))
fb_add_videomode(&info->monspecs.modedb[i],
&info->modelist);
- else /* if we've removed top/best mode */
- info->monspecs.misc &= ~FB_MISC_1ST_DETAIL;
+ else {
+ if (i == 0)
+ /* if we've removed top/best mode */
+ info->monspecs.misc
+ &= ~FB_MISC_1ST_DETAIL;
+ }
}
default_vmode = fb_find_best_display(&info->monspecs,
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
index a99bbe86db1..501b3406c6d 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/vesafb.c
@@ -175,6 +175,7 @@ static int vesafb_setcolreg(unsigned regno, unsigned red, unsigned green,
static void vesafb_destroy(struct fb_info *info)
{
+ fb_dealloc_cmap(&info->cmap);
if (info->screen_base)
iounmap(info->screen_base);
release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size);
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c
index ad57593d224..a0c8965c1a7 100644
--- a/drivers/w1/masters/ds1wm.c
+++ b/drivers/w1/masters/ds1wm.c
@@ -109,6 +109,7 @@ struct ds1wm_data {
/* byte to write that makes all intr disabled, */
/* considering active_state (IAS) (optimization) */
u8 int_en_reg_none;
+ unsigned int reset_recover_delay; /* see ds1wm.h */
};
static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg,
@@ -187,6 +188,9 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data)
return 1;
}
+ if (ds1wm_data->reset_recover_delay)
+ msleep(ds1wm_data->reset_recover_delay);
+
return 0;
}
@@ -490,6 +494,7 @@ static int ds1wm_probe(struct platform_device *pdev)
}
ds1wm_data->irq = res->start;
ds1wm_data->int_en_reg_none = (plat->active_high ? DS1WM_INTEN_IAS : 0);
+ ds1wm_data->reset_recover_delay = plat->reset_recover_delay;
if (res->flags & IORESOURCE_IRQ_HIGHEDGE)
irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING);